[DEV] Don't use .ONESHELL in Makefile (#5775)
I had used `.ONESHELL` to allow `cd` to affect the subsequent commands, but
it seems this also prevents the error status from propagating from
anything but the last command in a rule.

See, e.g., #5673 (comment).
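
For illustration, a minimal sketch of the failure mode (a hypothetical `broken` target, not from this repository): with `.ONESHELL`, GNU Make passes the entire recipe to a single shell invocation, so the rule's exit status is that of the last command, and an earlier failure is silently swallowed unless the shell is told to exit on error (e.g. via `.SHELLFLAGS := -ec`):

```make
# Hypothetical Makefile, for illustration only.
.ONESHELL:
broken:
	cd /tmp     # carries over to the lines below; the reason .ONESHELL was used
	false       # fails here, but the script keeps running...
	echo "last command exits 0, so make reports success"
```

Without `.ONESHELL`, each recipe line runs in its own shell and the first non-zero exit status aborts the rule; the trade-off is that `cd` no longer carries over, which this commit handles by chaining `cd dir && command` or by spelling out full paths, as in the diff below.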
peterbell10 authored Jan 31, 2025 · 1 parent 2926ae7 · commit 0af2f62
1 changed file: Makefile (9 additions, 12 deletions)
@@ -28,27 +28,24 @@ test-cpp:
 	ninja -C $(BUILD_DIR) check-triton-unit-tests
 
 .PHONY: test-python
-.ONESHELL:
 test-unit: all
-	cd python/test/unit
-	$(PYTEST) -s -n 8 --ignore=cuda/test_flashattention.py --ignore=language/test_line_info.py --ignore=language/test_subprocess.py --ignore=test_debug.py
-	$(PYTEST) -s -n 8 language/test_subprocess.py
-	$(PYTEST) -s -n 8 test_debug.py --forked
-	TRITON_DISABLE_LINE_INFO=0 $(PYTEST) -s language/test_line_info.py
+	cd python/test/unit && $(PYTEST) -s -n 8 --ignore=cuda/test_flashattention.py \
+		--ignore=language/test_line_info.py --ignore=language/test_subprocess.py --ignore=test_debug.py
+	$(PYTEST) -s -n 8 python/test/unit/language/test_subprocess.py
+	$(PYTEST) -s -n 8 python/test/unit/test_debug.py --forked
+	TRITON_DISABLE_LINE_INFO=0 $(PYTEST) -s python/test/unit/language/test_line_info.py
 	# Run cuda/test_flashattention.py separately to avoid out of gpu memory
-	$(PYTEST) -s cuda/test_flashattention.py
-	TRITON_ALWAYS_COMPILE=1 TRITON_DISABLE_LINE_INFO=0 LLVM_PASS_PLUGIN_PATH=../../triton/instrumentation/libGPUInstrumentationTestLib.so \
-		$(PYTEST) --capture=tee-sys -rfs -vvv instrumentation/test_gpuhello.py
+	$(PYTEST) -s python/test/unit/cuda/test_flashattention.py
+	TRITON_ALWAYS_COMPILE=1 TRITON_DISABLE_LINE_INFO=0 LLVM_PASS_PLUGIN_PATH=python/triton/instrumentation/libGPUInstrumentationTestLib.so \
+		$(PYTEST) --capture=tee-sys -rfs -vvv python/test/unit/instrumentation/test_gpuhello.py
 
 .PHONY: test-regression
 test-regression: all
 	$(PYTEST) -s -n 8 python/test/regression
 
 .PHONY: test-interpret
-.ONESHELL:
 test-interpret: all
-	cd python/test/unit
-	TRITON_INTERPRET=1 $(PYTEST) -s -n 16 -m interpreter language/test_core.py language/test_standard.py \
+	cd python/test/unit && TRITON_INTERPRET=1 $(PYTEST) -s -n 16 -m interpreter language/test_core.py language/test_standard.py \
 		language/test_random.py language/test_block_pointer.py language/test_subprocess.py language/test_line_info.py \
 		runtime/test_autotuner.py::test_kwargs[False] \
 		../../tutorials/06-fused-attention.py::test_op --device=cpu
