Commit d59ed4e

Skip impls
xuzhao9 committed Nov 9, 2024
1 parent b8cb1a4 commit d59ed4e
Showing 2 changed files with 23 additions and 21 deletions.
2 changes: 1 addition & 1 deletion test/test_gpu/main.py
@@ -59,7 +59,7 @@ def _run_one_operator(
 ):
     if tb_args.op in skip_tests:
         # If the op itself is in the skip list, skip all tests
-        if skip_tests[tb_args.op] is None:
+        if not skip_tests[tb_args.op]:
             return
         tb_args.skip = ",".join(skip_tests[tb_args.op])
     Operator = load_opbench_by_name(tb_args.op)
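
The guard change matters because of how the new YAML format (see the second file below) loads: an operator key with no value, e.g. `gemm:`, deserializes to None, and `not skip_tests[tb_args.op]` treats both None and an empty impl list as "skip the entire operator". A minimal sketch of that behavior, assuming the file is loaded with PyYAML's `yaml.safe_load`; the snippet and names below are illustrative, not repo code:

import yaml

# Two entries in the new format: one skips a single impl, one skips the whole op.
snippet = """
bf16xint16_gemm:
- bf16xint16
gemm:
"""

skip_tests = yaml.safe_load(snippet)

# A key with no value loads as None, so the old `is None` check still fires...
assert skip_tests["gemm"] is None
# ...but the new truthiness check also covers an empty impl list:
assert not skip_tests["gemm"]

# For ops with an impl list, the guard falls through and the skip string is built:
impls = skip_tests["bf16xint16_gemm"]
if impls:
    assert ",".join(impls) == "bf16xint16"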
42 changes: 22 additions & 20 deletions test/test_gpu/skip_tests_h100_pytorch.yaml
@@ -2,23 +2,25 @@
 # This file is regarding to the Triton version bundled with pytorch
 # Use <op-name> to skip an entire operator
 # Use <op-name/impl-name> to skip an impl
-- bf16xint16_gemm/bf16xint16
-- fp8_attention/colfax_fmha
-- fp8_fused_quant_gemm_rowwise
-- fp8_gemm/triton_persistent_fp8_gemm
-- fp8_gemm/triton_tma_persistent_fp8_gemm
-- fp8_gemm_rowwise
-- gemm
-- grouped_gemm
-- int4_gemm
-- jagged_layer_norm
-- jagged_mean
-- jagged_softmax
-- jagged_sum
-- layer_norm
-- low_mem_dropout
-- rms_norm
-- rope
-- swiglu
-- template_attention
-- test_op
+bf16xint16_gemm:
+- bf16xint16
+fp8_attention:
+- colfax_fmha
+fp8_fused_quant_gemm_rowwise:
+fp8_gemm:
+- triton_persistent_fp8_gemm
+- triton_tma_persistent_fp8_gemm
+fp8_gemm_rowwise:
+gemm:
+grouped_gemm:
+int4_gemm:
+jagged_layer_norm:
+jagged_mean:
+jagged_softmax:
+jagged_sum:
+layer_norm:
+low_mem_dropout:
+rms_norm:
+rope:
+template_attention:
+test_op:
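
Taken together with the main.py change, the per-operator flow looks roughly like this sketch. It is a hedged reconstruction: the helper name, file path, and return convention are assumptions for illustration; only the skip semantics come from the diffs above.

import yaml

def load_skip_arg(op: str, path: str = "test/test_gpu/skip_tests_h100_pytorch.yaml") -> str | None:
    """Return None when the whole op should be skipped, otherwise a
    comma-separated impl string for the skip argument ("" when nothing is skipped)."""
    with open(path) as f:
        skip_tests = yaml.safe_load(f) or {}
    if op not in skip_tests:
        return ""
    if not skip_tests[op]:  # bare `op:` key loads as None => skip every impl
        return None
    return ",".join(skip_tests[op])

Under this scheme, load_skip_arg("fp8_gemm") would return "triton_persistent_fp8_gemm,triton_tma_persistent_fp8_gemm", while load_skip_arg("gemm") would return None and the caller would skip the operator outright.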
