diff --git a/python/test/unit/operators/test_blocksparse.py b/python/test/unit/operators/test_blocksparse.py index 71289ffb01..72316832f0 100644 --- a/python/test/unit/operators/test_blocksparse.py +++ b/python/test/unit/operators/test_blocksparse.py @@ -41,6 +41,7 @@ def mask_tensor(x, mask, block, value=0): @pytest.mark.parametrize("BLOCK", [16, 32, 64]) @pytest.mark.parametrize("DTYPE", [torch.float16]) def test_matmul(MODE, TRANS_A, TRANS_B, BLOCK, DTYPE, Z=3, H=2, M=512, N=384, K=256): + pytest.skip("RuntimeError: Triton Error [ZE]: 2013265944") seed = 0 torch.manual_seed(seed) is_sdd = MODE == "sdd" @@ -104,6 +105,7 @@ def test_matmul(MODE, TRANS_A, TRANS_B, BLOCK, DTYPE, Z=3, H=2, M=512, N=384, K= @pytest.mark.parametrize("is_dense", [False, True]) @pytest.mark.parametrize("BLOCK, WIDTH", configs) def test_softmax(BLOCK, WIDTH, is_dense, Z=2, H=2, is_causal=True, scale=0.4): + pytest.skip("RuntimeError: Triton Error [ZE]: 2013265944") # set seed torch.random.manual_seed(0) Z, H, M, N = 2, 3, WIDTH, WIDTH diff --git a/python/test/unit/operators/test_inductor.py b/python/test/unit/operators/test_inductor.py index 669748b710..79b1704065 100644 --- a/python/test/unit/operators/test_inductor.py +++ b/python/test/unit/operators/test_inductor.py @@ -163,7 +163,7 @@ def triton_(in_ptr0, out_ptr0, XBLOCK: tl.constexpr): @pytest.mark.parametrize("RBLOCK", [1, 16, 32, 64, 128]) @pytest.mark.parametrize("num_warps", [1, 4]) def test_scan2d_broadcast(RBLOCK, num_warps): - + pytest.skip("FIXME: worker crashed cases") @triton.jit(debug=True) def fn(in_ptr, out_ptr, XBLOCK: tl.constexpr, RBLOCK: tl.constexpr): rindex = tl.arange(0, RBLOCK)[None, :]