diff --git a/python/tutorials/06-fused-attention.py b/python/tutorials/06-fused-attention.py
index d20ea3cd1..8892b5529 100644
--- a/python/tutorials/06-fused-attention.py
+++ b/python/tutorials/06-fused-attention.py
@@ -288,8 +288,8 @@ class _attention(torch.autograd.Function):
             BLOCK_DMODEL=ctx.BLOCK_DMODEL, num_warps=8,
             num_stages=1,
         )
-        print(pgm.asm["ttgir"])
-        exit()
+        # print(pgm.asm["ttgir"])
+        # exit()
         return dq, dk, dv, None
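
For context: the two statements being commented out dump the Triton-GPU IR ("ttgir") of the compiled backward kernel and then abort before the gradients are returned. A minimal sketch of an alternative, assuming a hypothetical DUMP_TTGIR environment variable (not part of this patch), would keep the dump reachable without leaving dead code in the backward pass:

import os

def maybe_dump_ttgir(pgm):
    # `pgm` is the handle returned by the Triton kernel launch; its `asm`
    # dict exposes each compilation stage, including the "ttgir" IR used
    # by the print statement in the original debugging code.
    if os.environ.get("DUMP_TTGIR") == "1":  # hypothetical flag name
        print(pgm.asm["ttgir"])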