From e817fdf1b9cd34d8f2a19819d0fcb2abc5f1da50 Mon Sep 17 00:00:00 2001
From: Jokeren
Date: Tue, 6 Dec 2022 13:46:21 -0800
Subject: [PATCH] Fix

---
 lib/Conversion/TritonGPUToLLVM/TritonGPUToLLVM.cpp | 2 +-
 test/Conversion/tritongpu_to_llvm.mlir             | 4 ++--
 2 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/lib/Conversion/TritonGPUToLLVM/TritonGPUToLLVM.cpp b/lib/Conversion/TritonGPUToLLVM/TritonGPUToLLVM.cpp
index 9c954929d..6314e7a8e 100644
--- a/lib/Conversion/TritonGPUToLLVM/TritonGPUToLLVM.cpp
+++ b/lib/Conversion/TritonGPUToLLVM/TritonGPUToLLVM.cpp
@@ -4074,7 +4074,7 @@ struct InsertSliceAsyncOpConversion
             ptxBuilder.newAddrOperand(srcElems[elemIdx + wordElemIdx], "l");
         auto *copySize = ptxBuilder.newConstantOperand(byteWidth);
         auto *srcSize = copySize;
-        if (op.mask()) {
+        if (llMask) {
           // We don't use predicate in this case, setting src-size to 0
           // if there's any mask. cp.async will automatically fill the
           // remaining slots with 0 if cp-size > src-size.
diff --git a/test/Conversion/tritongpu_to_llvm.mlir b/test/Conversion/tritongpu_to_llvm.mlir
index 44993d215..394ba1d0c 100644
--- a/test/Conversion/tritongpu_to_llvm.mlir
+++ b/test/Conversion/tritongpu_to_llvm.mlir
@@ -499,10 +499,10 @@ module attributes {"triton_gpu.num-warps" = 4 : i32} {
     // CHECK: llvm.select
     // CHECK: llvm.inline_asm has_side_effects asm_dialect = att
-    // CHECK-SAME: cp.async.cg.shared.global [ ${{.*}} + 0 ], [ ${{.*}} + 0 ], 0x10, 0x10
+    // CHECK-SAME: cp.async.cg.shared.global [ ${{.*}} + 0 ], [ ${{.*}} + 0 ], 0x10, ${{.*}}
     // CHECK: llvm.select
     // CHECK: llvm.inline_asm has_side_effects asm_dialect = att
-    // CHECK-SAME: cp.async.cg.shared.global [ ${{.*}} + 16 ], [ ${{.*}} + 0 ], 0x10, 0x10
+    // CHECK-SAME: cp.async.cg.shared.global [ ${{.*}} + 16 ], [ ${{.*}} + 0 ], 0x10, ${{.*}}
     // CHECK: llvm.inline_asm has_side_effects asm_dialect = att
     // CHECK-SAME: cp.async.commit_group
     %a = triton_gpu.insert_slice_async %a_ptr, %tensor, %index, %true_tensor {axis = 0 : i32, cache = 1 : i32, evict = 1 : i32, isVolatile = false} : tensor<16x64x!tt.ptr<f32>, #AL> -> tensor<2x16x64xf32, #A>
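
Note for reviewers (not part of the patch): the body guarded by the changed condition is what makes the updated FileCheck pattern end in a register operand (${{.*}}) rather than the constant 0x10. Below is a minimal C++ sketch of that branch, assuming the maskElems vector and the select/i32_val helpers used elsewhere in TritonGPUToLLVM.cpp; it illustrates the cp.async src-size trick described in the comment and is not the verbatim function body.

    // Sketch only: when a lowered mask value exists (llMask), pick the src-size
    // per word: byteWidth if this lane is enabled, 0 otherwise. cp.async
    // zero-fills the destination whenever cp-size > src-size, so the copy
    // itself needs no predicate.
    if (llMask) {
      Value maskElem = maskElems[elemIdx + wordElemIdx];            // i1 for this word
      Value sz = select(maskElem, i32_val(byteWidth), i32_val(0));  // byteWidth or 0
      srcSize = ptxBuilder.newOperand(sz, "r");                     // register, not constant
    }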