[BACKEND] Keren/shared memory barrier (#59)
@@ -1,4 +1,4 @@
// RUN: triton-opt %s --mlir-disable-threading -test-print-allocation 2>&1 | FileCheck %s
// RUN: triton-opt %s -split-input-file --mlir-disable-threading -test-print-allocation 2>&1 | FileCheck %s

#AL = #triton_gpu.blocked<{sizePerThread = [1, 4], threadsPerWarp = [4, 8], warpsPerCTA = [4, 1], order = [1, 0]}>
#BL = #triton_gpu.blocked<{sizePerThread = [1, 4], threadsPerWarp = [1, 32], warpsPerCTA = [4, 1], order = [1, 0]}>
@@ -6,6 +6,7 @@
#B = #triton_gpu.shared<{vec = 2, perPhase = 2, maxPhase = 4, order = [1, 0]}>
#C = #triton_gpu.mma<{version = 2, warpsPerCTA = [4, 1]}>

// CHECK-LABEL: matmul_loop
func @matmul_loop(%lb : index, %ub : index, %step : index, %A : !tt.ptr<f16>, %B : !tt.ptr<f16>) {
  %a_ptr_init = tt.broadcast %A : (!tt.ptr<f16>) -> tensor<128x32x!tt.ptr<f16>, #AL>
  %b_ptr_init = tt.broadcast %B : (!tt.ptr<f16>) -> tensor<32x128x!tt.ptr<f16>, #BL>
@@ -24,7 +25,7 @@ func @matmul_loop(%lb : index, %ub : index, %step : index, %A : !tt.ptr<f16>, %B
    // CHECK: offset = 0, size = 8192
    %a = triton_gpu.convert_layout %a_ : (tensor<128x32xf16, #AL>) -> tensor<128x32xf16, #A>
    %b_ = tt.load %b_ptr, %b_mask, %b_other {cache = 1 : i32, evict = 1 : i32, isOtherUnspecified = false, isVolatile = false} : tensor<32x128xf16, #BL>
    // CHECK: offset = 8192, size = 8192
    // CHECK-NEXT: offset = 8192, size = 8192
    %b = triton_gpu.convert_layout %b_ : (tensor<32x128xf16, #BL>) -> tensor<32x128xf16, #B>

    %c = tt.dot %a, %b, %prev_c {allowTF32 = true} : tensor<128x32xf16, #A> * tensor<32x128xf16, #B> -> tensor<128x128xf32, #C>
@@ -34,11 +35,12 @@ func @matmul_loop(%lb : index, %ub : index, %step : index, %A : !tt.ptr<f16>, %B
    scf.yield %next_a_ptr, %next_b_ptr, %c : tensor<128x32x!tt.ptr<f16>, #AL>, tensor<32x128x!tt.ptr<f16>, #BL>, tensor<128x128xf32, #C>
  }
  return
  // CHECK: size = 16384
  // CHECK-NEXT: size = 16384
}

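The byte counts in the CHECK lines above follow directly from the tensor shapes: element count times element width, so a tensor<128x32xf16> buffer occupies 128 * 32 * 2 = 8192 bytes, and two such live buffers account for the reported total of 16384. The standalone helper below is not part of this commit; it is only a small sketch reproducing that arithmetic.

// Illustration only (not part of this commit): the byte sizes behind the CHECK values.
#include <cstddef>
#include <cstdio>

constexpr std::size_t tensorBytes(std::size_t rows, std::size_t cols,
                                  std::size_t bytesPerElement) {
  return rows * cols * bytesPerElement;
}

int main() {
  std::printf("128x32xf16 -> %zu bytes\n", tensorBytes(128, 32, 2)); // 8192
  std::printf("16x16xf16  -> %zu bytes\n", tensorBytes(16, 16, 2));  // 512
  std::printf("32x16xf16  -> %zu bytes\n", tensorBytes(32, 16, 2));  // 1024
  return 0;
}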
// Shared memory is available after a tensor's liveness range ends
func @synthesized_reusable(%A : !tt.ptr<f16>) {
// CHECK-LABEL: reusable
func @reusable(%A : !tt.ptr<f16>) {
  %cst1 = arith.constant dense<true> : tensor<128x32xi1, #AL>
  %cst2 = arith.constant dense<0.000000e+00> : tensor<128x32xf16, #AL>
  %cst3 = arith.constant dense<true> : tensor<32x128xi1, #AL>
@@ -51,95 +53,162 @@ func @synthesized_reusable(%A : !tt.ptr<f16>) {
  // CHECK: offset = 0, size = 8192
  %a1 = triton_gpu.convert_layout %a1_ : (tensor<128x32xf16, #AL>) -> tensor<128x32xf16, #A>
  %a2_ = tt.load %b_ptr, %cst3, %cst4 {cache = 1 : i32, evict = 1 : i32, isOtherUnspecified = false, isVolatile = false} : tensor<32x128xf16, #AL>
  // CHECK: offset = 8192, size = 8192
  // CHECK-NEXT: offset = 8192, size = 8192
  %a2 = triton_gpu.convert_layout %a2_ : (tensor<32x128xf16, #AL>) -> tensor<32x128xf16, #A>
  %a3_ = tt.load %a_ptr, %cst1, %cst2 {cache = 1 : i32, evict = 1 : i32, isOtherUnspecified = false, isVolatile = false} : tensor<128x32xf16, #AL>
  // CHECK: offset = 16384, size = 8192
  // CHECK-NEXT: offset = 16384, size = 8192
  %a3 = triton_gpu.convert_layout %a3_ : (tensor<128x32xf16, #AL>) -> tensor<128x32xf16, #A>
  %c = tt.dot %a1, %a2, %c_init {allowTF32 = true} : tensor<128x32xf16, #A> * tensor<32x128xf16, #B> -> tensor<128x128xf32, #C>
  %a4_ = tt.load %b_ptr, %cst3, %cst4 {cache = 1 : i32, evict = 1 : i32, isOtherUnspecified = false, isVolatile = false} : tensor<32x128xf16, #AL>
  // CHECK: offset = 0, size = 8192
  // CHECK-NEXT: offset = 0, size = 8192
  %a4 = triton_gpu.convert_layout %a4_ : (tensor<32x128xf16, #AL>) -> tensor<32x128xf16, #A>
  %c1 = tt.dot %a3, %a4, %c {allowTF32 = true} : tensor<128x32xf16, #A> * tensor<32x128xf16, #B> -> tensor<128x128xf32, #C>
  return
  // CHECK: size = 24576
  // CHECK-NEXT: size = 24576
}

// A tensor may be placed at a larger shared memory offset than the preceding allocations strictly require, so that tensors allocated later can be accommodated
// Buffer reuse chains:
// %cst0->%c
// %cst1->%cst4
// %cst3->%g->%h->%i
func @synthesize_preallocate(%A : !tt.ptr<f16>) {
// CHECK-LABEL: preallocate
func @preallocate(%A : !tt.ptr<f16>) {
  // CHECK: offset = 0, size = 512
  %cst0 = arith.constant dense<0.000000e+00> : tensor<16x16xf16, #A>
  // CHECK: offset = 1024, size = 512
  // CHECK-NEXT: offset = 1024, size = 512
  %cst1 = arith.constant dense<0.000000e+00> : tensor<16x16xf16, #A>
  // CHECK: offset = 1536, size = 512
  // CHECK-NEXT: offset = 1536, size = 512
  %cst2 = arith.constant dense<0.000000e+00> : tensor<16x16xf16, #A>
  // CHECK: offset = 2048, size = 1024
  // CHECK-NEXT: offset = 2048, size = 1024
  %a = tt.cat %cst0, %cst1 {axis = 0} : (tensor<16x16xf16, #A>, tensor<16x16xf16, #A>) -> tensor<32x16xf16, #A>
  // CHECK: offset = 3072, size = 1024
  // CHECK-NEXT: offset = 3072, size = 1024
  %b = tt.cat %cst0, %cst2 {axis = 0} : (tensor<16x16xf16, #A>, tensor<16x16xf16, #A>) -> tensor<32x16xf16, #A>
  // CHECK: offset = 0, size = 1024
  // CHECK-NEXT: offset = 0, size = 1024
  %c = tt.cat %cst1, %cst2 {axis = 0} : (tensor<16x16xf16, #A>, tensor<16x16xf16, #A>) -> tensor<32x16xf16, #A>
  // CHECK: offset = 1024, size = 1024
  // CHECK-NEXT: offset = 1024, size = 1024
  %cst4 = arith.constant dense<0.000000e+00> : tensor<32x16xf16, #A>
  // CHECK: offset = 6144, size = 2048
  // CHECK-NEXT: offset = 6144, size = 2048
  %e = tt.cat %a, %cst4 {axis = 0} : (tensor<32x16xf16, #A>, tensor<32x16xf16, #A>) -> tensor<64x16xf16, #A>
  // CHECK: offset = 8192, size = 2048
  // CHECK-NEXT: offset = 8192, size = 2048
  %d = tt.cat %b, %cst4 {axis = 0} : (tensor<32x16xf16, #A>, tensor<32x16xf16, #A>) -> tensor<64x16xf16, #A>
  // CHECK: offset = 10240, size = 2048
  // CHECK-NEXT: offset = 10240, size = 2048
  %f = tt.cat %c, %cst4 {axis = 0} : (tensor<32x16xf16, #A>, tensor<32x16xf16, #A>) -> tensor<64x16xf16, #A>
  // CHECK: offset = 0, size = 2048
  // CHECK-NEXT: offset = 0, size = 2048
  %cst5 = arith.constant dense<0.000000e+00> : tensor<64x16xf16, #A>
  // CHECK: offset = 2048, size = 4096
  // CHECK-NEXT: offset = 2048, size = 4096
  %g = tt.cat %e, %cst5 {axis = 0} : (tensor<64x16xf16, #A>, tensor<64x16xf16, #A>) -> tensor<128x16xf16, #A>
  // CHECK: offset = 2048, size = 4096
  // CHECK-NEXT: offset = 2048, size = 4096
  %h = tt.cat %d, %cst5 {axis = 0} : (tensor<64x16xf16, #A>, tensor<64x16xf16, #A>) -> tensor<128x16xf16, #A>
  // CHECK: offset = 2048, size = 4096
  // CHECK-NEXT: offset = 2048, size = 4096
  %i = tt.cat %f, %cst5 {axis = 0} : (tensor<64x16xf16, #A>, tensor<64x16xf16, #A>) -> tensor<128x16xf16, #A>
  return
  // CHECK: size = 12288
  // CHECK-NEXT: size = 12288
}

// Unused tensors are immediately released
func @synthesize_unused(%A : !tt.ptr<f16>) {
// CHECK-LABEL: unused
func @unused(%A : !tt.ptr<f16>) {
  // CHECK: offset = 0, size = 1024
  %cst0 = arith.constant dense<0.000000e+00> : tensor<32x16xf16, #A>
  // CHECK: offset = 0, size = 512
  // CHECK-NEXT: offset = 0, size = 512
  %cst1 = arith.constant dense<0.000000e+00> : tensor<16x16xf16, #A>
  // CHECK: offset = 512, size = 512
  // CHECK-NEXT: offset = 512, size = 512
  %cst2 = arith.constant dense<0.000000e+00> : tensor<16x16xf16, #A>
  // CHECK: offset = 1024, size = 1024
  // CHECK-NEXT: offset = 1024, size = 1024
  %a = tt.cat %cst1, %cst2 {axis = 0} : (tensor<16x16xf16, #A>, tensor<16x16xf16, #A>) -> tensor<32x16xf16, #A>
  return
  // CHECK: size = 2048
}

// %cst0 is alive through the entire function, so it cannot be released before the end of the function
func @synthesize_longlive(%A : !tt.ptr<f16>) {
// CHECK-LABEL: longlive
func @longlive(%A : !tt.ptr<f16>) {
  // CHECK: offset = 0, size = 512
  %cst0 = arith.constant dense<0.000000e+00> : tensor<16x16xf16, #A>
  // CHECK: offset = 512, size = 512
  // CHECK-NEXT: offset = 512, size = 512
  %cst1 = arith.constant dense<0.000000e+00> : tensor<16x16xf16, #A>
  // CHECK: offset = 1024, size = 512
  // CHECK-NEXT: offset = 1024, size = 512
  %cst2 = arith.constant dense<0.000000e+00> : tensor<16x16xf16, #A>
  // CHECK: offset = 1536, size = 1024
  // CHECK-NEXT: offset = 1536, size = 1024
  %a = tt.cat %cst1, %cst2 {axis = 0} : (tensor<16x16xf16, #A>, tensor<16x16xf16, #A>) -> tensor<32x16xf16, #A>
  // CHECK: offset = 512, size = 512
  // CHECK-NEXT: offset = 512, size = 512
  %cst3 = arith.constant dense<0.000000e+00> : tensor<16x16xf16, #A>
  // CHECK: offset = 1024, size = 512
  // CHECK-NEXT: offset = 1024, size = 512
  %cst4 = arith.constant dense<0.000000e+00> : tensor<16x16xf16, #A>
  // CHECK: offset = 1536, size = 1024
  // CHECK-NEXT: offset = 1536, size = 1024
  %b = tt.cat %cst3, %cst4 {axis = 0} : (tensor<16x16xf16, #A>, tensor<16x16xf16, #A>) -> tensor<32x16xf16, #A>
  // CHECK: offset = 1536, size = 512
  // CHECK-NEXT: offset = 1536, size = 512
  %cst5 = arith.constant dense<0.000000e+00> : tensor<16x16xf16, #A>
  // CHECK: offset = 1536, size = 512
  // CHECK-NEXT: offset = 1536, size = 512
  %cst6 = arith.constant dense<0.000000e+00> : tensor<16x16xf16, #A>
  // CHECK: offset = 1536, size = 1024
  // CHECK-NEXT: offset = 1536, size = 1024
  %c = tt.cat %cst3, %cst4 {axis = 0} : (tensor<16x16xf16, #A>, tensor<16x16xf16, #A>) -> tensor<32x16xf16, #A>
  // CHECK: offset = 512, size = 1024
  // CHECK-NEXT: offset = 512, size = 1024
  %d = tt.cat %cst0, %cst0 {axis = 0} : (tensor<16x16xf16, #A>, tensor<16x16xf16, #A>) -> tensor<32x16xf16, #A>
  return
  // CHECK: size = 2560
  // CHECK-NEXT: size = 2560
}

// CHECK-LABEL: scratch
func @scratch() {
  // CHECK: offset = 0, size = 512
  %cst0 = arith.constant dense<0.000000e+00> : tensor<16x16xf16, #A>
  // CHECK-NEXT: offset = 1056, size = 1024
  %a = tt.cat %cst0, %cst0 {axis = 0} : (tensor<16x16xf16, #A>, tensor<16x16xf16, #A>) -> tensor<32x16xf16, #A>
  // CHECK-NEXT: scratch offset = 32, size = 1024
  // CHECK-NEXT: offset = 0, size = 32
  %b = tt.reduce %a {redOp = 1 : i32, axis = 0 : i32} : tensor<32x16xf16, #A> -> tensor<16xf16, #A>
  return
  // CHECK-NEXT: size = 2080
}

// B0 -> (B1) -> B0
// Memory used by B1 can be reused by B0.
// CHECK-LABEL: multi_blocks_reuse
func @multi_blocks_reuse(%i1 : i1) {
  // CHECK: offset = 0, size = 512
  %cst0 = arith.constant dense<0.000000e+00> : tensor<16x16xf16, #A>
  // CHECK-NEXT: offset = 512, size = 512
  %cst1 = arith.constant dense<0.000000e+00> : tensor<16x16xf16, #A>
  scf.if %i1 {
    // CHECK-NEXT: offset = 1024, size = 1024
    %a = tt.cat %cst0, %cst1 {axis = 0} : (tensor<16x16xf16, #A>, tensor<16x16xf16, #A>) -> tensor<32x16xf16, #A>
    // CHECK-NEXT: offset = 1024, size = 1024
    %b = tt.cat %cst0, %cst1 {axis = 0} : (tensor<16x16xf16, #A>, tensor<16x16xf16, #A>) -> tensor<32x16xf16, #A>
  }
  // CHECK-NEXT: offset = 0, size = 512
  %cst2 = arith.constant dense<0.000000e+00> : tensor<16x16xf16, #A>
  // CHECK-NEXT: offset = 512, size = 512
  %cst3 = arith.constant dense<0.000000e+00> : tensor<16x16xf16, #A>
  // CHECK-NEXT: offset = 1024, size = 1024
  %a = tt.cat %cst2, %cst3 {axis = 0} : (tensor<16x16xf16, #A>, tensor<16x16xf16, #A>) -> tensor<32x16xf16, #A>
  return
  // CHECK-NEXT: size = 2048
}

// B0 -> (B1) -> (B2) -> B0
// Memory used by B0 cannot be reused by B1 or B2.
// CHECK-LABEL: multi_blocks_noreuse
func @multi_blocks_noreuse(%i1 : i1) {
  // CHECK: offset = 0, size = 512
  %cst0 = arith.constant dense<0.000000e+00> : tensor<16x16xf16, #A>
  // CHECK-NEXT: offset = 512, size = 512
  %cst1 = arith.constant dense<0.000000e+00> : tensor<16x16xf16, #A>
  scf.if %i1 {
    // CHECK-NEXT: offset = 1024, size = 1024
    %a = tt.cat %cst0, %cst1 {axis = 0} : (tensor<16x16xf16, #A>, tensor<16x16xf16, #A>) -> tensor<32x16xf16, #A>
    // CHECK-NEXT: offset = 1024, size = 1024
    %b = tt.cat %cst0, %cst1 {axis = 0} : (tensor<16x16xf16, #A>, tensor<16x16xf16, #A>) -> tensor<32x16xf16, #A>
  } else {
    // CHECK-NEXT: offset = 1024, size = 512
    %cst2 = arith.constant dense<0.000000e+00> : tensor<16x16xf16, #A>
    // CHECK-NEXT: offset = 1536, size = 512
    %cst3 = arith.constant dense<0.000000e+00> : tensor<16x16xf16, #A>
    // CHECK-NEXT: offset = 2048, size = 1024
    %a = tt.cat %cst2, %cst3 {axis = 0} : (tensor<16x16xf16, #A>, tensor<16x16xf16, #A>) -> tensor<32x16xf16, #A>
  }
  // CHECK-NEXT: offset = 1024, size = 1024
  %a = tt.cat %cst0, %cst1 {axis = 0} : (tensor<16x16xf16, #A>, tensor<16x16xf16, #A>) -> tensor<32x16xf16, #A>
  return
  // CHECK-NEXT: size = 3072
}
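The functions above (reusable, preallocate, unused, longlive, scratch, multi_blocks_reuse, multi_blocks_noreuse) check where each shared-memory buffer is placed and how much memory the whole function reserves. The sketch below illustrates, with hypothetical names and purely as an illustration rather than the actual code of triton's Allocation pass, the kind of greedy liveness-interval placement that produces such offsets: a buffer may reuse bytes of an earlier buffer as long as their live ranges do not overlap.

// Illustration only (not this commit's Allocation implementation): greedy
// placement of shared-memory buffers based on liveness intervals.
#include <algorithm>
#include <cstddef>
#include <vector>

struct Buffer {
  std::size_t start, end; // liveness interval [start, end)
  std::size_t size;       // bytes required
  std::size_t offset = 0; // assigned shared memory offset
};

// Place each buffer at the lowest offset that does not overlap, in memory,
// any already-placed buffer whose live range intersects its own. Returns the
// total shared memory required.
std::size_t assignOffsets(std::vector<Buffer> &buffers) {
  std::size_t total = 0;
  for (std::size_t i = 0; i < buffers.size(); ++i) {
    std::size_t offset = 0;
    bool placed = false;
    while (!placed) {
      placed = true;
      for (std::size_t j = 0; j < i; ++j) {
        bool liveOverlap = buffers[i].start < buffers[j].end &&
                           buffers[j].start < buffers[i].end;
        bool memOverlap = offset < buffers[j].offset + buffers[j].size &&
                          buffers[j].offset < offset + buffers[i].size;
        if (liveOverlap && memOverlap) {
          offset = buffers[j].offset + buffers[j].size; // bump past the conflict
          placed = false;
        }
      }
    }
    buffers[i].offset = offset;
    total = std::max(total, offset + buffers[i].size);
  }
  return total;
}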
test/Analysis/test-membar.mlir (new file, 178 lines)
@@ -0,0 +1,178 @@
// RUN: triton-opt %s -split-input-file --mlir-disable-threading -test-print-membar 2>&1 | FileCheck %s

#AL = #triton_gpu.blocked<{sizePerThread = [1, 4], threadsPerWarp = [4, 8], warpsPerCTA = [4, 1], order = [1, 0]}>
#BL = #triton_gpu.blocked<{sizePerThread = [1, 4], threadsPerWarp = [1, 32], warpsPerCTA = [4, 1], order = [1, 0]}>
#A = #triton_gpu.shared<{vec = 2, perPhase = 2, maxPhase = 4, order = [1, 0]}>
#B = #triton_gpu.shared<{vec = 2, perPhase = 2, maxPhase = 4, order = [1, 0]}>
#C = #triton_gpu.mma<{version = 2, warpsPerCTA = [4, 1]}>

// CHECK-LABEL: matmul_loop
func @matmul_loop(%lb : index, %ub : index, %step : index, %A : !tt.ptr<f16>, %B : !tt.ptr<f16>) {
  %a_ptr_init = tt.broadcast %A : (!tt.ptr<f16>) -> tensor<128x32x!tt.ptr<f16>, #AL>
  %b_ptr_init = tt.broadcast %B : (!tt.ptr<f16>) -> tensor<32x128x!tt.ptr<f16>, #BL>

  %a_mask = arith.constant dense<true> : tensor<128x32xi1, #AL>
  %a_other = arith.constant dense<0.00e+00> : tensor<128x32xf16, #AL>
  %b_mask = arith.constant dense<true> : tensor<32x128xi1, #BL>
  %b_other = arith.constant dense<0.00e+00> : tensor<32x128xf16, #BL>
  %c_init = arith.constant dense<0.00e+00> : tensor<128x128xf32, #C>

  %a_off = arith.constant dense<4> : tensor<128x32xi32, #AL>
  %b_off = arith.constant dense<4> : tensor<32x128xi32, #BL>

  scf.for %iv = %lb to %ub step %step iter_args(%a_ptr = %a_ptr_init, %b_ptr = %b_ptr_init, %prev_c = %c_init) -> (tensor<128x32x!tt.ptr<f16>, #AL>, tensor<32x128x!tt.ptr<f16>, #BL>, tensor<128x128xf32, #C>) {
    %a_ = tt.load %a_ptr, %a_mask, %a_other {cache = 1 : i32, evict = 1 : i32, isOtherUnspecified = false, isVolatile = false} : tensor<128x32xf16, #AL>
    %a = triton_gpu.convert_layout %a_ : (tensor<128x32xf16, #AL>) -> tensor<128x32xf16, #A>
    %b_ = tt.load %b_ptr, %b_mask, %b_other {cache = 1 : i32, evict = 1 : i32, isOtherUnspecified = false, isVolatile = false} : tensor<32x128xf16, #BL>
    %b = triton_gpu.convert_layout %b_ : (tensor<32x128xf16, #BL>) -> tensor<32x128xf16, #B>
    // CHECK: Membar 13
    %c = tt.dot %a, %b, %prev_c {allowTF32 = true} : tensor<128x32xf16, #A> * tensor<32x128xf16, #B> -> tensor<128x128xf32, #C>

    %next_a_ptr = tt.getelementptr %a_ptr, %a_off : tensor<128x32x!tt.ptr<f16>, #AL>
    %next_b_ptr = tt.getelementptr %b_ptr, %b_off : tensor<32x128x!tt.ptr<f16>, #BL>
    scf.yield %next_a_ptr, %next_b_ptr, %c : tensor<128x32x!tt.ptr<f16>, #AL>, tensor<32x128x!tt.ptr<f16>, #BL>, tensor<128x128xf32, #C>
  }
  return
}

// CHECK-LABEL: raw_single_block
func @raw_single_block(%A : !tt.ptr<f16>) {
  %cst1 = arith.constant dense<true> : tensor<128x32xi1, #AL>
  %cst2 = arith.constant dense<0.000000e+00> : tensor<128x32xf16, #AL>
  %a_ptr = tt.broadcast %A : (!tt.ptr<f16>) -> tensor<128x32x!tt.ptr<f16>, #AL>
  %a1_ = tt.load %a_ptr, %cst1, %cst2 {cache = 1 : i32, evict = 1 : i32, isOtherUnspecified = false, isVolatile = false} : tensor<128x32xf16, #AL>
  %a1 = triton_gpu.convert_layout %a1_ : (tensor<128x32xf16, #AL>) -> tensor<128x32xf16, #A>
  // CHECK: Membar 5
  %a2 = triton_gpu.convert_layout %a1 : (tensor<128x32xf16, #A>) -> tensor<128x32xf16, #A>
  return
}

// CHECK-LABEL: war_single_block
func @war_single_block(%A : !tt.ptr<f16>) {
  %cst1 = arith.constant dense<true> : tensor<128x32xi1, #AL>
  %cst2 = arith.constant dense<0.000000e+00> : tensor<128x32xf16, #AL>
  %a_ptr = tt.broadcast %A : (!tt.ptr<f16>) -> tensor<128x32x!tt.ptr<f16>, #AL>
  %a1_ = tt.load %a_ptr, %cst1, %cst2 {cache = 1 : i32, evict = 1 : i32, isOtherUnspecified = false, isVolatile = false} : tensor<128x32xf16, #AL>
  %a1 = triton_gpu.convert_layout %a1_ : (tensor<128x32xf16, #AL>) -> tensor<128x32xf16, #A>
  // CHECK: Membar 5
  %a2 = triton_gpu.convert_layout %a1 : (tensor<128x32xf16, #A>) -> tensor<128x32xf16, #AL>
  // CHECK-NEXT: Membar 7
  %a3 = triton_gpu.convert_layout %a1_ : (tensor<128x32xf16, #AL>) -> tensor<128x32xf16, #A>
  return
}

// CHECK-LABEL: scratch
func @scratch() {
  %cst0 = arith.constant dense<0.000000e+00> : tensor<16x16xf16, #A>
  // CHECK: Membar 1
  %a = tt.cat %cst0, %cst0 {axis = 0} : (tensor<16x16xf16, #A>, tensor<16x16xf16, #A>) -> tensor<32x16xf16, #A>
  // CHECK-NEXT: Membar 3
  %b = tt.reduce %a {redOp = 1 : i32, axis = 0 : i32} : tensor<32x16xf16, #A> -> tensor<16xf16, #A>
  return
}

// CHECK-LABEL: async_wait
func @async_wait() {
  %cst0 = arith.constant dense<0.000000e+00> : tensor<16x16xf16, #A>
  // CHECK: Membar 1
  %a = tt.cat %cst0, %cst0 {axis = 0} : (tensor<16x16xf16, #A>, tensor<16x16xf16, #A>) -> tensor<32x16xf16, #A>
  triton_gpu.async_wait {num = 4 : i32}
  // CHECK-NEXT: Membar 4
  %a_ = triton_gpu.convert_layout %a : (tensor<32x16xf16, #A>) -> tensor<32x16xf16, #AL>
  return
}

// If the then-branch inserts a barrier for %cst0 and %cst1 but the else-branch doesn't, the barrier has to be inserted in the parent region
// CHECK-LABEL: multi_blocks
func @multi_blocks(%i1 : i1) {
  %cst0 = arith.constant dense<0.000000e+00> : tensor<16x16xf16, #A>
  %cst1 = arith.constant dense<0.000000e+00> : tensor<16x16xf16, #A>
  scf.if %i1 {
    // CHECK: Membar 2
    %a = tt.cat %cst0, %cst1 {axis = 0} : (tensor<16x16xf16, #A>, tensor<16x16xf16, #A>) -> tensor<32x16xf16, #A>
    scf.yield
  } else {
    %cst2 = arith.constant dense<0.000000e+00> : tensor<16x16xf16, #A>
    %cst3 = arith.constant dense<0.000000e+00> : tensor<16x16xf16, #A>
    // CHECK-NEXT: Membar 7
    %b = tt.cat %cst2, %cst3 {axis = 0} : (tensor<16x16xf16, #A>, tensor<16x16xf16, #A>) -> tensor<32x16xf16, #A>
    scf.yield
  }
  // CHECK-NEXT: Membar 10
  %c = tt.cat %cst0, %cst1 {axis = 0} : (tensor<16x16xf16, #A>, tensor<16x16xf16, #A>) -> tensor<32x16xf16, #A>
  return
}

// If both branches insert a barrier for %cst0 and %cst1, no barrier needs to be inserted in the parent region
// CHECK-LABEL: multi_blocks_join_barrier
func @multi_blocks_join_barrier(%i1 : i1) {
  %cst0 = arith.constant dense<0.000000e+00> : tensor<16x16xf16, #A>
  %cst1 = arith.constant dense<0.000000e+00> : tensor<16x16xf16, #A>
  scf.if %i1 {
    // CHECK: Membar 2
    %a = tt.cat %cst0, %cst1 {axis = 0} : (tensor<16x16xf16, #A>, tensor<16x16xf16, #A>) -> tensor<32x16xf16, #A>
    scf.yield
  } else {
    // CHECK-NEXT: Membar 5
    %a = tt.cat %cst0, %cst1 {axis = 0} : (tensor<16x16xf16, #A>, tensor<16x16xf16, #A>) -> tensor<32x16xf16, #A>
    scf.yield
  }
  %a_ = triton_gpu.convert_layout %cst0 : (tensor<16x16xf16, #A>) -> tensor<16x16xf16, #AL>
  return
}

// Reading the yielded tensor requires a barrier
// CHECK-LABEL: multi_blocks_yield
func @multi_blocks_yield(%i1 : i1) {
  %cst0 = arith.constant dense<0.000000e+00> : tensor<16x16xf16, #A>
  %cst1 = arith.constant dense<0.000000e+00> : tensor<16x16xf16, #A>
  %a = scf.if %i1 -> (tensor<32x16xf16, #A>) {
    // CHECK: Membar 2
    %a = tt.cat %cst0, %cst1 {axis = 0} : (tensor<16x16xf16, #A>, tensor<16x16xf16, #A>) -> tensor<32x16xf16, #A>
    scf.yield %a : tensor<32x16xf16, #A>
  } else {
    // CHECK-NEXT: Membar 5
    %b = tt.cat %cst0, %cst1 {axis = 0} : (tensor<16x16xf16, #A>, tensor<16x16xf16, #A>) -> tensor<32x16xf16, #A>
    scf.yield %b : tensor<32x16xf16, #A>
  }
  %a_ = triton_gpu.convert_layout %cst0 : (tensor<16x16xf16, #A>) -> tensor<16x16xf16, #AL>
  // CHECK-NEXT: Membar 9
  %b = tt.cat %a, %a {axis = 0} : (tensor<32x16xf16, #A>, tensor<32x16xf16, #A>) -> tensor<64x16xf16, #A>
  return
}

// Conservatively add a barrier as if the branch (%i1) is never taken
// CHECK-LABEL: multi_blocks_noelse
func @multi_blocks_noelse(%i1 : i1) {
  %cst0 = arith.constant dense<0.000000e+00> : tensor<16x16xf16, #A>
  %cst1 = arith.constant dense<0.000000e+00> : tensor<16x16xf16, #A>
  scf.if %i1 {
    // CHECK: Membar 2
    %a = tt.cat %cst0, %cst1 {axis = 0} : (tensor<16x16xf16, #A>, tensor<16x16xf16, #A>) -> tensor<32x16xf16, #A>
    scf.yield
  }
  %a_ = triton_gpu.convert_layout %cst0 : (tensor<16x16xf16, #A>) -> tensor<16x16xf16, #AL>
  return
}

// Conservatively add a barrier as if the branch (%i2) is never taken
// CHECK-LABEL: multi_blocks_nested_scf
func @multi_blocks_nested_scf(%i1 : i1, %i2 : i1) {
  %cst0 = arith.constant dense<0.000000e+00> : tensor<16x16xf16, #A>
  %cst1 = arith.constant dense<0.000000e+00> : tensor<16x16xf16, #A>
  scf.if %i1 {
    scf.if %i2 {
      // CHECK: Membar 2
      %b = tt.cat %cst0, %cst1 {axis = 0} : (tensor<16x16xf16, #A>, tensor<16x16xf16, #A>) -> tensor<32x16xf16, #A>
      scf.yield
    }
    scf.yield
  } else {
    // CHECK-NEXT: Membar 6
    %b = tt.cat %cst0, %cst1 {axis = 0} : (tensor<16x16xf16, #A>, tensor<16x16xf16, #A>) -> tensor<32x16xf16, #A>
    scf.yield
  }
  // CHECK-NEXT: Membar 9
  %a_ = triton_gpu.convert_layout %cst0 : (tensor<16x16xf16, #A>) -> tensor<16x16xf16, #AL>
  return
}
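Every test above follows the same pattern: a "Membar N" line is expected right before an operation that reads shared memory written since the last barrier (read-after-write) or writes shared memory still pending a read (write-after-read), and scf.if regions are treated conservatively when only one path synchronizes. The bookkeeping can be pictured as in the sketch below; this is an illustration with hypothetical names, not the code of triton's MembarAnalysis.

// Illustration only: per-buffer RAW/WAR tracking between barriers.
#include <cstddef>
#include <set>

struct BarrierTracker {
  std::set<std::size_t> writtenSinceBarrier; // buffer ids written since the last barrier
  std::set<std::size_t> readSinceBarrier;    // buffer ids read since the last barrier

  // Returns true if a barrier must be inserted before this access.
  bool accessNeedsBarrier(std::size_t buffer, bool isWrite) {
    // Any access to a buffer written since the last barrier conflicts (RAW/WAW);
    // a write to a buffer read since the last barrier conflicts (WAR).
    bool raw = writtenSinceBarrier.count(buffer) != 0;
    bool war = isWrite && readSinceBarrier.count(buffer) != 0;
    if (raw || war) {
      // A barrier is inserted; afterwards only the current access is pending.
      writtenSinceBarrier.clear();
      readSinceBarrier.clear();
    }
    if (isWrite)
      writtenSinceBarrier.insert(buffer);
    else
      readSinceBarrier.insert(buffer);
    return raw || war;
  }
};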
@@ -1,6 +1,7 @@
add_mlir_library(TritonTestAnalysis
  TestAxisInfo.cpp
  TestAllocation.cpp
  TestMembar.cpp

  LINK_LIBS PUBLIC
    TritonAnalysis
@@ -19,24 +19,29 @@ struct TestAllocationPass
  void runOnOperation() override {
    Operation *operation = getOperation();
    auto &os = llvm::errs();
    os << "Testing: " << operation->getName() << "\n";
    AllocationAnalysis analysis(operation);
    // Convert to std::string to remove quotes from op_name
    auto op_name = SymbolTable::getSymbolName(operation).getValue().str();
    os << op_name << "\n";
    Allocation allocation(operation);
    operation->walk([&](Operation *op) {
      auto scratchBufferId = allocation.getBufferId(op);
      if (scratchBufferId != Allocation::InvalidBufferId) {
        size_t offset = allocation.getOffset(scratchBufferId);
        size_t size = allocation.getAllocatedSize(scratchBufferId);
        os << "scratch offset = " << offset << ", size = " << size << "\n";
      }
      if (op->getNumResults() < 1)
        return;
      for (Value result : op->getResults()) {
        Type type = result.getType();
        if (auto tensorType = type.dyn_cast<RankedTensorType>()) {
          Attribute encoding = tensorType.getEncoding();
          if (encoding.isa<triton::gpu::TritonGPUSharedEncodingAttr>()) {
            size_t offset = analysis.getOffset(result);
            size_t size = analysis.getAllocatedSize(result);
            os << "offset = " << offset << ", size = " << size << "\n";
          }
          auto bufferId = allocation.getBufferId(result);
          if (bufferId != Allocation::InvalidBufferId) {
            size_t offset = allocation.getOffset(bufferId);
            size_t size = allocation.getAllocatedSize(bufferId);
            os << "offset = " << offset << ", size = " << size << "\n";
          }
        }
    });
    os << "size = " << analysis.getSharedMemorySize() << "\n";
    os << "size = " << allocation.getSharedMemorySize() << "\n";
  }
};
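The change above replaces the per-value queries on AllocationAnalysis with buffer-id based queries on the new Allocation class. The helper below, which is not part of the commit and whose name is hypothetical, simply wraps the new query pattern exactly as the pass above uses it.

// Illustration only: the new buffer-id based query pattern, wrapped in a helper.
#include "triton/Analysis/Allocation.h"
#include "mlir/IR/Value.h"
#include "llvm/Support/raw_ostream.h"

using namespace mlir;

// Print "offset = ..., size = ..." for a value if the allocation assigned it a buffer.
static void printBufferInfo(Allocation &allocation, Value value,
                            llvm::raw_ostream &os) {
  auto bufferId = allocation.getBufferId(value);
  if (bufferId == Allocation::InvalidBufferId)
    return;
  os << "offset = " << allocation.getOffset(bufferId)
     << ", size = " << allocation.getAllocatedSize(bufferId) << "\n";
}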
test/lib/Analysis/TestMembar.cpp (new file, 50 lines)
@@ -0,0 +1,50 @@
#include "mlir/Dialect/GPU/GPUDialect.h"
#include "mlir/IR/Dialect.h"
#include "mlir/Pass/Pass.h"
#include "triton/Analysis/Allocation.h"
#include "triton/Analysis/Membar.h"

using namespace mlir;

namespace {

struct TestMembarPass
    : public PassWrapper<TestMembarPass, OperationPass<FuncOp>> {

  // LLVM15+
  // MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(TestMembarPass);

  StringRef getArgument() const final { return "test-print-membar"; }
  StringRef getDescription() const final {
    return "print the result of the membar pass";
  }

  void runOnOperation() override {
    Operation *operation = getOperation();
    auto &os = llvm::errs();
    // Convert to std::string to remove quotes from op_name
    auto op_name = SymbolTable::getSymbolName(operation).getValue().str();
    os << op_name << "\n";
    Allocation allocation(operation);
    MembarAnalysis analysis(&allocation);
    size_t operationId = 0;
    operation->walk<WalkOrder::PreOrder>([&](Operation *op) {
      if (dyn_cast<gpu::BarrierOp>(op)) {
        os << "Membar " << operationId << "\n";
      }
      if (op->getNumRegions() == 0) {
        // Don't count parent operations, to simplify the test.
        operationId++;
      }
      return;
    });
  }
};

} // namespace

namespace mlir {
namespace test {
void registerTestMembarPass() { PassRegistration<TestMembarPass>(); }
} // namespace test
} // namespace mlir
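TestMembar.cpp only defines and registers the pass; the RUN lines assume that the triton-opt driver links TritonTestAnalysis and calls the registration function. A minimal sketch of that hookup follows; the driver file, the MlirOptMain header path and signature (which differ between MLIR versions), and the dialect registration are assumptions, not part of this commit.

// Sketch only: how triton-opt is assumed to expose -test-print-membar.
#include "mlir/IR/Dialect.h"
#include "mlir/Support/LogicalResult.h"
#include "mlir/Support/MlirOptMain.h" // assumed path; newer MLIR uses mlir/Tools/mlir-opt/MlirOptMain.h

namespace mlir {
namespace test {
void registerTestMembarPass(); // defined in TestMembar.cpp above
} // namespace test
} // namespace mlir

int main(int argc, char **argv) {
  // Make the test pass visible on the command line.
  mlir::test::registerTestMembarPass();
  mlir::DialectRegistry registry;
  // The real driver would also register the triton and triton_gpu dialects here.
  return mlir::failed(
             mlir::MlirOptMain(argc, argv, "Triton optimizer driver\n", registry))
             ? 1
             : 0;
}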