[Analysis/Allocation] Allocation passes now assume that slices always alias (#108)

The code in this branch assumes that the `src` operand of `insert_slice_async` always aliases the result. This doesn't hold in general; it is just a workaround to make the pipeline pass work. I'm also working on the complete analysis in another [branch](https://github.com/openai/triton-mlir/tree/keren/analyze-slice).
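To make the assumption concrete, here is a toy model of the rule (a minimal sketch, not the actual Triton/MLIR code; `Value`, `AliasSet`, and the visitor functions are illustrative stand-ins): every value with a shared-memory layout maps to the set of buffers it may occupy, and slice ops forward their operand's set instead of defining a new buffer. The `X -> Y` lines in the CHECK directives below print exactly this mapping, with the value on the left and the allocation(s) it aliases on the right.

```cpp
// Toy model of the "slices always alias" workaround (illustrative only).
#include <cassert>
#include <map>
#include <set>
#include <string>

using Value = std::string;               // stand-in for mlir::Value
using AliasSet = std::set<std::string>;  // buffers a value may point to

std::map<Value, AliasSet> aliasInfo;

// Ops that create a shared-memory buffer (e.g. triton_gpu.alloc_tensor, or
// arith.constant with a #shared encoding) alias only themselves.
void visitAlloc(const Value &v) { aliasInfo[v] = {v}; }

// The workaround: the result of insert_slice_async (and extract_slice) is
// assumed to alias the slice operand, so no new buffer is recorded for it.
void visitSlice(const Value &result, const Value &src) {
  aliasInfo[result] = aliasInfo[src];
}

int main() {
  visitAlloc("%tensor");        // like "// CHECK: %cst_0 -> %cst_0" below
  visitSlice("%a", "%tensor");  // like "// CHECK: %2 -> %cst_0" below
  assert(aliasInfo["%a"] == AliasSet{"%tensor"});
  return 0;
}
```

The complete analysis on the other branch would presumably replace this blanket rule with a per-op decision about when a slice really overlaps its source.
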
@@ -37,7 +37,9 @@ func @matmul_loop(%lb : index, %ub : index, %step : index, %A : !tt.ptr<f16>, %B
func @alloc(%A : !tt.ptr<f16>) {
  // CHECK: %cst -> %cst
  %cst0 = arith.constant dense<0.000000e+00> : tensor<16x16xf16, #A>
  %cst1 = arith.constant dense<0.000000e+00> : tensor<16x16xf16, #AL>
  %cst1 = arith.constant dense<0.000000e+00> : tensor<16x32xf16, #AL>
  // CHECK: %0 -> %0
  %cst2 = triton_gpu.alloc_tensor : tensor<16x16xf16, #A>
  return
}

@@ -49,25 +51,28 @@ func @convert(%A : !tt.ptr<f16>) {
  return
}

// CHECK-LABEL: copy_async
func @copy_async(%A : !tt.ptr<f16>, %i1 : i1) {
// CHECK-LABEL: insert_slice_async
func @insert_slice_async(%A : !tt.ptr<f16>, %i1 : i1) {
  %a_ptr = tt.broadcast %A : (!tt.ptr<f16>) -> tensor<16x16x!tt.ptr<f16>, #AL>
  %mask = tt.splat %i1 : (i1) -> tensor<16x16xi1, #AL>
  %other = arith.constant dense<0.000000e+00> : tensor<16x16xf16, #AL>
  // CHECK: %2 -> %2
  %a = triton_gpu.copy_async %a_ptr, %mask, %other {cache = 1 : i32, evict = 1 : i32, isOtherUnspecified = false, isVolatile = false} : tensor<16x16x!tt.ptr<f16>, #AL> -> tensor<16x16xf16, #A>
  // CHECK: %cst_0 -> %cst_0
  %tensor = arith.constant dense<0.000000e+00> : tensor<1x16x16xf16, #A>
  %index = arith.constant 0 : i32
  // CHECK: %2 -> %cst_0
  %a = triton_gpu.insert_slice_async %a_ptr, %tensor, %index, %mask, %other {axis = 0 : i32, cache = 1 : i32, evict = 1 : i32, isOtherUnspecified = false, isVolatile = false} : tensor<16x16x!tt.ptr<f16>, #AL> -> tensor<1x16x16xf16, #A>
  return
}

// COM: Enable the following test once we support view on shared memory tensors
// COM: // CHECK-LABEL: view
// COM: func @view(%A : !tt.ptr<f16>) {
// COM: // CHECK: res0:0 -> 0
// COM: %cst0 = arith.constant dense<0.000000e+00> : tensor<16x16xf16, #A>
// COM: // CHECK-NEXT: res1:0 -> 0
// COM: %cst1 = tt.view %cst0 : (tensor<16x16xf16, #A>) -> tensor<32x8xf16, #A>
// COM: return
// COM: }
// CHECK-LABEL: extract_slice
func @extract_slice(%A : !tt.ptr<f16>) {
  // CHECK: %cst -> %cst
  %cst0 = arith.constant dense<0.000000e+00> : tensor<1x16x16xf16, #A>
  %index = arith.constant 0 : i32
  // CHECK-NEXT: %0 -> %cst
  %cst1 = triton_gpu.extract_slice %cst0, %index { axis = 0 : i32 } : tensor<1x16x16xf16, #A> -> tensor<16x16xf16, #A>
  return
}

// CHECK-LABEL: if_cat
func @if_cat(%i1 : i1) {

@@ -123,62 +128,31 @@ func @for(%lb : index, %ub : index, %step : index, %A : !tt.ptr<f16>, %B : !tt.p
  return
}

// COM: // Enable the following test once we support view on shared memory tensors
// COM: // CHECK-LABEL: for_if
// COM: func @for_if(%lb : index, %ub : index, %step : index, %A : !tt.ptr<f16>, %B : !tt.ptr<f16>, %i1 : i1) {
// COM: // CHECK: res0:0 -> 0
// COM: %a_shared_init = arith.constant dense<0.00e+00> : tensor<128x32xf16, #A>
// COM: // CHECK-NEXT: res1:0 -> 1
// COM: %b_shared_init = arith.constant dense<0.00e+00> : tensor<128x32xf16, #A>
// COM: // CHECK-NEXT: res2:0 -> 2
// COM: %c_shared_init = arith.constant dense<0.00e+00> : tensor<128x32xf16, #A>
// COM: // CHECK-NEXT: arg3:0 -> 0
// COM: // CHECK-NEXT: arg3:1 -> 1
// COM: // CHECK-NEXT: arg3:2 -> 2
// COM: // CHECK-NEXT: res3:0 -> 0,1
// COM: // CHECK-NEXT: res3:1 -> 0,1
// COM: // CHECK-NEXT: res3:2 -> 0,1
// COM: %a_shared, %b_shared, %c_shared = scf.for %iv = %lb to %ub step %step iter_args(%a_shared = %a_shared_init, %b_shared = %b_shared_init, %c_shared = %c_shared_init) -> (tensor<128x32xf16, #A>, tensor<128x32xf16, #A>, tensor<128x32xf16, #A>) {
// COM: scf.if %i1 {
// COM: // CHECK-NEXT: res5:0 -> 0,1
// COM: %cst0 = tt.view %a_shared : (tensor<128x32xf16, #A>) -> tensor<32x128xf16, #A>
// COM: scf.yield
// COM: }
// COM: scf.yield %b_shared, %a_shared, %a_shared : tensor<128x32xf16, #A>, tensor<128x32xf16, #A>, tensor<128x32xf16, #A>
// COM: }
// COM: return
// COM: }

// COM: // Enable the following test once we support view on shared memory tensors
// COM: // CHECK-LABEL: for_if_else
// COM: func @for_if_else(%lb : index, %ub : index, %step : index, %A : !tt.ptr<f16>, %B : !tt.ptr<f16>, %i1 : i1) {
// COM: // CHECK: res0:0 -> 0
// COM: %a_shared_init = arith.constant dense<0.00e+00> : tensor<128x32xf16, #A>
// COM: // CHECK-NEXT: res1:0 -> 1
// COM: %b_shared_init = arith.constant dense<0.00e+00> : tensor<128x32xf16, #A>
// COM: // CHECK-NEXT: res2:0 -> 2
// COM: %c_shared_init = arith.constant dense<0.00e+00> : tensor<128x32xf16, #A>
// COM: // CHECK-NEXT: arg3:0 -> 0
// COM: // CHECK-NEXT: arg3:1 -> 1
// COM: // CHECK-NEXT: arg3:2 -> 2
// COM: // CHECK-NEXT: res3:0 -> 0
// COM: // CHECK-NEXT: res3:1 -> 1
// COM: // CHECK-NEXT: res3:2 -> 0,7
// COM: %a_shared, %b_shared, %c_shared = scf.for %iv = %lb to %ub step %step iter_args(%a_shared = %a_shared_init, %b_shared = %b_shared_init, %c_shared = %c_shared_init) -> (tensor<128x32xf16, #A>, tensor<128x32xf16, #A>, tensor<128x32xf16, #A>) {
// COM: // CHECK-NEXT: res4:0 -> 0,7
// COM: %c_shared_next = scf.if %i1 -> tensor<128x32xf16, #A> {
// COM: // CHECK-NEXT: res5:0 -> 0
// COM: %cst0 = tt.view %a_shared : (tensor<128x32xf16, #A>) -> tensor<128x32xf16, #A>
// COM: scf.yield %cst0 : tensor<128x32xf16, #A>
// COM: } else {
// COM: // CHECK-NEXT: res7:0 -> 7
// COM: %cst0 = arith.constant dense<0.00e+00> : tensor<128x32xf16, #A>
// COM: scf.yield %cst0 : tensor<128x32xf16, #A>
// COM: }
// COM: scf.yield %a_shared, %b_shared, %c_shared_next : tensor<128x32xf16, #A>, tensor<128x32xf16, #A>, tensor<128x32xf16, #A>
// COM: }
// COM: return
// COM: }
// CHECK-LABEL: for_if
func @for_if(%lb : index, %ub : index, %step : index, %A : !tt.ptr<f16>, %B : !tt.ptr<f16>, %i1 : i1) {
  // CHECK: %cst -> %cst
  %a_shared_init = arith.constant dense<0.00e+00> : tensor<128x32xf16, #A>
  // CHECK-NEXT: %cst_0 -> %cst_0
  %b_shared_init = arith.constant dense<0.00e+00> : tensor<128x32xf16, #A>
  // CHECK-NEXT: %cst_1 -> %cst_1
  %c_shared_init = arith.constant dense<0.00e+00> : tensor<128x32xf16, #A>
  // CHECK-NEXT: %arg7 -> %cst
  // CHECK-NEXT: %arg8 -> %cst_0
  // CHECK-NEXT: %arg9 -> %cst_1
  // CHECK-NEXT: %0#0 -> %cst,%cst_0
  // CHECK-NEXT: %0#1 -> %cst,%cst_0
  // CHECK-NEXT: %0#2 -> %cst,%cst_0
  %a_shared, %b_shared, %c_shared = scf.for %iv = %lb to %ub step %step iter_args(%a_shared = %a_shared_init, %b_shared = %b_shared_init, %c_shared = %c_shared_init) -> (tensor<128x32xf16, #A>, tensor<128x32xf16, #A>, tensor<128x32xf16, #A>) {
    scf.if %i1 {
      %index = arith.constant 8 : i32
      // CHECK-NEXT: %1 -> %cst,%cst_0
      %cst0 = triton_gpu.extract_slice %a_shared, %index { axis = 0 : i32 } : tensor<128x32xf16, #A> -> tensor<32xf16, #A>
      scf.yield
    }
    scf.yield %b_shared, %a_shared, %a_shared : tensor<128x32xf16, #A>, tensor<128x32xf16, #A>, tensor<128x32xf16, #A>
  }
  return
}

// CHECK-LABEL: for_if_for
func @for_if_for(%lb : index, %ub : index, %step : index, %A : !tt.ptr<f16>, %B : !tt.ptr<f16>, %i1 : i1) {

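One detail worth noting in the `for_if` checks above: `%0#0 -> %cst,%cst_0` shows that loop-carried values accumulate alias sets. In the same toy model as before (a hedged sketch; `join` is an assumed helper, not a real API), the analysis unions the init value's set with the yielded value's set until a fixpoint is reached:

```cpp
// Toy model of the scf.for join: because scf.yield swaps %a_shared and
// %b_shared each iteration, every loop result may alias either constant.
#include <cassert>
#include <set>
#include <string>

using AliasSet = std::set<std::string>;

AliasSet join(const AliasSet &a, const AliasSet &b) {
  AliasSet out = a;
  out.insert(b.begin(), b.end());
  return out;
}

int main() {
  AliasSet aInit = {"%cst"};    // %a_shared_init
  AliasSet bInit = {"%cst_0"};  // %b_shared_init
  AliasSet res = join(aInit, bInit);
  assert((res == AliasSet{"%cst", "%cst_0"}));  // matches %0#0 -> %cst,%cst_0
  return 0;
}
```
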
@@ -149,6 +149,17 @@ func @longlive(%A : !tt.ptr<f16>) {
  // CHECK-NEXT: size = 2560
}

// CHECK-LABEL: alloc
func @alloc(%A : !tt.ptr<f16>) {
  // CHECK: offset = 0, size = 512
  %cst0 = arith.constant dense<0.000000e+00> : tensor<16x16xf16, #A>
  %cst1 = arith.constant dense<0.000000e+00> : tensor<16x32xf16, #AL>
  // CHECK-NEXT: offset = 0, size = 512
  %cst2 = triton_gpu.alloc_tensor : tensor<16x16xf16, #A>
  return
  // CHECK-NEXT: size = 512
}

// CHECK-LABEL: scratch
func @scratch() {
  %cst0 = arith.constant dense<0.000000e+00> : tensor<16x16xf16, #AL>

@@ -158,6 +169,29 @@ func @scratch() {
  // CHECK-NEXT: size = 512
}

// CHECK-LABEL: insert_slice_async
func @insert_slice_async(%A : !tt.ptr<f16>, %i1 : i1) {
  %a_ptr = tt.broadcast %A : (!tt.ptr<f16>) -> tensor<16x16x!tt.ptr<f16>, #AL>
  %mask = tt.splat %i1 : (i1) -> tensor<16x16xi1, #AL>
  %other = arith.constant dense<0.000000e+00> : tensor<16x16xf16, #AL>
  // CHECK: offset = 0, size = 512
  %tensor = arith.constant dense<0.000000e+00> : tensor<1x16x16xf16, #A>
  %index = arith.constant 0 : i32
  %a = triton_gpu.insert_slice_async %a_ptr, %tensor, %index, %mask, %other {axis = 0 : i32, cache = 1 : i32, evict = 1 : i32, isOtherUnspecified = false, isVolatile = false} : tensor<16x16x!tt.ptr<f16>, #AL> -> tensor<1x16x16xf16, #A>
  return
  // CHECK-NEXT: size = 512
}

// CHECK-LABEL: extract_slice
func @extract_slice(%A : !tt.ptr<f16>) {
  // CHECK: offset = 0, size = 512
  %cst0 = arith.constant dense<0.000000e+00> : tensor<1x16x16xf16, #A>
  %index = arith.constant 0 : i32
  %cst1 = triton_gpu.extract_slice %cst0, %index { axis = 0 : i32 } : tensor<1x16x16xf16, #A> -> tensor<16x16xf16, #A>
  return
  // CHECK-NEXT: size = 512
}

// B0 -> (B1) -> B0
// Memory used by B1 can be reused by B0.
// CHECK-LABEL: if

@@ -226,6 +260,26 @@ func @for(%lb : index, %ub : index, %step : index, %A : !tt.ptr<f16>, %B : !tt.p
  // CHECK-NEXT: size = 24576
}

// CHECK-LABEL: for_if_slice
func @for_if_slice(%lb : index, %ub : index, %step : index, %A : !tt.ptr<f16>, %B : !tt.ptr<f16>, %i1 : i1) {
  // CHECK: offset = 0, size = 8192
  %a_shared_init = arith.constant dense<0.00e+00> : tensor<128x32xf16, #A>
  // CHECK-NEXT: offset = 8192, size = 8192
  %b_shared_init = arith.constant dense<0.00e+00> : tensor<128x32xf16, #A>
  // CHECK-NEXT: offset = 16384, size = 8192
  %c_shared_init = arith.constant dense<0.00e+00> : tensor<128x32xf16, #A>
  %a_shared, %b_shared, %c_shared = scf.for %iv = %lb to %ub step %step iter_args(%a_shared = %a_shared_init, %b_shared = %b_shared_init, %c_shared = %c_shared_init) -> (tensor<128x32xf16, #A>, tensor<128x32xf16, #A>, tensor<128x32xf16, #A>) {
    scf.if %i1 {
      %index = arith.constant 8 : i32
      %cst0 = triton_gpu.extract_slice %a_shared, %index { axis = 0 : i32 } : tensor<128x32xf16, #A> -> tensor<32xf16, #A>
      scf.yield
    }
    scf.yield %b_shared, %a_shared, %a_shared : tensor<128x32xf16, #A>, tensor<128x32xf16, #A>, tensor<128x32xf16, #A>
  }
  return
  // CHECK-NEXT: size = 24576
}

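A quick cross-check of the `for_if_slice` numbers above (my arithmetic, not part of the diff): a `tensor<128x32xf16>` buffer holds 128 * 32 elements of 2 bytes each, and since all three loop buffers stay live across the loop, they get disjoint offsets:

```cpp
// Sanity-check the allocation sizes printed by the CHECK lines above.
#include <cstdio>

int main() {
  const int bytesPerBuffer = 128 * 32 * 2;  // f16 is 2 bytes -> 8192
  for (int i = 0; i < 3; ++i)               // offsets 0, 8192, 16384
    std::printf("offset = %d, size = %d\n", i * bytesPerBuffer, bytesPerBuffer);
  std::printf("size = %d\n", 3 * bytesPerBuffer);  // 24576
  return 0;
}
```
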
// a_shared_init, b_shared_init, and c_shared_init's liveness ranges span the entire function before cst2.
// So they cannot be reused by cst0 and cst1, but can be reused by cst2.
// CHECK-LABEL: for_if_for

@@ -56,6 +56,8 @@ func @war_single_block(%A : !tt.ptr<f16>) {
  %a1 = triton_gpu.convert_layout %a1_ : (tensor<128x32xf16, #AL>) -> tensor<128x32xf16, #A>
  // CHECK: Membar 5
  %a2 = triton_gpu.convert_layout %a1 : (tensor<128x32xf16, #A>) -> tensor<128x32xf16, #AL>
  // a2's liveness range ends here, and a3 and a2 have the same address range.
  // So it makes sense to have a WAR dependency between a2 and a3.
  // CHECK-NEXT: Membar 7
  %a3 = triton_gpu.convert_layout %a1_ : (tensor<128x32xf16, #AL>) -> tensor<128x32xf16, #A>
  return

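The comment above states the invariant behind the second barrier; here is a minimal sketch of it (illustrative only, not the Triton membar pass, and the interval values are made-up stand-ins for the 128x32xf16 buffer): if an op writes an address range that an earlier, not-yet-synchronized op read, a barrier is required before the write.

```cpp
// Toy write-after-read check: overlapping [start, end) ranges force a barrier.
#include <cstdio>

struct Interval {
  int start, end;  // shared-memory address range occupied by a value
};

bool overlaps(Interval a, Interval b) {
  return a.start < b.end && b.start < a.end;
}

int main() {
  Interval pendingRead = {0, 8192};  // %a2 read %a1's buffer
  Interval newWrite = {0, 8192};     // %a3 reuses the same range
  if (overlaps(pendingRead, newWrite))
    std::printf("WAR hazard: insert a barrier before the write\n");
  return 0;
}
```
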
@@ -82,6 +84,41 @@ func @async_wait() {
  return
}

// CHECK-LABEL: alloc
func @alloc() {
  %cst0 = triton_gpu.alloc_tensor : tensor<16x16xf16, #A>
  %a = tt.cat %cst0, %cst0 {axis = 0} : (tensor<16x16xf16, #A>, tensor<16x16xf16, #A>) -> tensor<32x16xf16, #A>
  // CHECK: Membar 2
  %b = triton_gpu.convert_layout %a : (tensor<32x16xf16, #A>) -> tensor<32x16xf16, #AL>
  return
}

// CHECK-LABEL: extract_slice
func @extract_slice() {
  %cst0 = arith.constant dense<0.000000e+00> : tensor<1x16x16xf16, #A>
  %index = arith.constant 0 : i32
  %cst1 = triton_gpu.extract_slice %cst0, %index { axis = 0 : i32 } : tensor<1x16x16xf16, #A> -> tensor<16x16xf16, #A>
  // CHECK: Membar 3
  %cst2 = triton_gpu.convert_layout %cst1 : (tensor<16x16xf16, #A>) -> tensor<16x16xf16, #AL>
  // CHECK-NEXT: Membar 5
  %cst3 = triton_gpu.convert_layout %cst2 : (tensor<16x16xf16, #AL>) -> tensor<16x16xf16, #A>
  return
}

// CHECK-LABEL: insert_slice_async
func @insert_slice_async(%A : !tt.ptr<f16>, %i1 : i1) {
  %a_ptr = tt.broadcast %A : (!tt.ptr<f16>) -> tensor<16x16x!tt.ptr<f16>, #AL>
  %mask = tt.splat %i1 : (i1) -> tensor<16x16xi1, #AL>
  %other = arith.constant dense<0.000000e+00> : tensor<16x16xf16, #AL>
  %tensor = triton_gpu.alloc_tensor : tensor<1x16x16xf16, #A>
  %index = arith.constant 0 : i32
  %a = triton_gpu.insert_slice_async %a_ptr, %tensor, %index, %mask, %other {axis = 0 : i32, cache = 1 : i32, evict = 1 : i32, isOtherUnspecified = false, isVolatile = false} : tensor<16x16x!tt.ptr<f16>, #AL> -> tensor<1x16x16xf16, #A>
  %b = tt.cat %a, %a {axis = 0} : (tensor<1x16x16xf16, #A>, tensor<1x16x16xf16, #A>) -> tensor<2x16x16xf16, #A>
  // CHECK: Membar 7
  %c = tt.cat %b, %b {axis = 0} : (tensor<2x16x16xf16, #A>, tensor<2x16x16xf16, #A>) -> tensor<4x16x16xf16, #A>
  return
}

// If the if-branch inserted a barrier for %cst0 and %cst1 but the else-branch didn't, then the barrier should be inserted in the parent region.
// CHECK-LABEL: multi_blocks
func @multi_blocks(%i1 : i1) {