// RUN: triton-opt %s -split-input-file --convert-triton-gpu-to-llvm | FileCheck %s

module attributes {"triton_gpu.num-warps" = 4 : i32} {

// CHECK: llvm.func @test_empty_kernel(%arg0: i32, %arg1: !llvm.ptr<f16, 1>)
// Here the 128 comes from the 4 in the "triton_gpu.num-warps" module attribute multiplied by the warp size of 32 (4 * 32 = 128).
// CHECK: attributes {nvvm.kernel = 1 : ui1, nvvm.maxntid = 128 : si32} {{.*}}
func @test_empty_kernel(%lb : index, %A : !tt.ptr<f16>) {

  // CHECK: llvm.return
  return
}

} // end module

// -----
#blocked0 = #triton_gpu.blocked<{sizePerThread = [1], threadsPerWarp = [32], warpsPerCTA = [4], order = [0]}>
module attributes {"triton_gpu.num-warps" = 4 : i32} {
// CHECK-LABEL: basic_load
func @basic_load(%a_ptr_init : tensor<256x!tt.ptr<f32>, #blocked0>, %cst : tensor<256xi1, #blocked0>, %cst_0 : tensor<256xf32, #blocked0>) {
  // CHECK: llvm.inline_asm
  // CHECK: llvm.inline_asm
  %1 = tt.load %a_ptr_init, %cst, %cst_0 {cache = 1 : i32, evict = 1 : i32, isVolatile = false} : tensor<256xf32, #blocked0>
  return
}
}

// -----
#blocked0 = #triton_gpu.blocked<{sizePerThread = [4], threadsPerWarp = [8], warpsPerCTA = [4], order = [0]}>
module attributes {"triton_gpu.num-warps" = 4 : i32} {
// CHECK-LABEL: vectorized_load
func @vectorized_load(%a_ptr_init : tensor<256x!tt.ptr<f32>, #blocked0>, %cst : tensor<256xi1, #blocked0>, %cst_0 : tensor<256xf32, #blocked0>) {
  // CHECK: llvm.inline_asm
  // CHECK-SAME: ld.global.v4.b32
  // CHECK: llvm.inline_asm
  // CHECK-SAME: ld.global.v4.b32
  %1 = tt.load %a_ptr_init, %cst, %cst_0 {cache = 1 : i32, evict = 1 : i32, isVolatile = false} : tensor<256xf32, #blocked0>
  return
}
}

// -----
#blocked0 = #triton_gpu.blocked<{sizePerThread = [4], threadsPerWarp = [8], warpsPerCTA = [4], order = [0]}>
module attributes {"triton_gpu.num-warps" = 4 : i32} {
// CHECK-LABEL: vectorized_load_f16
func @vectorized_load_f16(%a_ptr_init : tensor<256x!tt.ptr<f16>, #blocked0>, %cst : tensor<256xi1, #blocked0>, %cst_0 : tensor<256xf16, #blocked0>) {
  // CHECK: llvm.inline_asm
  // CHECK-SAME: ld.global.v2.b32
  // CHECK: llvm.inline_asm
  // CHECK-SAME: ld.global.v2.b32
  %1 = tt.load %a_ptr_init, %cst, %cst_0 {cache = 1 : i32, evict = 1 : i32, isVolatile = false} : tensor<256xf16, #blocked0>
  return
}
}

// -----
// TODO: Pending support for isSplat constants (no CHECK lines yet).
#blocked0 = #triton_gpu.blocked<{sizePerThread = [1], threadsPerWarp = [32], warpsPerCTA = [4], order = [0]}>
module attributes {"triton_gpu.num-warps" = 4 : i32} {
// CHECK-LABEL: masked_load_const_other
func @masked_load_const_other(%a_ptr_init : tensor<256x!tt.ptr<f32>, #blocked0>, %cst : tensor<256xi1, #blocked0>) {
  %cst_0 = arith.constant dense<0.000000e+00> : tensor<256xf32, #blocked0>
  %1 = tt.load %a_ptr_init, %cst, %cst_0 {cache = 1 : i32, evict = 1 : i32, isVolatile = false} : tensor<256xf32, #blocked0>
  return
}
}
// TODO: Add a test case to verify the optimization when the ptr of the LoadOp
// comes from a GEP with a constant index.
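// A possible shape for the test case described in the TODO above, sketched here
// as a comment (untested; the function name @load_from_const_gep, the layout,
// and the constant index value 16 are illustrative only). It feeds a
// splat-constant index into tt.getelementptr, as in basic_gep, and loads
// through the result, as in vectorized_load; CHECK lines are omitted because
// the expected output is unverified.
//
// #blocked0 = #triton_gpu.blocked<{sizePerThread = [4], threadsPerWarp = [8], warpsPerCTA = [4], order = [0]}>
// module attributes {"triton_gpu.num-warps" = 4 : i32} {
// func @load_from_const_gep(%a_ptr_init : tensor<256x!tt.ptr<f32>, #blocked0>, %cst : tensor<256xi1, #blocked0>, %cst_0 : tensor<256xf32, #blocked0>) {
//   %idx = arith.constant dense<16> : tensor<256xi32, #blocked0>
//   %ptrs = tt.getelementptr %a_ptr_init, %idx : tensor<256x!tt.ptr<f32>, #blocked0>
//   %1 = tt.load %ptrs, %cst, %cst_0 {cache = 1 : i32, evict = 1 : i32, isVolatile = false} : tensor<256xf32, #blocked0>
//   return
// }
// }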
// -----
#blocked0 = #triton_gpu.blocked<{sizePerThread = [1], threadsPerWarp = [32], warpsPerCTA = [4], order = [0]}>
#blocked2 = #triton_gpu.blocked<{sizePerThread = [1, 1], threadsPerWarp = [32, 1], warpsPerCTA = [4, 1], order = [0, 1]}>
module attributes {"triton_gpu.num-warps" = 4 : i32} {
// CHECK-LABEL: basic_view_broadcast
func @basic_view_broadcast(%arg : tensor<256xf32, #blocked0>) {
  // CHECK: llvm.mlir.undef
  // CHECK: %[[T0:.*]] = llvm.extractvalue
  // CHECK: %[[T1:.*]] = llvm.extractvalue
  %0 = tt.view %arg : (tensor<256xf32, #blocked0>) -> tensor<256x1xf32, #blocked2>
  // CHECK: llvm.mlir.undef
  // CHECK: llvm.insertvalue %[[T0]]
  // CHECK: llvm.insertvalue %[[T0]]
  // CHECK: llvm.insertvalue %[[T0]]
  // CHECK: llvm.insertvalue %[[T0]]
  // CHECK: llvm.insertvalue %[[T1]]
  // CHECK: llvm.insertvalue %[[T1]]
  // CHECK: llvm.insertvalue %[[T1]]
  // CHECK: llvm.insertvalue %[[T1]]
  %1 = tt.broadcast %0 : (tensor<256x1xf32, #blocked2>) -> tensor<256x4xf32, #blocked2>
  return
}
}

// -----
#blocked0 = #triton_gpu.blocked<{sizePerThread = [1], threadsPerWarp = [32], warpsPerCTA = [4], order = [0]}>
module attributes {"triton_gpu.num-warps" = 4 : i32} {
// CHECK-LABEL: basic_make_range
func @basic_make_range() {
  // CHECK: nvvm.read.ptx.sreg.tid.x
  // CHECK: llvm.mlir.undef
  // CHECK: llvm.insertvalue
  // CHECK: llvm.insertvalue
  %0 = tt.make_range {end = 256 : i32, start = 0 : i32} : tensor<256xi32, #blocked0>
  return
}
}

// -----
#blocked0 = #triton_gpu.blocked<{sizePerThread = [1], threadsPerWarp = [32], warpsPerCTA = [4], order = [0]}>
module attributes {"triton_gpu.num-warps" = 4 : i32} {
// CHECK-LABEL: basic_addf
func @basic_addf(%arg0 : tensor<256xf32, #blocked0>, %arg1 : tensor<256xf32, #blocked0>) {
  // CHECK: llvm.fadd
  // CHECK: llvm.fadd
  %1 = arith.addf %arg0, %arg1 : tensor<256xf32, #blocked0>
  return
}
}

// -----
#blocked0 = #triton_gpu.blocked<{sizePerThread = [1], threadsPerWarp = [32], warpsPerCTA = [4], order = [0]}>
module attributes {"triton_gpu.num-warps" = 4 : i32} {
// CHECK-LABEL: basic_addi
func @basic_addi(%arg0 : tensor<256xi32, #blocked0>, %arg1 : tensor<256xi32, #blocked0>) {
  // CHECK: llvm.add
  // CHECK: llvm.add
  %1 = arith.addi %arg0, %arg1 : tensor<256xi32, #blocked0>
  return
}
}

// -----
module attributes {"triton_gpu.num-warps" = 4 : i32} {
// CHECK-LABEL: basic_program_id
func @basic_program_id() {
  // CHECK: nvvm.read.ptx.sreg.ctaid.x : i32
  %0 = tt.get_program_id {axis = 0 : i32} : i32
  return
}
}

// -----
#blocked0 = #triton_gpu.blocked<{sizePerThread = [1], threadsPerWarp = [32], warpsPerCTA = [4], order = [0]}>
module attributes {"triton_gpu.num-warps" = 4 : i32} {
// CHECK-LABEL: basic_gep
func @basic_gep(%arg0 : tensor<256x!tt.ptr<f32>, #blocked0>, %arg1 : tensor<256xi32, #blocked0>) {
  // CHECK: llvm.getelementptr
  // CHECK: llvm.getelementptr
  %0 = tt.getelementptr %arg0, %arg1 : tensor<256x!tt.ptr<f32>, #blocked0>
  return
}
}

// -----
#blocked0 = #triton_gpu.blocked<{sizePerThread = [1], threadsPerWarp = [32], warpsPerCTA = [4], order = [0]}>
module attributes {"triton_gpu.num-warps" = 4 : i32} {
// CHECK-LABEL: basic_splat
func @basic_splat(%ptr: !tt.ptr<f32>) {
  // CHECK: llvm.mlir.undef
  // CHECK: llvm.insertvalue
  // CHECK: llvm.insertvalue
  %0 = tt.splat %ptr : (!tt.ptr<f32>) -> tensor<256x!tt.ptr<f32>, #blocked0>
  return
}
}

// -----
#blocked0 = #triton_gpu.blocked<{sizePerThread = [1], threadsPerWarp = [32], warpsPerCTA = [4], order = [0]}>
module attributes {"triton_gpu.num-warps" = 4 : i32} {
// CHECK-LABEL: basic_store
func @basic_store(%ptrs: tensor<256x!tt.ptr<f32>, #blocked0>, %vals: tensor<256xf32, #blocked0>, %mask: tensor<256xi1, #blocked0>) {
  // CHECK: llvm.inline_asm has_side_effects asm_dialect = att
  // CHECK-SAME: st.global.v32.b1 [ ${{.*}} + 0 ], { ${{.*}} };", "b,l,r" %{{.*}}, %{{.*}}, %{{.*}} : (i1, !llvm.ptr<f32, 1>, i32) -> !llvm.struct<()>
  // CHECK: llvm.inline_asm has_side_effects asm_dialect = att
  // CHECK-SAME: st.global.v32.b1 [ ${{.*}} + 0 ], { ${{.*}} };", "b,l,r" %{{.*}}, %{{.*}}, %{{.*}} : (i1, !llvm.ptr<f32, 1>, i32) -> !llvm.struct<()>
  tt.store %ptrs, %vals, %mask, {} : tensor<256xf32, #blocked0>
  return
}
}