From 0e1143544824037a450ac0996fb29dc3a4c5cdf9 Mon Sep 17 00:00:00 2001
From: Yan Da
Date: Mon, 6 Jun 2022 21:10:28 +0800
Subject: [PATCH] more tests

---
 test/TritonGPU/loop-pipeline.mlir | 74 +++++++++++++++++++++++++++++++
 test/TritonGPU/ops.mlir           |  5 +++
 2 files changed, 79 insertions(+)
 create mode 100644 test/TritonGPU/loop-pipeline.mlir
 create mode 100644 test/TritonGPU/ops.mlir

diff --git a/test/TritonGPU/loop-pipeline.mlir b/test/TritonGPU/loop-pipeline.mlir
new file mode 100644
index 000000000..174c56e34
--- /dev/null
+++ b/test/TritonGPU/loop-pipeline.mlir
@@ -0,0 +1,74 @@
+// RUN: triton-opt %s -tritongpu-pipeline=num-stages=3
+// RUN: triton-opt %s -tritongpu-pipeline=num-stages=3 -tritongpu-verifier
+
+// 4 warps
+#AL = #triton_gpu.blocked_layout<{
+  threadTileSize = [1, 4],
+  warpTileSize = [4, 32],
+  blockTileSize = [16, 32],
+  order = [1, 0]
+}>
+
+#BL = #triton_gpu.blocked_layout<{
+  threadTileSize = [1, 4],
+  warpTileSize = [1, 128],
+  blockTileSize = [4, 128],
+  order = [1, 0]
+}>
+
+#A = #triton_gpu.shared_layout<{
+  vec = 2,
+  perPhase = 2,
+  maxPhase = 4,
+  order = [1, 0]
+}>
+
+#B = #triton_gpu.shared_layout<{
+  vec = 2,
+  perPhase = 2,
+  maxPhase = 4,
+  order = [1, 0]
+}>
+
+// TODO: check this
+#C = #triton_gpu.mma_layout<{
+  fragmentPerWarp = [1, 1],
+  shapePerWarp = [16, 8],
+  warpPerTile = [2, 2],
+  shapePerTile = [32, 16],
+  repetitions = [4, 4],
+  contigPerThread = [1, 8]
+}>
+
+// matmul: 128x32 @ 32x128 -> 128x128
+func @matmul_loop(%lb : index, %ub : index, %step : index, %A : !tt.ptr<f16>, %B : !tt.ptr<f16>) {
+  %a_ptr_init = tt.broadcast %A : (!tt.ptr<f16>) -> tensor<128x32x!tt.ptr<f16>, #AL>
+  %b_ptr_init = tt.broadcast %B : (!tt.ptr<f16>) -> tensor<32x128x!tt.ptr<f16>, #BL>
+
+  %a_mask = arith.constant dense<true> : tensor<128x32xi1, #AL>
+  %a_other = arith.constant dense<0.00e+00> : tensor<128x32xf16, #AL>
+  %b_mask = arith.constant dense<true> : tensor<32x128xi1, #BL>
+  %b_other = arith.constant dense<0.00e+00> : tensor<32x128xf16, #BL>
+  %c_init = arith.constant dense<0.00e+00> : tensor<128x128xf32, #C>
+
+  %a_off = arith.constant dense<4> : tensor<128x32xi32, #AL>
+  %b_off = arith.constant dense<4> : tensor<32x128xi32, #BL>
+
+  scf.for %iv = %lb to %ub step %step iter_args(%a_ptr = %a_ptr_init, %b_ptr = %b_ptr_init, %prev_c = %c_init) -> (tensor<128x32x!tt.ptr<f16>, #AL>, tensor<32x128x!tt.ptr<f16>, #BL>, tensor<128x128xf32, #C>) {
+    %a_ = tt.load %a_ptr, %a_mask, %a_other {cache = 1 : i32, evict = 1 : i32, isVolatile = false} : tensor<128x32xf16, #AL>
+    %a = triton_gpu.convert_layout %a_ : (tensor<128x32xf16, #AL>) -> tensor<128x32xf16, #A>
+    %b_ = tt.load %b_ptr, %b_mask, %b_other {cache = 1 : i32, evict = 1 : i32, isVolatile = false} : tensor<32x128xf16, #BL>
+    %b = triton_gpu.convert_layout %b_ : (tensor<32x128xf16, #BL>) -> tensor<32x128xf16, #B>
+
+    %c = tt.dot %a, %b, %prev_c {allowTF32 = true} : tensor<128x32xf16, #A> * tensor<32x128xf16, #B> -> tensor<128x128xf32, #C>
+    // %c = tt.dot %a_, %b_, %prev_c {allowTF32 = true} : tensor<128x32xf16, #AL> * tensor<32x128xf16, #BL> -> tensor<128x128xf32, #C>
+
+    %next_a_ptr = tt.getelementptr %a_ptr, %a_off : tensor<128x32x!tt.ptr<f16>, #AL>
+    %next_b_ptr = tt.getelementptr %b_ptr, %b_off : tensor<32x128x!tt.ptr<f16>, #BL>
+    scf.yield %next_a_ptr, %next_b_ptr, %c : tensor<128x32x!tt.ptr<f16>, #AL>, tensor<32x128x!tt.ptr<f16>, #BL>, tensor<128x128xf32, #C>
+  }
+  return
+}
+
+
+// nested loop
\ No newline at end of file
diff --git a/test/TritonGPU/ops.mlir b/test/TritonGPU/ops.mlir
new file mode 100644
index 000000000..20f4892d9
--- /dev/null
+++ b/test/TritonGPU/ops.mlir
@@ -0,0 +1,5 @@
+// RUN: triton-opt %s -tritongpu-verifier
+func @test_dot(%a : tensor<128x32xf16>, %b : tensor<32x128xf16>, %c : tensor<128x128xf16>) {
+  %d = tt.dot %a, %b, %c {allowTF32 = true} : tensor<128x32xf16> * tensor<32x128xf16> -> tensor<128x128xf16>
+  return
+}
\ No newline at end of file
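
Note (not part of the patch): matmul_loop is the shape of loop the
-tritongpu-pipeline pass targets, i.e. a loop whose tt.dot is fed by tt.load
results that are convert_layout'ed into shared memory. With num-stages=3 the
pass is expected to buffer enough tiles that the loads for later iterations
overlap the dot of the current one.

For context, a rough Python-level analogue of the kernel under test is
sketched below. This is an illustrative sketch only, assuming the usual
triton.language API (tl.arange, tl.load, tl.dot); the row-major strides and
the fixed offset-4 pointer bump are taken from the IR above, and everything
else is an assumption.

    import triton
    import triton.language as tl

    @triton.jit
    def matmul_loop(lb, ub, step, A, B):
        # 128x32 (A) and 32x128 (B) f16 tiles, accumulated into a 128x128 f32
        # tile, mirroring the tensor shapes in the test.
        a_ptrs = A + tl.arange(0, 128)[:, None] * 32 + tl.arange(0, 32)[None, :]
        b_ptrs = B + tl.arange(0, 32)[:, None] * 128 + tl.arange(0, 128)[None, :]
        acc = tl.zeros((128, 128), dtype=tl.float32)
        for k in range(lb, ub, step):
            # These two loads feed the dot; the pipeline pass prefetches them
            # num-stages iterations ahead. (The IR carries an explicit all-true
            # mask and a 0.0 fallback value; both are elided here.)
            a = tl.load(a_ptrs)
            b = tl.load(b_ptrs)
            acc += tl.dot(a, b)
            # Matches the dense<4> pointer increments in the IR.
            a_ptrs += 4
            b_ptrs += 4
        # Like the test IR, the accumulator is never stored.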