diff --git a/rewrite-test/jit/matmul/matmul.py b/rewrite-test/jit/matmul/matmul.py
index 821098f09..f16cf8b26 100644
--- a/rewrite-test/jit/matmul/matmul.py
+++ b/rewrite-test/jit/matmul/matmul.py
@@ -96,10 +96,10 @@ mod, ctx = matmul_kernel.compile_to_ttir(
 )
 assert mod.verify()
-mod.dump()
+# mod.dump()
 
 res = matmul_kernel.compile_ttir_to_llir(mod, ctx)
 assert mod.verify()
-assert res
+# assert res
 
 mod.dump()
diff --git a/rewrite-test/jit/vecadd-loop.py b/rewrite-test/jit/vecadd/vecadd-loop.py
similarity index 91%
rename from rewrite-test/jit/vecadd-loop.py
rename to rewrite-test/jit/vecadd/vecadd-loop.py
index 49a81a230..069875cc9 100644
--- a/rewrite-test/jit/vecadd-loop.py
+++ b/rewrite-test/jit/vecadd/vecadd-loop.py
@@ -46,6 +46,11 @@ y = torch.rand(size, device='cuda')
 z = torch.empty_like(x)
 # add_kernel[(1,)](x, y, z, size, 256)
 # print(add_kernel[(1,)].kernel.compile_to_ttir())
-mod, ctx = add_kernel.compile_to_ttir(x, y, z, size, 128, 8, grid=(1,))
+mod, ctx = add_kernel.compile_to_ttir(
+    x, y, z, size, 128, 8, grid=(1,), num_stages=4)
 mod.dump()
 # print(mod)
+
+res = add_kernel.compile_ttir_to_llir(mod, ctx)
+
+mod.dump()
diff --git a/rewrite-test/jit/vecadd/vecadd.mlir b/rewrite-test/jit/vecadd/vecadd.mlir
new file mode 100644
index 000000000..1e4434346
--- /dev/null
+++ b/rewrite-test/jit/vecadd/vecadd.mlir
@@ -0,0 +1,128 @@
+module {
+  func @add_kernel__Pfp32_Pfp32_Pfp32_i32_i32_i32__(%arg0: !tt.ptr<f32>, %arg1: !tt.ptr<f32>, %arg2: !tt.ptr<f32>, %arg3: i32, %arg4: i32, %arg5: i32) {
+    %0 = tt.get_program_id {axis = 0 : i32} : i32
+    %c256_i32 = arith.constant 256 : i32
+    %1 = arith.muli %0, %c256_i32 : i32
+    %2 = tt.make_range {end = 256 : i32, start = 0 : i32} : tensor<256xi32>
+    %3 = tt.broadcast %1 : (i32) -> tensor<256xi32>
+    %4 = arith.addi %3, %2 : tensor<256xi32>
+    %5 = tt.broadcast %arg3 : (i32) -> tensor<256xi32>
+    %6 = arith.cmpi slt, %4, %5 : tensor<256xi32>
+    %7 = tt.broadcast %arg0 : (!tt.ptr<f32>) -> tensor<256x!tt.ptr<f32>>
+    %8 = tt.getelementptr %7, %4, : tensor<256x!tt.ptr<f32>>
+    %9 = tt.broadcast %arg1 : (!tt.ptr<f32>) -> tensor<256x!tt.ptr<f32>>
+    %10 = tt.getelementptr %9, %4, : tensor<256x!tt.ptr<f32>>
+    %cst = arith.constant 0.000000e+00 : f32
+    %11 = tt.broadcast %cst : (f32) -> tensor<256xf32>
+    %c0_i32 = arith.constant 0 : i32
+    %c32_i32 = arith.constant 32 : i32
+    %12 = arith.index_cast %c0_i32 : i32 to index
+    %13 = arith.index_cast %arg4 : i32 to index
+    %14 = arith.index_cast %c32_i32 : i32 to index
+    %15:3 = scf.for %arg6 = %12 to %13 step %14 iter_args(%arg7 = %11, %arg8 = %8, %arg9 = %10) -> (tensor<256xf32>, tensor<256x!tt.ptr<f32>>, tensor<256x!tt.ptr<f32>>) {
+      %cst_0 = arith.constant 0.000000e+00 : f32
+      %18 = tt.broadcast %cst_0 : (f32) -> tensor<256xf32>
+      %19 = tt.load %arg8, %6, %18 {cache = 1 : i32, evict = 1 : i32, isVolatile = false} : tensor<256xf32>
+      %cst_1 = arith.constant 0.000000e+00 : f32
+      %20 = tt.broadcast %cst_1 : (f32) -> tensor<256xf32>
+      %21 = tt.load %arg9, %6, %20 {cache = 1 : i32, evict = 1 : i32, isVolatile = false} : tensor<256xf32>
+      %22 = arith.addf %19, %21 : tensor<256xf32>
+      %23 = arith.addf %arg7, %22 : tensor<256xf32>
+      %24 = tt.broadcast %arg5 : (i32) -> tensor<256xi32>
+      %25 = tt.getelementptr %arg8, %24, : tensor<256x!tt.ptr<f32>>
+      %26 = tt.broadcast %arg5 : (i32) -> tensor<256xi32>
+      %27 = tt.getelementptr %arg9, %26, : tensor<256x!tt.ptr<f32>>
+      scf.yield %23, %25, %27 : tensor<256xf32>, tensor<256x!tt.ptr<f32>>, tensor<256x!tt.ptr<f32>>
+    }
+    %16 = tt.broadcast %arg2 : (!tt.ptr<f32>) -> tensor<256x!tt.ptr<f32>>
+    %17 = tt.getelementptr %16, %4, : tensor<256x!tt.ptr<f32>>
+    tt.store %17, %15#0, %6, : tensor<256xf32>
+    return
+  }
+}
+module {
+  func @add_kernel__Pfp32_Pfp32_Pfp32_i32_i32_i32__(%arg0: !tt.ptr<f32>, %arg1: !tt.ptr<f32>, %arg2: !tt.ptr<f32>, %arg3: i32, %arg4: i32, %arg5: i32) {
+    %c64 = arith.constant 64 : index
+    %c32 = arith.constant 32 : index
+    %c0 = arith.constant 0 : index
+    %cst = arith.constant 0.000000e+00 : f32
+    %c256_i32 = arith.constant 256 : i32
+    %0 = tt.get_program_id {axis = 0 : i32} : i32
+    %1 = arith.muli %0, %c256_i32 : i32
+    %2 = tt.make_range {end = 256 : i32, start = 0 : i32} : tensor<256xi32, #triton_gpu<"coalesced encoding">>
+    %3 = tt.broadcast %1 : (i32) -> tensor<256xi32, #triton_gpu<"coalesced encoding">>
+    %4 = arith.addi %3, %2 : tensor<256xi32, #triton_gpu<"coalesced encoding">>
+    %5 = tt.broadcast %arg3 : (i32) -> tensor<256xi32, #triton_gpu<"coalesced encoding">>
+    %6 = "triton_gpu.cmpi"(%4, %5) {predicate = 2 : i64} : (tensor<256xi32, #triton_gpu<"coalesced encoding">>, tensor<256xi32, #triton_gpu<"coalesced encoding">>) -> tensor<256xi1, #triton_gpu<"coalesced encoding">>
+    %7 = tt.broadcast %arg0 : (!tt.ptr<f32>) -> tensor<256x!tt.ptr<f32>, #triton_gpu<"coalesced encoding">>
+    %8 = tt.getelementptr %7, %4, : tensor<256x!tt.ptr<f32>, #triton_gpu<"coalesced encoding">>
+    %9 = tt.broadcast %arg1 : (!tt.ptr<f32>) -> tensor<256x!tt.ptr<f32>, #triton_gpu<"coalesced encoding">>
+    %10 = tt.getelementptr %9, %4, : tensor<256x!tt.ptr<f32>, #triton_gpu<"coalesced encoding">>
+    %11 = tt.broadcast %cst : (f32) -> tensor<256xf32, #triton_gpu<"coalesced encoding">>
+    %12 = arith.index_cast %arg4 : i32 to index
+    %13 = arith.cmpi slt, %c0, %12 : index
+    %14 = tt.broadcast %cst : (f32) -> tensor<256xf32, #triton_gpu<"coalesced encoding">>
+    %15 = tt.broadcast %13 : (i1) -> tensor<256xi1, #triton_gpu<"coalesced encoding">>
+    %16 = arith.andi %6, %15 : tensor<256xi1, #triton_gpu<"coalesced encoding">>
+    %17 = triton_gpu.copy_async %8, %16, %14 {cache = 1 : i32, evict = 1 : i32, isVolatile = false} : tensor<256x!tt.ptr<f32>, #triton_gpu<"coalesced encoding">> -> tensor<256xf32, #triton_gpu<"coalesced encoding">>
+    %18 = tt.broadcast %cst : (f32) -> tensor<256xf32, #triton_gpu<"coalesced encoding">>
+    %19 = tt.broadcast %13 : (i1) -> tensor<256xi1, #triton_gpu<"coalesced encoding">>
+    %20 = arith.andi %6, %19 : tensor<256xi1, #triton_gpu<"coalesced encoding">>
+    %21 = triton_gpu.copy_async %10, %20, %18 {cache = 1 : i32, evict = 1 : i32, isVolatile = false} : tensor<256x!tt.ptr<f32>, #triton_gpu<"coalesced encoding">> -> tensor<256xf32, #triton_gpu<"coalesced encoding">>
+    %22 = tt.broadcast %arg5 : (i32) -> tensor<256xi32, #triton_gpu<"coalesced encoding">>
+    %23 = tt.getelementptr %8, %22, : tensor<256x!tt.ptr<f32>, #triton_gpu<"coalesced encoding">>
+    %24 = tt.broadcast %arg5 : (i32) -> tensor<256xi32, #triton_gpu<"coalesced encoding">>
+    %25 = tt.getelementptr %10, %24, : tensor<256x!tt.ptr<f32>, #triton_gpu<"coalesced encoding">>
+    %26 = arith.cmpi slt, %c32, %12 : index
+    %27 = tt.broadcast %cst : (f32) -> tensor<256xf32, #triton_gpu<"coalesced encoding">>
+    %28 = tt.broadcast %26 : (i1) -> tensor<256xi1, #triton_gpu<"coalesced encoding">>
+    %29 = arith.andi %6, %28 : tensor<256xi1, #triton_gpu<"coalesced encoding">>
+    %30 = triton_gpu.copy_async %23, %29, %27 {cache = 1 : i32, evict = 1 : i32, isVolatile = false} : tensor<256x!tt.ptr<f32>, #triton_gpu<"coalesced encoding">> -> tensor<256xf32, #triton_gpu<"coalesced encoding">>
+    %31 = tt.broadcast %cst : (f32) -> tensor<256xf32, #triton_gpu<"coalesced encoding">>
+    %32 = tt.broadcast %26 : (i1) -> tensor<256xi1, #triton_gpu<"coalesced encoding">>
+    %33 = arith.andi %6, %32 : tensor<256xi1, #triton_gpu<"coalesced encoding">>
+    %34 = triton_gpu.copy_async %25, %33, %31 {cache = 1 : i32, evict = 1 : i32, isVolatile = false} : tensor<256x!tt.ptr<f32>, #triton_gpu<"coalesced encoding">> -> tensor<256xf32, #triton_gpu<"coalesced encoding">>
+    %35 = tt.broadcast %arg5 : (i32) -> tensor<256xi32, #triton_gpu<"coalesced encoding">>
+    %36 = tt.getelementptr %23, %35, : tensor<256x!tt.ptr<f32>, #triton_gpu<"coalesced encoding">>
+    %37 = tt.broadcast %arg5 : (i32) -> tensor<256xi32, #triton_gpu<"coalesced encoding">>
+    %38 = tt.getelementptr %25, %37, : tensor<256x!tt.ptr<f32>, #triton_gpu<"coalesced encoding">>
+    %39 = arith.cmpi slt, %c64, %12 : index
+    %40 = tt.broadcast %cst : (f32) -> tensor<256xf32, #triton_gpu<"coalesced encoding">>
+    %41 = tt.broadcast %39 : (i1) -> tensor<256xi1, #triton_gpu<"coalesced encoding">>
+    %42 = arith.andi %6, %41 : tensor<256xi1, #triton_gpu<"coalesced encoding">>
+    %43 = triton_gpu.copy_async %36, %42, %40 {cache = 1 : i32, evict = 1 : i32, isVolatile = false} : tensor<256x!tt.ptr<f32>, #triton_gpu<"coalesced encoding">> -> tensor<256xf32, #triton_gpu<"coalesced encoding">>
+    %44 = tt.broadcast %cst : (f32) -> tensor<256xf32, #triton_gpu<"coalesced encoding">>
+    %45 = tt.broadcast %39 : (i1) -> tensor<256xi1, #triton_gpu<"coalesced encoding">>
+    %46 = arith.andi %6, %45 : tensor<256xi1, #triton_gpu<"coalesced encoding">>
+    %47 = triton_gpu.copy_async %38, %46, %44 {cache = 1 : i32, evict = 1 : i32, isVolatile = false} : tensor<256x!tt.ptr<f32>, #triton_gpu<"coalesced encoding">> -> tensor<256xf32, #triton_gpu<"coalesced encoding">>
+    %48 = tt.broadcast %arg5 : (i32) -> tensor<256xi32, #triton_gpu<"coalesced encoding">>
+    %49 = tt.getelementptr %36, %48, : tensor<256x!tt.ptr<f32>, #triton_gpu<"coalesced encoding">>
+    %50 = tt.broadcast %arg5 : (i32) -> tensor<256xi32, #triton_gpu<"coalesced encoding">>
+    %51 = tt.getelementptr %38, %50, : tensor<256x!tt.ptr<f32>, #triton_gpu<"coalesced encoding">>
+    %52:12 = scf.for %arg6 = %c0 to %12 step %c32 iter_args(%arg7 = %11, %arg8 = %8, %arg9 = %10, %arg10 = %17, %arg11 = %30, %arg12 = %43, %arg13 = %21, %arg14 = %34, %arg15 = %47, %arg16 = %51, %arg17 = %49, %arg18 = %c64) -> (tensor<256xf32, #triton_gpu<"coalesced encoding">>, tensor<256x!tt.ptr<f32>, #triton_gpu<"coalesced encoding">>, tensor<256x!tt.ptr<f32>, #triton_gpu<"coalesced encoding">>, tensor<256xf32, #triton_gpu<"coalesced encoding">>, tensor<256xf32, #triton_gpu<"coalesced encoding">>, tensor<256xf32, #triton_gpu<"coalesced encoding">>, tensor<256xf32, #triton_gpu<"coalesced encoding">>, tensor<256xf32, #triton_gpu<"coalesced encoding">>, tensor<256xf32, #triton_gpu<"coalesced encoding">>, tensor<256x!tt.ptr<f32>, #triton_gpu<"coalesced encoding">>, tensor<256x!tt.ptr<f32>, #triton_gpu<"coalesced encoding">>, index) {
+      %55 = arith.addf %arg10, %arg13 : tensor<256xf32, #triton_gpu<"coalesced encoding">>
+      %56 = arith.addf %arg7, %55 : tensor<256xf32, #triton_gpu<"coalesced encoding">>
+      %57 = tt.broadcast %arg5 : (i32) -> tensor<256xi32, #triton_gpu<"coalesced encoding">>
+      %58 = tt.getelementptr %arg8, %57, : tensor<256x!tt.ptr<f32>, #triton_gpu<"coalesced encoding">>
+      %59 = tt.broadcast %arg5 : (i32) -> tensor<256xi32, #triton_gpu<"coalesced encoding">>
+      %60 = tt.getelementptr %arg9, %59, : tensor<256x!tt.ptr<f32>, #triton_gpu<"coalesced encoding">>
+      %61 = arith.addi %arg18, %c32 : index
+      %62 = arith.cmpi slt, %61, %12 : index
+      %63 = tt.broadcast %cst : (f32) -> tensor<256xf32, #triton_gpu<"coalesced encoding">>
+      %64 = tt.broadcast %62 : (i1) -> tensor<256xi1, #triton_gpu<"coalesced encoding">>
+      %65 = arith.andi %64, %6 : tensor<256xi1, #triton_gpu<"coalesced encoding">>
+      %66 = triton_gpu.copy_async %arg17, %65, %63 {cache = 1 : i32, evict = 1 : i32, isVolatile = false} : tensor<256x!tt.ptr<f32>, #triton_gpu<"coalesced encoding">> -> tensor<256xf32, #triton_gpu<"coalesced encoding">>
+      %67 = tt.broadcast %cst : (f32) -> tensor<256xf32, #triton_gpu<"coalesced encoding">>
+      %68 = triton_gpu.copy_async %arg16, %65, %67 {cache = 1 : i32, evict = 1 : i32, isVolatile = false} : tensor<256x!tt.ptr<f32>, #triton_gpu<"coalesced encoding">> -> tensor<256xf32, #triton_gpu<"coalesced encoding">>
+      %69 = tt.broadcast %arg5 : (i32) -> tensor<256xi32, #triton_gpu<"coalesced encoding">>
+      %70 = tt.getelementptr %arg17, %69, : tensor<256x!tt.ptr<f32>, #triton_gpu<"coalesced encoding">>
+      %71 = tt.broadcast %arg5 : (i32) -> tensor<256xi32, #triton_gpu<"coalesced encoding">>
+      %72 = tt.getelementptr %arg16, %71, : tensor<256x!tt.ptr<f32>, #triton_gpu<"coalesced encoding">>
+      scf.yield %56, %58, %60, %arg11, %arg12, %66, %arg14, %arg15, %68, %72, %70, %61 : tensor<256xf32, #triton_gpu<"coalesced encoding">>, tensor<256x!tt.ptr<f32>, #triton_gpu<"coalesced encoding">>, tensor<256x!tt.ptr<f32>, #triton_gpu<"coalesced encoding">>, tensor<256xf32, #triton_gpu<"coalesced encoding">>, tensor<256xf32, #triton_gpu<"coalesced encoding">>, tensor<256xf32, #triton_gpu<"coalesced encoding">>, tensor<256xf32, #triton_gpu<"coalesced encoding">>, tensor<256xf32, #triton_gpu<"coalesced encoding">>, tensor<256xf32, #triton_gpu<"coalesced encoding">>, tensor<256x!tt.ptr<f32>, #triton_gpu<"coalesced encoding">>, tensor<256x!tt.ptr<f32>, #triton_gpu<"coalesced encoding">>, index
+    }
+    %53 = tt.broadcast %arg2 : (!tt.ptr<f32>) -> tensor<256x!tt.ptr<f32>, #triton_gpu<"coalesced encoding">>
+    %54 = tt.getelementptr %53, %4, : tensor<256x!tt.ptr<f32>, #triton_gpu<"coalesced encoding">>
+    tt.store %54, %52#0, %6, : tensor<256xf32, #triton_gpu<"coalesced encoding">>
+    return
+  }
+}