Trying to figure out the spilling root cause
2032 python/bwd.ptx Normal file
File diff suppressed because it is too large
242 python/bwd.ttgir
@@ -1,22 +1,16 @@
#blocked0 = #triton_gpu.blocked<{sizePerThread = [1], threadsPerWarp = [32], warpsPerCTA = [8], order = [0]}>
#blocked0 = #triton_gpu.blocked<{sizePerThread = [1, 1], threadsPerWarp = [32, 1], warpsPerCTA = [4, 2], order = [0, 1]}>
#blocked1 = #triton_gpu.blocked<{sizePerThread = [1, 8], threadsPerWarp = [4, 8], warpsPerCTA = [8, 1], order = [1, 0]}>
#blocked2 = #triton_gpu.blocked<{sizePerThread = [1, 4], threadsPerWarp = [2, 16], warpsPerCTA = [8, 1], order = [1, 0]}>
#blocked3 = #triton_gpu.blocked<{sizePerThread = [1, 1], threadsPerWarp = [32, 1], warpsPerCTA = [4, 2], order = [0, 1]}>
#mma0 = #triton_gpu.mma<{versionMajor = 2, versionMinor = 0, warpsPerCTA = [4, 2]}>
#mma1 = #triton_gpu.mma<{versionMajor = 2, versionMinor = 0, warpsPerCTA = [8, 1]}>
#shared0 = #triton_gpu.shared<{vec = 1, perPhase = 1, maxPhase = 1, order = [1, 0]}>
#shared1 = #triton_gpu.shared<{vec = 1, perPhase = 1, maxPhase = 1, order = [0, 1]}>
#shared2 = #triton_gpu.shared<{vec = 8, perPhase = 1, maxPhase = 8, order = [1, 0]}>
module attributes {"triton_gpu.num-warps" = 8 : i32} {
func public @_bwd_kernel_0d1d2d34d5d6d7d8d9d10d11d12d13d14d15c16d17d18d19c20d21d22d23c2425d26d27(%arg0: !tt.ptr<f16> {tt.divisibility = 16 : i32}, %arg1: !tt.ptr<f16> {tt.divisibility = 16 : i32}, %arg2: !tt.ptr<f16> {tt.divisibility = 16 : i32}, %arg3: f32, %arg4: !tt.ptr<f16> {tt.divisibility = 16 : i32}, %arg5: !tt.ptr<f16> {tt.divisibility = 16 : i32}, %arg6: !tt.ptr<f32> {tt.divisibility = 16 : i32}, %arg7: !tt.ptr<f16> {tt.divisibility = 16 : i32}, %arg8: !tt.ptr<f16> {tt.divisibility = 16 : i32}, %arg9: !tt.ptr<f32> {tt.divisibility = 16 : i32}, %arg10: !tt.ptr<f32> {tt.divisibility = 16 : i32}, %arg11: !tt.ptr<f32> {tt.divisibility = 16 : i32}, %arg12: i32 {tt.divisibility = 16 : i32}, %arg13: i32 {tt.divisibility = 16 : i32}, %arg14: i32 {tt.divisibility = 16 : i32}, %arg15: i32 {tt.divisibility = 16 : i32}, %arg16: i32 {tt.divisibility = 16 : i32}, %arg17: i32 {tt.divisibility = 16 : i32}, %arg18: i32 {tt.divisibility = 16 : i32}, %arg19: i32 {tt.divisibility = 16 : i32}, %arg20: i32 {tt.divisibility = 16 : i32}, %arg21: i32, %arg22: i32 {tt.divisibility = 16 : i32}, %arg23: i32 {tt.divisibility = 16 : i32}, %arg24: i32) {
%cst = arith.constant dense<0.000000e+00> : tensor<128x128xf32, #mma1>
%cst_0 = arith.constant dense<0.000000e+00> : tensor<128x64xf32, #mma0>
%cst_1 = arith.constant dense<0xFF800000> : tensor<128x128xf32, #blocked3>
%c128 = arith.constant 128 : index
%c128_i32 = arith.constant 128 : i32
%c1 = arith.constant 1 : index
%c0 = arith.constant 0 : index
%c0_i32 = arith.constant 0 : i32
%c128_i32 = arith.constant 128 : i32
%0 = tt.get_program_id {axis = 0 : i32} : i32
%1 = arith.divsi %0, %arg22 : i32
%2 = arith.remsi %0, %arg22 : i32
@@ -30,160 +24,88 @@ module attributes {"triton_gpu.num-warps" = 8 : i32} {
%10 = tt.addptr %arg6, %5 : !tt.ptr<f32>, i32
%11 = tt.addptr %arg7, %5 : !tt.ptr<f16>, i32
%12 = tt.addptr %arg8, %5 : !tt.ptr<f16>, i32
%13 = arith.index_cast %arg24 : i32 to index
%14 = tt.make_range {end = 128 : i32, start = 0 : i32} : tensor<128xi32, #blocked0>
%15 = tt.make_range {end = 128 : i32, start = 0 : i32} : tensor<128xi32, #triton_gpu.slice<{dim = 1, parent = #blocked1}>>
%16 = tt.make_range {end = 128 : i32, start = 0 : i32} : tensor<128xi32, #triton_gpu.slice<{dim = 0, parent = #blocked3}>>
%17 = tt.make_range {end = 128 : i32, start = 0 : i32} : tensor<128xi32, #triton_gpu.slice<{dim = 1, parent = #blocked2}>>
%18 = tt.make_range {end = 128 : i32, start = 0 : i32} : tensor<128xi32, #triton_gpu.slice<{dim = 1, parent = #blocked3}>>
%19 = tt.splat %arg14 : (i32) -> tensor<128x1xi32, #blocked1>
%20 = tt.splat %arg14 : (i32) -> tensor<128x1xi32, #blocked2>
%13 = tt.make_range {end = 128 : i32, start = 0 : i32} : tensor<128xi32, #triton_gpu.slice<{dim = 1, parent = #blocked0}>>
%14 = tt.make_range {end = 128 : i32, start = 0 : i32} : tensor<128xi32, #triton_gpu.slice<{dim = 1, parent = #blocked1}>>
%15 = tt.expand_dims %13 {axis = 1 : i32} : (tensor<128xi32, #triton_gpu.slice<{dim = 1, parent = #blocked0}>>) -> tensor<128x1xi32, #blocked0>
%16 = tt.expand_dims %14 {axis = 1 : i32} : (tensor<128xi32, #triton_gpu.slice<{dim = 1, parent = #blocked1}>>) -> tensor<128x1xi32, #blocked1>
%17 = tt.splat %arg14 : (i32) -> tensor<128x1xi32, #blocked0>
%18 = tt.splat %arg14 : (i32) -> tensor<128x1xi32, #blocked1>
%19 = arith.muli %15, %17 : tensor<128x1xi32, #blocked0>
%20 = tt.make_range {end = 64 : i32, start = 0 : i32} : tensor<64xi32, #triton_gpu.slice<{dim = 0, parent = #blocked0}>>
%21 = tt.make_range {end = 64 : i32, start = 0 : i32} : tensor<64xi32, #triton_gpu.slice<{dim = 0, parent = #blocked1}>>
%22 = tt.make_range {end = 64 : i32, start = 0 : i32} : tensor<64xi32, #triton_gpu.slice<{dim = 0, parent = #blocked2}>>
%23 = tt.expand_dims %21 {axis = 0 : i32} : (tensor<64xi32, #triton_gpu.slice<{dim = 0, parent = #blocked1}>>) -> tensor<1x64xi32, #blocked1>
%24 = tt.broadcast %23 : (tensor<1x64xi32, #blocked1>) -> tensor<128x64xi32, #blocked1>
%25 = tt.expand_dims %22 {axis = 0 : i32} : (tensor<64xi32, #triton_gpu.slice<{dim = 0, parent = #blocked2}>>) -> tensor<1x64xi32, #blocked2>
%26 = tt.broadcast %25 : (tensor<1x64xi32, #blocked2>) -> tensor<128x64xi32, #blocked2>
%27 = tt.splat %6 : (!tt.ptr<f16>) -> tensor<128x64x!tt.ptr<f16>, #blocked1>
%28 = tt.splat %arg17 : (i32) -> tensor<128x1xi32, #blocked1>
%29 = tt.splat %7 : (!tt.ptr<f16>) -> tensor<128x64x!tt.ptr<f16>, #blocked1>
%30 = tt.splat %8 : (!tt.ptr<f16>) -> tensor<128x64x!tt.ptr<f16>, #blocked1>
%31 = tt.splat %9 : (!tt.ptr<f16>) -> tensor<128x64x!tt.ptr<f16>, #blocked1>
%32 = tt.splat %10 : (!tt.ptr<f32>) -> tensor<128x64x!tt.ptr<f32>, #blocked2>
%33 = arith.muli %0, %arg23 : i32
%34 = tt.addptr %arg11, %33 : !tt.ptr<f32>, i32
%35 = tt.addptr %arg10, %33 : !tt.ptr<f32>, i32
%36 = arith.muli %arg24, %c128_i32 : i32
%37 = arith.index_cast %36 : i32 to index
%38 = tt.splat %35 : (!tt.ptr<f32>) -> tensor<128x!tt.ptr<f32>, #blocked0>
%39 = tt.splat %arg3 : (f32) -> tensor<128x128xf32, #blocked3>
%40 = tt.splat %34 : (!tt.ptr<f32>) -> tensor<128x!tt.ptr<f32>, #blocked0>
%41 = arith.muli %arg14, %c128_i32 : i32
%42 = tt.splat %41 : (i32) -> tensor<128x64xi32, #blocked1>
%43 = tt.splat %41 : (i32) -> tensor<128x64xi32, #blocked2>
%44 = tt.splat %12 : (!tt.ptr<f16>) -> tensor<128x64x!tt.ptr<f16>, #blocked1>
%45 = tt.splat %11 : (!tt.ptr<f16>) -> tensor<128x64x!tt.ptr<f16>, #blocked1>
scf.for %arg25 = %c0 to %13 step %c1 {
%46 = arith.index_cast %arg25 : index to i32
%47 = arith.muli %46, %c128_i32 : i32
%48 = tt.splat %47 : (i32) -> tensor<128xi32, #triton_gpu.slice<{dim = 1, parent = #blocked1}>>
%49 = tt.splat %47 : (i32) -> tensor<128xi32, #triton_gpu.slice<{dim = 0, parent = #blocked3}>>
%50 = tt.splat %47 : (i32) -> tensor<128xi32, #triton_gpu.slice<{dim = 1, parent = #blocked2}>>
%51 = arith.addi %48, %15 : tensor<128xi32, #triton_gpu.slice<{dim = 1, parent = #blocked1}>>
%52 = arith.addi %50, %17 : tensor<128xi32, #triton_gpu.slice<{dim = 1, parent = #blocked2}>>
%53 = tt.expand_dims %51 {axis = 1 : i32} : (tensor<128xi32, #triton_gpu.slice<{dim = 1, parent = #blocked1}>>) -> tensor<128x1xi32, #blocked1>
%54 = tt.expand_dims %52 {axis = 1 : i32} : (tensor<128xi32, #triton_gpu.slice<{dim = 1, parent = #blocked2}>>) -> tensor<128x1xi32, #blocked2>
%55 = arith.muli %53, %28 : tensor<128x1xi32, #blocked1>
%56 = tt.broadcast %55 : (tensor<128x1xi32, #blocked1>) -> tensor<128x64xi32, #blocked1>
%57 = arith.addi %56, %24 : tensor<128x64xi32, #blocked1>
%58 = tt.addptr %29, %57 : tensor<128x64x!tt.ptr<f16>, #blocked1>, tensor<128x64xi32, #blocked1>
%59 = tt.load %58 {cache = 1 : i32, evict = 1 : i32, isVolatile = false} : tensor<128x64xf16, #blocked1>
%60 = arith.muli %53, %19 : tensor<128x1xi32, #blocked1>
%61 = tt.broadcast %60 : (tensor<128x1xi32, #blocked1>) -> tensor<128x64xi32, #blocked1>
%62 = arith.addi %61, %24 : tensor<128x64xi32, #blocked1>
%63 = tt.addptr %30, %62 : tensor<128x64x!tt.ptr<f16>, #blocked1>, tensor<128x64xi32, #blocked1>
%64 = tt.load %63 {cache = 1 : i32, evict = 1 : i32, isVolatile = false} : tensor<128x64xf16, #blocked1>
%65 = arith.index_cast %47 : i32 to index
%66 = triton_gpu.convert_layout %59 : (tensor<128x64xf16, #blocked1>) -> tensor<128x64xf16, #shared0>
%67 = tt.trans %66 : (tensor<128x64xf16, #shared0>) -> tensor<64x128xf16, #shared1>
%68 = arith.addi %49, %16 : tensor<128xi32, #triton_gpu.slice<{dim = 0, parent = #blocked3}>>
%69 = tt.expand_dims %68 {axis = 0 : i32} : (tensor<128xi32, #triton_gpu.slice<{dim = 0, parent = #blocked3}>>) -> tensor<1x128xi32, #blocked3>
%70 = tt.broadcast %69 : (tensor<1x128xi32, #blocked3>) -> tensor<128x128xi32, #blocked3>
%71 = triton_gpu.convert_layout %64 : (tensor<128x64xf16, #blocked1>) -> tensor<128x64xf16, #shared0>
%72 = tt.trans %71 : (tensor<128x64xf16, #shared0>) -> tensor<64x128xf16, #shared1>
%73 = arith.muli %54, %20 : tensor<128x1xi32, #blocked2>
%74 = tt.broadcast %73 : (tensor<128x1xi32, #blocked2>) -> tensor<128x64xi32, #blocked2>
%75 = arith.addi %74, %26 : tensor<128x64xi32, #blocked2>
%76 = tt.addptr %32, %75 : tensor<128x64x!tt.ptr<f32>, #blocked2>, tensor<128x64xi32, #blocked2>
%77 = tt.addptr %27, %62 : tensor<128x64x!tt.ptr<f16>, #blocked1>, tensor<128x64xi32, #blocked1>
%78 = tt.addptr %31, %62 : tensor<128x64x!tt.ptr<f16>, #blocked1>, tensor<128x64xi32, #blocked1>
%79 = triton_gpu.convert_layout %67 : (tensor<64x128xf16, #shared1>) -> tensor<64x128xf16, #triton_gpu.dot_op<{opIdx = 1, parent = #mma1}>>
%80 = triton_gpu.convert_layout %72 : (tensor<64x128xf16, #shared1>) -> tensor<64x128xf16, #triton_gpu.dot_op<{opIdx = 1, parent = #mma1}>>
%81 = triton_gpu.alloc_tensor : tensor<1x128x64xf16, #shared2>
%82 = triton_gpu.insert_slice_async %58, %81, %c0_i32 {axis = 0 : i32, cache = 1 : i32, evict = 1 : i32, isVolatile = false} : tensor<128x64x!tt.ptr<f16>, #blocked1> -> tensor<1x128x64xf16, #shared2>
triton_gpu.async_wait {num = 0 : i32}
%83 = tensor.extract_slice %82[0, 0, 0] [1, 128, 64] [1, 1, 1] : tensor<1x128x64xf16, #shared2> to tensor<128x64xf16, #shared2>
%84 = triton_gpu.convert_layout %83 : (tensor<128x64xf16, #shared2>) -> tensor<128x64xf16, #triton_gpu.dot_op<{opIdx = 1, parent = #mma0}>>
%85:5 = scf.for %arg26 = %65 to %37 step %c128 iter_args(%arg27 = %cst_0, %arg28 = %cst_0, %arg29 = %76, %arg30 = %77, %arg31 = %78) -> (tensor<128x64xf32, #mma0>, tensor<128x64xf32, #mma0>, tensor<128x64x!tt.ptr<f32>, #blocked2>, tensor<128x64x!tt.ptr<f16>, #blocked1>, tensor<128x64x!tt.ptr<f16>, #blocked1>) {
%92 = arith.index_cast %arg26 : index to i32
%93 = tt.splat %92 : (i32) -> tensor<128xi32, #blocked0>
%94 = tt.splat %92 : (i32) -> tensor<128xi32, #triton_gpu.slice<{dim = 1, parent = #blocked3}>>
%95 = arith.addi %93, %14 : tensor<128xi32, #blocked0>
%96 = triton_gpu.alloc_tensor : tensor<1x128x64xf16, #shared2>
%97 = triton_gpu.insert_slice_async %arg30, %96, %c0_i32 {axis = 0 : i32, cache = 1 : i32, evict = 1 : i32, isVolatile = false} : tensor<128x64x!tt.ptr<f16>, #blocked1> -> tensor<1x128x64xf16, #shared2>
triton_gpu.async_wait {num = 0 : i32}
%98 = tensor.extract_slice %97[0, 0, 0] [1, 128, 64] [1, 1, 1] : tensor<1x128x64xf16, #shared2> to tensor<128x64xf16, #shared2>
%99 = triton_gpu.convert_layout %98 : (tensor<128x64xf16, #shared2>) -> tensor<128x64xf16, #triton_gpu.dot_op<{opIdx = 0, parent = #mma1}>>
%100 = tt.dot %99, %79, %cst {allowTF32 = true} : tensor<128x64xf16, #triton_gpu.dot_op<{opIdx = 0, parent = #mma1}>> * tensor<64x128xf16, #triton_gpu.dot_op<{opIdx = 1, parent = #mma1}>> -> tensor<128x128xf32, #mma1>
%101 = triton_gpu.convert_layout %100 : (tensor<128x128xf32, #mma1>) -> tensor<128x128xf32, #blocked3>
%102 = arith.addi %94, %18 : tensor<128xi32, #triton_gpu.slice<{dim = 1, parent = #blocked3}>>
%103 = tt.expand_dims %102 {axis = 1 : i32} : (tensor<128xi32, #triton_gpu.slice<{dim = 1, parent = #blocked3}>>) -> tensor<128x1xi32, #blocked3>
%104 = tt.broadcast %103 : (tensor<128x1xi32, #blocked3>) -> tensor<128x128xi32, #blocked3>
%105 = "triton_gpu.cmpi"(%104, %70) {predicate = 5 : i64} : (tensor<128x128xi32, #blocked3>, tensor<128x128xi32, #blocked3>) -> tensor<128x128xi1, #blocked3>
%106 = "triton_gpu.select"(%105, %101, %cst_1) : (tensor<128x128xi1, #blocked3>, tensor<128x128xf32, #blocked3>, tensor<128x128xf32, #blocked3>) -> tensor<128x128xf32, #blocked3>
%107 = tt.addptr %38, %95 : tensor<128x!tt.ptr<f32>, #blocked0>, tensor<128xi32, #blocked0>
%108 = tt.load %107 {cache = 1 : i32, evict = 1 : i32, isVolatile = false} : tensor<128xf32, #blocked0>
%109 = arith.mulf %106, %39 : tensor<128x128xf32, #blocked3>
%110 = triton_gpu.convert_layout %108 : (tensor<128xf32, #blocked0>) -> tensor<128xf32, #triton_gpu.slice<{dim = 1, parent = #blocked3}>>
%111 = tt.expand_dims %110 {axis = 1 : i32} : (tensor<128xf32, #triton_gpu.slice<{dim = 1, parent = #blocked3}>>) -> tensor<128x1xf32, #blocked3>
%112 = tt.broadcast %111 : (tensor<128x1xf32, #blocked3>) -> tensor<128x128xf32, #blocked3>
%113 = arith.subf %109, %112 : tensor<128x128xf32, #blocked3>
%114 = math.exp %113 : tensor<128x128xf32, #blocked3>
%115 = arith.truncf %114 : tensor<128x128xf32, #blocked3> to tensor<128x128xf16, #blocked3>
%116 = triton_gpu.convert_layout %115 : (tensor<128x128xf16, #blocked3>) -> tensor<128x128xf16, #shared1>
%117 = tt.trans %116 : (tensor<128x128xf16, #shared1>) -> tensor<128x128xf16, #shared0>
%118 = triton_gpu.convert_layout %117 : (tensor<128x128xf16, #shared0>) -> tensor<128x128xf16, #triton_gpu.dot_op<{opIdx = 0, parent = #mma0}>>
%119 = triton_gpu.alloc_tensor : tensor<1x128x64xf16, #shared2>
%120 = triton_gpu.insert_slice_async %arg31, %119, %c0_i32 {axis = 0 : i32, cache = 1 : i32, evict = 1 : i32, isVolatile = false} : tensor<128x64x!tt.ptr<f16>, #blocked1> -> tensor<1x128x64xf16, #shared2>
triton_gpu.async_wait {num = 0 : i32}
%121 = tensor.extract_slice %120[0, 0, 0] [1, 128, 64] [1, 1, 1] : tensor<1x128x64xf16, #shared2> to tensor<128x64xf16, #shared2>
%122 = triton_gpu.convert_layout %121 : (tensor<128x64xf16, #shared2>) -> tensor<128x64xf16, #triton_gpu.dot_op<{opIdx = 1, parent = #mma0}>>
%123 = tt.dot %118, %122, %arg27 {allowTF32 = true} : tensor<128x128xf16, #triton_gpu.dot_op<{opIdx = 0, parent = #mma0}>> * tensor<128x64xf16, #triton_gpu.dot_op<{opIdx = 1, parent = #mma0}>> -> tensor<128x64xf32, #mma0>
%124 = tt.addptr %40, %95 : tensor<128x!tt.ptr<f32>, #blocked0>, tensor<128xi32, #blocked0>
%125 = tt.load %124 {cache = 1 : i32, evict = 1 : i32, isVolatile = false} : tensor<128xf32, #blocked0>
%126 = triton_gpu.convert_layout %125 : (tensor<128xf32, #blocked0>) -> tensor<128xf32, #triton_gpu.slice<{dim = 1, parent = #mma1}>>
%127 = tt.expand_dims %126 {axis = 1 : i32} : (tensor<128xf32, #triton_gpu.slice<{dim = 1, parent = #mma1}>>) -> tensor<128x1xf32, #mma1>
%128 = tt.broadcast %127 : (tensor<128x1xf32, #mma1>) -> tensor<128x128xf32, #mma1>
%129 = arith.subf %cst, %128 : tensor<128x128xf32, #mma1>
%130 = triton_gpu.alloc_tensor : tensor<1x128x64xf16, #shared2>
%131 = triton_gpu.insert_slice_async %arg31, %130, %c0_i32 {axis = 0 : i32, cache = 1 : i32, evict = 1 : i32, isVolatile = false} : tensor<128x64x!tt.ptr<f16>, #blocked1> -> tensor<1x128x64xf16, #shared2>
triton_gpu.async_wait {num = 0 : i32}
%132 = tensor.extract_slice %131[0, 0, 0] [1, 128, 64] [1, 1, 1] : tensor<1x128x64xf16, #shared2> to tensor<128x64xf16, #shared2>
%133 = triton_gpu.convert_layout %132 : (tensor<128x64xf16, #shared2>) -> tensor<128x64xf16, #triton_gpu.dot_op<{opIdx = 0, parent = #mma1}>>
%134 = tt.dot %133, %80, %129 {allowTF32 = true} : tensor<128x64xf16, #triton_gpu.dot_op<{opIdx = 0, parent = #mma1}>> * tensor<64x128xf16, #triton_gpu.dot_op<{opIdx = 1, parent = #mma1}>> -> tensor<128x128xf32, #mma1>
%135 = triton_gpu.convert_layout %134 : (tensor<128x128xf32, #mma1>) -> tensor<128x128xf32, #blocked3>
%136 = arith.mulf %114, %135 : tensor<128x128xf32, #blocked3>
%137 = arith.mulf %136, %39 : tensor<128x128xf32, #blocked3>
%138 = arith.truncf %137 : tensor<128x128xf32, #blocked3> to tensor<128x128xf16, #blocked3>
%139 = triton_gpu.convert_layout %138 : (tensor<128x128xf16, #blocked3>) -> tensor<128x128xf16, #shared1>
%140 = tt.trans %139 : (tensor<128x128xf16, #shared1>) -> tensor<128x128xf16, #shared0>
%141 = triton_gpu.convert_layout %140 : (tensor<128x128xf16, #shared0>) -> tensor<128x128xf16, #triton_gpu.dot_op<{opIdx = 0, parent = #mma0}>>
%142 = triton_gpu.alloc_tensor : tensor<1x128x64xf16, #shared2>
%143 = triton_gpu.insert_slice_async %arg30, %142, %c0_i32 {axis = 0 : i32, cache = 1 : i32, evict = 1 : i32, isVolatile = false} : tensor<128x64x!tt.ptr<f16>, #blocked1> -> tensor<1x128x64xf16, #shared2>
triton_gpu.async_wait {num = 0 : i32}
%144 = tensor.extract_slice %143[0, 0, 0] [1, 128, 64] [1, 1, 1] : tensor<1x128x64xf16, #shared2> to tensor<128x64xf16, #shared2>
%145 = triton_gpu.convert_layout %144 : (tensor<128x64xf16, #shared2>) -> tensor<128x64xf16, #triton_gpu.dot_op<{opIdx = 1, parent = #mma0}>>
%146 = tt.dot %141, %145, %arg28 {allowTF32 = true} : tensor<128x128xf16, #triton_gpu.dot_op<{opIdx = 0, parent = #mma0}>> * tensor<128x64xf16, #triton_gpu.dot_op<{opIdx = 1, parent = #mma0}>> -> tensor<128x64xf32, #mma0>
%147 = tt.load %arg29 {cache = 1 : i32, evict = 1 : i32, isVolatile = false} : tensor<128x64xf32, #blocked2>
%148 = triton_gpu.convert_layout %147 : (tensor<128x64xf32, #blocked2>) -> tensor<128x64xf32, #mma0>
%149 = triton_gpu.convert_layout %138 : (tensor<128x128xf16, #blocked3>) -> tensor<128x128xf16, #triton_gpu.dot_op<{opIdx = 0, parent = #mma0}>>
%150 = tt.dot %149, %84, %148 {allowTF32 = true} : tensor<128x128xf16, #triton_gpu.dot_op<{opIdx = 0, parent = #mma0}>> * tensor<128x64xf16, #triton_gpu.dot_op<{opIdx = 1, parent = #mma0}>> -> tensor<128x64xf32, #mma0>
%151 = triton_gpu.convert_layout %150 : (tensor<128x64xf32, #mma0>) -> tensor<128x64xf32, #blocked2>
tt.store %arg29, %151 : tensor<128x64xf32, #blocked2>
%152 = tt.addptr %arg29, %43 : tensor<128x64x!tt.ptr<f32>, #blocked2>, tensor<128x64xi32, #blocked2>
%153 = tt.addptr %arg30, %42 : tensor<128x64x!tt.ptr<f16>, #blocked1>, tensor<128x64xi32, #blocked1>
%154 = tt.addptr %arg31, %42 : tensor<128x64x!tt.ptr<f16>, #blocked1>, tensor<128x64xi32, #blocked1>
scf.yield %123, %146, %152, %153, %154 : tensor<128x64xf32, #mma0>, tensor<128x64xf32, #mma0>, tensor<128x64x!tt.ptr<f32>, #blocked2>, tensor<128x64x!tt.ptr<f16>, #blocked1>, tensor<128x64x!tt.ptr<f16>, #blocked1>
}
%86 = triton_gpu.convert_layout %85#1 : (tensor<128x64xf32, #mma0>) -> tensor<128x64xf32, #blocked1>
%87 = triton_gpu.convert_layout %85#0 : (tensor<128x64xf32, #mma0>) -> tensor<128x64xf32, #blocked1>
%88 = tt.addptr %44, %62 : tensor<128x64x!tt.ptr<f16>, #blocked1>, tensor<128x64xi32, #blocked1>
%89 = arith.truncf %87 : tensor<128x64xf32, #blocked1> to tensor<128x64xf16, #blocked1>
tt.store %88, %89 : tensor<128x64xf16, #blocked1>
%90 = tt.addptr %45, %57 : tensor<128x64x!tt.ptr<f16>, #blocked1>, tensor<128x64xi32, #blocked1>
%91 = arith.truncf %86 : tensor<128x64xf32, #blocked1> to tensor<128x64xf16, #blocked1>
tt.store %90, %91 : tensor<128x64xf16, #blocked1>
%22 = tt.broadcast %19 : (tensor<128x1xi32, #blocked0>) -> tensor<128x64xi32, #blocked0>
%23 = tt.expand_dims %20 {axis = 0 : i32} : (tensor<64xi32, #triton_gpu.slice<{dim = 0, parent = #blocked0}>>) -> tensor<1x64xi32, #blocked0>
%24 = tt.broadcast %23 : (tensor<1x64xi32, #blocked0>) -> tensor<128x64xi32, #blocked0>
%25 = tt.expand_dims %21 {axis = 0 : i32} : (tensor<64xi32, #triton_gpu.slice<{dim = 0, parent = #blocked1}>>) -> tensor<1x64xi32, #blocked1>
%26 = tt.broadcast %25 : (tensor<1x64xi32, #blocked1>) -> tensor<128x64xi32, #blocked1>
%27 = arith.addi %22, %24 : tensor<128x64xi32, #blocked0>
%28 = tt.splat %6 : (!tt.ptr<f16>) -> tensor<128x64x!tt.ptr<f16>, #blocked1>
%29 = tt.splat %arg17 : (i32) -> tensor<128x1xi32, #blocked1>
%30 = tt.splat %7 : (!tt.ptr<f16>) -> tensor<128x64x!tt.ptr<f16>, #blocked1>
%31 = tt.splat %8 : (!tt.ptr<f16>) -> tensor<128x64x!tt.ptr<f16>, #blocked1>
%32 = tt.splat %9 : (!tt.ptr<f16>) -> tensor<128x64x!tt.ptr<f16>, #blocked1>
%33 = tt.splat %10 : (!tt.ptr<f32>) -> tensor<128x64x!tt.ptr<f32>, #blocked0>
%34 = tt.addptr %33, %27 : tensor<128x64x!tt.ptr<f32>, #blocked0>, tensor<128x64xi32, #blocked0>
%35 = arith.muli %16, %29 : tensor<128x1xi32, #blocked1>
%36 = tt.broadcast %35 : (tensor<128x1xi32, #blocked1>) -> tensor<128x64xi32, #blocked1>
%37 = arith.addi %36, %26 : tensor<128x64xi32, #blocked1>
%38 = tt.addptr %30, %37 : tensor<128x64x!tt.ptr<f16>, #blocked1>, tensor<128x64xi32, #blocked1>
%39 = tt.load %38 {cache = 1 : i32, evict = 1 : i32, isVolatile = false} : tensor<128x64xf16, #blocked1>
%40 = arith.muli %16, %18 : tensor<128x1xi32, #blocked1>
%41 = tt.broadcast %40 : (tensor<128x1xi32, #blocked1>) -> tensor<128x64xi32, #blocked1>
%42 = arith.addi %41, %26 : tensor<128x64xi32, #blocked1>
%43 = tt.addptr %31, %42 : tensor<128x64x!tt.ptr<f16>, #blocked1>, tensor<128x64xi32, #blocked1>
%44 = tt.load %43 {cache = 1 : i32, evict = 1 : i32, isVolatile = false} : tensor<128x64xf16, #blocked1>
%45 = arith.muli %arg24, %c128_i32 : i32
%46 = arith.index_cast %45 : i32 to index
%47 = triton_gpu.convert_layout %39 : (tensor<128x64xf16, #blocked1>) -> tensor<128x64xf16, #shared0>
%48 = tt.trans %47 : (tensor<128x64xf16, #shared0>) -> tensor<64x128xf16, #shared1>
%49 = tt.splat %arg3 : (f32) -> tensor<128x128xf32, #mma1>
%50 = triton_gpu.convert_layout %44 : (tensor<128x64xf16, #blocked1>) -> tensor<128x64xf16, #shared0>
%51 = tt.trans %50 : (tensor<128x64xf16, #shared0>) -> tensor<64x128xf16, #shared1>
%52 = arith.muli %arg14, %c128_i32 : i32
%53 = tt.splat %52 : (i32) -> tensor<128x64xi32, #blocked0>
%54 = tt.splat %52 : (i32) -> tensor<128x64xi32, #blocked1>
%55 = tt.addptr %28, %42 : tensor<128x64x!tt.ptr<f16>, #blocked1>, tensor<128x64xi32, #blocked1>
%56 = tt.addptr %32, %42 : tensor<128x64x!tt.ptr<f16>, #blocked1>, tensor<128x64xi32, #blocked1>
%57 = triton_gpu.convert_layout %48 : (tensor<64x128xf16, #shared1>) -> tensor<64x128xf16, #triton_gpu.dot_op<{opIdx = 1, parent = #mma1}>>
%58 = triton_gpu.convert_layout %51 : (tensor<64x128xf16, #shared1>) -> tensor<64x128xf16, #triton_gpu.dot_op<{opIdx = 1, parent = #mma1}>>
%59:5 = scf.for %arg25 = %c0 to %46 step %c128 iter_args(%arg26 = %cst_0, %arg27 = %cst_0, %arg28 = %34, %arg29 = %55, %arg30 = %56) -> (tensor<128x64xf32, #mma0>, tensor<128x64xf32, #mma0>, tensor<128x64x!tt.ptr<f32>, #blocked0>, tensor<128x64x!tt.ptr<f16>, #blocked1>, tensor<128x64x!tt.ptr<f16>, #blocked1>) {
%68 = tt.load %arg29 {cache = 1 : i32, evict = 1 : i32, isVolatile = false} : tensor<128x64xf16, #blocked1>
%69 = triton_gpu.convert_layout %68 : (tensor<128x64xf16, #blocked1>) -> tensor<128x64xf16, #triton_gpu.dot_op<{opIdx = 0, parent = #mma1}>>
%70 = tt.dot %69, %57, %cst {allowTF32 = true} : tensor<128x64xf16, #triton_gpu.dot_op<{opIdx = 0, parent = #mma1}>> * tensor<64x128xf16, #triton_gpu.dot_op<{opIdx = 1, parent = #mma1}>> -> tensor<128x128xf32, #mma1>
%73 = tt.load %arg30 {cache = 1 : i32, evict = 1 : i32, isVolatile = false} : tensor<128x64xf16, #blocked1>
%74 = arith.truncf %70 : tensor<128x128xf32, #mma1> to tensor<128x128xf16, #mma1>
%75 = triton_gpu.convert_layout %74 : (tensor<128x128xf16, #mma1>) -> tensor<128x128xf16, #shared1>
%76 = tt.trans %75 : (tensor<128x128xf16, #shared1>) -> tensor<128x128xf16, #shared0>
%77 = triton_gpu.convert_layout %76 : (tensor<128x128xf16, #shared0>) -> tensor<128x128xf16, #triton_gpu.dot_op<{opIdx = 0, parent = #mma0}>>
%78 = triton_gpu.convert_layout %73 : (tensor<128x64xf16, #blocked1>) -> tensor<128x64xf16, #triton_gpu.dot_op<{opIdx = 1, parent = #mma0}>>
%79 = tt.dot %77, %78, %arg26 {allowTF32 = true} : tensor<128x128xf16, #triton_gpu.dot_op<{opIdx = 0, parent = #mma0}>> * tensor<128x64xf16, #triton_gpu.dot_op<{opIdx = 1, parent = #mma0}>> -> tensor<128x64xf32, #mma0>
%80 = triton_gpu.convert_layout %73 : (tensor<128x64xf16, #blocked1>) -> tensor<128x64xf16, #triton_gpu.dot_op<{opIdx = 0, parent = #mma1}>>
%81 = tt.dot %80, %58, %cst {allowTF32 = true} : tensor<128x64xf16, #triton_gpu.dot_op<{opIdx = 0, parent = #mma1}>> * tensor<64x128xf16, #triton_gpu.dot_op<{opIdx = 1, parent = #mma1}>> -> tensor<128x128xf32, #mma1>
%83 = arith.mulf %70, %81 : tensor<128x128xf32, #mma1>
%84 = arith.mulf %83, %49 : tensor<128x128xf32, #mma1>
%85 = arith.truncf %84 : tensor<128x128xf32, #mma1> to tensor<128x128xf16, #mma1>
%86 = triton_gpu.convert_layout %85 : (tensor<128x128xf16, #mma1>) -> tensor<128x128xf16, #shared1>
%87 = tt.trans %86 : (tensor<128x128xf16, #shared1>) -> tensor<128x128xf16, #shared0>
%88 = triton_gpu.convert_layout %87 : (tensor<128x128xf16, #shared0>) -> tensor<128x128xf16, #triton_gpu.dot_op<{opIdx = 0, parent = #mma0}>>
%89 = triton_gpu.convert_layout %68 : (tensor<128x64xf16, #blocked1>) -> tensor<128x64xf16, #triton_gpu.dot_op<{opIdx = 1, parent = #mma0}>>
%90 = tt.dot %88, %89, %arg27 {allowTF32 = true} : tensor<128x128xf16, #triton_gpu.dot_op<{opIdx = 0, parent = #mma0}>> * tensor<128x64xf16, #triton_gpu.dot_op<{opIdx = 1, parent = #mma0}>> -> tensor<128x64xf32, #mma0>
%91 = tt.addptr %arg28, %53 : tensor<128x64x!tt.ptr<f32>, #blocked0>, tensor<128x64xi32, #blocked0>
%92 = tt.addptr %arg29, %54 : tensor<128x64x!tt.ptr<f16>, #blocked1>, tensor<128x64xi32, #blocked1>
%93 = tt.addptr %arg30, %54 : tensor<128x64x!tt.ptr<f16>, #blocked1>, tensor<128x64xi32, #blocked1>
scf.yield %79, %arg27, %arg28, %arg29, %arg30 : tensor<128x64xf32, #mma0>, tensor<128x64xf32, #mma0>, tensor<128x64x!tt.ptr<f32>, #blocked0>, tensor<128x64x!tt.ptr<f16>, #blocked1>, tensor<128x64x!tt.ptr<f16>, #blocked1>
}
%60 = triton_gpu.convert_layout %59#1 : (tensor<128x64xf32, #mma0>) -> tensor<128x64xf32, #blocked1>
%61 = triton_gpu.convert_layout %59#0 : (tensor<128x64xf32, #mma0>) -> tensor<128x64xf32, #blocked1>
%62 = tt.splat %12 : (!tt.ptr<f16>) -> tensor<128x64x!tt.ptr<f16>, #blocked1>
%63 = tt.splat %11 : (!tt.ptr<f16>) -> tensor<128x64x!tt.ptr<f16>, #blocked1>
%64 = tt.addptr %62, %42 : tensor<128x64x!tt.ptr<f16>, #blocked1>, tensor<128x64xi32, #blocked1>
%65 = arith.truncf %61 : tensor<128x64xf32, #blocked1> to tensor<128x64xf16, #blocked1>
tt.store %64, %65 : tensor<128x64xf16, #blocked1>
%66 = tt.addptr %63, %37 : tensor<128x64x!tt.ptr<f16>, #blocked1>, tensor<128x64xi32, #blocked1>
%67 = arith.truncf %60 : tensor<128x64xf32, #blocked1> to tensor<128x64xf16, #blocked1>
tt.store %66, %67 : tensor<128x64xf16, #blocked1>
return
}
}
@@ -905,7 +905,7 @@ def ttir_to_ttgir(mod, num_warps, num_stages, compute_capability):
pm.add_licm_pass()
pm.add_tritongpu_combine_pass(compute_capability)
pm.add_cse_pass()
pm.add_tritongpu_optimize_load_convert_pass()
# pm.add_tritongpu_optimize_load_convert_pass()
pm.run(mod)
return mod
@@ -133,7 +133,8 @@ def _bwd_kernel(
DQ += off_z * stride_qz + off_h * stride_qh
DK += off_z * stride_qz + off_h * stride_qh
DV += off_z * stride_qz + off_h * stride_qh
for start_n in range(0, num_block):
# for start_n in range(0, num_block):
start_n = 0
lo = start_n * BLOCK_M
# initialize row/col offsets
offs_qm = lo + tl.arange(0, BLOCK_M)
@@ -147,8 +148,8 @@ def _bwd_kernel(
do_ptrs = DO + (offs_qm[:, None] * stride_qm + offs_k[None, :] * stride_qk)
dq_ptrs = DQ + (offs_qm[:, None] * stride_qm + offs_k[None, :] * stride_qk)
# pointer to row-wise quantities in value-like data
D_ptrs = D + off_hz * N_CTX
m_ptrs = M + off_hz * N_CTX
# D_ptrs = D + off_hz * N_CTX
# m_ptrs = M + off_hz * N_CTX
# initialize dv and dk
dv = tl.zeros([BLOCK_M, BLOCK_DMODEL], dtype=tl.float32)
dk = tl.zeros([BLOCK_M, BLOCK_DMODEL], dtype=tl.float32)
@@ -157,30 +158,32 @@ def _bwd_kernel(
v = tl.load(v_ptrs)
# loop over rows
for start_m in range(lo, num_block * BLOCK_M, BLOCK_M):
offs_m_curr = start_m + offs_m
# offs_m_curr = start_m + offs_m
# load q, k, v, do on-chip
q = tl.load(q_ptrs)
# recompute p = softmax(qk, dim=-1).T
# NOTE: `do` is pre-divided by `l`; no normalization here
qk = tl.dot(q, tl.trans(k))
qk = tl.where(offs_m_curr[:, None] >= (offs_n[None, :]), qk, float("-inf"))
m = tl.load(m_ptrs + offs_m_curr)
p = tl.exp(qk * sm_scale - m[:, None])
# qk = tl.where(offs_m_curr[:, None] >= (offs_n[None, :]), qk, float("-inf"))
# m = tl.load(m_ptrs + offs_m_curr)
# p = tl.exp(qk * sm_scale - m[:, None])
p = qk * sm_scale
# compute dv
do = tl.load(do_ptrs)
dv += tl.dot(tl.trans(p.to(tl.float16)), do)
# compute dp = dot(v, do)
Di = tl.load(D_ptrs + offs_m_curr)
dp = tl.zeros([BLOCK_M, BLOCK_N], dtype=tl.float32) - Di[:, None]
# # compute dp = dot(v, do)
# Di = tl.load(D_ptrs + offs_m_curr)
# dp = tl.zeros([BLOCK_M, BLOCK_N], dtype=tl.float32) - Di[:, None]
dp = tl.zeros([BLOCK_M, BLOCK_N], dtype=tl.float32)
dp += tl.dot(do, tl.trans(v))
# compute ds = p * (dp - delta[:, None])
ds = p * dp * sm_scale
# compute dk = dot(ds.T, q)
# # compute dk = dot(ds.T, q)
dk += tl.dot(tl.trans(ds.to(tl.float16)), q)
# compute dq
dq = tl.load(dq_ptrs)
dq += tl.dot(ds.to(tl.float16), k)
tl.store(dq_ptrs, dq)
# # compute dq
# dq = tl.load(dq_ptrs)
# dq += tl.dot(ds.to(tl.float16), k)
# tl.store(dq_ptrs, dq)
# increment pointers
dq_ptrs += BLOCK_M * stride_qm
q_ptrs += BLOCK_M * stride_qm
@@ -191,7 +194,7 @@ def _bwd_kernel(
tl.store(dv_ptrs, dv)
tl.store(dk_ptrs, dk)
_bwd_kernel = triton.compile("./bwd.ttgir", num_warps=8)
_bwd_kernel = triton.compile("./bwd.ptx", num_warps=8, shared=32768)
# _fwd_kernel = triton.compile("./fails.ptx", num_warps=4, shared=18432)
empty = torch.empty(128, device="cuda")
@@ -285,6 +288,7 @@ class _attention(torch.autograd.Function):
# num_stages=1,
# )
# print(pgm.asm["ttgir"])
exit(1)
return dq, dk, dv, None
@@ -330,7 +334,7 @@ BATCH, N_HEADS, N_CTX, D_HEAD = 4, 48, 4096, 64
# vary seq length for fixed head and batch=4
configs = [triton.testing.Benchmark(
x_names=['N_CTX'],
x_vals=[2**i for i in range(10, 16)],
x_vals=[2**i for i in range(10, 11)],
line_arg='provider',
line_vals=['triton'],
line_names=['Triton'],
@@ -338,7 +342,7 @@ configs = [triton.testing.Benchmark(
ylabel='ms',
plot_name=f'fused-attention-batch{BATCH}-head{N_HEADS}-d{D_HEAD}-{mode}',
args={'H': N_HEADS, 'BATCH': BATCH, 'D_HEAD': D_HEAD, 'dtype': torch.float16, 'mode': mode}
) for mode in ['fwd']]
) for mode in ['bwd']]
@triton.testing.perf_report(configs)
@@ -375,4 +379,4 @@ def bench_flash_attention(BATCH, H, N_CTX, D_HEAD, mode, provider, dtype=torch.f
ms = triton.testing.do_bench(fn, percentiles=None, warmup=warmup, rep=rep)
return ms
# bench_flash_attention.run(save_path='.', print_data=True)
bench_flash_attention.run(save_path='.', print_data=True)
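
Side note, not part of the commit: once a standalone bwd.ptx exists, ptxas can confirm whether, and by how much, the kernel actually spills. A minimal sketch, assuming `ptxas` from the CUDA toolkit is on PATH and sm_80 as the target (both are assumptions, not taken from this commit):

import subprocess

# `ptxas -v` prints per-kernel register and spill statistics to stderr, e.g.
#   ptxas info : Used 255 registers, 168 bytes spill stores, 164 bytes spill loads
stats = subprocess.run(
    ["ptxas", "-arch=sm_80", "-v", "bwd.ptx", "-o", "/dev/null"],
    capture_output=True, text=True,
).stderr

for line in stats.splitlines():
    if "registers" in line or "spill" in line:
        print(line)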