[codegen] worked around a bug seemingly in nvptx/ptxas by simplifying multiplications by 1:
- Generated LLVM IR looked correct
- Illegal addressing disappeared when running cuda-memcheck
- Illegal addressing disappeared when using nvptx-short-pointer
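Note: the codegen workaround described above amounts to a peephole rule that folds x * 1 back to x before lowering, so that the stride arithmetic introduced below never emits a literal multiplication by 1. The following is a minimal illustrative sketch of such a rule in Python over a toy expression tree; the names (Const, BinOp, simplify) are made up here, and Triton's actual pass lives in its C++ codegen.

# Toy expression IR, for illustration only.
class Const:
    def __init__(self, value): self.value = value

class BinOp:
    def __init__(self, op, lhs, rhs): self.op, self.lhs, self.rhs = op, lhs, rhs

def simplify(node):
    """Recursively rewrite (x * 1) and (1 * x) to x."""
    if isinstance(node, BinOp):
        lhs, rhs = simplify(node.lhs), simplify(node.rhs)
        if node.op == '*':
            if isinstance(rhs, Const) and rhs.value == 1:
                return lhs
            if isinstance(lhs, Const) and lhs.value == 1:
                return rhs
        return BinOp(node.op, lhs, rhs)
    return node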
@@ -1,23 +1,43 @@
-import triton
 import tensorflow as tf
+import triton
 import numpy as np
 
 src = """
 #if AT == 1
 #define USEA ^a
+#define STRIDE_AK lda
+#define STRIDE_AM 1
+#define BROADCAST_AK :, newaxis
+#define BROADCAST_AM newaxis, :
+#define SHAPE_A TK, TM
 #else
 #define USEA a
+#define STRIDE_AK 1
+#define STRIDE_AM lda
+#define BROADCAST_AK newaxis, :
+#define BROADCAST_AM :, newaxis
+#define SHAPE_A TM, TK
 #endif
 
 #if BT == 1
 #define USEB ^b
+#define STRIDE_BK 1
+#define STRIDE_BN ldb
+#define BROADCAST_BK newaxis, :
+#define BROADCAST_BN :, newaxis
+#define SHAPE_B TN, TK
 #else
 #define USEB b
+#define STRIDE_BK ldb
+#define STRIDE_BN 1
+#define BROADCAST_BK :, newaxis
+#define BROADCAST_BN newaxis, :
+#define SHAPE_B TK, TN
 #endif
 
-void dot(TYPE * A __noalias __readonly __aligned(16),
-         TYPE * B __noalias __readonly __aligned(16),
-         TYPE * C __noalias __readonly __aligned(16),
+void dot(TYPE * A,
+         TYPE * B,
+         TYPE * C,
          int M, int N, int K,
          int lda __multipleof(8),
          int ldb __multipleof(8),
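Note: the STRIDE_*/BROADCAST_*/SHAPE_* macros collapse the transposed and non-transposed layouts into one pointer expression; when a stride is the literal 1, the index math contains a multiplication by 1, which is exactly what the codegen change now simplifies away. A NumPy analogue of the resulting addressing, as a sketch (the function name is made up; lda is the leading dimension):

import numpy as np

def a_tile_offsets(rka, rxa, lda, AT):
    # Mirrors: pa[SHAPE_A] = A + rka[BROADCAST_AK]*STRIDE_AK + rxa[BROADCAST_AM]*STRIDE_AM
    if AT:   # transposed: STRIDE_AK = lda, STRIDE_AM = 1, shape (TK, TM)
        return rka[:, np.newaxis] * lda + rxa[np.newaxis, :] * 1
    else:    # non-transposed: STRIDE_AK = 1, STRIDE_AM = lda, shape (TM, TK)
        return rka[np.newaxis, :] * 1 + rxa[:, np.newaxis] * lda

rka = np.arange(32)    # 0 ... TK
rxa = np.arange(128)   # tile of row indices
offsets = a_tile_offsets(rka, rxa, lda=4096, AT=False)  # shape (TM, TK)

Both branches are the same formula with the strides swapped, which is what the single macro-based pointer line in the new kernel body expresses.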
@@ -31,42 +51,20 @@ void dot(TYPE * A __noalias __readonly __aligned(16),
 int rka[TK] = 0 ... TK;
 int rkb[TK] = 0 ... TK;
 float xc[TM, TN] = 0;
-
-/* pointers for A */
-#if AT == 1
-TYPE* pa[TK, TM] = A + rka[:, newaxis]*lda + rxa[newaxis, :];
-TYPE a[TK, TM] = *pa;
-#else
-TYPE* pa[TM, TK] = A + rka[newaxis, :] + rxa[:, newaxis]*lda;
-TYPE a[TM, TK] = *pa;
-#endif
-
-/* pointers for B */
-#if BT == 1
-TYPE* pb[TN, TK] = B + rkb[newaxis, :] + ryb[:, newaxis]*ldb;
-TYPE b[TN, TK] = *pb;
-#else
-TYPE* pb[TK, TN] = B + rkb[:, newaxis]*ldb + ryb[newaxis, :];
-TYPE b[TK, TN] = *pb;
-#endif
-
+/* pointers for operands */
+TYPE* pa[SHAPE_A] = A + rka[BROADCAST_AK] * STRIDE_AK + rxa[BROADCAST_AM] * STRIDE_AM;
+TYPE* pb[SHAPE_B] = B + rkb[BROADCAST_BK] * STRIDE_BK + ryb[BROADCAST_BN] * STRIDE_BN;
+/* prefetches operands */
+TYPE a[SHAPE_A] = *pa;
+TYPE b[SHAPE_B] = *pb;
 /* reduction loop */
 for(int k = K; k > 0; k = k - TK){
 xc = USEA @ USEB + xc;
-#if AT == 1
-pa = pa + TK*lda;
-#else
-pa = pa + TK;
-#endif
-#if BT == 1
-pb = pb + TK;
-#else
-pb = pb + TK*ldb;
-#endif
+pa = pa + TK * STRIDE_AK;
+pb = pb + TK * STRIDE_BK;
 a = *pa;
 b = *pb;
 }
-
 /* epilogue */
 int rxc[TM] = ridx * TM + (0 ... TM);
 int ryc[TN] = ridy * TN + (0 ... TN);
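Note: the macro strides also unify the pointer advance in the reduction loop; pa = pa + TK * STRIDE_AK covers both the old #if branches (TK*lda and TK*1). A NumPy sketch of the same reduction structure, with made-up names (tiled_dot), assuming K is a multiple of TK:

import numpy as np

def tiled_dot(A, B, TK):
    # Accumulate one output tile, advancing the K offset by TK each
    # iteration -- the analogue of pa = pa + TK * STRIDE_AK etc.
    M, K = A.shape
    K2, N = B.shape
    assert K == K2 and K % TK == 0
    xc = np.zeros((M, N), dtype=np.float32)
    for k0 in range(0, K, TK):
        xc += A[:, k0:k0+TK].astype(np.float32) @ B[k0:k0+TK, :].astype(np.float32)
    return xc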
@@ -75,7 +73,7 @@ void dot(TYPE * A __noalias __readonly __aligned(16),
 bool checkc0[TM] = rxc < M;
 bool checkc1[TN] = ryc < N;
 bool checkc[TM, TN] = checkc0[:, newaxis] && checkc1[newaxis, :];
-*?(checkc) pc = c;
+*pc = c;
 }
 """
 
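Note: *?(checkc) pc = c; is a predicated store that writes only the lanes whose bounds check passed; the replacement stores unconditionally, which is safe in this test only because M and N are multiples of the tile sizes. In NumPy terms the difference is roughly the following sketch (function names made up):

import numpy as np

# Predicated store, analogous to *?(checkc) pc = c;
# C and c are (TM, TN) tiles, checkc a boolean mask of the same shape.
def masked_store(C, c, checkc):
    C[checkc] = c[checkc]

# Unconditional store, analogous to *pc = c;
def plain_store(C, c):
    C[...] = c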
@@ -112,10 +110,12 @@ class dot_op:
 AT = self.trans_a, BT = self.trans_b, TYPE = tf.float16,
 TM = [128], TN = [ 128], TK = [32])
 
-dot_nt = dot_op(False, True)
-dot_nn = dot_op(False, False)
-dot_tn = dot_op(True, False)
-dot_tt = dot_op(True, True)
+
+def dot(a, b, trans_a = False, trans_b = False):
+  if (trans_a, trans_b) not in dot.ops:
+    dot.ops[trans_a, trans_b] = dot_op(trans_a, trans_b)
+  return dot.ops[trans_a, trans_b](a, b)
+dot.ops = dict()
 
 # @triton.register_gradient(dot_op)
 # def _dot_grad(op, dy):
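Note: the new dot wrapper memoizes one compiled dot_op per (trans_a, trans_b) pair on a function attribute, replacing the four eagerly constructed dot_nt/dot_nn/dot_tn/dot_tt globals. Usage, assuming a and b are the placeholders defined in run_dot below:

c_nt  = dot(a, b, trans_a = False, trans_b = True)   # compiles dot_op(False, True) on first use
c_nt2 = dot(a, b, trans_a = False, trans_b = True)   # cache hit, no recompilation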
@@ -127,9 +127,7 @@ def run_dot():
 M, N, K = 128, 128, 128
 a = tf.placeholder(tf.float16, shape=[M, K])
 b = tf.placeholder(tf.float16, shape=[N, K])
-# c = tf.matmul(a, b, transpose_a=True)
-c = dot_nt(a, b)
-# grads = tf.gradients(c, [a])
+c = dot(a, b, trans_a = False, trans_b = True)
 # Reference
 ha = np.random.rand(M, K).astype(np.float16)
 hb = np.random.rand(K, N).astype(np.float16)
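Note: the `result` compared against `hresult` below is produced between these two hunks, outside the shown context; presumably by evaluating `c` with the TF 1.x session API, roughly as in this sketch (not part of the diff):

sess = tf.Session()
result = sess.run(c, feed_dict = {a: ha, b: hb})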
@@ -142,8 +140,6 @@ def run_dot():
 hresult = np.dot(ha, hb.T)
 dif = np.abs(result - hresult)
 np.savetxt('dif.dat', dif, '%2.4f')
-print(hresult)
-print(result)
 print("dif: %f" % np.max(dif))
 
 run_dot()