import tensorflow as tf
import triton
import numpy as np
class dot(triton.function):
    """Tiled matrix multiplication C = op(A) @ op(B) as a Triton kernel.

    Supports optional transposition of either operand and provides a
    backward pass so the op can be differentiated by the framework's
    autograd machinery.
    """

    # Triton-C kernel source.  TM/TN/TK are tile-size meta-parameters
    # (auto-tuned over the values passed to dot.op below).  The
    # SHAPE_* / STRIDE_* / BROADCAST_* / USE_* tokens are textual macros
    # substituted in `_call` according to the transposition flags, so a
    # single source handles all four NN/NT/TN/TT layouts.
    src = """
void dot(TYPE * A, TYPE * B, TYPE * C,
         int M, int N, int K,
         int lda __multipleof(8),
         int ldb __multipleof(8),
         int ldc) {
  // prologue
  int ridx = get_program_id(0);
  int ridy = get_program_id(1);
  int rxa[TM] = ridx * TM + 0 ... TM;
  int ryb[TN] = ridy * TN + 0 ... TN;
  int rka[TK] = 0 ... TK;
  int rkb[TK] = 0 ... TK;
  float c[TM, TN] = 0;
  // pointers to operands
  TYPE* pa[SHAPE_A] = A + rka[BROADCAST_AK] * STRIDE_AK + rxa[BROADCAST_AM] * STRIDE_AM;
  TYPE* pb[SHAPE_B] = B + rkb[BROADCAST_BK] * STRIDE_BK + ryb[BROADCAST_BN] * STRIDE_BN;
  // prefetches operands
  TYPE a[SHAPE_A] = *pa;
  TYPE b[SHAPE_B] = *pb;
  // reduction loop
  for(int k = K; k > 0; k-= TK){
    c += USE_A @ USE_B;
    pa = pa + TK * STRIDE_AK;
    pb = pb + TK * STRIDE_BK;
    a = *pa;
    b = *pb;
  }
  // epilogue
  int rxc[TM] = ridx * TM + 0 ... TM;
  int ryc[TN] = ridy * TN + 0 ... TN;
  TYPE* pc[TM, TN] = C + ryc[newaxis, :] + rxc[:, newaxis] * ldc;
  bool checkc[TM, TN] = (rxc < M)[:, newaxis] && (ryc < N)[newaxis, :];
  *?(checkc) pc = c;
}
"""

    # Compile the kernel once at class-creation time; 'C' names the
    # output argument.
    op = triton.op(src, ['C'])

    @staticmethod
    def _call(a, b, transpose_a, transpose_b):
        """Allocate the output and launch the kernel.

        a, b          -- 2-D input tensors (framework tensors understood
                         by triton.shape / triton.empty)
        transpose_a/b -- whether the corresponding operand is stored
                         transposed in memory
        Returns the output tensor C of logical shape [M, N].
        """
        # extract shapes
        shape_a = triton.shape(a)
        shape_b = triton.shape(b)
        M, Ka = shape_a[0], shape_a[1]
        Kb, N = shape_b[0], shape_b[1]
        # transpose shapes: swap the logical dims when an operand is
        # stored transposed
        if transpose_a:
            M, Ka = Ka, M
        if transpose_b:
            Kb, N = N, Kb
        # contiguous (leading) dimensions of each operand
        lda = M if transpose_a else Ka
        ldb = Kb if transpose_b else N
        ldc = N
        # data-type
        dtype = a.dtype
        # allocate output
        c = triton.empty([M, N], dtype = dtype)
        # launch grid: one program per (TM, TN) output tile
        grid = lambda opt: [triton.cdiv(M, opt.d('TM')), triton.cdiv(N, opt.d('TN'))]
        # macros -- not necessary but makes kernel source-code simpler
        macros = {# handle A transposition
                  'USE_A'       : '^a' if transpose_a else 'a',
                  'STRIDE_AK'   : 'lda' if transpose_a else '1',
                  'STRIDE_AM'   : '1' if transpose_a else 'lda',
                  'BROADCAST_AK': ':, newaxis' if transpose_a else 'newaxis, :',
                  'BROADCAST_AM': 'newaxis, :' if transpose_a else ':, newaxis',
                  'SHAPE_A'     : 'TK, TM' if transpose_a else 'TM, TK',
                  # handle B transposition
                  'USE_B'       : '^b' if transpose_b else 'b',
                  'STRIDE_BK'   : '1' if transpose_b else 'ldb',
                  'STRIDE_BN'   : 'ldb' if transpose_b else '1',
                  'BROADCAST_BK': 'newaxis, :' if transpose_b else ':, newaxis',
                  'BROADCAST_BN': ':, newaxis' if transpose_b else 'newaxis, :',
                  'SHAPE_B'     : 'TN, TK' if transpose_b else 'TK, TN'}
        return dot.op(a, b, c, M, N, Ka, lda, ldb, ldc, grid,
                      AT = transpose_a, BT = transpose_b, TYPE = dtype,
                      TM = [64, 128], TN = [64, 128], TK = [8], **macros)

    @staticmethod
    def forward(ctx, a, b, transpose_a = False, transpose_b = False):
        """Autograd-style forward: record inputs/flags, return op(a) @ op(b)."""
        ctx.save_for_backward(a, b, transpose_a, transpose_b)
        return dot._call(a, b, transpose_a, transpose_b)

    @staticmethod
    def backward(ctx, dy):
        """Gradient of C = op(A) @ op(B) w.r.t. A and B.

        Each of the four transposition cases uses the standard matmul
        gradient identities, expressed as further `dot` calls so the
        gradients also run on the Triton kernel, e.g. for the NN case
        dA = dC . B^T and dB = A^T . dC.
        """
        a, b, t_a, t_b = ctx.saved_tensors
        if not t_a and not t_b:
            da = dot._call(dy, b, False, True)
            db = dot._call(a, dy, True, False)
        elif not t_a and t_b:
            da = dot._call(dy, b, False, False)
            db = dot._call(dy, a, True, False)
        elif t_a and not t_b:
            da = dot._call(b, dy, False, True)
            db = dot._call(a, dy, False, False)
        elif t_a and t_b:
            da = dot._call(b, dy, True, True)
            db = dot._call(dy, a, True, True)
        else:
            # Unreachable: the four branches above cover every boolean
            # combination; kept as a defensive guard.
            assert False
        # One gradient slot per input passed to dot.op in _call
        # (a, b, c, M, N, Ka, lda, ldb, ldc) -- only a and b are
        # differentiable.  NOTE(review): presumably required by the
        # triton.function interface; confirm against triton's docs.
        return [da, db, None, None, None, None, None, None, None]
def run_dot():
    """Self-test: compare the Triton dot gradient against tf.matmul.

    Builds d = (a . b^T)^T . b with both the Triton op and tf.matmul,
    takes d(d)/d(a) for each, runs both on the same random feeds, and
    prints the max absolute difference between the two gradients.
    """
    M, N, K = 128, 128, 128
    a = tf.placeholder(tf.float32, shape=[M, K])
    b = tf.placeholder(tf.float32, shape=[N, K])
    _dot = dot.apply
    # Triton graph: chains two dots so the backward pass is exercised
    # through an intermediate result as well.
    tr_c = _dot(a, b, transpose_a = False, transpose_b = True)
    tr_d = _dot(tr_c, b, transpose_a = True, transpose_b = False)
    # TensorFlow reference graph with identical structure.
    tf_c = tf.matmul(a, b, transpose_a = False, transpose_b = True)
    tf_d = tf.matmul(tf_c, b, transpose_a = True, transpose_b = False)
    # Gradient
    tr_da = tf.gradients(tr_d, [a])
    tf_da = tf.gradients(tf_d, [a])
    # Reference inputs
    ha = np.random.rand(M, K).astype(np.float32)
    # BUG FIX: the placeholder `b` is declared with shape [N, K]; the
    # original fed an array of shape (K, N), which only worked because
    # N == K == 128 here and would fail for rectangular sizes.
    hb = np.random.rand(N, K).astype(np.float32)
    # Run
    sess = tf.InteractiveSession()
    sess.run(tf.global_variables_initializer())
    result = sess.run([tr_da, tf_da], feed_dict = {a: ha,
                                                   b: hb})
    # Test: print both gradients and their max absolute difference.
    print(result[0][0])
    print(result[1][0])
    dif = np.abs(result[0][0] - result[1][0])
    print("dif: %f" % np.max(dif))
# Execute the gradient self-test when the script is run.
run_dot()