more progress
@@ -3,49 +3,89 @@ import tensorflow as tf
 import numpy as np
 
 src = """
-const tunable int TM = {128};
-const tunable int TN = {128};
-const tunable int TK = {32};
+#if AT == 1
+#define USEA ^a
+#else
+#define USEA a
+#endif
 
-void matmul(restrict read_only align(16) half *A,
-            restrict read_only align(16) half *B,
-            restrict read_only align(16) half *C,
-            int M, int N, int K,
-            multiple_of(8) int lda, multiple_of(8) int ldb, int ldc)
-{
+#if BT == 1
+#define USEB ^b
+#else
+#define USEB b
+#endif
+
+void dot(TYPE * A __noalias __readonly __aligned(16),
+         TYPE * B __noalias __readonly __aligned(16),
+         TYPE * C __noalias __readonly __aligned(16),
+         int M, int N, int K,
+         int lda __multipleof(8),
+         int ldb __multipleof(8),
+         int ldc) {
   int ridx = get_program_id(0);
   int ridy = get_program_id(1);
-  int rxa[TM] = ridx * TM + (0 ... TM);
-  int ryb[TN] = ridy * TN + (0 ... TN);
+  int rxa[TM] = ridx * TM + 0 ... TM;
+  int ryb[TN] = ridy * TN + 0 ... TN;
   int rka[TK] = 0 ... TK;
   int rkb[TK] = 0 ... TK;
   float xc[TM, TN] = 0;
-  half* pa[TM, TK] = A + rka[newaxis, :]*lda + rxa[:, newaxis];
-  half* pb[TN, TK] = B + rkb[newaxis, :]*ldb + ryb[:, newaxis];
-  half a[TM, TK] = *pa;
-  half b[TN, TK] = *pb;
+
+  /* pointers for A */
+#if AT == 1
+  TYPE* pa[TK, TM] = A + rka[:, newaxis] + rxa[newaxis, :]*lda;
+  TYPE a[TK, TM] = *pa;
+#else
+  TYPE* pa[TM, TK] = A + rka[newaxis, :]*lda + rxa[:, newaxis];
+  TYPE a[TM, TK] = *pa;
+#endif
+
+  /* pointers for B */
+#if BT == 1
+  TYPE* pb[TN, TK] = B + rkb[newaxis, :]*ldb + ryb[:, newaxis];
+  TYPE b[TN, TK] = *pb;
+#else
+  TYPE* pb[TK, TN] = B + rkb[:, newaxis] + ryb[newaxis, :]*ldb;
+  TYPE b[TK, TN] = *pb;
+#endif
+
+  /* reduction loop */
   for(int k = K; k > 0; k = k - TK){
-    xc = dot(a, trans(b), xc);
+    xc = USEA @ USEB + xc;
+#if AT == 1
+    pa = pa + TK;
+#else
     pa = pa + TK*lda;
+#endif
+#if BT == 1
     pb = pb + TK*ldb;
+#else
+    pb = pb + TK;
+#endif
     a = *pa;
     b = *pb;
   }
+
+  /* epilogue */
   int rxc[TM] = ridx * TM + (0 ... TM);
   int ryc[TN] = ridy * TN + (0 ... TN);
-  half* pc[TM, TN] = C + ryc[newaxis, :] + rxc[:, newaxis]*ldc;
-  half c[TM, TN] = xc;
+  TYPE* pc[TM, TN] = C + ryc[newaxis, :]*ldc + rxc[:, newaxis];
+  TYPE c[TM, TN] = xc;
+  bool checkc0[TM] = rxc < M;
+  bool checkc1[TN] = ryc < N;
+  bool checkc[TM, TN] = checkc0[:, newaxis] && checkc1[newaxis, :];
+  @checkc *pc = c;
-  *pc = c;
 }
 """
 
 def cdiv(a, b):
     return -(-a // b)
 
 class dot:
 
-    def __init__(self):
-        self.matmul = triton.make_tensorflow_op(src, ['C'], ['(M + #TM - 1)/#TM', '(N + #TN - 1)/#TN'])
+    def __init__(self, trans_a = False, trans_b = True):
+        self.dot = triton.op(src, ['C'])
+        self.trans_a = trans_a
+        self.trans_b = trans_b
 
     def __call__(self, a, b):
         shape_a = tf.shape(a)
@@ -57,9 +97,13 @@ class dot:
         ldb = K
         ldc = N
         c = triton.empty([M, N])
-        return self.matmul.matmul(a, b, c, M, N, K, lda, ldb, ldc)
+        return self.dot(a, b, c, M, N, K, lda, ldb, ldc,
+                        lambda opt: [cdiv(M, opt.D('TM')), cdiv(N, opt.D('TN')), 1],
+                        AT = self.trans_a, BT = self.trans_b, TYPE = tf.float16,
+                        TM = [128], TN = [128], TK = [32])
 
 dot_tn = dot()
 
 def run_dot():
     M, N, K = 128, 128, 128
     a = tf.placeholder(tf.float16, shape=[M, K])