Now doing double-buffering

Philippe Tillet
2019-06-13 19:48:02 -07:00
parent 36e3667a9a
commit f7dcea1187
5 changed files with 25 additions and 25 deletions

@@ -23,7 +23,7 @@ const char* src =
R"(
const tunable int32 TM = {64, 128};
const tunable int32 TN = {64, 128};
const tunable int32 TK = {32};
const tunable int32 TK = {16};
const tunable int32 GZ = {1};
void matmul(restrict read_only fp16 *A, restrict read_only fp16 *B,
@@ -39,12 +39,14 @@ void matmul(restrict read_only fp16 *A, restrict read_only fp16 *B,
   fp32 c[TM, TN] = 0;
   fp16* pa[TM, TK] = A + rka[newaxis, :]*lda + rxa[:, newaxis];
   fp16* pb[TN, TK] = B + rkb[newaxis, :]*ldb + ryb[:, newaxis];
-  for(int32 k = K; k > 0; k = k - TK){
-    fp16 a[TM, TK] = *pa;
-    fp16 b[TN, TK] = *pb;
-    c = dot(a, trans(b), c);
+  fp16 a[TM, TK] = *pa;
+  fp16 b[TN, TK] = *pb;
+  for(int32 k = K; k > TK; k = k - TK){
     pa = pa + TK*lda;
     pb = pb + TK*ldb;
+    c = dot(a, trans(b), c);
+    a = *pa;
+    b = *pb;
   }
   int32 rxc[TM] = get_global_range[TM](0);
   int32 ryc[TN] = get_global_range[TN](1);
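The hunk above is the double-buffering change itself: the first tile loads are hoisted out of the K-loop, and each iteration now consumes the tiles loaded in the previous iteration before issuing the loads for the next one, which is the pattern the commit title refers to. Below is a minimal plain-C++ sketch of that loop schedule, not Triton code; load_tile and dot_acc are hypothetical helpers, and the epilogue dot for the last tile is written out explicitly here even though the corresponding code falls outside the hunk shown.

// Plain-C++ sketch of the restructured loop; all names are illustrative only.
#include <cstdio>
#include <vector>

constexpr int TK = 16;   // K-tile size, mirroring the new TK = {16}

// Hypothetical helper: copy one K-tile of a row-major matrix into a buffer.
static void load_tile(const std::vector<float>& src, int ld, int k0,
                      std::vector<float>& dst) {
  for (int i = 0; i < (int)dst.size() / TK; ++i)
    for (int k = 0; k < TK; ++k)
      dst[i * TK + k] = src[(size_t)i * ld + k0 + k];
}

// c[MxN] += a_tile[MxTK] * b_tile[NxTK]^T  (the dot(a, trans(b), c) step)
static void dot_acc(const std::vector<float>& a, const std::vector<float>& b,
                    std::vector<float>& c, int M, int N) {
  for (int m = 0; m < M; ++m)
    for (int n = 0; n < N; ++n)
      for (int k = 0; k < TK; ++k)
        c[m * N + n] += a[m * TK + k] * b[n * TK + k];
}

int main() {
  const int M = 64, N = 64, K = 64;
  std::vector<float> A(M * K, 1.f), B(N * K, 1.f), C(M * N, 0.f);
  std::vector<float> a(M * TK), b(N * TK);

  // Schedule as in the new kernel: the tiles consumed by iteration i are
  // loaded during iteration i-1, with the very first load hoisted above the
  // loop, so the next loads can overlap with the current dot.
  load_tile(A, K, 0, a);                 // prologue: first tile
  load_tile(B, K, 0, b);
  int k0 = 0;
  for (int k = K; k > TK; k -= TK) {     // note: k > TK, not k > 0
    k0 += TK;                            // advance the tile offsets
    dot_acc(a, b, C, M, N);              // consume the current tiles...
    load_tile(A, K, k0, a);              // ...then fetch the next ones
    load_tile(B, K, k0, b);
  }
  dot_acc(a, b, C, M, N);                // epilogue: last tile

  std::printf("C[0] = %f (expected %d)\n", C[0], K);
  return 0;
}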
@@ -116,11 +118,12 @@ class BlockSparseGemmOp : public OpKernel {
[&](){ stream->synchronize(); }, ctx->device());
return 2.*M*N*K / ts * 1e-3;
};
// just-in-time compile source-code
// jit.autotune("matmul", src, benchmark);
// just-in-time compile source-code
jit.autotune("matmul", src, benchmark);
// jit.add_module("matmul", src, {4, 2, 8, 4, 2, 32, 1, 4, 1, 1, 8, 8, 8, 1});
jit.add_module("matmul", src, {16, 4, 128, 16, 4, 128, 1, 4, 2, 2, 8, 32, 8, 1});
// jit.add_module("matmul", src, {16, 4, 128, 16, 4, 128, 2, 2, 2, 2, 8, 32, 8, 1});
// jit.add_module("matmul", src, {8, 8, 128, 16, 8, 128, 2, 2, 2, 2, 16, 32, 8, 1 });
jit.add_module("matmul", src, {16, 4, 128, 16, 4, 128, 2, 2, 2, 2, 8, 16, 8, 1});
triton::driver::kernel* kernel = jit.get_function("matmul");
triton::jit::launch_information info = jit.get_launch_info("matmul");
std::cout << benchmark(kernel, info) << std::endl;;
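For context, the 2.*M*N*K in the benchmark lambda is the usual flop count of a GEMM: each of the M*N outputs takes K multiply-adds. A self-contained sketch of that throughput calculation, assuming the elapsed time is given in milliseconds (the units of ts are not visible in this hunk):

// Stand-alone sketch of the GEMM throughput formula; the time unit here is
// an assumption, not taken from the original file.
#include <cstdio>

double matmul_tflops(double M, double N, double K, double ms) {
  double flops = 2.0 * M * N * K;        // K multiply-adds per output element
  return flops / (ms * 1e-3) * 1e-12;    // flops per second -> TFLOP/s
}

int main() {
  // e.g. an 8192^3 GEMM finishing in 10 ms corresponds to ~110 TFLOP/s
  std::printf("%.1f TFLOP/s\n", matmul_tflops(8192, 8192, 8192, 10.0));
  return 0;
}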

@@ -6,7 +6,7 @@ data_files_path = tf.resource_loader.get_data_files_path()
 library_dir = os.path.dirname(os.path.realpath(__file__))
 module = tf.load_op_library(os.path.join(library_dir, 'libtf_blocksparse.so'))
-M, N, K = 256, 256, 256
+M, N, K = 8192, 8192, 8192
 a = tf.placeholder(tf.float16, shape=[M, K])
 b = tf.placeholder(tf.float16, shape=[N, K])
 locks = tf.placeholder(tf.int32, shape=[4096])
@@ -30,9 +30,9 @@ result = sess.run([c], feed_dict = {locks: np.zeros(4096),
 # min_iters=100)
 #print(end - start)
 #print(2*M*N*K / (end - start) * 1e-12)
-hresult = np.dot(ha.T, hb).T
-dif = np.abs(result - hresult)
-print("dif: %f" % np.max(dif))
+#hresult = np.dot(ha.T, hb).T
+#dif = np.abs(result - hresult)
+#print("dif: %f" % np.max(dif))
 #np.savetxt("dif.txt", dif, fmt="%5.2f")
 #np.savetxt("gpu.txt", result, fmt="%5.2f")