[triton/codegen] added shared memory padding for HMMA arguments and vectorized loads
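Shared-memory padding, the first technique named in the title, avoids bank conflicts when the fp16 operands of an HMMA (tensor-core) instruction are staged in shared memory. A minimal CUDA sketch of the idea follows; the tile sizes and pad amount are assumptions for illustration, not what the Triton code generator actually emits:

    // Illustration only: padding the row pitch of a shared-memory tile.
    // Column-wise reads stride by the row pitch; if that pitch maps every
    // row to the same few banks, the reads serialize. A small pad skews
    // the bank mapping. TM, TK and PAD are assumed values.
    #include <cuda_fp16.h>

    #define TM  64
    #define TK  32
    #define PAD  8   // assumed pad; any value that breaks the bank alignment works

    __global__ void stage_tile(const half *A, int lda) {
      __shared__ half tile[TM][TK + PAD];   // padded row pitch
      int row = threadIdx.y;
      int col = threadIdx.x;
      tile[row][col] = A[row * lda + col];  // global -> shared
      __syncthreads();
      // ...tensor-core fragments would be loaded from `tile` here...
    }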
CMakeLists.txt
@@ -7,9 +7,7 @@ if(${TensorFlow_FOUND})
     add_definitions(-D_GLIBCXX_USE_CXX11_ABI=${TF_ABI})
     add_library(tf_blocksparse SHARED dot.cpp)
     target_link_libraries(tf_blocksparse tensorflow_framework triton)
-    add_custom_command(
-                TARGET tf_blocksparse POST_BUILD
-                COMMAND ${CMAKE_COMMAND} -E copy
-                ${CMAKE_CURRENT_SOURCE_DIR}/run.py
-                ${CMAKE_CURRENT_BINARY_DIR}/run.py)
+    configure_file(${CMAKE_CURRENT_SOURCE_DIR}/run.py
+                   ${CMAKE_CURRENT_BINARY_DIR}/run.py
+                   COPYONLY)
 endif()
dot.cpp
@@ -21,9 +21,9 @@ using GPUDevice = Eigen::GpuDevice;
 
 const char* src =
 R"(
-const tunable int32 TM = {8, 16, 32, 64, 128};
-const tunable int32 TN = {8, 16, 32, 64, 128};
-const tunable int32 TK = {8};
+const tunable int32 TM = {64, 128};
+const tunable int32 TN = {64, 128};
+const tunable int32 TK = {32};
 const tunable int32 GZ = {1};
 
 void matmul(restrict read_only fp16 *A, restrict read_only fp16 *B,
@@ -37,20 +37,14 @@ void matmul(restrict read_only fp16 *A, restrict read_only fp16 *B,
   int32 rka[TK] = 0 ... TK;
   int32 rkb[TK] = 0 ... TK;
   fp32 c[TM, TN] = 0;
-  int32 div = K / GZ;
-  int32 rem = K % GZ;
-  K = select(rz < rem, div - 1, div);
-  int32 offk = select(rz < rem, rz*(div + 1), rz*div + rem);
-  fp16* pa[TM, TK] = A + (offk + rka[newaxis, :])*lda + rxa[:, newaxis];
-  fp16* pb[TN, TK] = B + (offk + rkb[newaxis, :])*ldb + ryb[:, newaxis];
-  fp16 a[TM, TK] = *pa;
-  fp16 b[TN, TK] = *pb;
-  for(int32 k = K; k > 0; k = k - TK){
+  fp16* pa[TM, TK] = A + rka[newaxis, :]*lda + rxa[:, newaxis];
+  fp16* pb[TN, TK] = B + rkb[newaxis, :]*ldb + ryb[:, newaxis];
+  for(int32 k = K; k > TK; k = k - TK){
+    fp16 a[TM, TK] = *pa;
+    fp16 b[TN, TK] = *pb;
     c = dot(a, trans(b), c);
     pa = pa + TK*lda;
     pb = pb + TK*ldb;
-    a = *pa;
-    b = *pb;
   }
   int32 rxc[TM] = get_global_range[TM](0);
   int32 ryc[TN] = get_global_range[TN](1);
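The second technique in the title, vectorized loads, replaces several scalar fp16 loads per thread with one wide memory transaction. Again a hedged CUDA sketch of the general idea, assuming 16-byte alignment and a length divisible by 8; this is not the generated code:

    // Illustration only: one 128-bit load/store per thread instead of
    // eight 16-bit ones, by reinterpreting 8 consecutive halfs as a uint4.
    #include <cuda_fp16.h>

    __global__ void copy_vec8(const half *__restrict__ src,
                              half *__restrict__ dst, int n) {
      int i = (blockIdx.x * blockDim.x + threadIdx.x) * 8;  // 8 halfs per thread
      if (i + 8 <= n)
        *reinterpret_cast<uint4 *>(dst + i) =
            *reinterpret_cast<const uint4 *>(src + i);
    }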
@@ -123,10 +117,10 @@ class BlockSparseGemmOp : public OpKernel {
     return 2.*M*N*K / ts * 1e-3;
   };
   // just-in-time compile source-code
-  // jit.autotune("matmul", src, benchmark);
+  jit.autotune("matmul", src, benchmark);
   // jit.add_module("matmul", src, {4, 2, 8, 4, 2, 32, 1, 4, 1, 1, 8, 8, 8, 1});
   // jit.add_module("matmul", src, {32, 2, 128, 32, 2, 128, 2, 2, 2, 2, 4, 8, 4, 1});
-  jit.add_module("matmul", src, {16, 4, 128, 32, 4, 128, 2, 2, 2, 2, 8, 8, 4, 1});
+  jit.add_module("matmul", src, {16, 4, 128, 16, 4, 128, 2, 2, 2, 2, 8, 32, 8, 1});
   triton::driver::kernel* kernel = jit.get_function("matmul");
   triton::jit::launch_information info = jit.get_launch_info("matmul");
   std::cout << benchmark(kernel, info) << std::endl;;
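For context, the host-side flow this hunk toggles: jit.autotune searches the tunable space declared in the kernel source, while jit.add_module compiles one fixed configuration (the meaning of each value in the brace list is not spelled out in this diff, so it is reproduced verbatim); the compiled kernel is then fetched and timed. Condensed from dot.cpp above, with `jit`, `src` and `benchmark` as already defined there:

    jit.autotune("matmul", src, benchmark);   // search the tunable space...
    jit.add_module("matmul", src,             // ...or pin one configuration
                   {16, 4, 128, 16, 4, 128, 2, 2, 2, 2, 8, 32, 8, 1});
    triton::driver::kernel* kernel = jit.get_function("matmul");
    triton::jit::launch_information info = jit.get_launch_info("matmul");
    std::cout << benchmark(kernel, info) << std::endl;  // print measured performance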
run.py
@@ -6,7 +6,7 @@ data_files_path = tf.resource_loader.get_data_files_path()
 library_dir = os.path.dirname(os.path.realpath(__file__))
 module = tf.load_op_library(os.path.join(library_dir, 'libtf_blocksparse.so'))
 
-M, N, K = 256,256,256
+M, N, K = 8192,8192,8192
 a = tf.placeholder(tf.float16, shape=[M, K])
 b = tf.placeholder(tf.float16, shape=[N, K])
 locks = tf.placeholder(tf.int32, shape=[4096])
@@ -30,9 +30,9 @@ result = sess.run([c], feed_dict = {locks: np.zeros(4096),
 # min_iters=100)
 #print(end - start)
 #print(2*M*N*K / (end - start) * 1e-12)
-hresult = np.dot(ha.T, hb).T
-dif = np.abs(result - hresult)
-print("dif: %f" % np.max(dif))
+#hresult = np.dot(ha.T, hb).T
+#dif = np.abs(result - hresult)
+#print("dif: %f" % np.max(dif))
 
 #np.savetxt("dif.txt", dif, fmt="%5.2f")
 #np.savetxt("gpu.txt", result, fmt="%5.2f")