[dnn/dot] reverted to peak tensor-core performance

@@ -9,25 +9,23 @@ library_dir = os.path.dirname(os.path.realpath(__file__))
 module = tf.load_op_library(os.path.join(library_dir, 'libtf_blocksparse.so'))
 
 def run_dot():
-  M, N, K = 128,128,128
+  M, N, K = 8192, 8192, 8192
   a = tf.placeholder(tf.float16, shape=[M, K])
   b = tf.placeholder(tf.float16, shape=[N, K])
-  locks = tf.placeholder(tf.int32, shape=[4096])
   # c = tf.matmul(a, b, transpose_a=True)
-  c = module.dot(a, b, locks)
+  c = module.dot(a, b)
   # Reference
   ha = np.random.rand(M, K).astype(np.float16)
   hb = np.random.rand(N, K).astype(np.float16)
   # Run
   sess = tf.InteractiveSession()
   sess.run(tf.global_variables_initializer())
-  result = sess.run([c], feed_dict = {locks: np.zeros(4096),
-                                      a: ha,
+  result = sess.run([c], feed_dict = {a: ha,
                                       b: hb})[0]
   # Test
-  hresult = np.dot(ha.T, hb).T
-  dif = np.abs(result - hresult)
-  print("dif: %f" % np.max(dif))
+  #hresult = np.dot(ha.T, hb).T
+  #dif = np.abs(result - hresult)
+  #print("dif: %f" % np.max(dif))
 
 def run_conv():
   B, C, H, W = 16, 32, 32, 32
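
Note: with the problem size raised to 8192^3 for benchmarking, the commit also comments out the numpy reference check, which would be very slow on the CPU at that size. A correctness spot-check at the old 128^3 size is still possible; a minimal sketch, where check_dot is a hypothetical helper mirroring the test above, including its (A^T B)^T reference:

import os
import numpy as np
import tensorflow as tf

library_dir = os.path.dirname(os.path.realpath(__file__))
module = tf.load_op_library(os.path.join(library_dir, 'libtf_blocksparse.so'))

def check_dot(M=128, N=128, K=128):
  a = tf.placeholder(tf.float16, shape=[M, K])
  b = tf.placeholder(tf.float16, shape=[N, K])
  c = module.dot(a, b)  # two-argument form; the locks placeholder is gone
  ha = np.random.rand(M, K).astype(np.float16)
  hb = np.random.rand(N, K).astype(np.float16)
  with tf.Session() as sess:
    result = sess.run(c, feed_dict={a: ha, b: hb})
  hresult = np.dot(ha.T, hb).T  # the reference the test used before
  print("dif: %f" % np.max(np.abs(result - hresult)))
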
@@ -130,5 +128,6 @@ def run_batchnorm():
   print(np.max(np.abs(dg_t - dg_n)))
   print(np.max(np.abs(db_t - db_n)))
 
-run_shift()
+run_dot()
+#run_shift()
 #run_batchnorm()

@@ -32,7 +32,7 @@ double bench(OP const & op, SYNC const & sync, const triton::driver::device * de
   double total_time = 0;
   op();
   sync();
-//  while(total_time*1e-9 < 1e-3){
+  while(total_time*1e-9 < 1e-3){
     float norm = 1;
     // normalize clock if possible to get roughly constant result
     if(auto cu_device = dynamic_cast<const triton::driver::cu_device*>(device))
@@ -42,7 +42,7 @@ double bench(OP const & op, SYNC const & sync, const triton::driver::device * de
     sync();
     times.push_back(norm*tmr.get().count());
     total_time+=times.back();
-//  }
+  }
   return *std::min_element(times.begin(), times.end());
 }
 
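
The two bench() hunks re-enable the timing loop that had been commented out: after a warm-up run, the op is repeated until roughly 1 ms of total time accumulates, each run is optionally clock-normalized, and the minimum is returned. The same pattern as a host-side Python sketch, with op and sync as stand-ins for the C++ callables and the clock normalization omitted:

import time

def bench(op, sync, min_total_s=1e-3):
  # warm-up once, as the C++ version does before entering the loop
  op()
  sync()
  times = []
  total = 0.0
  # repeat until at least ~1 ms of work has been timed
  while total < min_total_s:
    start = time.perf_counter()
    op()
    sync()
    elapsed = time.perf_counter() - start  # no clock normalization here
    times.append(elapsed)
    total += elapsed
  # report the fastest run, as std::min_element does
  return min(times)
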
@@ -237,13 +237,13 @@ void tune::run(ir::module &mod) {
       continue;
     if(dynamic_cast<ir::load_inst*>(i) && i->get_type()->is_tile_ty()){
       ir::type *ty = mod.get_builder().get_int32_ty();
-      std::unique_ptr<ir::metaparameter> tmp(ir::metaparameter::create(ctx, ty, 2, 2));
+      std::unique_ptr<ir::metaparameter> tmp(ir::metaparameter::create(ctx, ty, 4, 4));
       *params_.at(i).at("nts.d0") = *tmp;
     }
     if(dynamic_cast<ir::dot_inst*>(i) && i->get_type()->is_tile_ty()){
       ir::type *ty = mod.get_builder().get_int32_ty();
-      std::unique_ptr<ir::metaparameter> tmp1(ir::metaparameter::create(ctx, ty, 2, 2));
-      std::unique_ptr<ir::metaparameter> tmp2(ir::metaparameter::create(ctx, ty, 2, 2));
+      std::unique_ptr<ir::metaparameter> tmp1(ir::metaparameter::create(ctx, ty, 4, 4));
+      std::unique_ptr<ir::metaparameter> tmp2(ir::metaparameter::create(ctx, ty, 4, 4));
       *params_.at(i).at("nts.d0") = *tmp1;
       *params_.at(i).at("nts.d1") = *tmp2;
     }
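
metaparameter::create(ctx, ty, lo, hi) takes a value range, so changing (2, 2) to (4, 4) pins the per-thread tile parameters nts.d0/nts.d1 at 4 rather than 2 for tile loads and dot instructions. A toy Python sketch of why a single-value range removes a knob from the autotuner's search space (the names and search code here are illustrative, not Triton's API):

from itertools import product

# illustrative only: a single-value range contributes exactly one choice,
# so a pinned parameter no longer multiplies the search space
space = {
  "nts.d0": [4],         # range (4, 4): pinned by this commit
  "mts.d0": [4, 8, 16],  # a hypothetical parameter that stays tunable
}
candidates = [dict(zip(space, vals)) for vals in product(*space.values())]
print(len(candidates))  # 3: only mts.d0 varies
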
@@ -51,7 +51,7 @@ void base::enqueue(driver::stream *stream, std::vector<driver::buffer *> args, b
     jit->add_module(name_.c_str(), src.c_str(), best.params);
   }
   else {
-    jit->add_module(name_.c_str(), src.c_str(), jit->get_valid(name_.c_str(), src.c_str()));
+    jit->add_module(name_.c_str(), src.c_str(), {16, 4, 128, 16, 4, 128, 2, 2, 2, 2, 8, 16, 8, 1});
   }
   triton::driver::kernel* kernel = jit->get_function(name_.c_str());
   clone->init_impl(stream, (triton::driver::cu_module*)kernel->module());

@@ -109,8 +109,8 @@ const tunable int32 TN = {16, 32, 64, 128};
 const tunable int32 TK = {16};
 const tunable int32 GZ = {1};
 
-void matmul(restrict read_only )" + a_ty_ + R"( *A,
-            restrict read_only )" + b_ty_ + R"( *B,
+void matmul(restrict read_only align(16) )" + a_ty_ + R"( *A,
+            restrict read_only align(16) )" + b_ty_ + R"( *B,
             fp32 *C,
             int32 M, int32 N, int32 K,
             )" + align_lda_str + R"( int32 lda, )" + align_ldb_str + R"( int32 ldb, int32 ldc,
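
align(16) asserts to the compiler that A and B are 16-byte aligned, allowing it to emit wider vectorized loads; the caller has to uphold that guarantee. A small host-side sketch of checking the assumption on the arrays that back the placeholders (assert_aligned is a hypothetical helper; device allocations from the CUDA driver are typically aligned far more coarsely anyway):

import numpy as np

def assert_aligned(arr, alignment=16):
  # align(16) in the kernel signature is only safe if the buffer
  # really starts on a 16-byte boundary
  assert arr.ctypes.data % alignment == 0, "misaligned buffer"

assert_aligned(np.random.rand(128, 128).astype(np.float16))
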
@@ -158,20 +158,7 @@ void matmul(restrict read_only )" + a_ty_ + R"( *A,
   int1 checkc1[TN] = ryc < N;
   int1 checkc[TM, TN] = checkc0[:, newaxis] && checkc1[newaxis, :];
   fp32* pc[TM, TN] = C + ryc[newaxis, :]*ldc + rxc[:, newaxis];
-  int32 *plock = locks + ridx + ridy*grid0;
-  while(__atomic_cas(plock, 0, 1));
-  int32 *pcount = plock + grid0*grid1;
-  int32 count = *pcount;
-  int32 countp1 = select(count == GZ - 1, 0, count + 1);
-  if(count == 0) {
-    @checkc *pc = c;
-    *pcount = countp1;
-  }
-  else {
-    @checkc *pc = c + *pc;
-    *pcount = countp1;
-  }
-  __atomic_cas(plock, 1, 0);
+  @checkc *pc = c;
 }
 )";
 os << res;
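
The block removed at the end of matmul was the GZ-way split-K epilogue: a spin lock acquired with __atomic_cas serialized the tile update, a counter distinguished the first group (which stored its partial tile) from later ones (which accumulated into it), and the lock was then released. With GZ fixed to {1} above, the guarded store @checkc *pc = c; is sufficient. A sequential numpy sketch of what that protocol computed (split_k is illustrative and uses a plain C = A·B^T convention; on the GPU the iterations ran concurrently under the lock):

import numpy as np

def split_k(A, B, GZ):
  # A: (M, K), B: (N, K); each group g owns one K-slice. First writer
  # stores, later writers accumulate; the lock/counter pair in the
  # removed code serialized exactly this read-modify-write on C.
  M, K = A.shape
  N = B.shape[0]
  C = np.zeros((M, N), dtype=np.float32)
  for g in range(GZ):
    ks = slice(g * K // GZ, (g + 1) * K // GZ)
    C += A[:, ks].astype(np.float32) @ B[:, ks].T.astype(np.float32)
  return C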
|