[codegen] added fallback when tensor cores cannot be used

Philippe Tillet
2019-06-25 15:49:58 -07:00
parent 62000738f0
commit 64513fb407
6 changed files with 39 additions and 22 deletions

View File

@@ -42,7 +42,12 @@ void matmul(restrict read_only align(16) fp16 *A,
   fp16* pb[TN, TK] = B + rkb[newaxis, :]*ldb + ryb[:, newaxis];
   fp16 a[TM, TK] = *pa;
   fp16 b[TN, TK] = *pb;
-  for(int32 k = K; k > TK; k = k - TK){
+  int32 last_a = ((M*K - 1) - (TM*TK + 1)) / lda;
+  int32 last_b = ((K*N - 1) - (TN*TK + 1)) / ldb;
+  last_a = last_a / TK * TK;
+  last_b = last_b / TK * TK;
+  int32 bound = K - max(last_a, last_b);
+  for(int32 k = K; k > bound; k = k - TK){
     pa = pa + TK*lda;
     pb = pb + TK*ldb;
     c = dot(a, trans(b), c);
@@ -51,6 +56,15 @@ void matmul(restrict read_only align(16) fp16 *A,
   }
   int32 rxc[TM] = get_global_range[TM](0);
   int32 ryc[TN] = get_global_range[TN](1);
+  for(int32 k = bound; k > 0; k = k - 1){
+    int1 checka[TM, 1] = rxc[:, newaxis] < M;
+    int1 checkb[TN, 1] = ryc[:, newaxis] < N;
+    fp16* pa[TM, 1] = A + (K - k)*lda + rxc[:, newaxis];
+    fp16* pb[TN, 1] = B + (K - k)*ldb + ryc[:, newaxis];
+    fp16 a[TM, 1] = checka ? *pa : 0;
+    fp16 b[TN, 1] = checkb ? *pb : 0;
+    c = dot(a, trans(b), c);
+  }
   fp32* pc[TM, TN] = C + ryc[newaxis, :]*ldc + rxc[:, newaxis];
   *pc = c;
 }
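As I read the new bounds arithmetic, last_a and last_b estimate how far the blocked TM x TK and TN x TK loads can advance along K without reading past the end of A and B, and the remaining `bound` values of k are handled by the new scalar loop above, whose loads are masked by checka and checkb. A minimal plain-C++ sketch of that split; the problem and tile sizes here are illustrative, not taken from this commit:

#include <algorithm>
#include <cstdio>

int main() {
  // Illustrative sizes, not the kernel's actual tuning parameters.
  const int M = 128, N = 128, K = 128;
  const int TM = 32, TN = 32, TK = 8;
  const int lda = M, ldb = N;  // assumes column-major A and B, as in the kernel's indexing

  // Same arithmetic as the new last_a / last_b / bound lines above.
  int last_a = ((M * K - 1) - (TM * TK + 1)) / lda;
  int last_b = ((K * N - 1) - (TN * TK + 1)) / ldb;
  last_a = last_a / TK * TK;
  last_b = last_b / TK * TK;
  int bound = K - std::max(last_a, last_b);

  // The blocked loop runs while k > bound in steps of TK;
  // the scalar fallback then covers the last `bound` values of k one at a time.
  int blocked = 0;
  for (int k = K; k > bound; k -= TK)
    ++blocked;
  printf("blocked iterations: %d, scalar fallback iterations: %d\n", blocked, bound);
  return 0;
}

With these illustrative numbers the sketch prints 15 blocked iterations and 8 scalar ones, so only the tail of the reduction pays the scalar price.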

View File

@@ -6,7 +6,7 @@ data_files_path = tf.resource_loader.get_data_files_path()
 library_dir = os.path.dirname(os.path.realpath(__file__))
 module = tf.load_op_library(os.path.join(library_dir, 'libtf_blocksparse.so'))
-M, N, K = 8192, 8192, 8192
+M, N, K = 128,128,128
 a = tf.placeholder(tf.float16, shape=[M, K])
 b = tf.placeholder(tf.float16, shape=[N, K])
 locks = tf.placeholder(tf.int32, shape=[4096])
@@ -24,16 +24,6 @@ result = sess.run([c], feed_dict = {locks: np.zeros(4096),
 a: ha,
 b: hb})[0]
-#bench = tf.test.Benchmark().run_op_benchmark(sess=sess,
-# op_or_tensor=c,
-# feed_dict={a: ha, b: hb},
-# min_iters=100)
-#print(end - start)
-#print(2*M*N*K / (end - start) * 1e-12)
-#hresult = np.dot(ha.T, hb).T
-#dif = np.abs(result - hresult)
-#print("dif: %f" % np.max(dif))
-#np.savetxt("dif.txt", dif, fmt="%5.2f")
-#np.savetxt("gpu.txt", result, fmt="%5.2f")
-#np.savetxt("cpu.txt", hresult, fmt="%5.2f")
+hresult = np.dot(ha.T, hb).T
+dif = np.abs(result - hresult)
+print("dif: %f" % np.max(dif))

View File

@@ -57,21 +57,21 @@ public:
     shmem_allocation(&shmem_liveness, &shmem_info, &tune),
     shmem_barriers(&shmem_allocation, &shmem_info),
     vectorize(&tune),
-    selection(&shmem_allocation, &tune, &shmem_info, &axis_info, target),
+    selection(&shmem_allocation, &tune, &shmem_info, &alignment_info, target),
     optimize_dot(&tune),
     optimize_cse(),
     optimize_trans(),
-    axis_info(),
+    alignment_info(),
     target_(target) { }
   void target_independent(ir::module &module) {
     optimize_dot.run(module);
     optimize_trans.run(module);
-    ir::print(module, std::cout);
+    // ir::print(module, std::cout);
   }
   void target_dependent(ir::module &module) {
-    axis_info.run(module);
+    alignment_info.run(module);
     if(target_->is_gpu()){
       shmem_info.run(module);
       shmem_liveness.run(module);
@@ -91,7 +91,7 @@ public:
   codegen::optimize_dot optimize_dot;
   codegen::optimize_cse optimize_cse;
   codegen::optimize_trans optimize_trans;
-  codegen::alignment_info axis_info;
+  codegen::alignment_info alignment_info;
   codegen::target* target_;
 };

View File

@@ -901,7 +901,8 @@ void selection::lower_tile_instruction(ir::instruction *ins, llvm::IRBuilder<> &
   bool AT = dot->is_a_trans();
   bool BT = dot->is_b_trans();
   distributed_tile *TC = (distributed_tile*)tmap_.at(C);
-  Function *f_mul_add = Intrinsic::getDeclaration(module, Intrinsic::fmuladd, {llvm_type(C->get_type()->get_scalar_ty(), ctx)});
+  Type *c_ty = llvm_type(C->get_type()->get_scalar_ty(), ctx);
+  Function *f_mul_add = Intrinsic::getDeclaration(module, Intrinsic::fmuladd, {c_ty});
   unsigned NK = A->get_type()->get_tile_shapes()[1]->get_value();
   if(NK != 1)
   {
@@ -922,6 +923,10 @@ void selection::lower_tile_instruction(ir::instruction *ins, llvm::IRBuilder<> &
     std::swap(b_idx[0], b_idx[1]);
     Value *a = TA->get_value(a_idx);
     Value *b = TB->get_value(b_idx);
+    if(a->getType() != c_ty)
+      a = builder.CreateFPCast(a, c_ty);
+    if(b->getType() != c_ty)
+      b = builder.CreateFPCast(b, c_ty);
     res = builder.CreateCall(f_mul_add, {a, b, res});
 }
@@ -1022,6 +1027,10 @@ void selection::lower_tile_instruction(ir::instruction *ins, llvm::IRBuilder<> &
     std::swap(b_idx[0], b_idx[1]);
     Value *a = TA->get_value(a_idx);
     Value *b = TB->get_value(b_idx);
+    if(a->getType() != c_ty)
+      a = builder.CreateFPCast(a, c_ty);
+    if(b->getType() != c_ty)
+      b = builder.CreateFPCast(b, c_ty);
     res = builder.CreateCall(f_mul_add, {a, b, res});
     result->set_value(idx, res);
   });
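The pattern added in both fmuladd paths above is the same: compute the accumulator's LLVM type once (c_ty), then widen any FP16 operand to that type before calling llvm.fmuladd, so the intrinsic sees matching operand and accumulator types. Below is a self-contained sketch of that pattern against the plain LLVM C++ API; the function and variable names are mine, not selection.cpp's.

#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Verifier.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

int main() {
  LLVMContext ctx;
  Module mod("fmuladd_fallback_sketch", ctx);
  IRBuilder<> builder(ctx);

  // float demo(half a, half b, float acc)
  Type *f16 = builder.getHalfTy();
  Type *f32 = builder.getFloatTy();
  Function *fn = Function::Create(FunctionType::get(f32, {f16, f16, f32}, false),
                                  Function::ExternalLinkage, "demo", &mod);
  builder.SetInsertPoint(BasicBlock::Create(ctx, "entry", fn));

  auto arg = fn->arg_begin();
  Value *a = &*arg++, *b = &*arg++, *acc = &*arg++;

  // Mirror of the new code: cast operands to the accumulator type when they differ.
  Type *c_ty = acc->getType();
  if (a->getType() != c_ty) a = builder.CreateFPCast(a, c_ty);
  if (b->getType() != c_ty) b = builder.CreateFPCast(b, c_ty);

  // res = call float @llvm.fmuladd.f32(a, b, acc)
  Function *f_mul_add = Intrinsic::getDeclaration(&mod, Intrinsic::fmuladd, {c_ty});
  Value *res = builder.CreateCall(f_mul_add, {a, b, acc});
  builder.CreateRet(res);

  verifyModule(mod, &errs());
  mod.print(outs(), nullptr);
  return 0;
}

Built against LLVM, the sketch prints a one-function module whose body is two fpext casts followed by a call to @llvm.fmuladd.f32, which matches the per-element operation the fallback emits when the HMMA path is rejected.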

View File

@@ -22,8 +22,12 @@ bool is_hmma(ir::value *v){
     ir::type *a_ty = a->get_type();
     ir::value *b = x->get_operand(1);
     ir::type *b_ty = b->get_type();
+    // only NT supported
     result = !x->is_a_trans() && x->is_b_trans();
+    // inputs have to be FP16
     result = result && a_ty->get_scalar_ty()->is_half_ty() && b_ty->get_scalar_ty()->is_half_ty();
+    // reduction has to be multiple of 4
+    result = result && ((a_ty->get_tile_shapes()[1]->get_value() % 4) == 0);
   }
   return result;
 }
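For reference, the three conditions that now gate the tensor-core (HMMA) path can be read straight off the diff above: the NT layout, FP16 operands, and a reduction tile size divisible by 4; a dot that fails any of them takes the fmuladd path patched in selection.cpp earlier in this commit. A small standalone restatement of the predicate (a sketch with illustrative names, not the repository's API):

#include <cstdio>

// Mirrors the is_hmma() checks above; parameter names are illustrative.
bool can_use_hmma(bool a_trans, bool b_trans, bool a_is_fp16, bool b_is_fp16, unsigned k_tile) {
  bool nt_layout   = !a_trans && b_trans;      // only NT supported
  bool fp16_inputs = a_is_fp16 && b_is_fp16;   // inputs have to be FP16
  bool k_aligned   = (k_tile % 4) == 0;        // reduction has to be a multiple of 4
  return nt_layout && fp16_inputs && k_aligned;
}

int main() {
  printf("%d\n", can_use_hmma(false, true, true, true, 16));  // 1: HMMA path
  printf("%d\n", can_use_hmma(false, true, true, true, 6));   // 0: scalar fallback
  return 0;
}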

View File

@@ -255,7 +255,7 @@ std::string cu_module::compile_llvm_module(llvm::Module* module) {
 cu_module::cu_module(driver::context * context, llvm::Module* ll_module): cu_module(context, compile_llvm_module(ll_module)) { }
 cu_module::cu_module(driver::context * context, std::string const & source) : module(context, CUmodule(), true), source_(source){
-  std::cout << source << std::endl;
+  // std::cout << source << std::endl;
   cu_context::context_switcher ctx_switch(*context);
   // JIT compile source-code
   CUjit_option opt[] = {CU_JIT_ERROR_LOG_BUFFER_SIZE_BYTES, CU_JIT_ERROR_LOG_BUFFER};