Now doing double-buffering
@@ -23,7 +23,7 @@ const char* src =
 R"(
 const tunable int32 TM = {64, 128};
 const tunable int32 TN = {64, 128};
-const tunable int32 TK = {32};
+const tunable int32 TK = {16};
 const tunable int32 GZ = {1};

 void matmul(restrict read_only fp16 *A, restrict read_only fp16 *B,
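Note: a plausible reason for halving TK is that double buffering keeps two copies of each staged input tile, so shrinking the K-tile holds the shared-memory footprint constant. A back-of-the-envelope check (tile sizes taken from the tunables above, but the budget formula itself is illustrative; the real allocation is decided by the compiler's shmem_allocation pass):

    #include <cstdio>

    // Rough shared-memory footprint of a double-buffered fp16 GEMM tile pair.
    int main() {
        const int TM = 128, TN = 128;
        const int buffers = 2;  // double buffering: two copies of each tile
        for (int TK : {32, 16}) {
            int bytes = buffers * (TM * TK + TN * TK) * 2;  // 2 bytes per fp16
            std::printf("TK=%2d -> %d KiB of shared memory\n", TK, bytes / 1024);
        }
        // TK=32 -> 32 KiB; TK=16 -> 16 KiB, which fits more comfortably per SM.
    }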
@@ -39,12 +39,14 @@ void matmul(restrict read_only fp16 *A, restrict read_only fp16 *B,
 fp32 c[TM, TN] = 0;
 fp16* pa[TM, TK] = A + rka[newaxis, :]*lda + rxa[:, newaxis];
 fp16* pb[TN, TK] = B + rkb[newaxis, :]*ldb + ryb[:, newaxis];
-for(int32 k = K; k > 0; k = k - TK){
-  fp16 a[TM, TK] = *pa;
-  fp16 b[TN, TK] = *pb;
-  c = dot(a, trans(b), c);
+fp16 a[TM, TK] = *pa;
+fp16 b[TN, TK] = *pb;
+for(int32 k = K; k > TK; k = k - TK){
   pa = pa + TK*lda;
   pb = pb + TK*ldb;
+  c = dot(a, trans(b), c);
+  a = *pa;
+  b = *pb;
 }
 int32 rxc[TM] = get_global_range[TM](0);
 int32 ryc[TN] = get_global_range[TN](1);
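This hunk is the heart of the commit: the K-loop is software-pipelined. The first A/B tiles are loaded before the loop, and each iteration advances the pointers, computes on the tiles already in registers, and prefetches the next pair; the `k > TK` bound (instead of `k > 0`) leaves the last tiles for an epilogue `dot` that falls outside the hunk's context lines. The same transformation on a scalar CPU loop, as a minimal self-contained sketch:

    #include <cstddef>

    // Pipelined (double-buffered) loop skeleton mirroring the diff: prologue
    // load, then compute-on-current / prefetch-next inside the loop, then an
    // epilogue that consumes the last tile. The load and the dot are passed
    // in as callables so the sketch stays generic.
    template <typename Load, typename Dot>
    void pipelined_loop(std::size_t K, std::size_t TK, Load load, Dot dot) {
        auto a = load(0);                  // prologue: first tile in registers
        for (std::size_t k = K; k > TK; k -= TK) {
            dot(a);                        // compute with the resident tile...
            a = load(K - k + TK);          // ...while fetching the next one
        }
        dot(a);                            // epilogue: last tile
    }

On the GPU the payoff is that the loads for iteration i+1 are in flight while the `dot` for iteration i executes, hiding global-memory latency behind the math.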
@@ -116,11 +118,12 @@ class BlockSparseGemmOp : public OpKernel {
                      [&](){ stream->synchronize(); }, ctx->device());
     return 2.*M*N*K / ts * 1e-3;
   };
-  // just-in-time compile source-code
-  // jit.autotune("matmul", src, benchmark);
+  // just-in-time compile source-code
+  jit.autotune("matmul", src, benchmark);
   // jit.add_module("matmul", src, {4, 2, 8, 4, 2, 32, 1, 4, 1, 1, 8, 8, 8, 1});
-  jit.add_module("matmul", src, {16, 4, 128, 16, 4, 128, 1, 4, 2, 2, 8, 32, 8, 1});
+  // jit.add_module("matmul", src, {16, 4, 128, 16, 4, 128, 2, 2, 2, 2, 8, 32, 8, 1});
   // jit.add_module("matmul", src, {8, 8, 128, 16, 8, 128, 2, 2, 2, 2, 16, 32, 8, 1 });
+  jit.add_module("matmul", src, {16, 4, 128, 16, 4, 128, 2, 2, 2, 2, 8, 16, 8, 1});
   triton::driver::kernel* kernel = jit.get_function("matmul");
   triton::jit::launch_information info = jit.get_launch_info("matmul");
   std::cout << benchmark(kernel, info) << std::endl;;
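Here `jit.autotune` sweeps candidate values of the tunable parameters using the `benchmark` lambda as the objective, while `jit.add_module` pins one explicit configuration (the integer vector). A generic sketch of that compile-measure-keep-best loop; the types and config encoding are invented for illustration, not Triton's actual JIT interface:

    #include <functional>
    #include <limits>
    #include <vector>

    // Hypothetical stand-in for a compiled kernel tagged with its config.
    struct kernel_handle { std::vector<int> config; };

    // Exhaustive autotuning: build each candidate, score it with the
    // caller-supplied benchmark (higher is better), and keep the best.
    kernel_handle autotune(
        const std::vector<std::vector<int>>& candidates,
        const std::function<kernel_handle(const std::vector<int>&)>& compile,
        const std::function<double(const kernel_handle&)>& benchmark) {
        kernel_handle best;
        double best_score = -std::numeric_limits<double>::infinity();
        for (const auto& cfg : candidates) {
            kernel_handle k = compile(cfg);
            double score = benchmark(k);
            if (score > best_score) { best_score = score; best = k; }
        }
        return best;
    }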
@@ -6,7 +6,7 @@ data_files_path = tf.resource_loader.get_data_files_path()
 library_dir = os.path.dirname(os.path.realpath(__file__))
 module = tf.load_op_library(os.path.join(library_dir, 'libtf_blocksparse.so'))

-M, N, K = 256, 256, 256
+M, N, K = 8192, 8192, 8192
 a = tf.placeholder(tf.float16, shape=[M, K])
 b = tf.placeholder(tf.float16, shape=[N, K])
 locks = tf.placeholder(tf.int32, shape=[4096])
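At M = N = K = 8192 each fp16 operand is 8192 × 8192 × 2 B = 128 MiB, and one multiply costs 2·8192³ ≈ 1.1 Tflop, so this is a throughput benchmark rather than a toy correctness run. The arithmetic, worked out (the 50 Tflop/s figure below is illustrative, not a measurement):

    #include <cstdio>

    int main() {
        const double M = 8192, N = 8192, K = 8192;
        double operand_bytes = M * K * 2;     // fp16 = 2 bytes per element
        double flops = 2.0 * M * N * K;       // one multiply-add = 2 flops
        std::printf("operand: %.0f MiB\n", operand_bytes / (1024 * 1024));
        std::printf("work:    %.2f Tflop\n", flops / 1e12);
        std::printf("time at 50 Tflop/s: %.1f ms\n", flops / 50e12 * 1e3);
    }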
@@ -30,9 +30,9 @@ result = sess.run([c], feed_dict = {locks: np.zeros(4096),
 #           min_iters=100)
 #print(end - start)
 #print(2*M*N*K / (end - start) * 1e-12)
-hresult = np.dot(ha.T, hb).T
-dif = np.abs(result - hresult)
-print("dif: %f" % np.max(dif))
+#hresult = np.dot(ha.T, hb).T
+#dif = np.abs(result - hresult)
+#print("dif: %f" % np.max(dif))

 #np.savetxt("dif.txt", dif, fmt="%5.2f")
 #np.savetxt("gpu.txt", result, fmt="%5.2f")
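The NumPy check is commented out here, presumably because a CPU reference multiply at 8192³ is prohibitively slow. The pattern itself (compute a reference product, report the max absolute difference) is the standard way to validate a GEMM kernel; a self-contained C++ sketch of the same check at a cheap size, with illustrative names and an all-ones input, is:

    #include <cmath>
    #include <cstdio>
    #include <vector>

    // Naive reference GEMM plus max-abs-diff check, mirroring the disabled
    // NumPy verification above. B is stored [N, K], so this computes A * B^T,
    // matching the b placeholder's shape in the script.
    int main() {
        const int M = 64, N = 64, K = 64;
        std::vector<float> A(M * K, 1.0f), B(N * K, 1.0f);
        std::vector<float> C(M * N, float(K));   // stand-in for the GPU result
        float max_dif = 0.0f;
        for (int m = 0; m < M; ++m)
            for (int n = 0; n < N; ++n) {
                float ref = 0.0f;
                for (int k = 0; k < K; ++k)
                    ref += A[m * K + k] * B[n * K + k];
                max_dif = std::fmax(max_dif, std::fabs(C[m * N + n] - ref));
            }
        std::printf("dif: %f\n", max_dif);       // same report as the script
    }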
@@ -160,12 +160,18 @@ Value* shared_tile::get_value(indices_t idx) {
     vector_size = vector_size / 2;
   }
   if(base_ptr == nullptr){
+//    BasicBlock* store = builder_.GetInsertBlock();
+//    if(!non_cst_idx.empty())
+//      if(isa<Instruction>(non_cst_idx.front())){
+//        builder_.SetInsertPoint((Instruction*)non_cst_idx.front());
+//      }
     base_ptr = builder_.CreateGEP(ptr_, shared_offset(non_cst_idx));
     if(vector_size_ > 1){
       Type *vec_ty = VectorType::get(ty, vector_size);
       Type *vec_ptr_ty = PointerType::get(vec_ty, base_ptr->getType()->getPointerAddressSpace());
       base_ptr = builder_.CreateBitCast(base_ptr, vec_ptr_ty);
     }
+//    builder_.SetInsertPoint(store);
   }
   Value *offset = shared_offset(cst_idx);
   Value *div = offset;
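The `CreateBitCast` above reinterprets a scalar element pointer into shared memory as a pointer to a small vector, so one instruction moves `vector_size` elements instead of one. The same idea in plain C++, with memcpy as the portable stand-in for the wide access (the offset-in-vector-units convention here is for illustration only):

    #include <array>
    #include <cstring>
    #include <cstdio>

    // A "vectorized" load pulls vector_size contiguous elements through one
    // wide access instead of vector_size scalar ones.
    template <int vector_size>
    std::array<float, vector_size> wide_load(const float* base, int offset) {
        std::array<float, vector_size> v;
        std::memcpy(v.data(), base + offset * vector_size, sizeof(v));
        return v;
    }

    int main() {
        float tile[16];
        for (int i = 0; i < 16; ++i) tile[i] = float(i);
        auto v = wide_load<4>(tile, 2);   // elements 8..11 in one access
        std::printf("%g %g %g %g\n", v[0], v[1], v[2], v[3]);
    }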
@@ -534,10 +540,6 @@ void selection::init_axes(ir::value *v, IRBuilder<> &builder, Value *u_thread_id
   // b offsets
   offset_b_j_ = builder.CreateAdd(warp_offset_j, builder.CreateAdd(pair_b_off, in_pair_off_b));
   offset_b_k_ = builder.CreateAnd(u_thread_id, _3);
-//  offset_a_i_ = builder.getInt32(0);
-//  offset_a_k_ = builder.getInt32(0);
-//  offset_b_j_ = builder.getInt32(0);
-//  offset_b_k_ = builder.getInt32(0);

   // c offsets
   Value *offset_c_i = builder.CreateAdd(builder.CreateAnd(u_thread_id, _1), offset_a_i_);
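The `CreateAnd(u_thread_id, _3)` pattern extracts low bits of the thread id to place each thread inside its fragment: `tid & 3` is the thread's position along K, and neighboring masks and shifts pick out the i/j coordinates. A standalone sketch of this kind of lane decomposition (the real HMMA layout is more involved; the masks below are illustrative only):

    #include <cstdio>

    int main() {
        // Decompose a lane id into small per-fragment coordinates with
        // masks and shifts, as the IRBuilder calls above do in IR.
        for (unsigned tid = 0; tid < 8; ++tid) {
            unsigned k = tid & 3;          // low 2 bits: position along K
            unsigned i = (tid >> 2) & 1;   // next bit: position along I
            std::printf("tid=%u -> (i=%u, k=%u)\n", tid, i, k);
        }
    }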
@@ -957,7 +959,6 @@ void selection::lower_tile_instruction(ir::instruction *ins, llvm::IRBuilder<> &
   unsigned stride_rep_i = wpt_0 * wts_0;
   unsigned stride_rep_j = wpt_1 * wts_1;
   unsigned num_rep_i = shapes[0]->get_value() / stride_rep_i;
   unsigned num_rep_j = shapes[1]->get_value() / stride_rep_j;
-  unsigned ld_fc = num_rep_i * 2;
   for(unsigned pack_i = 0; pack_i < num_packs_0_; pack_i++)
   for(unsigned pack_j = 0; pack_j < num_packs_1_; pack_j++){
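The stride/rep arithmetic in the context lines computes how fragments tile the output: reading `wpt` as warps per tile and `wts` as the warp tile size along a dimension, consecutive fragments owned by one warp are `wpt * wts` apart, and a shape of S needs `S / (wpt * wts)` repetitions to be covered. A worked example with invented numbers:

    #include <cstdio>

    int main() {
        // Hypothetical values, chosen only to make the arithmetic concrete.
        unsigned wpt_0 = 4, wts_0 = 16;   // warps per tile, warp tile size
        unsigned shape_0 = 128;           // full tile shape along dim 0
        unsigned stride_rep_i = wpt_0 * wts_0;        // 64 elements apart
        unsigned num_rep_i = shape_0 / stride_rep_i;  // 2 fragments per warp
        std::printf("stride=%u, reps=%u\n", stride_rep_i, num_rep_i);
    }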
@@ -13,9 +13,9 @@ namespace codegen{

 unsigned shmem_allocation::is_ld_padded(ir::value *x) {
   if(auto* phi = dynamic_cast<ir::phi_node*>(x)) {
-    bool result = false;
+    unsigned result = 0;
     for(unsigned i = 0; i < phi->get_num_incoming(); i++)
-      result = result | is_ld_padded(phi->get_incoming_value(i));
+      result = std::max(result, is_ld_padded(phi->get_incoming_value(i)));
     return result;
   }
   if(dynamic_cast<ir::trans_inst*>(x))
@@ -23,11 +23,7 @@ unsigned shmem_allocation::is_ld_padded(ir::value *x) {
   for(ir::user* user: x->get_users())
     if(dynamic_cast<ir::dot_inst*>(user))
       if(params_->get_fragment(user, 0) == tune::HMMA_FRAGMENT_C){
-        if(x == user->get_operand(0)){
-          return 16;
-        }
-        else
-          return 16;
+        return 16;
       }
   return 0;
 }
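These two hunks change `is_ld_padded` from a flag into a pad amount in elements: a phi node now takes the maximum pad of its incoming values (hence `unsigned` and `std::max` instead of a bool OR), and any operand of an HMMA dot gets 16 elements of leading-dimension padding regardless of which operand it is, which is what made the if/else with identical branches collapsible. Padding the leading dimension is the standard trick to avoid shared-memory bank conflicts; a quick illustration of why a power-of-two row stride is bad under the usual 32-bank, 4-byte-wide model (the 64-element row is illustrative):

    #include <cstdio>

    int main() {
        const unsigned banks = 32, bank_width = 4;   // 32 banks x 4 bytes
        const unsigned elem = 2;                     // fp16 = 2 bytes
        for (unsigned pad : {0u, 16u}) {
            unsigned ld = 64 + pad;                  // leading dim in elements
            std::printf("pad=%2u:", pad);
            // Bank hit by column 0 of each of the first four rows:
            for (unsigned row = 0; row < 4; ++row) {
                unsigned byte = row * ld * elem;
                std::printf(" row%u->bank%2u", row, (byte / bank_width) % banks);
            }
            std::printf("\n");
        }
        // pad=0 : every row lands in bank 0 -> conflicts on column access.
        // pad=16: rows spread across banks 0, 8, 16, 24.
    }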
@@ -255,7 +255,7 @@ std::string cu_module::compile_llvm_module(llvm::Module* module) {
 cu_module::cu_module(driver::context * context, llvm::Module* ll_module): cu_module(context, compile_llvm_module(ll_module)) { }

 cu_module::cu_module(driver::context * context, std::string const & source) : module(context, CUmodule(), true), source_(source){
-  std::cout << source << std::endl;
+  // std::cout << source << std::endl;
   cu_context::context_switcher ctx_switch(*context);
   // JIT compile source-code
   CUjit_option opt[] = {CU_JIT_ERROR_LOG_BUFFER_SIZE_BYTES, CU_JIT_ERROR_LOG_BUFFER};
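The two CUjit options attach an error-log buffer so that PTX JIT failures come back with the assembler's diagnostics rather than just an error code. The usual shape of that call with the CUDA driver API, as a minimal sketch with error handling elided (the function name here is illustrative):

    #include <cuda.h>
    #include <cstdint>
    #include <cstdio>

    // JIT-compile a PTX string with an error-log buffer attached, so a
    // failing cuModuleLoadDataEx reports ptxas diagnostics.
    CUmodule load_ptx(const char* ptx) {   // assumes a current CUDA context
        char errlog[8192] = {0};
        CUjit_option opt[] = {CU_JIT_ERROR_LOG_BUFFER_SIZE_BYTES,
                              CU_JIT_ERROR_LOG_BUFFER};
        void* optval[] = {(void*)(uintptr_t)sizeof(errlog), errlog};
        CUmodule mod = nullptr;
        if (cuModuleLoadDataEx(&mod, ptx, 2, opt, optval) != CUDA_SUCCESS)
            std::fprintf(stderr, "PTX JIT failed:\n%s\n", errlog);
        return mod;
    }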