[triton/codegen] added shared memory padding for HMMA arguments and vectorized loads
@@ -6,8 +6,8 @@ list(APPEND CMAKE_MODULE_PATH "${CMAKE_CURRENT_SOURCE_DIR}/cmake")
 # FLEX/YACC
 find_package(BISON)
 find_package(FLEX)
-BISON_TARGET(Parser ${CMAKE_CURRENT_SOURCE_DIR}/include/triton/lang/parser.y ${CMAKE_CURRENT_BINARY_DIR}/parser.cpp)
-FLEX_TARGET(Lexer ${CMAKE_CURRENT_SOURCE_DIR}/include/triton/lang/scanner.l ${CMAKE_CURRENT_BINARY_DIR}/scanner.cpp)
+BISON_TARGET(Parser ${CMAKE_CURRENT_SOURCE_DIR}/include/triton/lang/parser.y ${CMAKE_CURRENT_SOURCE_DIR}/lib/lang/parser.cpp)
+FLEX_TARGET(Lexer ${CMAKE_CURRENT_SOURCE_DIR}/include/triton/lang/scanner.l ${CMAKE_CURRENT_SOURCE_DIR}/lib/lang/scanner.cpp)
 get_filename_component(BISON_Parser_INCLUDE_DIRECTORIES ${BISON_Parser_OUTPUT_HEADER} DIRECTORY)
 include_directories(${BISON_Parser_INCLUDE_DIRECTORIES})
@@ -7,9 +7,7 @@ if(${TensorFlow_FOUND})
 add_definitions(-D_GLIBCXX_USE_CXX11_ABI=${TF_ABI})
 add_library(tf_blocksparse SHARED dot.cpp)
 target_link_libraries(tf_blocksparse tensorflow_framework triton)
-add_custom_command(
-    TARGET tf_blocksparse POST_BUILD
-    COMMAND ${CMAKE_COMMAND} -E copy
-    ${CMAKE_CURRENT_SOURCE_DIR}/run.py
-    ${CMAKE_CURRENT_BINARY_DIR}/run.py)
+configure_file(${CMAKE_CURRENT_SOURCE_DIR}/run.py
+               ${CMAKE_CURRENT_BINARY_DIR}/run.py
+               COPYONLY)
 endif()
@@ -21,9 +21,9 @@ using GPUDevice = Eigen::GpuDevice;
 
 const char* src =
 R"(
-const tunable int32 TM = {8, 16, 32, 64, 128};
-const tunable int32 TN = {8, 16, 32, 64, 128};
-const tunable int32 TK = {8};
+const tunable int32 TM = {64, 128};
+const tunable int32 TN = {64, 128};
+const tunable int32 TK = {32};
 const tunable int32 GZ = {1};
 
 void matmul(restrict read_only fp16 *A, restrict read_only fp16 *B,
@@ -37,20 +37,14 @@ void matmul(restrict read_only fp16 *A, restrict read_only fp16 *B,
   int32 rka[TK] = 0 ... TK;
   int32 rkb[TK] = 0 ... TK;
   fp32 c[TM, TN] = 0;
-  int32 div = K / GZ;
-  int32 rem = K % GZ;
-  K = select(rz < rem, div - 1, div);
-  int32 offk = select(rz < rem, rz*(div + 1), rz*div + rem);
-  fp16* pa[TM, TK] = A + (offk + rka[newaxis, :])*lda + rxa[:, newaxis];
-  fp16* pb[TN, TK] = B + (offk + rkb[newaxis, :])*ldb + ryb[:, newaxis];
-  fp16 a[TM, TK] = *pa;
-  fp16 b[TN, TK] = *pb;
-  for(int32 k = K; k > 0; k = k - TK){
+  fp16* pa[TM, TK] = A + rka[newaxis, :]*lda + rxa[:, newaxis];
+  fp16* pb[TN, TK] = B + rkb[newaxis, :]*ldb + ryb[:, newaxis];
+  for(int32 k = K; k > TK; k = k - TK){
+    fp16 a[TM, TK] = *pa;
+    fp16 b[TN, TK] = *pb;
     c = dot(a, trans(b), c);
     pa = pa + TK*lda;
     pb = pb + TK*ldb;
-    a = *pa;
-    b = *pb;
   }
   int32 rxc[TM] = get_global_range[TM](0);
   int32 ryc[TN] = get_global_range[TN](1);
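Note on the kernel change above: the split-K offset logic (div/rem/offk, driven by GZ) is dropped, the operand loads move to the top of the loop body, and the bound becomes k > TK, which strip-mines K into full TK-wide steps and leaves a final slice of at most TK to be handled after the loop (not visible in this hunk). A minimal C++ analogue of that control flow, as a sketch only, with the tile dot-product replaced by a scalar accumulation:

#include <cassert>

// Strip-mined reduction: full TK-wide tiles in the "k > TK" loop, one tail tile after it.
int strip_mined_sum(const int *x, int K, int TK) {
  int c = 0;
  int k = K;
  const int *p = x;
  for (; k > TK; k -= TK) {       // full tiles; loads sit inside the body, as in the new kernel
    for (int i = 0; i < TK; ++i)
      c += p[i];                  // stand-in for c = dot(a, trans(b), c)
    p += TK;
  }
  for (int i = 0; i < k; ++i)     // tail tile: the last (<= TK) elements
    c += p[i];
  return c;
}

int main() {
  int x[10] = {1, 1, 1, 1, 1, 1, 1, 1, 1, 1};
  assert(strip_mined_sum(x, 10, 4) == 10);
}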
@@ -123,10 +117,10 @@ class BlockSparseGemmOp : public OpKernel {
       return 2.*M*N*K / ts * 1e-3;
     };
     // just-in-time compile source-code
-    // jit.autotune("matmul", src, benchmark);
+    jit.autotune("matmul", src, benchmark);
     // jit.add_module("matmul", src, {4, 2, 8, 4, 2, 32, 1, 4, 1, 1, 8, 8, 8, 1});
     // jit.add_module("matmul", src, {32, 2, 128, 32, 2, 128, 2, 2, 2, 2, 4, 8, 4, 1});
-    jit.add_module("matmul", src, {16, 4, 128, 32, 4, 128, 2, 2, 2, 2, 8, 8, 4, 1});
+    jit.add_module("matmul", src, {16, 4, 128, 16, 4, 128, 2, 2, 2, 2, 8, 32, 8, 1});
    triton::driver::kernel* kernel = jit.get_function("matmul");
    triton::jit::launch_information info = jit.get_launch_info("matmul");
    std::cout << benchmark(kernel, info) << std::endl;;
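With jit.autotune re-enabled above, the tunable ranges declared in src define the search space it explores with the benchmark callback. A sketch of what such a selection loop might look like; this is an assumption about the mechanism, not Triton's actual implementation, and Config and every name below are hypothetical stand-ins:

#include <functional>
#include <limits>
#include <vector>

struct Config { std::vector<unsigned> params; };  // one point in the tunable cross-product

// Benchmark every candidate, keep the highest-throughput one.
Config autotune(const std::vector<Config>& space,
                const std::function<double(const Config&)>& benchmark) {
  Config best;
  double best_perf = -std::numeric_limits<double>::infinity();
  for (const Config& c : space) {
    double perf = benchmark(c);   // e.g. TFLOPS, like "2.*M*N*K / ts * 1e-3" above
    if (perf > best_perf) { best_perf = perf; best = c; }
  }
  return best;
}

int main() {
  std::vector<Config> space = {{{16, 4, 128}}, {{32, 2, 128}}};
  Config best = autotune(space, [](const Config& c) { return double(c.params[0]); });
  return best.params[0] == 32 ? 0 : 1;
}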
@@ -6,7 +6,7 @@ data_files_path = tf.resource_loader.get_data_files_path()
 library_dir = os.path.dirname(os.path.realpath(__file__))
 module = tf.load_op_library(os.path.join(library_dir, 'libtf_blocksparse.so'))
 
-M, N, K = 256,256,256
+M, N, K = 8192,8192,8192
 a = tf.placeholder(tf.float16, shape=[M, K])
 b = tf.placeholder(tf.float16, shape=[N, K])
 locks = tf.placeholder(tf.int32, shape=[4096])
@@ -30,9 +30,9 @@ result = sess.run([c], feed_dict = {locks: np.zeros(4096),
 # min_iters=100)
 #print(end - start)
 #print(2*M*N*K / (end - start) * 1e-12)
-hresult = np.dot(ha.T, hb).T
-dif = np.abs(result - hresult)
-print("dif: %f" % np.max(dif))
+#hresult = np.dot(ha.T, hb).T
+#dif = np.abs(result - hresult)
+#print("dif: %f" % np.max(dif))
 
 #np.savetxt("dif.txt", dif, fmt="%5.2f")
 #np.savetxt("gpu.txt", result, fmt="%5.2f")
@@ -18,15 +18,16 @@ class layout;
 class target_tuner;
 class shmem_liveness;
 class shmem_info;
+class tune;
 
 class shmem_allocation {
 public:
-  shmem_allocation(shmem_liveness *live, shmem_info *buffer_info)
-    : liveness_(live), buffer_info_(buffer_info){ }
+  shmem_allocation(shmem_liveness *live, shmem_info *buffer_info, tune *params)
+    : liveness_(live), buffer_info_(buffer_info), params_(params){ }
 
   // utilities
   unsigned get_num_bytes(ir::value *x);
-  bool is_ld_padded(ir::value* x);
+  unsigned is_ld_padded(ir::value* x);
 
   // accessors
   unsigned get_offset(ir::value *x) const { return offsets_.at(x); }
@@ -42,6 +43,7 @@ private:
   // dependences
   shmem_liveness *liveness_;
  shmem_info *buffer_info_;
+  tune *params_;
 };
 
 }
@@ -53,7 +53,7 @@ public:
   struct passes_wrapper {
     passes_wrapper(codegen::target* target)
       : shmem_liveness(&shmem_info),
-        shmem_allocation(&shmem_liveness, &shmem_info),
+        shmem_allocation(&shmem_liveness, &shmem_info, &tune),
         shmem_barriers(&shmem_allocation, &shmem_info),
         vectorize(&tune),
         selection(&shmem_allocation, &tune, &shmem_info, target),
@@ -612,8 +612,9 @@ void selection::create_tile(ir::value *v, IRBuilder<> &builder,
   std::vector<unsigned> shapes;
   for(ir::constant_int* shape: cshapes)
     shapes.push_back(shape->get_value());
-  if(alloc_->is_ld_padded(v))
-    shapes[0] += 4;
+  unsigned pad = alloc_->is_ld_padded(v);
+  if(pad > 0)
+    shapes[0] += pad;
   Type* ty = llvm_type(v->get_type()->get_scalar_ty(), ctx);
   // create shared tile
   if(buffer_info_->is_shared(v)){
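is_ld_padded now returns a pad amount in elements, and create_tile grows the tile's leading dimension by it, presumably to keep the rows of a column of a shared-memory tile from landing in the same bank. A sketch of the bank arithmetic; my illustration, assuming NVIDIA's 32 banks of 4 bytes, with the pad value 16 taken from the HMMA case in shmem_allocation.cpp below:

#include <cstdio>

// Bank of element (row, col) in a row-major tile with leading dimension ld_elems.
int bank_of(unsigned row, unsigned col, unsigned ld_elems, unsigned elem_bytes) {
  unsigned byte_off = (row * ld_elems + col) * elem_bytes;
  return (byte_off / 4) % 32;   // 32 banks, 4 bytes each
}

int main() {
  const unsigned ld = 64, pad = 16;  // fp16 tile, leading dimension 64
  // Unpadded: a 64-wide fp16 row is 128 bytes = exactly 32 banks, so every row of
  // column 0 hits bank 0. Padded to 80, consecutive rows land in different banks.
  for (unsigned r = 0; r < 4; ++r)
    std::printf("col 0, row %u: bank %d (unpadded) vs %d (padded)\n",
                r, bank_of(r, 0, ld, 2), bank_of(r, 0, ld + pad, 2));
}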
@@ -1040,6 +1041,23 @@ void selection::lower_tile_instruction(ir::instruction *ins, llvm::IRBuilder<> &
       });
     }
   }
+  else if(auto *ld = dynamic_cast<ir::load_inst*>(ins)){
+    unsigned vector_size = result->axis(0).contiguous;
+    std::map<unsigned, Value*> packets;
+    distributed_tile *TP = (distributed_tile*)tmap_.at(ld->get_pointer_operand());
+    result->for_each([&](indices_t idx){
+      set_mask_insert_pt(idx);
+      unsigned linear = result->get_linear_index(idx);
+      unsigned id = linear / vector_size;
+      if(linear % vector_size == 0){
+        Value *ptr = TP->get_value(idx);
+        ptr = builder.CreateBitCast(ptr, PointerType::get(VectorType::get(result->get_ty(), vector_size),
+                                                          ptr->getType()->getPointerAddressSpace()));
+        packets[id] = builder.CreateLoad(ptr);
+      }
+      result->set_value(idx, builder.CreateExtractElement(packets.at(id), linear % vector_size));
+    });
+  }
   // element-wise
   else {
     result->for_each([&](indices_t idx){
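The new load lowering groups the lanes of a distributed tile into packets of vector_size contiguous elements: the lane whose linear index is a multiple of vector_size bitcasts its pointer to a vector type and issues one wide load, and every lane then extracts its own element from its packet. A plain C++ analogue of that packet bookkeeping, as a sketch only; the real code emits LLVM IR through IRBuilder:

#include <array>
#include <cassert>
#include <cstring>
#include <map>

constexpr unsigned VEC = 4;
using Packet = std::array<float, VEC>;  // stand-in for the loaded vector value

// Lane i reads element i % VEC of packet i / VEC: one wide load replaces VEC scalar loads.
void vectorized_copy(const float *src, float *dst, unsigned n) {
  assert(n % VEC == 0);
  std::map<unsigned, Packet> packets;            // mirrors std::map<unsigned, Value*> packets
  for (unsigned linear = 0; linear < n; ++linear) {
    unsigned id = linear / VEC;
    if (linear % VEC == 0)                       // first lane of the packet issues the wide load
      std::memcpy(packets[id].data(), src + linear, sizeof(Packet));
    dst[linear] = packets.at(id)[linear % VEC];  // every lane extracts its element
  }
}

int main() {
  float in[8] = {0, 1, 2, 3, 4, 5, 6, 7}, out[8];
  vectorized_copy(in, out, 8);
  for (int i = 0; i < 8; ++i) assert(out[i] == in[i]);
}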
@@ -1,6 +1,7 @@
 #include "triton/codegen/shmem_allocation.h"
 #include "triton/codegen/shmem_liveness.h"
 #include "triton/codegen/shmem_info.h"
+#include "triton/codegen/tune.h"
 #include "triton/ir/basic_block.h"
 #include "triton/ir/type.h"
 #include "triton/ir/value.h"
@@ -10,7 +11,7 @@
 namespace triton{
 namespace codegen{
 
-bool shmem_allocation::is_ld_padded(ir::value *x) {
+unsigned shmem_allocation::is_ld_padded(ir::value *x) {
   if(auto* phi = dynamic_cast<ir::phi_node*>(x)) {
     bool result = false;
     for(unsigned i = 0; i < phi->get_num_incoming(); i++)
@@ -18,15 +19,24 @@ bool shmem_allocation::is_ld_padded(ir::value *x) {
     return result;
   }
   if(dynamic_cast<ir::trans_inst*>(x))
-    return true;
-  return false;
+    return 4;
+  for(ir::user* user: x->get_users())
+    if(dynamic_cast<ir::dot_inst*>(user))
+      if(params_->get_fragment(user, 0) == tune::HMMA_FRAGMENT_C){
+        if(x == user->get_operand(0))
+          return 16;
+        else
+          return 16;
+      }
+  return 0;
 }
 
 unsigned shmem_allocation::get_num_bytes(ir::value *x) {
   unsigned result = x->get_type()->get_primitive_size_in_bits() / 8;
-  if(is_ld_padded(x)){
+  unsigned pad = is_ld_padded(x);
+  if(pad > 0){
     unsigned ld = x->get_type()->get_tile_shapes()[0]->get_value();
-    result += 4 * result / ld;
+    result += pad * result / ld;
   }
   if(buffer_info_->is_double(x))
     result *= 2;
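In the hunk above, transposes get a pad of 4 elements and operands of an HMMA dot get 16 (both operand positions currently return 16). For the size computation in get_num_bytes: result starts as the unpadded byte size and ld is the leading-dimension length in elements, so result / ld is bytes-per-element times the row count, and adding pad * result / ld grows every row by pad elements. A small self-contained check of that arithmetic; the function name and the 128x32 fp16 example are mine:

#include <cassert>

// rows * ld * elem_bytes is the unpadded size; pad extra elements per row add
// pad * result / ld == rows * pad * elem_bytes bytes, matching get_num_bytes.
unsigned padded_bytes(unsigned rows, unsigned ld, unsigned elem_bytes, unsigned pad) {
  unsigned result = rows * ld * elem_bytes;
  if (pad > 0)
    result += pad * result / ld;
  return result;
}

int main() {
  // fp16 (2-byte) 128x32 HMMA operand tile, pad = 16 along the leading dimension:
  assert(padded_bytes(32, 128, 2, 16) == 32 * (128 + 16) * 2);
}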
@@ -255,7 +255,7 @@ std::string cu_module::compile_llvm_module(llvm::Module* module) {
 cu_module::cu_module(driver::context * context, llvm::Module* ll_module): cu_module(context, compile_llvm_module(ll_module)) { }
 
 cu_module::cu_module(driver::context * context, std::string const & source) : module(context, CUmodule(), true), source_(source){
-  std::cout << source << std::endl;
+//  std::cout << source << std::endl;
   cu_context::context_switcher ctx_switch(*context);
   // JIT compile source-code
   CUjit_option opt[] = {CU_JIT_ERROR_LOG_BUFFER_SIZE_BYTES, CU_JIT_ERROR_LOG_BUFFER};