[examples] improved template for testing matrix multiplication

Philippe Tillet
2019-02-08 12:54:20 -05:00
parent dd35277858
commit 90c0474974
4 changed files with 190 additions and 85 deletions


@@ -2,5 +2,5 @@ foreach(PROG matrix)
 add_executable(${PROG} ${PROG}.cpp)
 set_target_properties(${PROG} PROPERTIES OUTPUT_NAME ${PROG})
 include_directories(/usr/local/cuda/include/)
-target_link_libraries(${PROG} tdl)
+target_link_libraries(${PROG} tdl cuda)
 endforeach(PROG)
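
The new `cuda` link dependency is the CUDA driver API (libcuda), which the reworked example below uses to JIT-compile the generated PTX and launch it. A minimal sanity check that the dependency resolves, assuming the same include path as above; the file name check.cpp is illustrative:

#include <cstdio>
#include "cuda.h"

int main() {
  // cuInit(0) is the canonical "is the driver usable?" probe.
  if (cuInit(0) != CUDA_SUCCESS) {
    fprintf(stderr, "cuInit failed: no usable CUDA driver\n");
    return 1;
  }
  int version = 0;
  cuDriverGetVersion(&version);   // e.g. 10010 for CUDA 10.1
  printf("CUDA driver version: %d\n", version);
  return 0;
}
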


@@ -1,5 +1,6 @@
 #include <cstring>
 #include <cstdio>
+#include "cuda.h"
 #include "ast/ast.h"
 #include "ir/context.h"
 #include "ir/module.h"
@@ -42,7 +43,7 @@ void test(fp32 *a, fp32 *b, fp32 *c, int32 M, int32 N, int32 K){\
 for(k = K; k >= 0; k = k - 8){\
 fp32 a[32, 8] = *pa;\
 fp32 b[32, 8] = *pb;\
-C = dot(a,b,C);\
+C = C + 1;\
 pa = pa + 8*M;\
 pb = pb + 8*K;\
 }\
@@ -50,7 +51,7 @@ void test(fp32 *a, fp32 *b, fp32 *c, int32 M, int32 N, int32 K){\
 }\
 ";
-static std::string computeDataLayout(bool is64Bit, bool UseShortPointers) {
+static std::string compute_data_layout(bool is64Bit, bool UseShortPointers) {
   std::string Ret = "e";
   if (!is64Bit)
     Ret += "-p:32:32";
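
The renamed helper assembles the module's data-layout string piece by piece: "e" declares little-endian, and "-p:32:32" pins pointers to 32 bits when not targeting 64-bit; the rest of the string lies outside the displayed hunk. For intuition, a fragment like this is consumed by llvm::DataLayout (from llvm/IR/DataLayout.h), which answers the size and alignment queries the backend relies on. A sketch with an abbreviated string, not the full NVPTX layout:

llvm::DataLayout dl("e-p:32:32");
unsigned ptr_bits = dl.getPointerSizeInBits();  // 32, per the -p spec
bool little = dl.isLittleEndian();              // true, per the leading "e"
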
@@ -60,79 +61,172 @@ static std::string computeDataLayout(bool is64Bit, bool UseShortPointers) {
   return Ret;
 }
-int main() {
-  YY_BUFFER_STATE buffer = yy_scan_string(src);
-  yyparse();
-  yy_delete_buffer(buffer);
-  translation_unit *program = ast_root;
-  tdl::ir::context context;
-  tdl::ir::module module("matrix", context);
-  program->codegen(&module);
-  llvm::LLVMContext llvm_context;
-  llvm::Module llvm_module("test", llvm_context);
-  // lowering passes
-  tdl::codegen::place_shared_copy shared;
-  tdl::codegen::tune tune;
-  tdl::codegen::liveness liveness;
-  tdl::codegen::allocation allocation(&liveness);
-  tdl::codegen::selection selection(&allocation, &tune);
-  tune.run(module);
-  std::vector<unsigned> params = {
-    // asm
-    2, 8, 1,
-    // bsn
-    4, 4, 1,
-    // pa
-    2, 4, 1,
-    // pb
-    1, 8, 1,
-  };
-  std::map<tdl::ir::value*, std::vector<std::string>> errors;
-  unsigned i = 0;
-  std::cout << tune.get_params(module).size() << std::endl;
-  for(unsigned *x: tune.get_params(module))
-    *x = params[i++];
-  tune.check_constraints(module, errors);
-  std::cout << "errors: " << errors.size() << std::endl;
-  for(auto &x: errors){
-    for(auto &e: x.second)
-      std::cout << e << std::endl;
-  }
-  shared.run(module);
-  liveness.run(module);
-  allocation.run();
-  selection.run(module, llvm_module);
+static std::string generate_machine_code(llvm::Module &module, const std::string &target_triple, const std::string &data_layout) {
+  llvm::InitializeAllTargetInfos();
+  llvm::InitializeAllTargets();
+  llvm::InitializeAllTargetMCs();
+  llvm::InitializeAllAsmParsers();
+  llvm::InitializeAllAsmPrinters();
-  // // print LLVM program
-  // llvm::PrintModulePass print(llvm::outs());
-  // llvm::AnalysisManager<llvm::Module> analysis;
-  // print.run(llvm_module, analysis);
+  module.setTargetTriple(target_triple);
+  std::string error;
+  auto target = llvm::TargetRegistry::lookupTarget(module.getTargetTriple(), error);
+  llvm::TargetMachine *machine = target->createTargetMachine(module.getTargetTriple(), "sm_52", "",
+                                                             llvm::TargetOptions(), llvm::Reloc::Model(),
+                                                             llvm::None, llvm::CodeGenOpt::Aggressive);
+  module.setDataLayout(data_layout);
-  // create target machine
-  {
-    llvm::InitializeAllTargetInfos();
-    llvm::InitializeAllTargets();
-    llvm::InitializeAllTargetMCs();
-    llvm::InitializeAllAsmParsers();
-    llvm::InitializeAllAsmPrinters();
-    llvm_module.setTargetTriple("nvptx64-nvidia-cuda");
-    std::string error;
-    auto target = llvm::TargetRegistry::lookupTarget(llvm_module.getTargetTriple(), error);
-    llvm::TargetMachine *machine = target->createTargetMachine(llvm_module.getTargetTriple(), "sm_52", "",
-                                                               llvm::TargetOptions(), llvm::Reloc::Model(),
-                                                               llvm::None, llvm::CodeGenOpt::Aggressive);
-    llvm_module.setDataLayout(computeDataLayout(true, true));
-    // emit machine code
-    llvm::legacy::PassManager pass;
-    llvm::SmallVector<char, 0> buffer;
-    llvm::raw_svector_ostream stream(buffer);
-    machine->addPassesToEmitFile(pass, stream, nullptr, llvm::TargetMachine::CGFT_AssemblyFile);
-    pass.run(llvm_module);
-    std::string src(buffer.begin(), buffer.end());
-    std::cout << src << std::endl;
-  }
-  return 0;
+  // emit machine code
+  llvm::legacy::PassManager pass;
+  llvm::SmallVector<char, 0> buffer;
+  llvm::raw_svector_ostream stream(buffer);
+  machine->addPassesToEmitFile(pass, stream, nullptr, llvm::TargetMachine::CGFT_AssemblyFile);
+  pass.run(module);
+  std::string src(buffer.begin(), buffer.end());
+  return src;
+}
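
One hardening note on the function above: llvm::TargetRegistry::lookupTarget returns null and fills error when the requested backend (here NVPTX) was not compiled into LLVM, and the result is dereferenced unchecked. A guard one could place before createTargetMachine, using only calls already present in the file:

if (!target) {
  // error now holds the registry's diagnostic string
  llvm::errs() << "lookupTarget failed: " << error << "\n";
  return "";
}
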
+static void __checkCudaErrors( CUresult err, const char *file, const int line )
+{
+  if( CUDA_SUCCESS != err) {
+    fprintf(stderr,
+            "CUDA Driver API error = %04d from file <%s>, line %i.\n",
+            err, file, line );
+    exit(-1);
+  }
+}
+#define checkCudaErrors(err) __checkCudaErrors (err, __FILE__, __LINE__)
+static void compile_machine_code(CUdevice &device, CUcontext &context, CUmodule &module,
+                                 CUfunction &function, CUstream &stream, int &major, int &minor,
+                                 const std::string &src, const std::string &name) {
+  int numDevices;
+  // Initialize
+  checkCudaErrors(cuInit(0));
+  checkCudaErrors(cuDeviceGetCount(&numDevices));
+  checkCudaErrors(cuDeviceGet(&device, 0));
+  checkCudaErrors(cuDeviceComputeCapability(&major, &minor, device));
+  checkCudaErrors(cuCtxCreate(&context, 0, device));
+  checkCudaErrors(cuStreamCreate(&stream, 0));
+  // Compile program
+  CUjit_option opt[] = {CU_JIT_ERROR_LOG_BUFFER_SIZE_BYTES, CU_JIT_ERROR_LOG_BUFFER};
+  unsigned int errbufsize = 8096;
+  std::string errbuf(errbufsize, 0);
+  const void *cpterr = static_cast<const void*>(errbuf.data());
+  void *pterr = const_cast<void*>(cpterr);
+  void* optval[] = {(void*)(uintptr_t)errbufsize, pterr};
+  int err = cuModuleLoadDataEx(&module, src.data(), 2, opt, optval);
+  if(err != CUDA_SUCCESS){
+    std::cerr << "Compilation Failed! Log: " << std::endl;
+    std::cerr << errbuf << std::endl;
+  }
+  // Get function
+  checkCudaErrors(cuModuleGetFunction(&function, module, name.c_str()));
+}
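
A side note on portability: cuDeviceComputeCapability is deprecated in the driver API, and the attribute-based query is the documented replacement returning the same pair. A drop-in sketch for the call above:

int major, minor;
checkCudaErrors(cuDeviceGetAttribute(&major, CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MAJOR, device));
checkCudaErrors(cuDeviceGetAttribute(&minor, CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MINOR, device));
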
+int main() {
+  // create AST from Triton-C source
+  YY_BUFFER_STATE buffer = yy_scan_string(src);
+  yyparse();
+  yy_delete_buffer(buffer);
+  translation_unit *program = ast_root;
+  // create Triton-IR from AST
+  tdl::ir::context context;
+  tdl::ir::module module("matrix", context);
+  program->codegen(&module);
+  llvm::LLVMContext llvm_context;
+  llvm::Module llvm_module("test", llvm_context);
+  // create passes
+  tdl::codegen::place_shared_copy shared;
+  tdl::codegen::tune tune;
+  tdl::codegen::liveness liveness;
+  tdl::codegen::allocation allocation(&liveness);
+  tdl::codegen::selection selection(&allocation, &tune);
+  // tuning parameters
+  tune.run(module);
+  std::vector<unsigned> params = {
+    // asm
+    2, 8, 1,
+    // bsn
+    4, 4, 1,
+    // pa
+    2, 4, 1,
+    // pb
+    1, 8, 1,
+  };
+  std::map<tdl::ir::value*, std::vector<std::string>> errors;
+  unsigned i = 0;
+  std::cout << tune.get_params(module).size() << std::endl;
+  for(unsigned *x: tune.get_params(module))
+    *x = params[i++];
+  tune.check_constraints(module, errors);
+  std::cout << "errors: " << errors.size() << std::endl;
+  for(auto &x: errors){
+    for(auto &e: x.second)
+      std::cout << e << std::endl;
+  }
+  // run passes
+  shared.run(module);
+  liveness.run(module);
+  allocation.run();
+  selection.run(module, llvm_module);
+  // llvm source
+  llvm::PrintModulePass print(llvm::outs());
+  llvm::AnalysisManager<llvm::Module> analysis;
+  print.run(llvm_module, analysis);
+  // generate machine code
+  std::string src = generate_machine_code(llvm_module, "nvptx64-nvidia-cuda", compute_data_layout(true, true));
+  std::cout << src << std::endl;
+  // compile machine code
+  CUdevice cu_device;
+  CUcontext cu_context;
+  CUmodule cu_module;
+  CUfunction cu_kernel;
+  CUstream cu_stream;
+  int major, minor;
+  compile_machine_code(cu_device, cu_context, cu_module, cu_kernel, cu_stream, major, minor, src, "test");
+  // execute machine code
+  // Allocate buffers
+  typedef float numeric_t;
+  size_t M = 256, N = 256, K = 256;
+  std::vector<numeric_t> c(M*N);
+  std::vector<numeric_t> a(M*K);
+  std::vector<numeric_t> b(K*N);
+  for(size_t i = 0; i < a.size(); i++)
+    a[i] = (float)rand() / RAND_MAX;
+  for(size_t i = 0; i < b.size(); i++)
+    b[i] = (float)rand() / RAND_MAX;
+  for(size_t i = 0; i < c.size(); i++)
+    c[i] = 0;
+  CUdeviceptr d_a, d_b, d_c;
+  checkCudaErrors(cuMemAlloc(&d_a, sizeof(numeric_t) * a.size()));
+  checkCudaErrors(cuMemAlloc(&d_b, sizeof(numeric_t) * b.size()));
+  checkCudaErrors(cuMemAlloc(&d_c, sizeof(numeric_t) * c.size()));
+  // Copy buffers
+  checkCudaErrors(cuMemcpyHtoD(d_a, a.data(), sizeof(numeric_t) * a.size()));
+  checkCudaErrors(cuMemcpyHtoD(d_b, b.data(), sizeof(numeric_t) * b.size()));
+  checkCudaErrors(cuMemcpyHtoD(d_c, c.data(), sizeof(numeric_t) * c.size()));
+  // Launch kernel
+  void *args[] = { &d_a, &d_b, &d_c, &M, &N, &K};
+  int num_regs;
+  cuFuncGetAttribute(&num_regs, CU_FUNC_ATTRIBUTE_NUM_REGS, cu_kernel);
+  unsigned TM = params[0]*params[1];
+  unsigned TN = params[3]*params[4];
+  unsigned nthreads = params[1]*params[2]*params[7]*params[8];
+  checkCudaErrors(cuLaunchKernel(cu_kernel, M/TM, N/TN, 1, nthreads, 1, 1, 0, cu_stream, args, NULL));
+  checkCudaErrors(cuStreamSynchronize(cu_stream));
+  // Write back
+  checkCudaErrors(cuMemcpyDtoH(c.data(), d_c, sizeof(numeric_t) * c.size()));
+  return 0;
+}
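
Two observations on the test harness above. First, M, N and K are size_t on the host while the kernel signature declares int32; cuLaunchKernel marshals each argument with the size the kernel declares, so only the low four bytes are read (harmless on little-endian, but worth tightening). Second, the result is copied back but never checked. With the debugging template in this commit (C = C + 1 instead of dot), every element of c should simply equal the number of k-iterations; a check one could append before return 0. The expected count assumes C starts at zero and the k >= 0 bound shown, both of which sit partly outside the displayed hunks:

// k = K, K-8, ..., 0  ->  K/8 + 1 iterations, each adding 1
numeric_t expected = (numeric_t)(K / 8 + 1);
size_t mismatches = 0;
for(size_t i = 0; i < c.size(); i++)
  if(c[i] != expected)
    mismatches++;
std::cout << "mismatches: " << mismatches << "/" << c.size() << std::endl;
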


@@ -78,7 +78,7 @@ void node::implicit_cast(ir::builder &builder, ir::value *&lhs, ir::value *&rhs,
 // Both operands are integers
 else if(left_ty->is_integer_ty() && right_ty->is_integer_ty()){
   is_int = true;
-  is_signed = false;
+  is_signed = true;
   if(left_ty->get_integer_bitwidth() != right_ty->get_integer_bitwidth()){
     ir::value *&to_convert = (left_ty->get_integer_bitwidth() > right_ty->get_integer_bitwidth())?rhs:lhs;
     ir::type *dst_ty = (to_convert==lhs)?right_ty:left_ty;
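
Flipping is_signed to true matters because, in LLVM-style IR, signedness lives in the opcode rather than in the integer type. A small illustration of the kind of choice this flag presumably drives downstream, written against llvm::IRBuilder; whether tdl's ir::builder exposes the same method names is an assumption:

llvm::Value *quotient = is_signed ? builder.CreateSDiv(lhs, rhs)   // sign-aware: -7 / 2 == -3
                                  : builder.CreateUDiv(lhs, rhs);  // treats bit patterns as unsigned
llvm::Value *widened  = is_signed ? builder.CreateSExt(narrow, wide_ty)   // replicate the sign bit
                                  : builder.CreateZExt(narrow, wide_ty);  // pad with zeros
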


@@ -495,7 +495,15 @@ void selection::run(ir::module &src, Module &dst){
 for(ir::function *fn: src.get_function_list()) {
   // create LLVM function
   FunctionType *fn_ty = (FunctionType*)llvm_type(fn->get_fn_type(), dst_ctx);
-  Function *dst_fn = Function::Create(fn_ty, Function::ExternalLinkage, "kernel", &dst);
+  Function *dst_fn = Function::Create(fn_ty, Function::ExternalLinkage, fn->get_name(), &dst);
+  // Set metadata
+  llvm::Metadata *md_args[] = {
+    llvm::ValueAsMetadata::get(dst_fn),
+    llvm::MDString::get(dst_ctx, "kernel"),
+    llvm::ValueAsMetadata::get(dst_builder.getInt32(1))
+  };
+  dst.getOrInsertNamedMetadata("nvvm.annotations")->addOperand(llvm::MDNode::get(dst_ctx, md_args));
   // map parameters
   for(unsigned i = 0; i < fn->args().size(); i++)
     vmap_[fn->args()[i]] = &*(dst_fn->arg_begin() + i);
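
This hunk is what the new launch path depends on: naming the function fn->get_name() lets cuModuleGetFunction resolve "test", and the nvvm.annotations entry tells the NVPTX backend to emit it as a kernel entry point (.entry) rather than a plain device function. In textual IR the added metadata prints roughly as follows; the signature shown is the one implied by the matrix example's prototype:

!nvvm.annotations = !{!0}
!0 = !{void (float*, float*, float*, i32, i32, i32)* @test, !"kernel", i32 1}
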
@@ -506,13 +514,16 @@ void selection::run(ir::module &src, Module &dst){
 }
 dst_builder.SetInsertPoint((BasicBlock*)vmap_[fn->blocks()[0]]);
 // allocate shared memory
-Type *int_8_ty = Type::getInt8Ty(dst_ctx);
-ArrayType *array_ty = ArrayType::get(int_8_ty, alloc_->get_allocated_size());
-Type *ptr_ty = PointerType::get(int_8_ty, 3);
-GlobalVariable *sh_mem_array =
-  new GlobalVariable(*dst_fn->getParent(), array_ty, false, GlobalVariable::InternalLinkage,
-                     nullptr, "__shared_ptr", nullptr, GlobalVariable::NotThreadLocal, 3);
-Value *sh_mem_ptr = dst_builder.CreateBitCast(sh_mem_array, ptr_ty);
+Value *sh_mem_ptr = nullptr;
+if(unsigned alloc_size = alloc_->get_allocated_size()){
+  Type *int_8_ty = Type::getInt8Ty(dst_ctx);
+  ArrayType *array_ty = ArrayType::get(int_8_ty, alloc_size);
+  Type *ptr_ty = PointerType::get(int_8_ty, 3);
+  GlobalVariable *sh_mem_array =
+    new GlobalVariable(*dst_fn->getParent(), array_ty, false, GlobalVariable::InternalLinkage,
+                       nullptr, "__shared_ptr", nullptr, GlobalVariable::NotThreadLocal, 3);
+  sh_mem_ptr = dst_builder.CreateBitCast(sh_mem_array, ptr_ty);
+}
 // create grids
 init_grids(fn, dst_builder, sh_mem_ptr);
 // iterate through block