#include <vector>
#include <array>
#include <string>
#include <iostream>
#include <cstdlib>
#include <cmath>
#include <algorithm>
#include "common.hpp"
#include "triton/jit.h"
#include "triton/driver/backend.h"
#include "triton/driver/stream.h"
#include "triton/dnn/conv.h"

int main() {
  // initialize default compute device
  auto context = triton::driver::backend::contexts::get_default();
  triton::jit jit(context);
  triton::dnn::conv::type ty = triton::dnn::conv::BPROP;
  // initialization
  int32_t B = 4, NF = 32;
  int32_t D = 1, H = 24, W = 240;
  int32_t NC = 32, T = 1, R = 3, S = 3;
  int32_t pad_d = 0, pad_h = 1, pad_w = 1;
  // convolution configuration (D, T and pad_d are not used by the 2-D configuration below)
  triton::dnn::conv configuration(B, NC, H, W, R, S, NF, 1, 1, pad_h, pad_w, ty);
  // host buffers: hc/rc hold the device and reference outputs, ha/hb the inputs
  std::vector<float> hc(configuration.c_size());
  std::vector<float> rc(configuration.c_size());
  std::vector<float> ha(configuration.a_size());
  std::vector<float> hb(configuration.b_size());
  srand(0);
  for(size_t i = 0; i < ha.size(); i++)
    ha[i] = (float)rand()/RAND_MAX;
  for(size_t i = 0; i < hb.size(); i++)
    hb[i] = (float)rand()/RAND_MAX;
  for(size_t i = 0; i < hc.size(); i++)
    hc[i] = 0;
  rc = hc;
  // device buffers (sizes are in bytes: 4 bytes per float element)
  triton::driver::buffer* dc = triton::driver::buffer::create(context, hc.size()*4);
  triton::driver::buffer* da = triton::driver::buffer::create(context, ha.size()*4);
  triton::driver::buffer* db = triton::driver::buffer::create(context, hb.size()*4);
  triton::driver::stream* stream = triton::driver::stream::create(context);
  stream->write(da, true, 0, ha);
  stream->write(db, true, 0, hb);
  stream->write(dc, true, 0, hc);
  stream->synchronize();
  // look-up tables (element type assumed int32_t, matching the 4-byte-per-element writes below)
  std::vector<int32_t> h_delta, h_masks;
  configuration.build_deltas(h_delta);
  configuration.build_masks(h_masks);
  // benchmark a given convolution kernel
  auto benchmark = [&](triton::driver::kernel* kernel,
                       triton::jit::launch_information info) {
    unsigned TM = info.global_range_size[0];
    unsigned TN = info.global_range_size[1];
    unsigned nthreads = info.num_threads;
    std::array<size_t, 3> grid = configuration.get_grid(TM, TN);
    // upload the look-up tables used by the generated kernel
    triton::driver::buffer* delta = jit.get_buffer("delta");
    triton::driver::buffer* masks = jit.get_buffer("masks");
    stream->write(delta, false, 0, h_delta.size()*4, h_delta.data());
    stream->write(masks, false, 0, h_masks.size()*4, h_masks.data());
    stream->synchronize();
    configuration.set_arg(kernel, da, db, dc);
    // warm-up launch, then timed launches
    stream->enqueue(kernel, grid, {nthreads, 1, 1});
    stream->synchronize();
    double ts = bench([&](){ stream->enqueue(kernel, grid, {nthreads, 1, 1}); },
                      [&](){ stream->synchronize(); },
                      *context->device());
    return configuration.get_nflops() / ts * 1e-3;
  };
  // compile the convolution kernel with the default parameters
  std::string src = configuration.src();
//  jit.autotune("conv", src.c_str(), benchmark);
  jit.add_module("conv", src.c_str(), configuration.default_params());
  triton::driver::kernel* kernel = jit.get_function("conv");
  triton::jit::launch_information info = jit.get_launch_info("conv");
  std::cout << "Performance: " << benchmark(kernel, info) << " TFLOPS" << std::endl;
  // check the device result against the CPU reference
  stream->read(dc, true, 0, hc);
  configuration.cpu_ref(rc.data(), ha.data(), hb.data());
//  std::cout << hc[0] << std::endl;
  for(size_t i = 0; i < hc.size(); i++)
    if(std::abs(hc[i] - rc[i])/std::max(hc[i], rc[i]) > 1e-4){
      std::cout << i << " " << hc[i] << " " << rc[i] << std::endl;
      exit(EXIT_FAILURE);
    }
  std::cout << "Pass!" << std::endl;
}