[general] cleaned tensorflow source code generation
@@ -15,7 +15,7 @@ namespace ir{
 namespace codegen{
 namespace analysis{
 
-class tune;
+class grids;
 
 namespace shmem{
 
@@ -24,7 +24,7 @@ class info;
 
 class allocation {
 public:
-  allocation(liveness *live, info *buffer_info, tune *params)
+  allocation(liveness *live, info *buffer_info, grids *params)
     : liveness_(live), buffer_info_(buffer_info), params_(params){ }
 
   // utilities
@@ -45,7 +45,7 @@ private:
   // dependences
   liveness *liveness_;
   info *buffer_info_;
-  tune *params_;
+  grids *params_;
 };
 
 }
@@ -19,7 +19,7 @@ namespace ir{
 namespace codegen{
 namespace analysis{
 
-class tune {
+class grids {
   typedef std::pair<ir::value*, unsigned> node_t;
   typedef std::map <node_t, std::set<node_t>> graph_t;
 
@@ -41,12 +41,11 @@ private:
 
 
 public:
-  tune(size_t num_warps);
+  grids(size_t num_warps);
   ir::metaparameter* get_param(ir::value *value, const std::string &key) { return params_[value][key]; }
   unsigned get_param_group(ir::value *value, unsigned ax);
   fragment_t get_fragment(ir::value *value, unsigned ax) { return fragments_.at({value, ax}); }
   void copy(ir::value *dst, ir::value *src);
-  bool check_constraints(std::map<ir::value *, std::vector<std::string>> &errors);
   void run(ir::module &mod);
   unsigned get_num_threads();
 
@@ -44,7 +44,7 @@ namespace codegen{
 
 namespace analysis{
 
-class tune;
+class grids;
 class alignment_info;
 
 namespace shmem{
@@ -196,7 +196,7 @@ private:
 
 
 public:
-  selection(analysis::shmem::allocation *alloc, analysis::tune *params, analysis::shmem::info *buffer_info, analysis::alignment_info *alignment, target *tgt)
+  selection(analysis::shmem::allocation *alloc, analysis::grids *params, analysis::shmem::info *buffer_info, analysis::alignment_info *alignment, target *tgt)
     : alloc_(alloc), params_(params), buffer_info_(buffer_info), alignment_(alignment), tgt_(tgt){ }
 
   void run(ir::module &src, Module &dst);
@@ -205,7 +205,7 @@ private:
   vmap_t vmap_;
   tmap_t tmap_;
   analysis::shmem::allocation *alloc_;
-  analysis::tune *params_;
+  analysis::grids *params_;
   analysis::shmem::info *buffer_info_;
   analysis::alignment_info *alignment_;
   target *tgt_;
@@ -19,7 +19,7 @@ class getelementptr_inst;
 namespace codegen{
 
 namespace analysis{
-class tune;
+class grids;
 class alignment_info;
 }
 
@@ -37,11 +37,11 @@ private:
   ir::value *reassociate_ptr(ir::getelementptr_inst* pz, ir::builder &builder, std::map<ir::value*, cst_info> &offsets);
 
 public:
-  reassociate(analysis::tune *params);
+  reassociate(analysis::grids *params);
   void run(ir::module& module);
 
 private:
-  analysis::tune* params_;
+  analysis::grids* params_;
 };
 
 }
@@ -10,18 +10,18 @@ namespace ir {
 namespace codegen{
 
 namespace analysis{
-class tune;
+class grids;
 }
 
 namespace transform{
 
 class vectorize {
 public:
-  vectorize(analysis::tune *params): params_(params){}
+  vectorize(analysis::grids *params): params_(params){}
   void run(ir::module &mod);
 
 private:
-  analysis::tune *params_;
+  analysis::grids *params_;
 };
 
 }
@@ -42,7 +42,7 @@ class translation_unit;
 
 namespace codegen{
 namespace analysis{
-class tune;
+class grids;
 }
 }
 
@@ -21,7 +21,7 @@ unsigned allocation::is_ld_padded(ir::value *x) {
   }
   for(ir::user* user: x->get_users())
     if(auto dot = dynamic_cast<ir::dot_inst*>(user)){
-      bool is_hmma = params_->get_fragment(user, 0) == tune::HMMA_FRAGMENT_C;
+      bool is_hmma = params_->get_fragment(user, 0) == grids::HMMA_FRAGMENT_C;
       bool is_op_0 = x == dot->get_operand(0);
       bool is_op_1 = x == dot->get_operand(1);
       if(is_hmma && is_op_0){
@@ -57,7 +57,7 @@ unsigned allocation::get_num_bytes(ir::value *x) {
     for(auto x: shapes)
       num_elements *= x->get_value();
     size_t depth;
-    if(params_->get_fragment(x, 0) == tune::HMMA_FRAGMENT_C)
+    if(params_->get_fragment(x, 0) == grids::HMMA_FRAGMENT_C)
       depth = params_->get_param(op, "wpt.d" + std::to_string(axis))->get_value();
     else
       depth = params_->get_param(op, "mts.d" + std::to_string(axis))->get_value();
@@ -15,7 +15,7 @@ namespace triton{
 namespace codegen{
 namespace analysis{
 
-tune::tune(size_t num_warps): num_warps_(num_warps){
+grids::grids(size_t num_warps): num_warps_(num_warps){
 }
 
 bool is_hmma(ir::value *v){
@@ -32,14 +32,14 @@ bool is_hmma(ir::value *v){
   return result;
 }
 
-void tune::add_constraint(node_t x, node_t y) {
+void grids::add_constraint(node_t x, node_t y) {
   dependencies_[x].insert(y);
   dependencies_[y].insert(x);
   nodes_.insert(x);
   nodes_.insert(y);
 }
 
-void tune::init_c_phi(ir::instruction *v) {
+void grids::init_c_phi(ir::instruction *v) {
   // Phi Nodes: all the incoming value share the result layout
   if(auto *phi = dynamic_cast<ir::phi_node*>(v))
     for(ir::value *op: phi->ops())
@@ -50,7 +50,7 @@ void tune::init_c_phi(ir::instruction *v) {
   }
 }
 
-void tune::init_c_graph(ir::instruction *v) {
+void grids::init_c_graph(ir::instruction *v) {
   // Reference shape
   ir::type::tile_shapes_t::value_type one = ir::tile_type::make_one(v->get_parent()->get_context());
   ir::type::tile_shapes_t shapes;
@@ -142,7 +142,7 @@ void tune::init_c_graph(ir::instruction *v) {
   }
 }
 
-tune::fragment_t tune::get_fragmentation_type(node_t x, graph_t &graph){
+grids::fragment_t grids::get_fragmentation_type(node_t x, graph_t &graph){
   std::list<node_t> work;
   std::set<node_t> seen;
   work.push_back(x);
@@ -160,7 +160,7 @@ tune::fragment_t tune::get_fragmentation_type(node_t x, graph_t &graph){
   return STRIDED_SCAN;
 }
 
-void tune::connected_components(node_t x, const std::vector<ir::metaparameter *> mps, const std::vector<std::string> prefixes, std::set<node_t> &nodes, graph_t &graph, unsigned group_id) {
+void grids::connected_components(node_t x, const std::vector<ir::metaparameter *> mps, const std::vector<std::string> prefixes, std::set<node_t> &nodes, graph_t &graph, unsigned group_id) {
   // std::cout << "connected component: " << x.first->get_name() << " " << x.second << std::endl;
   groups_[x.first].insert({x.second, group_id});
   if(nodes.find(x) != nodes.end()){
@@ -183,20 +183,20 @@ void tune::connected_components(node_t x, const std::vector<ir::metaparameter *>
   }
 }
 
-unsigned tune::get_param_group(ir::value *value, unsigned ax) {
+unsigned grids::get_param_group(ir::value *value, unsigned ax) {
   unsigned result = groups_.at(value).at(ax);
   return result;
 }
 
 //TODO: This shouldn't exist!
-void tune::copy(ir::value *dst, ir::value *src) {
+void grids::copy(ir::value *dst, ir::value *src) {
   params_[dst] = params_[src];
   groups_[dst] = groups_[src];
   fragments_[{dst, 0}] = fragments_[{src, 0}];
 }
 
 
-void tune::run(ir::module &mod) {
+void grids::run(ir::module &mod) {
   ir::context &ctx = mod.get_context();
   // Create metaparameters
   for(ir::function *fn: mod.get_function_list()){
@@ -318,7 +318,7 @@ void tune::run(ir::module &mod) {
 }
 
 
-void tune::create_grids(std::vector<ir::value*> &grids,
+void grids::create_grids(std::vector<ir::value*> &grids,
                         std::map<unsigned, ir::value*> &references,
                         ir::function *fn) {
   // get number of dimensions greater than 1
@@ -363,11 +363,7 @@ void tune::create_grids(std::vector<ir::value*> &grids,
 }
 
 
-bool tune::check_constraints(std::map<ir::value *, std::vector<std::string>> &errors) {
-  return errors.empty();
-}
-
-unsigned tune::get_num_threads() {
+unsigned grids::get_num_threads() {
   return num_warps_*32;
 }
 
@@ -573,7 +573,7 @@ inline void to_warps(const std::vector<unsigned> &bs, std::vector<unsigned> &nw,
 void selection::init_axes(ir::value *v, IRBuilder<> &builder, Value *u_thread_id, Value *u_warp_id) {
   const auto& shapes = v->get_type()->get_tile_shapes();
   size_t dim = shapes.size();
-  if(params_->get_fragment(v, 0) == analysis::tune::STRIDED_SCAN){
+  if(params_->get_fragment(v, 0) == analysis::grids::STRIDED_SCAN){
     std::vector<unsigned> contiguous(dim);
     std::vector<unsigned> block_size(dim);
     std::vector<unsigned> warp_size(dim);
@@ -1278,7 +1278,7 @@ void selection::lower_dot(ir::dot_inst *dot, LLVMContext &ctx, Function *fn, IRB
   if(NK != 1) {
     shared_tile *TA = (shared_tile*)tmap_.at(A);
     shared_tile *TB = (shared_tile*)tmap_.at(B);
-    if(params_->get_fragment(dot, 0) == analysis::tune::STRIDED_SCAN)
+    if(params_->get_fragment(dot, 0) == analysis::grids::STRIDED_SCAN)
       lower_scanline_dot(dot, ctx, fn, builder, TC, TA, TB, TD, NK, c_ty, f_mul_add);
     else
       lower_hmma_dot(dot, ctx, fn, builder, TC, TA, TB, TD, NK);
@@ -155,7 +155,7 @@ ir::value *reassociate::reassociate_idx(ir::value *old_value,
   return new_value;
 }
 
-reassociate::reassociate(analysis::tune* params)
+reassociate::reassociate(analysis::grids* params)
   : params_(params)
 { }
 
@@ -147,7 +147,7 @@ options function::autotune(lang::translation_unit *ast, driver::stream* stream,
     double ts;
     std::vector<unsigned> params;
   };
-  profile_t best = { INFINITY };
+  profile_t best = { INFINITY, {} };
   std::function<void(std::vector<unsigned>)> benchmark =
     [&](std::vector<unsigned> params) {
     // options
@@ -184,7 +184,7 @@ std::unique_ptr<driver::module> function::make_bin(ir::module &module, driver::c
     if(auto* mp = dynamic_cast<ir::metaparameter*>(module.globals().at(x.first)))
       mp->set_value(x.second);
   // create passes
-  codegen::analysis::tune tune(opt.num_warps);
+  codegen::analysis::grids tune(opt.num_warps);
   codegen::analysis::shmem::info shmem_info;
   codegen::analysis::shmem::liveness shmem_liveness(&shmem_info);
   codegen::analysis::shmem::allocation shmem_allocation(&shmem_liveness, &shmem_info, &tune);
@@ -74,49 +74,118 @@ inline std::unique_ptr<ir::module> make_ir(ir::context& ctx, triton::lang::trans
   return std::unique_ptr<ir::module>(module);
 }
 
 
+void gen_extract_inputs(std::ostream &os, const std::vector<ir::argument*>& args) {
+  for(unsigned i = 0; i < args.size(); i++){
+    ir::value *arg = args[i];
+    std::string suffix = "";
+    ir::type *tr_ty = arg->get_type();
+    std::string tf_ty = ref_to_tf_ty(tr_ty);
+    if(!tr_ty->is_pointer_ty())
+      suffix = ".scalar<" + tf_ty + ">()()";
+    os << " " << tf_ty << " " << arg->get_name() << " = context->input(" << i << ")" << suffix << ";\n ";
+  }
+}
+
+void gen_set_outputs(std::ostream &os, const std::vector<std::string>& outputs) {
+  for(unsigned i = 0; i < outputs.size(); i++)
+    os << " context->set_output(" << i << ", " << outputs[i] << ");\n ";
+}
+
+void gen_make_handles(std::ostream &os, const std::vector<ir::argument*>& args) {
+  for(unsigned i = 0; i < args.size(); i++){
+    ir::argument *arg = args[i];
+    if(!arg->get_type()->is_pointer_ty())
+      continue;
+    const std::string& name = arg->get_name();
+    os << " drv::cu_buffer cu_" + name + "(ctx, " + name + ".tensor_data().size(), (CUdeviceptr)" + name + ".tensor_data().data(), false);\n ";
+  }
+}
+
+void gen_make_spmd_grid(std::ostream &os, const std::vector<std::string>& macros) {
+  std::regex regex("#([a-zA-Z]([a-zA-Z]|[0-9])*)");
+  std::vector<std::string> grids = macros;
+  for(size_t i = grids.size(); i < 3; i++)
+    grids.push_back("1");
+  std::string grid = "rt::grid_t{";
+  for(size_t i = 0; i < grids.size(); i++){
+    if(i > 0)
+      grid += ", ";
+    grid += std::regex_replace(grids[i], regex, "x.at(\"$1\")");
+  }
+  grid += "}";
+
+  os << " auto grid = [&](const rt::params_t& x) { return " << grid << "; };\n ";
+}
+
+void gen_make_launch_function(std::ostream &os, const std::vector<ir::argument*>& args) {
+  os << " fn_({";
+  for(unsigned i = 0; i < args.size() ; i++){
+    ir::argument *arg = args[i];
+    std::string name = arg->get_name();
+    if(arg->get_type()->is_pointer_ty())
+      name = "&cu_" + name;
+    if(i > 0)
+      os << ", ";
+    os << name;
+  }
+  os << "}, grid, stream); \n";
+}
+
+void gen_register_kernel_builder(std::ostream &os, const std::string &name,
+                                 const std::string &classname,
+                                 const std::vector<ir::argument*>& args){
+  os << "REGISTER_KERNEL_BUILDER(Name(\"" + name + "\").Device(DEVICE_GPU)";
+  for(size_t i = 0; i < args.size(); i++){
+    ir::argument *arg = args[i];
+    std::string name = arg->get_name();
+    auto tolower = [](char c) { return std::tolower(c);};
+    std::transform(name.begin(), name.end(), name.begin(), tolower);
+    if(!arg->get_type()->is_pointer_ty())
+      os << ".HostMemory(\"" + name + "\")";
+  }
+  os << ", " + classname << ");\n";
+}
+
+void gen_register_op(std::ostream &os, const std::string &name,
+                     const std::vector<ir::argument*>& args,
+                     const std::vector<std::string>& outputs){
+  os << "REGISTER_OP(\"" << name << "\")\n";
+  for(size_t i = 0; i < args.size(); i++){
+    ir::argument *arg = args[i];
+    std::string name = arg->get_name();
+    auto tolower = [](char c) { return std::tolower(c);};
+    std::transform(name.begin(), name.end(), name.begin(), tolower);
+    os << " .Input(\"" << name << ": " << to_tf_scalar_ty(arg->get_type()) << "\")\n";
+  }
+  for(size_t i = 0; i < outputs.size(); i++){
+    std::string name = outputs[i];
+    size_t idx;
+    for(idx = 0; idx < args.size(); idx++)
+      if(args[idx]->get_name() == name)
+        break;
+    if(idx == args.size())
+      throw std::runtime_error("unknown output");
+    os << " .Output(\"out" << i << ": " << to_tf_scalar_ty(args[idx]->get_type()) << "\")\n";
+  }
+  os << ";\n";
+}
+
 std::string make_tensorflow_src(const std::string src,
                                 const std::vector<std::string>& outputs,
                                 const std::vector<std::string>& macros) {
   triton::lang::translation_unit *ast = make_ast(src.c_str());
   triton::ir::context context;
   std::unique_ptr<ir::module> ir = make_ir(context, ast);
-  // extract function signature
+  // function
   ir::function* fn = ir->get_function_list().front();
-  ir::function_type* fn_ty = fn->get_fn_type();
-  // numberof arguments
-  size_t n_args = fn_ty->get_num_params();
-  size_t n_outputs = outputs.size();
-  // extract function name
   std::string name = fn->get_name();
   name[0] = static_cast<char>(std::toupper(name[0]));
   std::string classname = name + "Op";
-  // extract argument name
-  std::vector<std::string> arg_names;
-  for(ir::argument *arg: fn->args())
-    arg_names.push_back(arg->get_name());
-  // cached int to str
-  std::vector<std::string> str_i;
-  for(size_t i = 0; i < fn_ty->get_num_params(); i++)
-    str_i.push_back(std::to_string(i));
-  // index of tensors
-  std::vector<size_t> ptr_idx;
-  for(unsigned i = 0; i < fn_ty->get_num_params(); i++)
-    if(fn_ty->get_param_ty(i)->is_pointer_ty())
-      ptr_idx.push_back(i);
-  // extract tensorflow types
-  std::vector<std::string> tf_scalar_tys;
-  std::transform(fn_ty->params_begin(), fn_ty->params_end(), std::back_inserter(tf_scalar_tys), to_tf_scalar_ty);
-  std::vector<std::string> tf_cref_tys;
-  std::transform(fn_ty->params_begin(), fn_ty->params_end(), std::back_inserter(tf_cref_tys), ref_to_tf_ty);
-  // output indices
-  std::vector<long> out_idx;
-  for(const std::string &name : outputs){
-    auto it = std::find(arg_names.begin(), arg_names.end(), name);
-    out_idx.push_back(std::distance(arg_names.begin(), it));
-  }
   std::ostringstream oss;
 
-  std::string result = R"(
+  oss << R"(
 #include "triton/driver/buffer.h"
 #include "triton/driver/backend.h"
 #include "triton/driver/stream.h"
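Note: a minimal standalone sketch of the macro substitution performed by gen_make_spmd_grid above. The regex rewrites every #NAME token in a launch-grid macro into a lookup x.at("NAME") on the runtime parameter map; the macro strings "(#M + 127)/128" and "(#N + 127)/128" are made-up examples, not taken from the commit.

// sketch_spmd_grid.cpp -- illustrative only; mirrors the regex used by gen_make_spmd_grid.
#include <iostream>
#include <regex>
#include <string>
#include <vector>

int main() {
  std::regex regex("#([a-zA-Z]([a-zA-Z]|[0-9])*)");
  std::vector<std::string> grids = {"(#M + 127)/128", "(#N + 127)/128"};  // hypothetical macros
  // pad to three dimensions, as the generator does
  for(size_t i = grids.size(); i < 3; i++)
    grids.push_back("1");
  std::string grid = "rt::grid_t{";
  for(size_t i = 0; i < grids.size(); i++){
    if(i > 0)
      grid += ", ";
    grid += std::regex_replace(grids[i], regex, "x.at(\"$1\")");
  }
  grid += "}";
  std::cout << grid << std::endl;
  // prints: rt::grid_t{(x.at("M") + 127)/128, (x.at("N") + 127)/128, 1}
}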
@@ -138,106 +207,52 @@ namespace drv = triton::driver;
 
 std::string src = R"TTKERNSRC( )" + src + ")TTKERNSRC\";" + R"(
 
-class )" + classname + R"(: public OpKernel {
+class )" << classname << R"(: public OpKernel {
 public:
-explicit )" + classname + R"((OpKernelConstruction* context)
+explicit )" << classname << R"((OpKernelConstruction* context)
 : OpKernel(context), fn_(src) { }
 
 void Compute(OpKernelContext* context){
 
 // get device/stream
 GPUDevice device = context->eigen_device<GPUDevice>();
 drv::cu_stream sstream(device.stream(), false);
 drv::context* ctx = sstream.context();
 drv::stream* stream = &sstream;
-// extract inputs)";
-for(unsigned i = 0; i < n_args; i++){
-std::string suffix = "";
-std::string ty = tf_cref_tys[i];
-if(!fn_ty->get_param_ty(i)->is_pointer_ty())
-suffix = ".scalar<" + ty + ">()()";
-result += R"(
-)" + ty + " " + arg_names[i] + " = context->input(" + str_i[i] + ")" + suffix + ";";
-}
-
-result += R"(
-
-// extract outputs)";
-for(unsigned i = 0; i < n_outputs; i++)
-result += R"(
-context->set_output()" + str_i[i] + ", " + outputs[i] + ");";
-
-result += R"(
-
-// wrap tensors)";
-for(size_t i: ptr_idx)
-result += R"(
-drv::cu_buffer cu_)" + arg_names[i] + "(ctx, " + arg_names[i] + ".tensor_data().size(), (CUdeviceptr)" + arg_names[i] + R"(.tensor_data().data(), false);)";
-
-
-std::regex regex("#([a-zA-Z]([a-zA-Z]|[0-9])*)");
-std::vector<std::string> grids = macros;
-for(size_t i = grids.size(); i < 3; i++)
-grids.push_back("1");
-std::string grid = "rt::grid_t{";
-for(size_t i = 0; i < grids.size(); i++){
-if(i > 0)
-grid += ", ";
-grid += std::regex_replace(grids[i], regex, "x.at(\"$1\")");
-}
-grid += "}";
-
-result += R"(
-
-// create launch grid;
-auto grid = [&](const rt::params_t& x) { return )" + grid + R"(; };)";
-
-result += R"(
-
-// execute function
-fn_({
+// extract inputs
 )";
-for(unsigned i = 0; i < n_args; i++){
-std::string arg = arg_names[i];
-if(fn_ty->get_param_ty(i)->is_pointer_ty())
-arg = "&cu_" + arg;
-if(i > 0)
-result += ", ";
-result += arg;
-}
-result += R"(
-}, grid, stream);
+gen_extract_inputs(oss, fn->args());
+oss << R"(
+// set outputs
+)";
+gen_set_outputs(oss, outputs);
+oss << R"(
+// wrap tensors
+)";
+gen_make_handles(oss, fn->args());
+oss << R"(
+// create spmd grid
+)";
+gen_make_spmd_grid(oss, macros);
+oss << R"(
+// launch function
+)";
+gen_make_launch_function(oss, fn->args());
+oss << R"(
 }
 
 private:
 rt::function fn_;
 };
 
-REGISTER_KERNEL_BUILDER(Name(")" + name + "\").Device(DEVICE_GPU)";
-for(size_t i = 0; i < tf_scalar_tys.size(); i++){
-std::string arg_name = arg_names[i];
-std::transform(arg_name.begin(), arg_name.end(), arg_name.begin(), [](char c) { return std::tolower(c);});
-if(!fn_ty->get_param_ty(i)->is_pointer_ty())
-result += ".HostMemory(\"" + arg_name + "\")";
-}
-result += ", " + classname + R"();
+// register kernel builder
+)";
+gen_register_kernel_builder(oss, name, classname, fn->args());
+oss << R"(
+// register op
+)";
+gen_register_op(oss, name, fn->args(), outputs);
 
-REGISTER_OP(")" + name + "\")\n";
-for(size_t i = 0; i < tf_scalar_tys.size(); i++){
-std::string arg_name = arg_names[i];
-std::transform(arg_name.begin(), arg_name.end(), arg_name.begin(), [](char c) { return std::tolower(c);});
-result += " .Input(\"" + arg_name + ": " + tf_scalar_tys[i] + "\")\n";
-}
-for(size_t i = 0; i < outputs.size(); i++){
-result += " .Output(\"out" + std::to_string(i) + ": " + tf_scalar_tys[out_idx[i]] + "\")\n";
-}
-result += ";\n";
-
-
-return result;
+return oss.str();
 }
 
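Note: a hedged usage sketch of the generator entry point refactored above. Only the make_tensorflow_src signature comes from this commit; the kernel source, the output argument name "c", the grid macro, and the file name generated_op.cc are hypothetical, and the program must be linked against the translation unit that defines the generator.

// generate_op.cpp -- illustrative driver, not part of the commit.
#include <fstream>
#include <string>
#include <vector>

// declared by the generator translation unit shown in the diff above
std::string make_tensorflow_src(const std::string src,
                                const std::vector<std::string>& outputs,
                                const std::vector<std::string>& macros);

int main() {
  std::string kernel = "...";                            // Triton-C kernel source (elided)
  std::vector<std::string> outputs = {"c"};              // kernel argument exposed as the op output
  std::vector<std::string> macros  = {"(#M + 127)/128"}; // launch-grid macro, #M resolved at runtime
  std::ofstream("generated_op.cc") << make_tensorflow_src(kernel, outputs, macros);
}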