// triton/python/src/tensorflow.cc

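//
// pybind11 bindings for Triton's framework integration: generates the C++
// source of custom TensorFlow / PyTorch ops that launch a Triton kernel,
// and keeps the global registries mapping an op id to its compiled runtime
// function and launch grid.
//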
#include <pybind11/pybind11.h>
#include <pybind11/stl.h>
#include <pybind11/functional.h>
#include <string>
#include <regex>
#include <algorithm>
#include "triton/codegen/selection.h"
#include "triton/runtime/function.h"
#include "triton/lang/code_gen.h"
#include "triton/lang/parser.h"
#include "triton/lang/cpp.h"
#include "triton/driver/device.h"
#include "triton/driver/stream.h"
#include "triton/driver/kernel.h"
#include "triton/driver/module.h"
#include "triton/ir/module.h"
#include "triton/ir/function.h"
#include "triton/tools/bench.hpp"
using namespace triton;
namespace rt = triton::runtime;
/* Op registries shared with the generated TensorFlow/PyTorch op code */
std::map<size_t, std::shared_ptr<rt::function::grid_fn_ty>> id_grid_map;
std::map<size_t, std::shared_ptr<rt::function>> id_fn_map;
std::map<size_t, int64_t> i64scalar_map;
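// Stores (or replaces) the launch-grid callback associated with an op id.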
void register_grid(size_t id,
const rt::function::grid_fn_ty& grid_fn) {
id_grid_map[id].reset(new rt::function::grid_fn_ty(grid_fn));
}
void delete_grid(size_t id) {
id_grid_map.erase(id);
std::cout << "deleted " << id_grid_map.size() << std::endl;
}
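// Builds a runtime function from the Triton source and option space and
// caches it under the op id.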
void register_fn(size_t id,
const std::string& src,
const rt::function::options_space_t& opt) {
id_fn_map[id].reset(new rt::function(src, opt));
}
void delete_fn(size_t id) {
id_fn_map.erase(id);
std::cout << "deleted " << id_fn_map.size() << std::endl;
}
void cleanup() {
id_grid_map.clear();
id_fn_map.clear();
i64scalar_map.clear();
}
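// Fresh ids are derived from the current sizes of the registries.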
size_t make_op_id() {
return id_fn_map.size();
}
size_t make_scalar_id() {
return i64scalar_map.size();
}
bool has_scalar(size_t id) {
return i64scalar_map.find(id) != i64scalar_map.end();
}
int64_t retrieve_scalar(size_t id) {
return i64scalar_map.at(id);
}
/* TF source-code generation */
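// Maps a Triton IR type to the corresponding TensorFlow dtype name.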
inline std::string to_tf_ty(ir::type *ty) {
if(ty->is_integer_ty(1))
return "bool";
if(ty->is_integer_ty(8))
return "int8";
if(ty->is_integer_ty(16))
return "int16";
if(ty->is_integer_ty(32))
return "int32";
if(ty->is_integer_ty(64))
return "int64";
if(ty->is_half_ty())
return "float16";
if(ty->is_float_ty())
return "float32";
if(ty->is_double_ty())
return "float64";
if(ty->is_pointer_ty())
return "Tensor";
throw std::runtime_error("unknown type");
}
inline std::string to_tf_scalar_ty(ir::type *ty) {
if(ty->is_pointer_ty())
return to_tf_ty(ty->get_pointer_element_ty());
else {
return to_tf_ty(ty);
}
}
inline std::string ref_to_tf_ty(ir::type *ty) {
std::string res = to_tf_ty(ty);
if(ty->is_pointer_ty())
res = "const " + res + "&";
return res;
}
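// Emits code that reads each kernel argument from the OpKernelContext:
// tensors by const reference, scalars via .scalar<T>()().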
void gen_extract_inputs(std::ostream &os, const std::vector<ir::argument*>& args) {
for(unsigned i = 0; i < args.size(); i++){
ir::value *arg = args[i];
std::string suffix = "";
ir::type *tr_ty = arg->get_type();
std::string tf_ty = ref_to_tf_ty(tr_ty);
if(!tr_ty->is_pointer_ty())
suffix = ".scalar<" + tf_ty + ">()()";
os << " " << tf_ty << " " << arg->get_name() << " = context->input(" << i << ")" << suffix << ";\n ";
}
}
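// Emits code that forwards the named input tensors as the op's outputs.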
void gen_set_outputs(std::ostream &os, const std::vector<std::string>& outputs) {
for(unsigned i = 0; i < outputs.size(); i++)
os << " context->set_output(" << i << ", " << outputs[i] << ");\n ";
}
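// Emits code that wraps each input tensor's device pointer in a
// triton::driver::cu_buffer.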
void gen_make_handles(std::ostream &os, const std::vector<ir::argument*>& args) {
for(unsigned i = 0; i < args.size(); i++){
ir::argument *arg = args[i];
if(!arg->get_type()->is_pointer_ty())
continue;
const std::string& name = arg->get_name();
os << " drv::cu_buffer cu_" + name + "(ctx, " + name + ".tensor_data().size(), (CUdeviceptr)" + name + ".tensor_data().data(), false);\n ";
}
}
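// Emits the call that launches the cached Triton function on the wrapped
// buffers and scalars over the registered grid.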
void gen_make_launch_function(std::ostream &os, const std::vector<ir::argument*>& args) {
os << " (*id_fn_map.at(id_))({";
for(unsigned i = 0; i < args.size() ; i++){
ir::argument *arg = args[i];
std::string name = arg->get_name();
if(arg->get_type()->is_pointer_ty())
name = "&cu_" + name;
if(i > 0)
os << ", ";
os << name;
}
os << "}, *id_grid_map.at(id_), stream); \n";
}
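// Emits the REGISTER_KERNEL_BUILDER statement; scalar arguments are pinned
// to host memory.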
void gen_tf_register_kernel_builder(std::ostream &os, const std::string &name,
const std::string &opname,
const std::vector<ir::argument*>& args){
os << "REGISTER_KERNEL_BUILDER(Name(\"" + name + "\").Device(DEVICE_GPU)";
for(size_t i = 0; i < args.size(); i++){
ir::argument *arg = args[i];
std::string name = arg->get_name();
auto tolower = [](char c) { return std::tolower(c);};
std::transform(name.begin(), name.end(), name.begin(), tolower);
if(!arg->get_type()->is_pointer_ty())
os << ".HostMemory(\"" + name + "\")";
}
os << ", " + opname << ");\n";
}
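// Emits the REGISTER_OP declaration: one typed input per kernel argument,
// one output per requested output name, plus the id attribute.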
void gen_tf_register_op(std::ostream &os, const std::string &name,
const std::vector<ir::argument*>& args,
const std::vector<std::string>& outputs){
os << "REGISTER_OP(\"" << name << "\")\n";
for(size_t i = 0; i < args.size(); i++){
ir::argument *arg = args[i];
std::string name = arg->get_name();
auto tolower = [](char c) { return std::tolower(c);};
std::transform(name.begin(), name.end(), name.begin(), tolower);
os << " .Attr(\"T" << i << " : {bool, int8, int16, int32, int64, float16, float32, float64}\")" << std::endl;
os << " .Input(\"" << name << ": T" << i << "\")\n";
}
for(size_t i = 0; i < outputs.size(); i++){
std::string name = outputs[i];
size_t idx;
for(idx = 0; idx < args.size(); idx++)
if(args[idx]->get_name() == name)
break;
if(idx == args.size())
throw std::runtime_error("unknown output");
os << " .Output(\"out" << i << ": T" << idx << "\")\n";
}
os << " .Attr(\"id: int\")" << std::endl;
os << ";\n";
}
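// Source prepended to every Triton kernel: C99 bool support plus the
// Triton-specific attributes and builtins.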
inline std::string preheader() {
return
R"(
#define bool _Bool
#define true 1
#define false 0
#define __bool_true_false_are_defined 1
#define __readonly __attribute__((readonly))
#define __writeonly __attribute__((writeonly))
#define __noalias __attribute__((noalias))
#define __aligned(A) __attribute__((aligned(A)))
#define __multipleof(A) __attribute__((multipleof(A)))
extern int get_program_id(int);
)";
}
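// Runs the Triton front end (preprocess, parse, codegen) on src and fills
// the given IR module.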
void make_module(const std::string& src, ir::module* ir,
const runtime::function::options_space_t& opt) {
std::string copy = preheader() + src;
// pre-process
TokenSequence tokens;
Preprocessor cpp(&copy, true);
for(auto it: opt.defines){
cpp.AddMacro(it.first, &it.second[0]);
}
cpp.Process(tokens);
// parse
Parser parser(tokens);
parser.Parse();
Generator gen(&parser);
gen.Gen(ir);
}
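// Generates the C++ source of a TensorFlow OpKernel that launches the given
// Triton kernel; returns {source, kernel name}.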
std::tuple<std::string,
std::string> make_tensorflow_src(const std::string& src,
const std::vector<std::string>& outputs,
const runtime::function::options_space_t& opt)
{
// triton-ir code-gen
ir::context ctx;
auto ir = std::shared_ptr<ir::module>(new ir::module("", ctx));
make_module(src, &*ir, opt);
// function
ir::function* fn = ir->get_function_list().front();
std::string name = fn->get_name();
std::string cc_name = name;
cc_name[0] = static_cast<char>(std::toupper(cc_name[0]));
std::string opname = cc_name + "Op";
std::ostringstream oss;
oss << R"(
#include "triton/driver/buffer.h"
#include "triton/driver/backend.h"
#include "triton/driver/stream.h"
#include "triton/runtime/function.h"
#define EIGEN_USE_GPU
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/shape_inference.h"
#include "tensorflow/core/framework/op_kernel.h"
using namespace tensorflow;
using GPUDevice = Eigen::GpuDevice;
namespace rt = triton::runtime;
namespace drv = triton::driver;
extern std::map<size_t, std::shared_ptr<rt::function::grid_fn_ty>> id_grid_map;
extern std::map<size_t, std::shared_ptr<rt::function>> id_fn_map;
class )" << opname << R"(: public OpKernel {
public:
explicit )" << opname << R"((OpKernelConstruction* context)
: OpKernel(context) {
OP_REQUIRES_OK(context, context->GetAttr("id", &id_));
}
void Compute(OpKernelContext* context){
// get device/stream
GPUDevice device = context->eigen_device<GPUDevice>();
drv::cu_stream sstream(device.stream(), false);
drv::context* ctx = sstream.context();
drv::stream* stream = &sstream;
// extract inputs
)";
gen_extract_inputs(oss, fn->args());
oss << R"(
// set outputs
)";
gen_set_outputs(oss, outputs);
oss << R"(
// wrap tensors
)";
gen_make_handles(oss, fn->args());
oss << R"(
)";
oss << R"(
// launch function
)";
gen_make_launch_function(oss, fn->args());
oss << R"(
}
private:
int id_;
};
// register kernel builder
)";
gen_tf_register_kernel_builder(oss, cc_name, opname, fn->args());
oss << R"(
// register op
)";
gen_tf_register_op(oss, cc_name, fn->args(), outputs);
return {oss.str(), name};
}
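// Maps a Triton IR type to the C++ type used in the generated Torch op
// signature.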
inline std::string to_torch_ty(ir::type *ty) {
if(ty->is_integer_ty(1))
return "bool";
if(ty->is_integer_ty(8))
return "int8_t";
if(ty->is_integer_ty(16))
return "int16_t";
if(ty->is_integer_ty(32))
return "int32_t";
if(ty->is_integer_ty(64))
return "int64_t";
if(ty->is_half_ty())
return "at::Half";
if(ty->is_float_ty())
return "float";
if(ty->is_double_ty())
return "double";
if(ty->is_pointer_ty())
return "torch::Tensor";
throw std::runtime_error("unknown type");
}
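// Emits the signature of the generated Torch op: a tuple of the requested
// outputs, an id parameter, then one parameter per kernel argument.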
void gen_torch_signature(std::ostringstream& oss,
ir::function* fn,
const std::vector<std::string>& outputs,
const std::string& name) {
const auto& args = fn->args();
std::vector<ir::type*> out_types;
for(const std::string& out: outputs) {
auto it = std::find_if(args.begin(), args.end(),
[&](ir::argument* arg) { return arg->get_name() == out; });
if(it == args.end())
throw std::runtime_error("unknown argument");
out_types.push_back((*it)->get_type());
}
oss << "std::tuple<";
for(size_t i = 0; i < out_types.size(); i++){
if(i > 0)
oss << ", ";
oss << to_torch_ty(out_types[i]);
}
oss << "> ";
oss << name << "(";
oss << "int64_t id";
for(size_t i = 0; i < args.size(); i++) {
ir::argument* arg = args[i];
oss << ", " << to_torch_ty(arg->get_type()) << " " << arg->get_name();
}
oss << ")";
}
void gen_torch_init_driver(std::ostringstream &oss,
const std::vector<ir::argument*>& args) {
// recover the CUDA device and stream from the first tensor (pointer) argument
auto it = std::find_if(args.begin(), args.end(),
[](ir::argument* arg) { return arg->get_type()->is_pointer_ty(); });
const std::string& tensor = (*it)->get_name();
oss << " // Wrap CUDA handles" << std::endl;
oss << " c10::DeviceIndex device = " << tensor << ".storage().device().index();" << std::endl;
oss << " // Get stream" << std::endl;
oss << " CUstream custream = (CUstream)at::cuda::getCurrentCUDAStream(device).stream();" << std::endl;
oss << " triton::driver::cu_stream stream(custream, false);" << std::endl;
oss << " triton::driver::context* ctx = stream.context();" << std::endl;
}
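// Emits code that wraps each tensor argument's storage in a
// triton::driver::cu_buffer.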
void gen_torch_make_handles(std::ostream &os,
const std::vector<ir::argument*>& args) {
for(unsigned i = 0; i < args.size(); i++){
ir::argument *arg = args[i];
if(!arg->get_type()->is_pointer_ty())
continue;
const std::string& name = arg->get_name();
os << " drv::cu_buffer cu_" + name + "(ctx, " + name + ".storage().size(), (CUdeviceptr)" + name + ".storage().data(), false);\n ";
}
}
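// Emits the call that launches the cached Triton function over the
// registered grid.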
void gen_torch_make_launch_function(std::ostream &os, const std::vector<ir::argument*>& args) {
os << " (*id_fn_map.at(id))({";
for(unsigned i = 0; i < args.size() ; i++){
ir::argument *arg = args[i];
std::string name = arg->get_name();
if(arg->get_type()->is_pointer_ty())
name = "&cu_" + name;
if(i > 0)
os << ", ";
os << name;
}
os << "}, *id_grid_map.at(id), &stream); \n";
}
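// Generates the C++ source of a Torch custom op that launches the given
// Triton kernel; returns {source, kernel name}.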
std::tuple<std::string,
std::string> make_pytorch_src(const std::string& src,
const std::vector<std::string>& outputs,
const runtime::function::options_space_t& opt) {
// triton-ir code-gen
ir::context ctx;
auto ir = std::shared_ptr<ir::module>(new ir::module("", ctx));
make_module(src, &*ir, opt);
// function
ir::function* fn = ir->get_function_list().front();
std::string name = fn->get_name();
// generate framework code
std::ostringstream oss;
oss << R"(
#include "torch/script.h"
#include "ATen/cuda/CUDAContext.h"
#include "triton/driver/buffer.h"
#include "triton/driver/backend.h"
#include "triton/driver/stream.h"
#include "triton/runtime/function.h"
namespace rt = triton::runtime;
namespace drv = triton::driver;
extern std::map<size_t, std::shared_ptr<rt::function::grid_fn_ty>> id_grid_map;
extern std::map<size_t, std::shared_ptr<rt::function>> id_fn_map;
)";
gen_torch_signature(oss, fn, outputs, name);
oss << " {" << std::endl;
gen_torch_init_driver(oss, fn->args());
gen_torch_make_handles(oss, fn->args());
gen_torch_make_launch_function(oss, fn->args());
// return the requested outputs so the generated function matches its signature
oss << " return std::make_tuple(";
for(size_t i = 0; i < outputs.size(); i++){
if(i > 0)
oss << ", ";
oss << outputs[i];
}
oss << ");";
oss << std::endl << "}";
oss << "static auto registry = torch::jit::RegisterOperators(\"triton::" << name << "\", &" << name << ");" << std::endl;
return {oss.str(), name};
}
typedef triton::runtime::function::options_t options_t;
typedef triton::runtime::function::options_space_t options_space_t;
PYBIND11_MODULE(libtriton, m) {
m.doc() = "Python bindings to the C++ Triton API";
// framework binding source code generation
m.def("make_tensorflow_src", &make_tensorflow_src,
"Creates C++ source code for a custom TensorFlow op "
"corresponding to the specified Triton kernel");
m.def("make_pytorch_src", &make_pytorch_src,
"Creates C++ source code for a custom PyTorch op "
"corresponding to the specified Triton kernel");
// bindings for triton classes
pybind11::class_<options_t>(m, "options")
.def(pybind11::init<>())
.def("d", &options_t::D<int>)
.def_readonly("num_warps", &options_t::num_warps);
pybind11::class_<options_space_t>(m, "options_space")
.def(pybind11::init<>())
.def_readwrite("defines", &options_space_t::defines)
.def_readwrite("num_warps", &options_space_t::num_warps);
// hooks into triton constructs since frameworks may not use pybind11
m.def("register_grid", &register_grid);
m.def("delete_grid", &delete_grid);
m.def("register_fn", &register_fn);
m.def("delete_fn", &delete_fn);
m.def("make_op_id", &make_op_id);
m.def("make_scalar_id", &make_scalar_id);
m.def("retrieve_scalar", &retrieve_scalar);
m.def("cleanup", &cleanup);
}