[python] added basic tensorflow support
@@ -1,14 +1,6 @@
-import libtriton
 import triton
 import tensorflow as tf
-import distutils
-import distutils.log
-import setuptools.command.build_ext
-import setuptools
 import numpy as np
 import os
-import tempfile
-import shutil
-import hashlib
 
 src = """
 const tunable int TM = {128};
@@ -20,8 +12,8 @@ void matmul(restrict read_only align(16) half *A,
             restrict read_only align(16) half *C,
             int M, int N, int K,
             multiple_of(8) int lda, multiple_of(8) int ldb, int ldc) {
-  int ridx = get_range_id(0);
-  int ridy = get_range_id(1);
+  int ridx = get_program_id(0);
+  int ridy = get_program_id(1);
   int rxa[TM] = ridx * TM + (0 ... TM);
   int ryb[TN] = ridy * TN + (0 ... TN);
   int rka[TK] = 0 ... TK;
@@ -40,7 +32,7 @@ void matmul(restrict read_only align(16) half *A,
   }
   int rxc[TM] = ridx * TM + (0 ... TM);
   int ryc[TN] = ridy * TN + (0 ... TN);
-  half* pc[TM, TN] = C + ryc[newaxis, :]*ldc + rxc[:, newaxis];
+  half* pc[TM, TN] = C + ryc[newaxis, :] + rxc[:, newaxis]*ldc;
   half c[TM, TN] = xc;
   bool checkc0[TM] = rxc < M;
   bool checkc1[TN] = ryc < N;
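Note on the hunk above: the functional change is the `pc` pointer arithmetic, which moves the `ldc` stride from the `ryc` (column) index to the `rxc` (row) index, i.e. the C tile switches from column-major to row-major addressing. A rough NumPy analogy of the two offset computations, with toy sizes (a sketch for intuition only, not Triton-C semantics):

```python
import numpy as np

# Emulate the [TM, TN] offset tiles the kernel builds for C.
TM, TN, ldc = 4, 3, 8
rxc = 0 * TM + np.arange(TM)   # rxc[TM] = ridx * TM + (0 ... TM)
ryc = 0 * TN + np.arange(TN)   # ryc[TN] = ridy * TN + (0 ... TN)

old = ryc[np.newaxis, :] * ldc + rxc[:, np.newaxis]   # column-major: stride on columns
new = ryc[np.newaxis, :] + rxc[:, np.newaxis] * ldc   # row-major: stride on rows
print(old)
print(new)
```

This is also why `ldc` changes from `M` to `N` in a later hunk: the leading dimension now spans a row of C.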
@@ -49,100 +41,10 @@ void matmul(restrict read_only align(16) half *A,
 }
 """
 
-extra_ops = tf.load_op_library('/home/philippe/development/triton/python/build/lib.linux-x86_64-3.6/libextra_tf_ops.so')
-
-def make_bindings(src, outputs, grids):
-    return libtriton.make_tensorflow_src(src, outputs, grids)
-
-def make_cache_path(src):
-    md5 = hashlib.sha1(src.encode())
-    hexhash = md5.hexdigest()
-    home = os.path.expanduser('~')
-    cacheroot = os.path.join(home, '.triton', 'cache')
-    cachepath = os.path.join(cacheroot, str(hexhash))
-    if not os.path.exists(cachepath):
-        os.makedirs(cachepath)
-    print(cachepath)
-    return cachepath
-
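The removed `make_cache_path` content-addresses the generated bindings: a SHA-1 digest of the source selects a directory under `~/.triton/cache`, so identical generated code maps to the same build directory (note the local is named `md5` even though `hashlib.sha1` is what it holds). A minimal standalone sketch of the same scheme:

```python
import hashlib
import os

def cache_dir_for(src):
    # Content-addressed cache: identical generated source -> same directory.
    digest = hashlib.sha1(src.encode()).hexdigest()
    path = os.path.join(os.path.expanduser('~'), '.triton', 'cache', digest)
    os.makedirs(path, exist_ok=True)  # exist_ok avoids the exists()/makedirs() race
    return path
```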
-def write_bindings(src, root):
-    cpp = os.path.join(root, 'tensorflow.cpp')
-    so = os.path.join(root, 'tensorflow.so')
-    recompile = False
-    # recompile if .so does not exist
-    if not os.path.exists(cpp) or not os.path.exists(so):
-        recompile = True
-    # recompile if cpp was modified after .so
-    elif max(cpp, so, key=os.path.getctime) == cpp:
-        recompile = True
-    # write cpp file
-    if recompile:
-        with open(cpp, 'w+') as handle:
-            handle.writelines(src)
-    # return path of cpp file
-    return cpp
-
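One caveat in the removed `write_bindings`: the staleness test compares `os.path.getctime`, which is inode change time on Linux, while its comment says "modified after"; `os.path.getmtime` matches that intent. A sketch of the check as the comment describes it (my variant, not what this commit shipped):

```python
import os

def needs_recompile(cpp, so):
    # Rebuild if either file is missing, or the source was modified
    # more recently than the shared object (mtime, not ctime).
    if not os.path.exists(cpp) or not os.path.exists(so):
        return True
    return os.path.getmtime(cpp) > os.path.getmtime(so)
```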
-def build(src, path):
-    # include directories
-    triton_include_dirs = ['/home/philippe/development/triton/include']
-    tensorflow_include_dirs = [tf.sysconfig.get_include()]
-    cuda_include_dirs = ['/usr/local/cuda-10.1/targets/x86_64-linux/include/']
-    include_dirs = triton_include_dirs + tensorflow_include_dirs + cuda_include_dirs
-    # library directories
-    triton_library_dirs = [os.path.realpath(os.path.join(libtriton.__file__, os.path.pardir))]
-    tensorflow_library_dirs = [tf.sysconfig.get_lib()]
-    library_dirs = triton_library_dirs + tensorflow_library_dirs
-    # libraries
-    libraries = ['tensorflow_framework', 'triton']
-    # extra arguments
-    extra_compile_args = []
-    extra_link_args = []
-    # create extension module
-    ext = setuptools.Extension(
-        name = 'test',
-        language = 'c++',
-        sources = [src],
-        include_dirs = include_dirs,
-        extra_compile_args = extra_compile_args,
-        extra_link_args = extra_link_args,
-        library_dirs = library_dirs,
-        libraries = libraries
-    )
-    # build extension module
-    args = ['build_ext']
-    tmp = tempfile.mkdtemp()
-    args.append('--build-temp=' + tmp)
-    args.append('--build-lib=' + path)
-    args.append('-q')
-    args = dict(
-        name = 'test',
-        ext_modules = [ext],
-        script_args = args,
-    )
-    setuptools.setup(**args)
-    shutil.rmtree(tmp)
-
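The removed `build` drives setuptools programmatically: it wraps the generated C++ in an `Extension` and passes `script_args` to `setup()` so `build_ext` compiles straight into the cache directory. The Triton and CUDA paths are hard-coded to the author's machine; a more portable variant could read them from the environment (the `TRITON_HOME`/`CUDA_HOME` names below are my assumption, not anything this commit defines):

```python
import os
import tensorflow as tf

# Hypothetical portable include-path setup; TRITON_HOME and CUDA_HOME
# are assumed environment variables, with fallback guesses.
triton_home = os.environ.get('TRITON_HOME', os.path.expanduser('~/triton'))
cuda_home = os.environ.get('CUDA_HOME', '/usr/local/cuda')
include_dirs = [
    os.path.join(triton_home, 'include'),  # Triton headers
    tf.sysconfig.get_include(),            # TensorFlow headers
    os.path.join(cuda_home, 'include'),    # CUDA headers
]
```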
-def make_tensorflow_op(src, outputs, grids):
-    bindings = make_bindings(src, outputs, grids)
-    cache_path = make_cache_path(bindings)
-    cpp = write_bindings(bindings, cache_path)
-    build(cpp, cache_path)
-    result = tf.load_op_library(os.path.join(cache_path, 'test.cpython-36m-x86_64-linux-gnu.so'))
-    return result
-
-library_dir = os.path.dirname(os.path.realpath(__file__))
-module = make_tensorflow_op(src, ['C'], ['(M + #TM - 1)/#TM', '(N + #TN - 1)/#TN'])
-print(module.matmul)
-
 class dot:
 
     def __init__(self):
-        trans_a = True
-        trans_b = False
+        self.matmul = triton.make_tensorflow_op(src, ['C'], ['(M + #TM - 1)/#TM', '(N + #TN - 1)/#TN'])
 
     def __call__(self, a, b):
         shape_a = tf.shape(a)
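The removed `make_tensorflow_op` loads the built module by a hard-coded filename, `test.cpython-36m-x86_64-linux-gnu.so`, which only matches CPython 3.6 on x86-64 Linux. The suffix for the running interpreter can be queried instead (a sketch, not what the commit does):

```python
import sysconfig

# EXT_SUFFIX is e.g. '.cpython-36m-x86_64-linux-gnu.so' on CPython 3.6 / Linux.
suffix = sysconfig.get_config_var('EXT_SUFFIX')
so_name = 'test' + suffix
```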
@@ -152,17 +54,17 @@ class dot:
         N = shape_b[0]
         lda = M
         ldb = K
-        ldc = M
-        c = extra_ops.alloc_empty(tf.stack([M, N]))
-        return module.matmul(a, b, c, M, N, K, lda, ldb, ldc)
+        ldc = N
+        c = triton.empty([M, N])
+        return self.matmul.matmul(a, b, c, M, N, K, lda, ldb, ldc)
 
-dot_nt = dot()
+dot_tn = dot()
 def run_dot():
     M, N, K = 128, 128, 128
     a = tf.placeholder(tf.float16, shape=[M, K])
     b = tf.placeholder(tf.float16, shape=[N, K])
     # c = tf.matmul(a, b, transpose_a=True)
-    c = dot_nt(a, b)
+    c = dot_tn(a, b)
     # Reference
     ha = np.random.rand(M, K).astype(np.float16)
     hb = np.random.rand(N, K).astype(np.float16)
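The grid expressions passed to `make_tensorflow_op`, `(M + #TM - 1)/#TM` and `(N + #TN - 1)/#TN`, are integer ceiling divisions: launch enough `TM x TN` tiles to cover the `M x N` output even when the sizes don't divide evenly. In Python terms:

```python
def ceil_div(x, y):
    # (x + y - 1) // y rounds up, matching '(M + #TM - 1)/#TM'.
    return (x + y - 1) // y

M, N, TM, TN = 128, 128, 128, 128
grid = (ceil_div(M, TM), ceil_div(N, TN))  # (1, 1): one 128x128 tile covers C
```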
@@ -172,7 +74,7 @@ def run_dot():
     result = sess.run([c], feed_dict = {a: ha,
                                         b: hb})[0]
     # Test
-    hresult = np.dot(ha.T, hb).T
+    hresult = np.dot(ha.T, hb)
     dif = np.abs(result - hresult)
     np.savetxt('dif.dat', dif, '%2.4f')
     print(hresult)
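`run_dot` writes the element-wise difference to `dif.dat` and prints the reference, leaving pass/fail to visual inspection. A tolerance check along these lines would make the test self-reporting (a sketch; the loose tolerances reflect float16 accumulation over K = 128):

```python
import numpy as np

def report(result, hresult, rtol=1e-2, atol=1e-2):
    # Compare against the NumPy reference and print a verdict.
    err = np.max(np.abs(result - hresult))
    ok = np.allclose(result, hresult, rtol=rtol, atol=atol)
    print('max abs err = %.4f -> %s' % (err, 'PASS' if ok else 'FAIL'))
```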