[feature] basic tensor core utilization works

Philippe Tillet
2019-06-08 12:14:37 -07:00
parent 5f3d48c1d0
commit d074a166e2
4 changed files with 66 additions and 49 deletions


@@ -6,14 +6,14 @@ data_files_path = tf.resource_loader.get_data_files_path()
 library_dir = '/home/philippe/development/triton/build/examples/python/tensorflow'
 module = tf.load_op_library(os.path.join(library_dir, 'libtf_blocksparse.so'))
-M, N, K = 16, 16, 16
+M, N, K = 256, 256, 256
 a = tf.placeholder(tf.float16, shape=[M, K])
 b = tf.placeholder(tf.float16, shape=[N, K])
 locks = tf.placeholder(tf.int32, shape=[4096])
 c = module.dot(a, b, locks)
 # Reference
-ha = np.ones((M, K)).astype(np.float16)
-hb = np.ones((N, K)).astype(np.float16)
+ha = np.random.rand(M, K).astype(np.float16)
+hb = np.random.rand(N, K).astype(np.float16)
 hresult = np.dot(hb.T, ha)
 # Run
@@ -22,4 +22,7 @@ sess.run(tf.global_variables_initializer())
 result = sess.run([c], feed_dict = {locks: np.zeros(4096),
                                     a: ha,
                                     b: hb})
-print(result - hresult)
+print(result)
+print(hresult)
+#print(result - hresult)
+print(np.max(np.abs(result - hresult)))
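
For reference, the test script after this commit looks roughly as follows. This is a sketch reconstructed from the two hunks above, not the verbatim file: the import block and the session-creation line fall outside the hunks and are filled in here with standard TensorFlow 1.x boilerplate (tf.placeholder / tf.InteractiveSession era), and the library path is machine-specific.

import os
import numpy as np
import tensorflow as tf  # assumed: TensorFlow 1.x API, as implied by tf.placeholder

data_files_path = tf.resource_loader.get_data_files_path()
library_dir = '/home/philippe/development/triton/build/examples/python/tensorflow'
module = tf.load_op_library(os.path.join(library_dir, 'libtf_blocksparse.so'))

# 256x256x256 fp16 matrices are large enough to actually exercise the
# tensor-core path, unlike the previous 16x16x16 case.
M, N, K = 256, 256, 256
a = tf.placeholder(tf.float16, shape=[M, K])
b = tf.placeholder(tf.float16, shape=[N, K])
locks = tf.placeholder(tf.int32, shape=[4096])
c = module.dot(a, b, locks)

# Host-side reference, computed exactly as in the diff. Random inputs
# (rather than all-ones) make indexing or accumulation bugs visible
# instead of collapsing every output element to the same constant.
ha = np.random.rand(M, K).astype(np.float16)
hb = np.random.rand(N, K).astype(np.float16)
hresult = np.dot(hb.T, ha)

sess = tf.InteractiveSession()  # assumed: session creation is not shown in the diff
sess.run(tf.global_variables_initializer())
result = sess.run([c], feed_dict={locks: np.zeros(4096),
                                  a: ha,
                                  b: hb})
print(result)
print(hresult)
print(np.max(np.abs(result - hresult)))

Since the inputs are fp16 and each output element accumulates K = 256 products, the final max-abs-error print should be small but not exactly zero. An assertion-style equivalent of that print would be something like np.testing.assert_allclose(np.squeeze(result), hresult, rtol=1e-2, atol=1e-2), with tolerances loose enough for fp16 accumulation; the specific tolerance values here are illustrative, not from the source.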