[codegen] added fallback when tensor cores cannot be used

Philippe Tillet
2019-06-25 15:49:58 -07:00
parent 62000738f0
commit 64513fb407
6 changed files with 39 additions and 22 deletions
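
For background: tensor cores can only be used on GPUs with CUDA compute capability 7.0 or higher, and only for eligible operand types such as fp16, so the generated matmul kernel needs a plain-FMA path when those conditions are not met. That fallback lives in the code generator itself; purely as an illustrative sketch (the probe below is not part of this commit, and assumes the TensorFlow 1.x setup used by the test in this diff), the triggering condition can be checked from Python like so:

import tensorflow as tf

# Hypothetical probe, not code from this commit: tensor cores require a
# Volta-class GPU (compute capability >= 7.0) and a CUDA build of TF.
has_tensor_cores = tf.test.is_gpu_available(cuda_only=True,
                                            min_cuda_compute_capability=(7, 0))

# With the fallback in place, fp16 inputs should work either way; this flag
# only indicates which code path the generated kernel is expected to take.
print("tensor cores available:", has_tensor_cores)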


@@ -6,7 +6,7 @@ data_files_path = tf.resource_loader.get_data_files_path()
 library_dir = os.path.dirname(os.path.realpath(__file__))
 module = tf.load_op_library(os.path.join(library_dir, 'libtf_blocksparse.so'))
-M, N, K = 8192, 8192, 8192
+M, N, K = 128,128,128
 a = tf.placeholder(tf.float16, shape=[M, K])
 b = tf.placeholder(tf.float16, shape=[N, K])
 locks = tf.placeholder(tf.int32, shape=[4096])
@@ -24,16 +24,6 @@ result = sess.run([c], feed_dict = {locks: np.zeros(4096),
 a: ha,
 b: hb})[0]
-#bench = tf.test.Benchmark().run_op_benchmark(sess=sess,
-# op_or_tensor=c,
-# feed_dict={a: ha, b: hb},
-# min_iters=100)
-#print(end - start)
-#print(2*M*N*K / (end - start) * 1e-12)
-#hresult = np.dot(ha.T, hb).T
-#dif = np.abs(result - hresult)
-#print("dif: %f" % np.max(dif))
-#np.savetxt("dif.txt", dif, fmt="%5.2f")
-#np.savetxt("gpu.txt", result, fmt="%5.2f")
-#np.savetxt("cpu.txt", hresult, fmt="%5.2f")
+hresult = np.dot(ha.T, hb).T
+dif = np.abs(result - hresult)
+print("dif: %f" % np.max(dif))