[triton/codegen] added shared memory padding for HMMA arguments and vectorized loads
@@ -6,7 +6,7 @@ data_files_path = tf.resource_loader.get_data_files_path()
 library_dir = os.path.dirname(os.path.realpath(__file__))
 module = tf.load_op_library(os.path.join(library_dir, 'libtf_blocksparse.so'))
 
-M, N, K = 256,256,256
+M, N, K = 8192,8192,8192
 a = tf.placeholder(tf.float16, shape=[M, K])
 b = tf.placeholder(tf.float16, shape=[N, K])
 locks = tf.placeholder(tf.int32, shape=[4096])
@@ -30,9 +30,9 @@ result = sess.run([c], feed_dict = {locks: np.zeros(4096),
 # min_iters=100)
 #print(end - start)
 #print(2*M*N*K / (end - start) * 1e-12)
-hresult = np.dot(ha.T, hb).T
-dif = np.abs(result - hresult)
-print("dif: %f" % np.max(dif))
+#hresult = np.dot(ha.T, hb).T
+#dif = np.abs(result - hresult)
+#print("dif: %f" % np.max(dif))
 
 #np.savetxt("dif.txt", dif, fmt="%5.2f")
 #np.savetxt("gpu.txt", result, fmt="%5.2f")