[PYTHON] Made bench_blocksparse and bench_cross_entropy compatible with the new performance report API
Philippe Tillet
2021-03-08 20:19:10 -05:00
parent 5b9afaa688
commit 58a5c87c53
2 changed files with 16 additions and 9 deletions
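
In short, both benchmarks are moved to the convention where triton.testing.do_bench returns a (mean, min, max) triple of times in milliseconds and a perf_report-decorated function returns one converted value per statistic. A minimal sketch of that pattern, using a hypothetical vector-add benchmark rather than the files touched here (the warmup/rep keywords and the three return values of do_bench are taken from the diffs below; everything else is illustrative):

import torch
import triton

def bench_vector_add(N, dtype=torch.float16, warmup=5, rep=5):
    # hypothetical benchmark, not part of this commit
    x = torch.randn(N, dtype=dtype, device='cuda')
    y = torch.randn(N, dtype=dtype, device='cuda')
    # 2 reads + 1 write per element, converted to GB, then divided by seconds
    gbps = lambda ms: 3 * x.numel() * x.element_size() * 1e-9 / (ms * 1e-3)
    # do_bench at this revision is assumed to return (mean_ms, min_ms, max_ms)
    mean_ms, min_ms, max_ms = triton.testing.do_bench(lambda: x + y, warmup=warmup, rep=rep)
    # one value per statistic, as the new performance report API expects
    return gbps(mean_ms), gbps(min_ms), gbps(max_ms)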

bench_blocksparse

@@ -23,6 +23,7 @@ square_confs = [
     for op_mode in ['sdd', 'dsd', 'dds'] for layout_mode in ['tril', 'dense']
 ]
+@triton.testing.perf_report(square_confs)
 def bench_matmul(M, N, K, block, layout_mode, op_mode, AT, BT, dtype, provider, warmup=5, rep=5):
     Z, H = 1, 1
@@ -37,19 +38,20 @@ def bench_matmul(M, N, K, block, layout_mode, op_mode, AT, BT, dtype, provider,
     a = torch.randn((Z, H, K, M) if AT else (Z, H, M, K), dtype=dtype, device='cuda')
     b = torch.randn((Z, H, N, K) if BT else (Z, H, K, N), dtype=dtype, device='cuda')
     # create op
+    tflops = lambda ms: num_flops / ms * 1e3
     if provider == 'triton':
         op = triton.ops.blocksparse.matmul(layout, block, op_mode, trans_a=AT, trans_b=BT)
         # inputs
         a = triton.testing.sparsify_tensor(a, layout, block) if op_mode == 'dsd' else a
         b = triton.testing.sparsify_tensor(b, layout, block) if op_mode == 'dds' else b
-        ms = triton.testing.do_bench(lambda: op(a, b), warmup=warmup, rep=rep)
+        mean_ms, min_ms, max_ms = triton.testing.do_bench(lambda: op(a, b), warmup=warmup, rep=rep)
         num_flops = {
             'sdd': 2 * Z * K * float(layout.sum()) * block * block,\
             'dsd': 2 * Z * N * float(layout.sum()) * block * block,\
             'dds': 2 * Z * M * float(layout.sum()) * block * block
         }[op_mode]*1e-12
-        triton_tflops = num_flops / ms * 1e3
-        return triton_tflops
+        return tflops(mean_ms), tflops(min_ms), tflops(max_ms)

 # -------------------------------
 # Softmax
@@ -70,6 +72,7 @@ square_confs = [
     for layout_mode in ['dense', 'tril']
 ]
+@triton.testing.perf_report(square_confs)
 def bench_softmax(M, N, block, layout_mode, dtype, provider, warmup=10, rep=50):
     Z, H = 1, 1
@@ -82,6 +85,6 @@ def bench_softmax(M, N, block, layout_mode, dtype, provider, warmup=10, rep=50):
     if provider == 'triton':
         a = triton.testing.sparsify_tensor(a, layout, block)
         op = triton.ops.blocksparse.softmax(layout, block)
-        ms = triton.testing.do_bench(lambda: op(a), warmup=warmup, rep=rep)
-        gbps = (2 * a.numel() * a.element_size() * 1e-9) / (ms * 1e-3)
-        return gbps
+        gbps = lambda ms: (2 * a.numel() * a.element_size() * 1e-9) / (ms * 1e-3)
+        mean_ms, min_ms, max_ms = triton.testing.do_bench(lambda: op(a), warmup=warmup, rep=rep)
+        return gbps(mean_ms), gbps(min_ms), gbps(max_ms)
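
A note on the units in the hunks above: num_flops is pre-scaled by 1e-12 (TFLOP) and the softmax benchmark counts bytes in GB, so dividing by the elapsed time converted to seconds yields TFLOP/s and GB/s directly. A worked example of the gbps conversion with illustrative numbers (not measured results):

# illustrative arithmetic only: 4096x4096 fp16 tensor read and written once,
# with a hypothetical kernel time of 0.5 ms
numel, element_size, ms = 4096 * 4096, 2, 0.5
num_gb = 2 * numel * element_size * 1e-9   # bytes moved, in GB
print(num_gb / (ms * 1e-3))                # ~134 GB/s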

bench_cross_entropy

@@ -16,22 +16,26 @@ confs = [
     for mode in ['forward', 'backward']
 ]
+@triton.testing.perf_report(confs)
 def bench_op(M, N, dtype, mode, provider):
     # create inputs
     x = torch.randn(M, N, dtype=dtype, device='cuda', requires_grad=True)
     idx = 4 + torch.ones(M, dtype=torch.int64, device='cuda')
     num_gb = (2 * x.numel() * x.element_size() * 1e-9)
+    gbps = lambda ms: num_gb / ms * 1e3
     # forward pass
     op = {'torch': torch.nn.CrossEntropyLoss(reduction='none'), \
           'triton': triton.ops.cross_entropy}[provider]
     if mode == 'forward':
-        ms = triton.testing.do_bench(lambda: op(x, idx))
+        mean_ms, min_ms, max_ms = triton.testing.do_bench(lambda: op(x, idx))
     if mode == 'backward':
         y = op(x, idx)
         dy = torch.randn_like(y)
-        ms = triton.testing.do_bench(lambda: y.backward(dy, retain_graph=True), grad_to_none=x)
-    return num_gb / ms * 1e3
+        fn = lambda: y.backward(dy, retain_graph=True)
+        mean_ms, min_ms, max_ms = triton.testing.do_bench(fn, grad_to_none=x)
+    return gbps(mean_ms), gbps(min_ms), gbps(max_ms)

 if __name__ == '__main__':
     bench_op.run('tmp', False)
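
The blocksparse hunks above show no __main__ entry point; assuming the perf_report decorator attaches the same two-argument .run(...) used for bench_op here (an output path and what appears to be a plotting flag), those benchmarks could presumably be driven the same way:

if __name__ == '__main__':
    bench_matmul.run('tmp', False)
    bench_softmax.run('tmp', False)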