From d1d09566b1c4633a78bac821bd14f25bb4033cad Mon Sep 17 00:00:00 2001
From: Philippe Tillet
Date: Sat, 6 Mar 2021 22:04:00 -0500
Subject: [PATCH] [DOCS] Improved tutorials documentation

---
 python/tutorials/01-vector-add.py    |  51 +++++++++++---
 python/tutorials/02-fused-softmax.py | 102 +++++++++++++++++----------
 2 files changed, 103 insertions(+), 50 deletions(-)

diff --git a/python/tutorials/01-vector-add.py b/python/tutorials/01-vector-add.py
index e99216788..f04f1add5 100644
--- a/python/tutorials/01-vector-add.py
+++ b/python/tutorials/01-vector-add.py
@@ -1,7 +1,7 @@
 """
 Vector Addition
 =================
-In this tutorial, you will write a simple, high-performance vector addition using Triton and learn about:
+In this tutorial, you will write a simple vector addition using Triton and learn about:
 
 - The basic syntax of the Triton programming language
 - The best practices for creating PyTorch custom operators using the :code:`triton.kernel` Python API
@@ -122,9 +122,15 @@ class _add(torch.autograd.Function):
 # Just like for standard PyTorch ops, we use the :code:`.apply` method to create a
 # callable object for our function
 add = _add.apply
+# %%
+# We can now use the above function to compute the sum of two :code:`torch.Tensor` objects:
+
 # %%
 # Unit Test
 # --------------------------
+#
+# Of course, the first thing that we should check is whether the kernel is correct. This is pretty easy to test, as shown below:
+
 torch.manual_seed(0)
 x = torch.rand(98432, device='cuda')
 y = torch.rand(98432, device='cuda')
@@ -134,17 +140,40 @@ print(za)
 print(zb)
 print(f'The maximum difference between torch and triton is '
       f'{torch.max(torch.abs(za - zb))}')
+# %%
+# Seems like we're good to go!
+
 # %%
 # Benchmarking
 # --------------------------
-# We can now benchmark our custom op for vectors of increasing sizes to get a sense of how it does
+# We can now benchmark our custom op for vectors of increasing sizes to get a sense of how it does relative to PyTorch.
-warmup = 10
-rep = 200
-for N in [2**i for i in range(17, 26, 1)]:
-    x = torch.rand(N, device='cuda')
-    y = torch.rand(N, device='cuda')
-    triton_ms = triton.testing.do_bench(lambda: add(x, y), warmup=warmup, rep=rep)
-    torch_ms = triton.testing.do_bench(lambda: x + y, warmup=warmup, rep=rep)
-    # print the performance of triton and torch as well as the achieved bandwidth
-    print(f'{N} {triton_ms:.3f} {torch_ms:.3f}')
\ No newline at end of file
+import matplotlib.pyplot as plt

+# There are three tensors of 4N bytes each. So the bandwidth of a given kernel
+# is 12N / time_ms * 1e-6 GB/s
+gbps = lambda N, ms: 12 * N / ms * 1e-6
+# We want to benchmark small and large vectors alike
+sizes = [2**i for i in range(12, 25, 1)]
+triton_bw = []
+torch_bw = []
+for N in sizes:
+    x = torch.rand(N, device='cuda', dtype=torch.float32)
+    y = torch.rand(N, device='cuda', dtype=torch.float32)
+    # Triton provides a do_bench utility function that can be used to benchmark
+    # arbitrary workloads. It supports a `warmup` parameter that is used to stabilize
+    # GPU clock speeds as well as a `rep` parameter that controls the number of times
+    # the benchmark is repeated. Importantly, we set `clear_l2 = True` to make sure
+    # that the L2 cache does not contain any element of x before each kernel call when
+    # N is small.
+    do_bench = lambda fn: gbps(N, triton.testing.do_bench(fn, warmup=10, rep=100, clear_l2=True))
+    triton_bw += [do_bench(lambda: add(x, y))]
+    torch_bw += [do_bench(lambda: x + y)]
+# We plot the results on a semi-log scale
+plt.semilogx(sizes, triton_bw, label='Triton')
+plt.semilogx(sizes, torch_bw, label='Torch')
+plt.legend()
+plt.show()
+
+# %%
+# Seems like our simple element-wise kernel operates at peak bandwidth. While this is a fairly low bar for a custom GPU programming language, this is a good start before we move on to more advanced operations.
\ No newline at end of file
diff --git a/python/tutorials/02-fused-softmax.py b/python/tutorials/02-fused-softmax.py
index f715e1af0..af8ca44cf 100644
--- a/python/tutorials/02-fused-softmax.py
+++ b/python/tutorials/02-fused-softmax.py
@@ -1,7 +1,7 @@
 """
 Fused Softmax
 =================
-In this tutorial, you will write a fused softmax layer that outperform's PyTorch implementation and learn about:
+In this tutorial, you will write a fused softmax operation (that outperforms PyTorch) and learn about:
 
 - The benefits of kernel fusion for bandwidth-bound operations.
 - The syntax and usage of reduction operators in Triton.
@@ -35,14 +35,16 @@ def naive_softmax(x):
 
 # %%
 # When implemented naively in pytorch, computing :code:`y = naive_softmax(x)` for :math:`x \in R^{M \times N}` requires reading :math:`7MN` elements from DRAM and writing back :math:`3MN + 2M` elements.
-# Instead, we want to write a custom "fused" pytorch operators that only reads X once and does all the necessary computations on-chip.
-# This would require reading and writing back only :math:`MN` bytes, so we could expect a theoretical speed-up of 5x.
-# In practice, though, we expect less because our kernel will spend some time computing exponentials and moving data around in shared memory.
+# This is obviously wasteful; we'd prefer to have a custom "fused" kernel that only reads X once and does all the necessary computations on-chip.
+# In this case, we would only be reading :math:`MN` elements and writing back :math:`MN` elements, so we could expect a theoretical speed-up of ~5x (i.e., :math:`(10MN + 2M) / 2MN`).
+# In practice, though, we would be getting a bit less as our kernel computes exponentials and internally moves data around in shared memory.
 
 # %%
 # Compute Kernel
-# ----------------------------
+# ----------------
-# Our softmax kernel works as follows: each program loads a row of X and writes back a normalized row of Y. Note that one important limitation of Triton is that each block must have a power-of-two number of elements, which means that we need to guard the memory operations properly if we want to handle any possible input shapes:
+# Our softmax kernel works as follows: each program loads a row of the input X, normalizes it and writes back the result to the output Y.
+# Note that one important limitation of Triton is that each block must have a power-of-two number of elements,
+# so we need to internally "pad" tiles and guard the memory operations properly if we want to handle any possible input shapes:
 #
 # .. code-block:: C
 #
@@ -61,13 +63,14 @@ def naive_softmax(x):
 #    bool check[BLOCK] = n < N;
 #    float x [BLOCK] = check ? *px : -F32_INFINITY;
 #    // syntax for reduction in Triton is:
-#    // x[..., OPERATOR, ...]
+#    // x[:, :, OPERATOR, :, :]
 #    //           ^
 #    //         index
-#    // The operators currently supported are {min, max, +}
+#    // where OPERATOR is one of {min, max, +}
+#    // For 1D vectors, this is just x[OPERATOR].
 #    float z [BLOCK] = x - x[max];
-#    // The exponential in Triton is fast but approximate
-#    // (i.e., like __expf in CUDA)
+#    // Note that exponentials in Triton are fast
+#    // but approximate (i.e., think __expf in CUDA)
 #    float num [BLOCK] = exp(z);
 #    float denom = num[+];
 #    // The result of the reduction is now stored in y
 #    float y [BLOCK] = num / denom;
 #    // We write it back
@@ -79,10 +82,10 @@
 
 # %%
 # Torch Bindings
-# ----------------------------
+# ---------------
-# We need to make sure that BLOCK is the smallest power of two
-# greater than the number of rows N of the input matrix.
-# Different values of BLOCK will result in different kernels
+# Here our torch bindings are quite similar to those of the vector addition from the previous tutorial.
+# We just need to make sure that BLOCK is the smallest power of two greater than the number of columns N of the input matrix.
+# This means that different values of BLOCK will result in different kernels.
 
 import torch
 import triton
@@ -105,6 +108,7 @@ __global__ void softmax(float* Y, float* X, int stride_ym, int stride_xm, int M,
 """
 
+# helper function to compute the smallest power of two greater than or equal to a given number
 def next_power_of_2(n):
     n -= 1
     n |= n >> 1
@@ -116,16 +120,20 @@ def next_power_of_2(n):
     return n
 
-_kernels = dict()
-
-
+# kernel caching mechanism
 def make_kernel(N, device):
+    cache = make_kernel.cache
+    # Now our kernels are indexed not only by the provided device but also
+    # by the rounded number of columns in the input matrix
     BLOCK = next_power_of_2(N)
     key = (BLOCK, device)
-    if key not in _kernels:
+    if key not in cache:
         defines = {'BLOCK': BLOCK}
-        _kernels[key] = triton.kernel(_src, device=device, defines=defines)
-    return _kernels[key]
+        cache[key] = triton.kernel(_src, device=device, defines=defines)
+    return cache[key]
+
+
+make_kernel.cache = dict()
 
 
 class _softmax(torch.autograd.Function):
@@ -134,11 +142,10 @@ class _softmax(torch.autograd.Function):
         # constraints of the op
         assert x.dtype == torch.float32
         y = torch.empty_like(x)
-        # *create launch grid*:
-        # here we just launch a grid of M programs
+        # The launch grid is simple: we have one kernel instance per row of the input matrix
         M, N = y.shape
         grid = lambda opt: (M, )
-        # *launch kernel*:
+        # Launch kernel
         kernel = make_kernel(N, y.device)
         kernel(y.data_ptr(), x.data_ptr(), y.stride(0), x.stride(0), M, N, grid=grid)
         return y
@@ -146,41 +153,58 @@ class _softmax(torch.autograd.Function):
 softmax = _softmax.apply
 
+# %%
+# We can use the above softmax function to compute the row-wise softmax of a given matrix.
+
 # %%
 # Unit Test
 # ----------
+# %%
+# We make sure that we test our kernel on a matrix with an irregular number of rows and columns.
+# This will allow us to verify that our padding mechanism works.
+
+torch.manual_seed(0)
 x = torch.randn(1823, 781, device='cuda')
 y_tri = softmax(x)
 y_ref = torch.softmax(x, axis=1)
-print(y_tri)
-print(y_ref)
 print(torch.allclose(y_tri, y_ref))
 
-# %%
-# Seems to work!
+# %%
+# As expected, the results are identical.
 
 # %%
 # Benchmarking
-# ----------
+# -------------
+# Here we will benchmark our operation as a function of the number of columns in the input matrix -- assuming 4096 rows.
+# We will then compare its performance against (1) :code:`torch.softmax` and (2) the :code:`naive_softmax` defined above.
 import matplotlib.pyplot as plt
 
 M = 4096
-Ns = [128 * i for i in range(2, 50)]
-tri_ms = []
-ref_ms = []
-def_ms = []
+Ns = [256 * i for i in range(2, 50)]
+tri_bw = []
+ref_bw = []
+def_bw = []
 for N in Ns:
     x = torch.randn(M, N, device='cuda', dtype=torch.float32)
     gbps = lambda ms: x.nelement() * x.element_size() * 1e-9 / (ms * 1e-3)
-    tri_ms += [gbps(triton.testing.do_bench(lambda: softmax(x)))]
-    ref_ms += [gbps(triton.testing.do_bench(lambda: torch.softmax(x, axis=1)))]
-    def_ms += [gbps(triton.testing.do_bench(lambda: naive_softmax(x)))]
+    do_bench = lambda fn: gbps(triton.testing.do_bench(fn, warmup=10, rep=100, clear_l2=True))
+    tri_bw += [do_bench(lambda: softmax(x))]
+    ref_bw += [do_bench(lambda: torch.softmax(x, axis=1))]
+    def_bw += [do_bench(lambda: naive_softmax(x))]
 plt.xlabel('N')
 plt.ylabel('Bandwidth (GB/s)')
-plt.plot(Ns, tri_ms, label='Triton')
-plt.plot(Ns, ref_ms, label='Torch')
-plt.plot(Ns, def_ms, label='Naive')
+plt.plot(Ns, tri_bw, label='Triton')
+plt.plot(Ns, ref_bw, label='Torch')
+plt.plot(Ns, def_bw, label='Naive')
 plt.legend()
-plt.show()
\ No newline at end of file
+plt.show()
+
+# %%
+# In the above plot, we can see that:
+#
+# - Triton is 4-5x faster than the naive implementation, which is consistent with our theoretical predictions.
+# - Triton is significantly faster than :code:`torch.softmax` for very large input matrices. My guess from looking at the source code of the `PyTorch kernel `_ is that PyTorch only partially fuses the computation of the softmax.
+#   This means that -- when temporary data is too large to fit entirely in the GPU's cache -- it has to be transferred to and from DRAM, so PyTorch ends up moving almost twice the amount of data necessary.
+# Note that our Triton kernel is not only faster than PyTorch's CUDA kernel, it is also **easier to read, understand and maintain**.
\ No newline at end of file