diff --git a/master/.buildinfo b/master/.buildinfo
index 8a7db18c9..2a9b28897 100644
--- a/master/.buildinfo
+++ b/master/.buildinfo
@@ -1,4 +1,4 @@
 # Sphinx build info version 1
 # This file hashes the configuration used when building these files. When it is not found, a full rebuild will be done.
-config: 8b7f79e50b33075152c035368a40bf98
+config: 0d28377d21f58f0a2d94ca7d0800011b
 tags: 645f666f9bcd5a90fca523b33c5a78b7
diff --git a/master/.doctrees/environment.pickle b/master/.doctrees/environment.pickle
index 16b09d497..5a6121a88 100644
Binary files a/master/.doctrees/environment.pickle and b/master/.doctrees/environment.pickle differ
diff --git a/master/.doctrees/getting-started/installation.doctree b/master/.doctrees/getting-started/installation.doctree
index 63c03a2d1..3e5a2c426 100644
Binary files a/master/.doctrees/getting-started/installation.doctree and b/master/.doctrees/getting-started/installation.doctree differ
diff --git a/master/.doctrees/getting-started/tutorials/01-vector-add.doctree b/master/.doctrees/getting-started/tutorials/01-vector-add.doctree
index 010629f14..aeb789ad5 100644
Binary files a/master/.doctrees/getting-started/tutorials/01-vector-add.doctree and b/master/.doctrees/getting-started/tutorials/01-vector-add.doctree differ
diff --git a/master/.doctrees/getting-started/tutorials/02-fused-softmax.doctree b/master/.doctrees/getting-started/tutorials/02-fused-softmax.doctree
index 2bd3f383b..e3df005b8 100644
Binary files a/master/.doctrees/getting-started/tutorials/02-fused-softmax.doctree and b/master/.doctrees/getting-started/tutorials/02-fused-softmax.doctree differ
diff --git a/master/.doctrees/getting-started/tutorials/03-matrix-multiplication.doctree b/master/.doctrees/getting-started/tutorials/03-matrix-multiplication.doctree
index 34a2b176c..1cc1dbfd4 100644
Binary files a/master/.doctrees/getting-started/tutorials/03-matrix-multiplication.doctree and b/master/.doctrees/getting-started/tutorials/03-matrix-multiplication.doctree differ
diff --git a/master/.doctrees/getting-started/tutorials/04-low-memory-dropout.doctree b/master/.doctrees/getting-started/tutorials/04-low-memory-dropout.doctree
index 2c3f2f2b5..6d68e1058 100644
Binary files a/master/.doctrees/getting-started/tutorials/04-low-memory-dropout.doctree and b/master/.doctrees/getting-started/tutorials/04-low-memory-dropout.doctree differ
diff --git a/master/.doctrees/getting-started/tutorials/05-layer-norm.doctree b/master/.doctrees/getting-started/tutorials/05-layer-norm.doctree
index 3a5a445e1..10c150fd6 100644
Binary files a/master/.doctrees/getting-started/tutorials/05-layer-norm.doctree and b/master/.doctrees/getting-started/tutorials/05-layer-norm.doctree differ
diff --git a/master/.doctrees/getting-started/tutorials/06-fused-attention.doctree b/master/.doctrees/getting-started/tutorials/06-fused-attention.doctree
index 86296aa59..682b8d6ca 100644
Binary files a/master/.doctrees/getting-started/tutorials/06-fused-attention.doctree and b/master/.doctrees/getting-started/tutorials/06-fused-attention.doctree differ
diff --git a/master/.doctrees/getting-started/tutorials/07-libdevice-function.doctree b/master/.doctrees/getting-started/tutorials/07-libdevice-function.doctree
index c3df04131..efa56f2b9 100644
Binary files a/master/.doctrees/getting-started/tutorials/07-libdevice-function.doctree and b/master/.doctrees/getting-started/tutorials/07-libdevice-function.doctree differ
diff --git a/master/.doctrees/getting-started/tutorials/index.doctree b/master/.doctrees/getting-started/tutorials/index.doctree
index 4e4d909e6..9f04ae9e8 100644
Binary files a/master/.doctrees/getting-started/tutorials/index.doctree and b/master/.doctrees/getting-started/tutorials/index.doctree differ
diff --git a/master/.doctrees/getting-started/tutorials/sg_execution_times.doctree b/master/.doctrees/getting-started/tutorials/sg_execution_times.doctree
index a98306d80..fc603c7cc 100644
Binary files a/master/.doctrees/getting-started/tutorials/sg_execution_times.doctree and b/master/.doctrees/getting-started/tutorials/sg_execution_times.doctree differ
diff --git a/master/.doctrees/index.doctree b/master/.doctrees/index.doctree
index 5d4e55625..e93396640 100644
Binary files a/master/.doctrees/index.doctree and b/master/.doctrees/index.doctree differ
diff --git a/master/.doctrees/programming-guide/chapter-1/introduction.doctree b/master/.doctrees/programming-guide/chapter-1/introduction.doctree
index 50506de5d..a8777eb66 100644
Binary files a/master/.doctrees/programming-guide/chapter-1/introduction.doctree and b/master/.doctrees/programming-guide/chapter-1/introduction.doctree differ
diff --git a/master/.doctrees/programming-guide/chapter-2/related-work.doctree b/master/.doctrees/programming-guide/chapter-2/related-work.doctree
index 103d14f35..21b81eebe 100644
Binary files a/master/.doctrees/programming-guide/chapter-2/related-work.doctree and b/master/.doctrees/programming-guide/chapter-2/related-work.doctree differ
diff --git a/master/.doctrees/python-api/generated/triton.Config.doctree b/master/.doctrees/python-api/generated/triton.Config.doctree
index a3d6296f4..b8f3799b8 100644
Binary files a/master/.doctrees/python-api/generated/triton.Config.doctree and b/master/.doctrees/python-api/generated/triton.Config.doctree differ
diff --git a/master/.doctrees/python-api/generated/triton.autotune.doctree b/master/.doctrees/python-api/generated/triton.autotune.doctree
index 26a299f6a..afe23e890 100644
Binary files a/master/.doctrees/python-api/generated/triton.autotune.doctree and b/master/.doctrees/python-api/generated/triton.autotune.doctree differ
diff --git a/master/.doctrees/python-api/generated/triton.heuristics.doctree b/master/.doctrees/python-api/generated/triton.heuristics.doctree
index f2c1c093e..d965a9cd0 100644
Binary files a/master/.doctrees/python-api/generated/triton.heuristics.doctree and b/master/.doctrees/python-api/generated/triton.heuristics.doctree differ
diff --git a/master/.doctrees/python-api/generated/triton.jit.doctree b/master/.doctrees/python-api/generated/triton.jit.doctree
index 678b559c8..7c1d9ca96 100644
Binary files a/master/.doctrees/python-api/generated/triton.jit.doctree and b/master/.doctrees/python-api/generated/triton.jit.doctree differ
diff --git a/master/.doctrees/python-api/generated/triton.language.arange.doctree b/master/.doctrees/python-api/generated/triton.language.arange.doctree
index 94306c4c2..a8f352c32 100644
Binary files a/master/.doctrees/python-api/generated/triton.language.arange.doctree and b/master/.doctrees/python-api/generated/triton.language.arange.doctree differ
diff --git a/master/.doctrees/python-api/generated/triton.language.atomic_add.doctree b/master/.doctrees/python-api/generated/triton.language.atomic_add.doctree
index 69cbca2c0..9c0f31602 100644
Binary files a/master/.doctrees/python-api/generated/triton.language.atomic_add.doctree and b/master/.doctrees/python-api/generated/triton.language.atomic_add.doctree differ
diff --git a/master/.doctrees/python-api/generated/triton.language.atomic_and.doctree b/master/.doctrees/python-api/generated/triton.language.atomic_and.doctree
index 3fdb584ba..8ffef7b96 100644
Binary files a/master/.doctrees/python-api/generated/triton.language.atomic_and.doctree and b/master/.doctrees/python-api/generated/triton.language.atomic_and.doctree differ
diff --git a/master/.doctrees/python-api/generated/triton.language.atomic_cas.doctree b/master/.doctrees/python-api/generated/triton.language.atomic_cas.doctree
index 347715288..921b8f429 100644
Binary files a/master/.doctrees/python-api/generated/triton.language.atomic_cas.doctree and b/master/.doctrees/python-api/generated/triton.language.atomic_cas.doctree differ
diff --git a/master/.doctrees/python-api/generated/triton.language.atomic_max.doctree b/master/.doctrees/python-api/generated/triton.language.atomic_max.doctree
index f156e6f24..02f53a2bd 100644
Binary files a/master/.doctrees/python-api/generated/triton.language.atomic_max.doctree and b/master/.doctrees/python-api/generated/triton.language.atomic_max.doctree differ
diff --git a/master/.doctrees/python-api/generated/triton.language.atomic_min.doctree b/master/.doctrees/python-api/generated/triton.language.atomic_min.doctree
index 2893e7652..273195316 100644
Binary files a/master/.doctrees/python-api/generated/triton.language.atomic_min.doctree and b/master/.doctrees/python-api/generated/triton.language.atomic_min.doctree differ
diff --git a/master/.doctrees/python-api/generated/triton.language.atomic_or.doctree b/master/.doctrees/python-api/generated/triton.language.atomic_or.doctree
index e8a25b6a0..7bd4bd257 100644
Binary files a/master/.doctrees/python-api/generated/triton.language.atomic_or.doctree and b/master/.doctrees/python-api/generated/triton.language.atomic_or.doctree differ
diff --git a/master/.doctrees/python-api/generated/triton.language.atomic_xchg.doctree b/master/.doctrees/python-api/generated/triton.language.atomic_xchg.doctree
index 89e48f462..458599ea1 100644
Binary files a/master/.doctrees/python-api/generated/triton.language.atomic_xchg.doctree and b/master/.doctrees/python-api/generated/triton.language.atomic_xchg.doctree differ
diff --git a/master/.doctrees/python-api/generated/triton.language.atomic_xor.doctree b/master/.doctrees/python-api/generated/triton.language.atomic_xor.doctree
index 7bf29bb77..a317d7721 100644
Binary files a/master/.doctrees/python-api/generated/triton.language.atomic_xor.doctree and b/master/.doctrees/python-api/generated/triton.language.atomic_xor.doctree differ
diff --git a/master/.doctrees/python-api/generated/triton.language.broadcast_to.doctree b/master/.doctrees/python-api/generated/triton.language.broadcast_to.doctree
index 3e06e6b92..3266dd598 100644
Binary files a/master/.doctrees/python-api/generated/triton.language.broadcast_to.doctree and b/master/.doctrees/python-api/generated/triton.language.broadcast_to.doctree differ
diff --git a/master/.doctrees/python-api/generated/triton.language.cos.doctree b/master/.doctrees/python-api/generated/triton.language.cos.doctree
index 6dd02a993..2a7166e43 100644
Binary files a/master/.doctrees/python-api/generated/triton.language.cos.doctree and b/master/.doctrees/python-api/generated/triton.language.cos.doctree differ
diff --git a/master/.doctrees/python-api/generated/triton.language.dot.doctree b/master/.doctrees/python-api/generated/triton.language.dot.doctree
index 8053744ce..af2ebcc02 100644
Binary files a/master/.doctrees/python-api/generated/triton.language.dot.doctree and b/master/.doctrees/python-api/generated/triton.language.dot.doctree differ
diff --git a/master/.doctrees/python-api/generated/triton.language.exp.doctree b/master/.doctrees/python-api/generated/triton.language.exp.doctree
index 49b19e01a..8817b7b70 100644
Binary files a/master/.doctrees/python-api/generated/triton.language.exp.doctree and b/master/.doctrees/python-api/generated/triton.language.exp.doctree differ
diff --git a/master/.doctrees/python-api/generated/triton.language.load.doctree b/master/.doctrees/python-api/generated/triton.language.load.doctree
index a3c943c6a..fa8e40735 100644
Binary files a/master/.doctrees/python-api/generated/triton.language.load.doctree and b/master/.doctrees/python-api/generated/triton.language.load.doctree differ
diff --git a/master/.doctrees/python-api/generated/triton.language.log.doctree b/master/.doctrees/python-api/generated/triton.language.log.doctree
index cfdfc36fe..e27a6b75c 100644
Binary files a/master/.doctrees/python-api/generated/triton.language.log.doctree and b/master/.doctrees/python-api/generated/triton.language.log.doctree differ
diff --git a/master/.doctrees/python-api/generated/triton.language.max.doctree b/master/.doctrees/python-api/generated/triton.language.max.doctree
index af70fb91b..2abde8cce 100644
Binary files a/master/.doctrees/python-api/generated/triton.language.max.doctree and b/master/.doctrees/python-api/generated/triton.language.max.doctree differ
diff --git a/master/.doctrees/python-api/generated/triton.language.maximum.doctree b/master/.doctrees/python-api/generated/triton.language.maximum.doctree
index e1e4769db..2b307669f 100644
Binary files a/master/.doctrees/python-api/generated/triton.language.maximum.doctree and b/master/.doctrees/python-api/generated/triton.language.maximum.doctree differ
diff --git a/master/.doctrees/python-api/generated/triton.language.min.doctree b/master/.doctrees/python-api/generated/triton.language.min.doctree
index a535a74df..190be30b9 100644
Binary files a/master/.doctrees/python-api/generated/triton.language.min.doctree and b/master/.doctrees/python-api/generated/triton.language.min.doctree differ
diff --git a/master/.doctrees/python-api/generated/triton.language.minimum.doctree b/master/.doctrees/python-api/generated/triton.language.minimum.doctree
index deefde9fd..f33c007c6 100644
Binary files a/master/.doctrees/python-api/generated/triton.language.minimum.doctree and b/master/.doctrees/python-api/generated/triton.language.minimum.doctree differ
diff --git a/master/.doctrees/python-api/generated/triton.language.multiple_of.doctree b/master/.doctrees/python-api/generated/triton.language.multiple_of.doctree
index 0aecfafa6..3e5b3d5ea 100644
Binary files a/master/.doctrees/python-api/generated/triton.language.multiple_of.doctree and b/master/.doctrees/python-api/generated/triton.language.multiple_of.doctree differ
diff --git a/master/.doctrees/python-api/generated/triton.language.num_programs.doctree b/master/.doctrees/python-api/generated/triton.language.num_programs.doctree
index e04ec4bdd..3d271a9a7 100644
Binary files a/master/.doctrees/python-api/generated/triton.language.num_programs.doctree and b/master/.doctrees/python-api/generated/triton.language.num_programs.doctree differ
diff --git a/master/.doctrees/python-api/generated/triton.language.program_id.doctree b/master/.doctrees/python-api/generated/triton.language.program_id.doctree
index d8a7420e7..94becec6c 100644
Binary files a/master/.doctrees/python-api/generated/triton.language.program_id.doctree and b/master/.doctrees/python-api/generated/triton.language.program_id.doctree differ
diff --git a/master/.doctrees/python-api/generated/triton.language.rand.doctree b/master/.doctrees/python-api/generated/triton.language.rand.doctree
index e5dc57dff..a3c0fdf00 100644
Binary files a/master/.doctrees/python-api/generated/triton.language.rand.doctree and b/master/.doctrees/python-api/generated/triton.language.rand.doctree differ
diff --git a/master/.doctrees/python-api/generated/triton.language.randint.doctree b/master/.doctrees/python-api/generated/triton.language.randint.doctree
index 910a93332..f80005bcd 100644
Binary files a/master/.doctrees/python-api/generated/triton.language.randint.doctree and b/master/.doctrees/python-api/generated/triton.language.randint.doctree differ
diff --git a/master/.doctrees/python-api/generated/triton.language.randint4x.doctree b/master/.doctrees/python-api/generated/triton.language.randint4x.doctree
index a426a3e98..f17634c26 100644
Binary files a/master/.doctrees/python-api/generated/triton.language.randint4x.doctree and b/master/.doctrees/python-api/generated/triton.language.randint4x.doctree differ
diff --git a/master/.doctrees/python-api/generated/triton.language.randn.doctree b/master/.doctrees/python-api/generated/triton.language.randn.doctree
index e9ec57d90..22ada4c6a 100644
Binary files a/master/.doctrees/python-api/generated/triton.language.randn.doctree and b/master/.doctrees/python-api/generated/triton.language.randn.doctree differ
diff --git a/master/.doctrees/python-api/generated/triton.language.ravel.doctree b/master/.doctrees/python-api/generated/triton.language.ravel.doctree
index f04d53110..0fa9c990d 100644
Binary files a/master/.doctrees/python-api/generated/triton.language.ravel.doctree and b/master/.doctrees/python-api/generated/triton.language.ravel.doctree differ
diff --git a/master/.doctrees/python-api/generated/triton.language.reshape.doctree b/master/.doctrees/python-api/generated/triton.language.reshape.doctree
index 2ac7e9881..6ea2cb361 100644
Binary files a/master/.doctrees/python-api/generated/triton.language.reshape.doctree and b/master/.doctrees/python-api/generated/triton.language.reshape.doctree differ
diff --git a/master/.doctrees/python-api/generated/triton.language.sigmoid.doctree b/master/.doctrees/python-api/generated/triton.language.sigmoid.doctree
index 25c12879b..69a26848d 100644
Binary files a/master/.doctrees/python-api/generated/triton.language.sigmoid.doctree and b/master/.doctrees/python-api/generated/triton.language.sigmoid.doctree differ
diff --git a/master/.doctrees/python-api/generated/triton.language.sin.doctree b/master/.doctrees/python-api/generated/triton.language.sin.doctree
index 16577e739..1292fca49 100644
Binary files a/master/.doctrees/python-api/generated/triton.language.sin.doctree and b/master/.doctrees/python-api/generated/triton.language.sin.doctree differ
diff --git a/master/.doctrees/python-api/generated/triton.language.softmax.doctree b/master/.doctrees/python-api/generated/triton.language.softmax.doctree
index 91e2a7c7b..39e0d1011 100644
Binary files a/master/.doctrees/python-api/generated/triton.language.softmax.doctree and b/master/.doctrees/python-api/generated/triton.language.softmax.doctree differ
diff --git a/master/.doctrees/python-api/generated/triton.language.sqrt.doctree b/master/.doctrees/python-api/generated/triton.language.sqrt.doctree
index 3080d5a35..ab417cfdb 100644
Binary files a/master/.doctrees/python-api/generated/triton.language.sqrt.doctree and b/master/.doctrees/python-api/generated/triton.language.sqrt.doctree differ
diff --git a/master/.doctrees/python-api/generated/triton.language.store.doctree b/master/.doctrees/python-api/generated/triton.language.store.doctree
index 2f40a1d5e..de7148cda 100644
Binary files a/master/.doctrees/python-api/generated/triton.language.store.doctree and b/master/.doctrees/python-api/generated/triton.language.store.doctree differ
diff --git a/master/.doctrees/python-api/generated/triton.language.sum.doctree b/master/.doctrees/python-api/generated/triton.language.sum.doctree
index 2483e6693..1bc37863b 100644
Binary files a/master/.doctrees/python-api/generated/triton.language.sum.doctree and b/master/.doctrees/python-api/generated/triton.language.sum.doctree differ
diff --git a/master/.doctrees/python-api/generated/triton.language.where.doctree b/master/.doctrees/python-api/generated/triton.language.where.doctree
index 5cb220d40..ce145b107 100644
Binary files a/master/.doctrees/python-api/generated/triton.language.where.doctree and b/master/.doctrees/python-api/generated/triton.language.where.doctree differ
diff --git a/master/.doctrees/python-api/generated/triton.language.zeros.doctree b/master/.doctrees/python-api/generated/triton.language.zeros.doctree
index 88052bcfb..daab9dee7 100644
Binary files a/master/.doctrees/python-api/generated/triton.language.zeros.doctree and b/master/.doctrees/python-api/generated/triton.language.zeros.doctree differ
diff --git a/master/.doctrees/python-api/generated/triton.testing.Benchmark.doctree b/master/.doctrees/python-api/generated/triton.testing.Benchmark.doctree
index 13891e60a..a74456d9c 100644
Binary files a/master/.doctrees/python-api/generated/triton.testing.Benchmark.doctree and b/master/.doctrees/python-api/generated/triton.testing.Benchmark.doctree differ
diff --git a/master/.doctrees/python-api/generated/triton.testing.do_bench.doctree b/master/.doctrees/python-api/generated/triton.testing.do_bench.doctree
index d716859d6..d036fa5ec 100644
Binary files a/master/.doctrees/python-api/generated/triton.testing.do_bench.doctree and b/master/.doctrees/python-api/generated/triton.testing.do_bench.doctree differ
diff --git a/master/.doctrees/python-api/generated/triton.testing.perf_report.doctree b/master/.doctrees/python-api/generated/triton.testing.perf_report.doctree
index e2e4c1c16..d9c363aff 100644
Binary files a/master/.doctrees/python-api/generated/triton.testing.perf_report.doctree and b/master/.doctrees/python-api/generated/triton.testing.perf_report.doctree differ
diff --git a/master/.doctrees/python-api/triton.doctree b/master/.doctrees/python-api/triton.doctree
index 91db005fa..fe49e7535 100644
Binary files a/master/.doctrees/python-api/triton.doctree and b/master/.doctrees/python-api/triton.doctree differ
diff --git a/master/.doctrees/python-api/triton.language.doctree b/master/.doctrees/python-api/triton.language.doctree
index 86c5d716a..0cbb6964a 100644
Binary files a/master/.doctrees/python-api/triton.language.doctree and b/master/.doctrees/python-api/triton.language.doctree differ
diff --git a/master/.doctrees/python-api/triton.testing.doctree b/master/.doctrees/python-api/triton.testing.doctree
index dbce4788b..5e2aa2eff 100644
Binary files a/master/.doctrees/python-api/triton.testing.doctree and b/master/.doctrees/python-api/triton.testing.doctree differ
diff --git a/master/_downloads/662999063954282841dc90b8945f85ce/tutorials_jupyter.zip b/master/_downloads/662999063954282841dc90b8945f85ce/tutorials_jupyter.zip
index a346df3ad..e7757d0fe 100644
Binary files a/master/_downloads/662999063954282841dc90b8945f85ce/tutorials_jupyter.zip and b/master/_downloads/662999063954282841dc90b8945f85ce/tutorials_jupyter.zip differ
diff --git a/master/_downloads/763344228ae6bc253ed1a6cf586aa30d/tutorials_python.zip b/master/_downloads/763344228ae6bc253ed1a6cf586aa30d/tutorials_python.zip
index d1afc471c..6e5d37dc8 100644
Binary files a/master/_downloads/763344228ae6bc253ed1a6cf586aa30d/tutorials_python.zip and b/master/_downloads/763344228ae6bc253ed1a6cf586aa30d/tutorials_python.zip differ
diff --git a/master/_downloads/b51b68bc1c6b1a5e509f67800b6235af/03-matrix-multiplication.ipynb b/master/_downloads/b51b68bc1c6b1a5e509f67800b6235af/03-matrix-multiplication.ipynb
index 6c72f736b..b2469d437 100644
--- a/master/_downloads/b51b68bc1c6b1a5e509f67800b6235af/03-matrix-multiplication.ipynb
+++ b/master/_downloads/b51b68bc1c6b1a5e509f67800b6235af/03-matrix-multiplication.ipynb
@@ -47,7 +47,7 @@
    },
    "outputs": [],
    "source": [
-    "import torch\n\nimport triton\nimport triton.language as tl\n\n# %\n# :code:`triton.jit`'ed functions can be auto-tuned by using the `triton.autotune`\n# decorator, which consumes:\n# - A list of :code:`triton.Config` objects that define different configurations of\n# meta-parameters (e.g., BLOCK_SIZE_M) and compilation options (e.g., num_warps) to try\n# - An autotuning *key* whose change in values will trigger evaluation of all the\n# provided configs\n\n\n@triton.autotune(\n configs=[\n triton.Config({'BLOCK_SIZE_M': 128, 'BLOCK_SIZE_N': 256, 'BLOCK_SIZE_K': 32, 'GROUP_SIZE_M': 8}, num_stages=3, num_warps=8),\n triton.Config({'BLOCK_SIZE_M': 256, 'BLOCK_SIZE_N': 128, 'BLOCK_SIZE_K': 32, 'GROUP_SIZE_M': 8}, num_stages=3, num_warps=8),\n triton.Config({'BLOCK_SIZE_M': 256, 'BLOCK_SIZE_N': 64, 'BLOCK_SIZE_K': 32, 'GROUP_SIZE_M': 8}, num_stages=4, num_warps=4),\n triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_N': 256, 'BLOCK_SIZE_K': 32, 'GROUP_SIZE_M': 8}, num_stages=4, num_warps=4),\n triton.Config({'BLOCK_SIZE_M': 128, 'BLOCK_SIZE_N': 128, 'BLOCK_SIZE_K': 32, 'GROUP_SIZE_M': 8}, num_stages=4, num_warps=4),\n triton.Config({'BLOCK_SIZE_M': 128, 'BLOCK_SIZE_N': 64, 'BLOCK_SIZE_K': 32, 'GROUP_SIZE_M': 8}, num_stages=4, num_warps=4),\n triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_N': 128, 'BLOCK_SIZE_K': 32, 'GROUP_SIZE_M': 8}, num_stages=4, num_warps=4),\n triton.Config({'BLOCK_SIZE_M': 128, 'BLOCK_SIZE_N': 32, 'BLOCK_SIZE_K': 32, 'GROUP_SIZE_M': 8}, num_stages=4, num_warps=4),\n triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_N': 32, 'BLOCK_SIZE_K': 32, 'GROUP_SIZE_M': 8}, num_stages=5, num_warps=2),\n triton.Config({'BLOCK_SIZE_M': 32, 'BLOCK_SIZE_N': 64, 'BLOCK_SIZE_K': 32, 'GROUP_SIZE_M': 8}, num_stages=5, num_warps=2),\n ],\n key=['M', 'N', 'K'],\n)\n@triton.jit\ndef matmul_kernel(\n # Pointers to matrices\n a_ptr, b_ptr, c_ptr,\n # Matrix dimensions\n M, N, K,\n # The stride variables represent how much to increase the ptr by when moving by 1\n # element in a particular dimension. E.g. stride_am is how much to increase a_ptr\n # by to get the element one row down (A has M rows)\n stride_am, stride_ak,\n stride_bk, stride_bn,\n stride_cm, stride_cn,\n # Meta-parameters\n BLOCK_SIZE_M: tl.constexpr, BLOCK_SIZE_N: tl.constexpr, BLOCK_SIZE_K: tl.constexpr,\n GROUP_SIZE_M: tl.constexpr,\n ACTIVATION: tl.constexpr,\n):\n \"\"\"Kernel for computing the matmul C = A x B.\n A has shape (M, K), B has shape (K, N) and C has shape (M, N)\n \"\"\"\n # -----------------------------------------------------------\n # Map program ids `pid` to the block of C it should compute.\n # This is done in a grouped ordering to promote L2 data reuse\n # See above `L2 Cache Optimizations` section for details\n pid = tl.program_id(axis=0)\n num_pid_m = tl.cdiv(M, BLOCK_SIZE_M)\n num_pid_n = tl.cdiv(N, BLOCK_SIZE_N)\n num_pid_in_group = GROUP_SIZE_M * num_pid_n\n group_id = pid // num_pid_in_group\n first_pid_m = group_id * GROUP_SIZE_M\n group_size_m = min(num_pid_m - first_pid_m, GROUP_SIZE_M)\n pid_m = first_pid_m + (pid % group_size_m)\n pid_n = (pid % num_pid_in_group) // group_size_m\n\n # ----------------------------------------------------------\n # Create pointers for the first blocks of A and B.\n # We will advance this pointer as we move in the K direction\n # and accumulate\n # a_ptrs is a block of [BLOCK_SIZE_M, BLOCK_SIZE_K] pointers\n # b_ptrs is a block of [BLOCK_SIZE_K, BLOCK_SIZE_n] pointers\n # see above `Pointer Arithmetics` section for details\n offs_am = pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M)\n offs_bn = pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N)\n offs_k = tl.arange(0, BLOCK_SIZE_K)\n a_ptrs = a_ptr + (offs_am[:, None] * stride_am + offs_k[None, :] * stride_ak)\n b_ptrs = b_ptr + (offs_k[:, None] * stride_bk + offs_bn[None, :] * stride_bn)\n\n # -----------------------------------------------------------\n # Iterate to compute a block of the C matrix\n # We accumulate into a `[BLOCK_SIZE_M, BLOCK_SIZE_N]` block\n # of fp32 values for higher accuracy.\n # `accumulator` will be converted back to fp16 after the loop\n accumulator = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32)\n for k in range(0, K, BLOCK_SIZE_K):\n # Note that for simplicity, we don't apply a mask here.\n # This means that if K is not a multiple of BLOCK_SIZE_K,\n # this will access out-of-bounds memory and produce an\n # error or (worse!) incorrect results.\n a = tl.load(a_ptrs)\n b = tl.load(b_ptrs)\n # We accumulate along the K dimension\n accumulator += tl.dot(a, b)\n # Advance the ptrs to the next K block\n a_ptrs += BLOCK_SIZE_K * stride_ak\n b_ptrs += BLOCK_SIZE_K * stride_bk\n # you can fuse arbitrary activation functions here\n # while the accumulator is still in FP32!\n if ACTIVATION:\n accumulator = ACTIVATION(accumulator)\n c = accumulator.to(tl.float16)\n\n # -----------------------------------------------------------\n # Write back the block of the output matrix C\n offs_cm = pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M)\n offs_cn = pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N)\n c_ptrs = c_ptr + stride_cm * offs_cm[:, None] + stride_cn * offs_cn[None, :]\n c_mask = (offs_cm[:, None] < M) & (offs_cn[None, :] < N)\n tl.store(c_ptrs, c, mask=c_mask)\n\n\n# we can fuse `leaky_relu` by providing it as an `ACTIVATION` meta-parameter in `_matmul`\n@triton.jit\ndef leaky_relu(x):\n x = x + 1\n return tl.where(x >= 0, x, 0.01 * x)"
+    "import torch\n\nimport triton\nimport triton.language as tl\n\n# %\n# :code:`triton.jit`'ed functions can be auto-tuned by using the `triton.autotune`\n# decorator, which consumes:\n# - A list of :code:`triton.Config` objects that define different configurations of\n# meta-parameters (e.g., BLOCK_SIZE_M) and compilation options (e.g., num_warps) to try\n# - An autotuning *key* whose change in values will trigger evaluation of all the\n# provided configs\n\n\n@triton.autotune(\n configs=[\n triton.Config({'BLOCK_SIZE_M': 128, 'BLOCK_SIZE_N': 256, 'BLOCK_SIZE_K': 32, 'GROUP_SIZE_M': 8}, num_stages=3, num_warps=8),\n triton.Config({'BLOCK_SIZE_M': 256, 'BLOCK_SIZE_N': 128, 'BLOCK_SIZE_K': 32, 'GROUP_SIZE_M': 8}, num_stages=3, num_warps=8),\n triton.Config({'BLOCK_SIZE_M': 256, 'BLOCK_SIZE_N': 64, 'BLOCK_SIZE_K': 32, 'GROUP_SIZE_M': 8}, num_stages=4, num_warps=4),\n triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_N': 256, 'BLOCK_SIZE_K': 32, 'GROUP_SIZE_M': 8}, num_stages=4, num_warps=4),\n triton.Config({'BLOCK_SIZE_M': 128, 'BLOCK_SIZE_N': 128, 'BLOCK_SIZE_K': 32, 'GROUP_SIZE_M': 8}, num_stages=4, num_warps=4),\n triton.Config({'BLOCK_SIZE_M': 128, 'BLOCK_SIZE_N': 64, 'BLOCK_SIZE_K': 32, 'GROUP_SIZE_M': 8}, num_stages=4, num_warps=4),\n triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_N': 128, 'BLOCK_SIZE_K': 32, 'GROUP_SIZE_M': 8}, num_stages=4, num_warps=4),\n triton.Config({'BLOCK_SIZE_M': 128, 'BLOCK_SIZE_N': 32, 'BLOCK_SIZE_K': 32, 'GROUP_SIZE_M': 8}, num_stages=4, num_warps=4),\n triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_N': 32, 'BLOCK_SIZE_K': 32, 'GROUP_SIZE_M': 8}, num_stages=5, num_warps=2),\n triton.Config({'BLOCK_SIZE_M': 32, 'BLOCK_SIZE_N': 64, 'BLOCK_SIZE_K': 32, 'GROUP_SIZE_M': 8}, num_stages=5, num_warps=2),\n ],\n key=['M', 'N', 'K'],\n)\n@triton.jit\ndef matmul_kernel(\n # Pointers to matrices\n a_ptr, b_ptr, c_ptr,\n # Matrix dimensions\n M, N, K,\n # The stride variables represent how much to increase the ptr by when moving by 1\n # element in a particular dimension. E.g. stride_am is how much to increase a_ptr\n # by to get the element one row down (A has M rows)\n stride_am, stride_ak,\n stride_bk, stride_bn,\n stride_cm, stride_cn,\n # Meta-parameters\n BLOCK_SIZE_M: tl.constexpr, BLOCK_SIZE_N: tl.constexpr, BLOCK_SIZE_K: tl.constexpr,\n GROUP_SIZE_M: tl.constexpr,\n ACTIVATION: tl.constexpr,\n):\n \"\"\"Kernel for computing the matmul C = A x B.\n A has shape (M, K), B has shape (K, N) and C has shape (M, N)\n \"\"\"\n # -----------------------------------------------------------\n # Map program ids `pid` to the block of C it should compute.\n # This is done in a grouped ordering to promote L2 data reuse\n # See above `L2 Cache Optimizations` section for details\n pid = tl.program_id(axis=0)\n num_pid_m = tl.cdiv(M, BLOCK_SIZE_M)\n num_pid_n = tl.cdiv(N, BLOCK_SIZE_N)\n num_pid_in_group = GROUP_SIZE_M * num_pid_n\n group_id = pid // num_pid_in_group\n first_pid_m = group_id * GROUP_SIZE_M\n group_size_m = min(num_pid_m - first_pid_m, GROUP_SIZE_M)\n pid_m = first_pid_m + (pid % group_size_m)\n pid_n = (pid % num_pid_in_group) // group_size_m\n\n # ----------------------------------------------------------\n # Create pointers for the first blocks of A and B.\n # We will advance this pointer as we move in the K direction\n # and accumulate\n # a_ptrs is a block of [BLOCK_SIZE_M, BLOCK_SIZE_K] pointers\n # b_ptrs is a block of [BLOCK_SIZE_K, BLOCK_SIZE_n] pointers\n # see above `Pointer Arithmetics` section for details\n offs_am = pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M)\n offs_bn = pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N)\n offs_k = tl.arange(0, BLOCK_SIZE_K)\n a_ptrs = a_ptr + (offs_am[:, None] * stride_am + offs_k[None, :] * stride_ak)\n b_ptrs = b_ptr + (offs_k[:, None] * stride_bk + offs_bn[None, :] * stride_bn)\n\n # -----------------------------------------------------------\n # Iterate to compute a block of the C matrix\n # We accumulate into a `[BLOCK_SIZE_M, BLOCK_SIZE_N]` block\n # of fp32 values for higher accuracy.\n # `accumulator` will be converted back to fp16 after the loop\n accumulator = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32)\n for k in range(0, K, BLOCK_SIZE_K):\n # Note that for simplicity, we don't apply a mask here.\n # This means that if K is not a multiple of BLOCK_SIZE_K,\n # this will access out-of-bounds memory and produce an\n # error or (worse!) incorrect results.\n a = tl.load(a_ptrs)\n b = tl.load(b_ptrs)\n # We accumulate along the K dimension\n accumulator += tl.dot(a, b)\n # Advance the ptrs to the next K block\n a_ptrs += BLOCK_SIZE_K * stride_ak\n b_ptrs += BLOCK_SIZE_K * stride_bk\n # you can fuse arbitrary activation functions here\n # while the accumulator is still in FP32!\n if ACTIVATION == \"leaky_relu\":\n accumulator = leaky_relu(accumulator)\n c = accumulator.to(tl.float16)\n\n # -----------------------------------------------------------\n # Write back the block of the output matrix C\n offs_cm = pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M)\n offs_cn = pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N)\n c_ptrs = c_ptr + stride_cm * offs_cm[:, None] + stride_cn * offs_cn[None, :]\n c_mask = (offs_cm[:, None] < M) & (offs_cn[None, :] < N)\n tl.store(c_ptrs, c, mask=c_mask)\n\n\n# we can fuse `leaky_relu` by providing it as an `ACTIVATION` meta-parameter in `_matmul`\n@triton.jit\ndef leaky_relu(x):\n x = x + 1\n return tl.where(x >= 0, x, 0.01 * x)"
     ]
    },
    {
@@ -65,7 +65,7 @@
    },
    "outputs": [],
    "source": [
-    "def matmul(a, b, activation=None):\n # checks constraints\n assert a.shape[1] == b.shape[0], \"incompatible dimensions\"\n assert a.is_contiguous(), \"matrix A must be contiguous\"\n assert b.is_contiguous(), \"matrix B must be contiguous\"\n M, K = a.shape\n K, N = b.shape\n assert (\n K % 32 == 0\n ), \"We don't check memory-out-of-bounds with K so K must be divisible by BLOCK_SIZE_K\"\n # allocates output\n c = torch.empty((M, N), device=a.device, dtype=a.dtype)\n # 1D launch kernel where each block gets its own program.\n grid = lambda META: (\n triton.cdiv(M, META['BLOCK_SIZE_M']) * triton.cdiv(N, META['BLOCK_SIZE_N']),\n )\n matmul_kernel[grid](\n a, b, c,\n M, N, K,\n a.stride(0), a.stride(1),\n b.stride(0), b.stride(1),\n c.stride(0), c.stride(1),\n ACTIVATION=activation,\n )\n return c"
+    "def matmul(a, b, activation=\"\"):\n # checks constraints\n assert a.shape[1] == b.shape[0], \"incompatible dimensions\"\n assert a.is_contiguous(), \"matrix A must be contiguous\"\n assert b.is_contiguous(), \"matrix B must be contiguous\"\n M, K = a.shape\n K, N = b.shape\n assert (\n K % 32 == 0\n ), \"We don't check memory-out-of-bounds with K so K must be divisible by BLOCK_SIZE_K\"\n # allocates output\n c = torch.empty((M, N), device=a.device, dtype=a.dtype)\n # 1D launch kernel where each block gets its own program.\n grid = lambda META: (\n triton.cdiv(M, META['BLOCK_SIZE_M']) * triton.cdiv(N, META['BLOCK_SIZE_N']),\n )\n matmul_kernel[grid](\n a, b, c,\n M, N, K,\n a.stride(0), a.stride(1),\n b.stride(0), b.stride(1),\n c.stride(0), c.stride(1),\n ACTIVATION=activation,\n )\n return c"
     ]
    },
    {
@@ -101,7 +101,7 @@
    },
    "outputs": [],
    "source": [
-    "@triton.testing.perf_report(\n triton.testing.Benchmark(\n x_names=['M', 'N', 'K'], # argument names to use as an x-axis for the plot\n x_vals=[\n 128 * i for i in range(2, 33)\n ], # different possible values for `x_name`\n line_arg='provider', # argument name whose value corresponds to a different line in the plot\n # possible values for `line_arg``\n line_vals=['cublas', 'cublas + relu', 'triton', 'triton + relu'],\n # label name for the lines\n line_names=[\"cuBLAS\", \"cuBLAS (+ torch.nn.LeakyReLU)\", \"Triton\", \"Triton (+ LeakyReLU)\"],\n # line styles\n styles=[('green', '-'), ('green', '--'), ('blue', '-'), ('blue', '--')],\n ylabel=\"TFLOPS\", # label name for the y-axis\n plot_name=\"matmul-performance\", # name for the plot. Used also as a file name for saving the plot.\n args={},\n )\n)\ndef benchmark(M, N, K, provider):\n a = torch.randn((M, K), device='cuda', dtype=torch.float16)\n b = torch.randn((K, N), device='cuda', dtype=torch.float16)\n if provider == 'cublas':\n ms, min_ms, max_ms = triton.testing.do_bench(lambda: torch.matmul(a, b))\n if provider == 'triton':\n ms, min_ms, max_ms = triton.testing.do_bench(lambda: matmul(a, b))\n if provider == 'cublas + relu':\n torch_relu = torch.nn.ReLU(inplace=True)\n ms, min_ms, max_ms = triton.testing.do_bench(\n lambda: torch_relu(torch.matmul(a, b))\n )\n if provider == 'triton + relu':\n ms, min_ms, max_ms = triton.testing.do_bench(\n lambda: matmul(a, b, activation=leaky_relu)\n )\n perf = lambda ms: 2 * M * N * K * 1e-12 / (ms * 1e-3)\n return perf(ms), perf(max_ms), perf(min_ms)\n\n\nbenchmark.run(show_plots=True, print_data=True)"
+    "@triton.testing.perf_report(\n triton.testing.Benchmark(\n x_names=['M', 'N', 'K'], # argument names to use as an x-axis for the plot\n x_vals=[\n 128 * i for i in range(2, 33)\n ], # different possible values for `x_name`\n line_arg='provider', # argument name whose value corresponds to a different line in the plot\n # possible values for `line_arg``\n line_vals=['cublas', 'cublas + relu', 'triton', 'triton + relu'],\n # label name for the lines\n line_names=[\"cuBLAS\", \"cuBLAS (+ torch.nn.LeakyReLU)\", \"Triton\", \"Triton (+ LeakyReLU)\"],\n # line styles\n styles=[('green', '-'), ('green', '--'), ('blue', '-'), ('blue', '--')],\n ylabel=\"TFLOPS\", # label name for the y-axis\n plot_name=\"matmul-performance\", # name for the plot. Used also as a file name for saving the plot.\n args={},\n )\n)\ndef benchmark(M, N, K, provider):\n a = torch.randn((M, K), device='cuda', dtype=torch.float16)\n b = torch.randn((K, N), device='cuda', dtype=torch.float16)\n if provider == 'cublas':\n ms, min_ms, max_ms = triton.testing.do_bench(lambda: torch.matmul(a, b))\n if provider == 'triton':\n ms, min_ms, max_ms = triton.testing.do_bench(lambda: matmul(a, b))\n if provider == 'cublas + relu':\n torch_relu = torch.nn.ReLU(inplace=True)\n ms, min_ms, max_ms = triton.testing.do_bench(\n lambda: torch_relu(torch.matmul(a, b))\n )\n if provider == 'triton + relu':\n ms, min_ms, max_ms = triton.testing.do_bench(\n lambda: matmul(a, b, activation=\"leaky_relu\")\n )\n perf = lambda ms: 2 * M * N * K * 1e-12 / (ms * 1e-3)\n return perf(ms), perf(max_ms), perf(min_ms)\n\n\nbenchmark.run(show_plots=True, print_data=True)"
     ]
    }
 ],
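The notebook hunks above change the `ACTIVATION` meta-parameter from a Python callable to a plain string. Because `ACTIVATION` is declared `tl.constexpr`, the comparison `ACTIVATION == "leaky_relu"` is evaluated when the kernel is specialized, so the untaken branch is compiled out and the epilogue costs nothing when no activation is requested. A minimal sketch of the same compile-time dispatch pattern; the `square` activation, kernel names, and sizes here are illustrative assumptions, not part of this diff:

```python
import torch

import triton
import triton.language as tl


@triton.jit
def square(x):
    # Hypothetical activation, used only to illustrate dispatch.
    return x * x


@triton.jit
def activate_kernel(x_ptr, y_ptr, n_elements,
                    BLOCK_SIZE: tl.constexpr, ACTIVATION: tl.constexpr):
    pid = tl.program_id(axis=0)
    offsets = pid * BLOCK_SIZE + tl.arange(0, BLOCK_SIZE)
    mask = offsets < n_elements
    x = tl.load(x_ptr + offsets, mask=mask)
    # ACTIVATION is a constexpr string: this comparison is resolved at
    # compile time, so the branch is either inlined or removed entirely.
    if ACTIVATION == "square":
        x = square(x)
    tl.store(y_ptr + offsets, x, mask=mask)


x = torch.randn(4096, device='cuda')
y = torch.empty_like(x)
grid = lambda META: (triton.cdiv(x.numel(), META['BLOCK_SIZE']),)
activate_kernel[grid](x, y, x.numel(), BLOCK_SIZE=1024, ACTIVATION="square")
```

A string also keeps the launch parameters free of Python function objects, which is presumably why the tutorial moved away from passing the callable itself.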
diff --git a/master/_downloads/d5fee5b55a64e47f1b5724ec39adf171/03-matrix-multiplication.py b/master/_downloads/d5fee5b55a64e47f1b5724ec39adf171/03-matrix-multiplication.py
index 39bf8c46a..231d3371c 100644
--- a/master/_downloads/d5fee5b55a64e47f1b5724ec39adf171/03-matrix-multiplication.py
+++ b/master/_downloads/d5fee5b55a64e47f1b5724ec39adf171/03-matrix-multiplication.py
@@ -236,8 +236,8 @@ def matmul_kernel(
     b_ptrs += BLOCK_SIZE_K * stride_bk
     # you can fuse arbitrary activation functions here
     # while the accumulator is still in FP32!
-    if ACTIVATION:
-        accumulator = ACTIVATION(accumulator)
+    if ACTIVATION == "leaky_relu":
+        accumulator = leaky_relu(accumulator)
     c = accumulator.to(tl.float16)
 
     # -----------------------------------------------------------
@@ -261,7 +261,7 @@ def leaky_relu(x):
 # and (1) checks any shape constraint; (2) allocates the output; (3) launches the above kernel
 
 
-def matmul(a, b, activation=None):
+def matmul(a, b, activation=""):
     # checks constraints
     assert a.shape[1] == b.shape[0], "incompatible dimensions"
     assert a.is_contiguous(), "matrix A must be contiguous"
@@ -347,7 +347,7 @@ def benchmark(M, N, K, provider):
         )
     if provider == 'triton + relu':
         ms, min_ms, max_ms = triton.testing.do_bench(
-            lambda: matmul(a, b, activation=leaky_relu)
+            lambda: matmul(a, b, activation="leaky_relu")
         )
     perf = lambda ms: 2 * M * N * K * 1e-12 / (ms * 1e-3)
     return perf(ms), perf(max_ms), perf(min_ms)
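With the wrapper's default switched from `None` to the empty string, the no-activation path still falls through (`"" == "leaky_relu"` is false) and callers select the fused epilogue by name. A hypothetical driver, assuming the tutorial's `matmul` and `leaky_relu` defined above are in scope; the shapes and tolerances are illustrative:

```python
import torch

a = torch.randn((512, 512), device='cuda', dtype=torch.float16)
b = torch.randn((512, 512), device='cuda', dtype=torch.float16)

c_plain = matmul(a, b)                           # ACTIVATION == "": no epilogue
c_fused = matmul(a, b, activation="leaky_relu")  # epilogue fused while in FP32

# Reference: apply the tutorial's leaky_relu (note its `x + 1` shift) after a
# plain matmul, then compare up to fp16 rounding.
ref = a @ b + 1
ref = torch.where(ref >= 0, ref, 0.01 * ref)
assert torch.allclose(c_fused, ref, rtol=1e-2, atol=1e-2)
```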
diff --git a/master/_images/sphx_glr_01-vector-add_001.png b/master/_images/sphx_glr_01-vector-add_001.png
index 5c07bcd9f..4d388e6d8 100644
Binary files a/master/_images/sphx_glr_01-vector-add_001.png and b/master/_images/sphx_glr_01-vector-add_001.png differ
diff --git a/master/_images/sphx_glr_01-vector-add_thumb.png b/master/_images/sphx_glr_01-vector-add_thumb.png
index 7867c1015..e04f8920e 100644
Binary files a/master/_images/sphx_glr_01-vector-add_thumb.png and b/master/_images/sphx_glr_01-vector-add_thumb.png differ
diff --git a/master/_images/sphx_glr_02-fused-softmax_001.png b/master/_images/sphx_glr_02-fused-softmax_001.png
index b43cc811a..030a0d4e3 100644
Binary files a/master/_images/sphx_glr_02-fused-softmax_001.png and b/master/_images/sphx_glr_02-fused-softmax_001.png differ
diff --git a/master/_images/sphx_glr_02-fused-softmax_thumb.png b/master/_images/sphx_glr_02-fused-softmax_thumb.png
index 26b4ad159..2c62c2639 100644
Binary files a/master/_images/sphx_glr_02-fused-softmax_thumb.png and b/master/_images/sphx_glr_02-fused-softmax_thumb.png differ
diff --git a/master/_images/sphx_glr_03-matrix-multiplication_001.png b/master/_images/sphx_glr_03-matrix-multiplication_001.png
index f31025b9a..260021c0a 100644
Binary files a/master/_images/sphx_glr_03-matrix-multiplication_001.png and b/master/_images/sphx_glr_03-matrix-multiplication_001.png differ
diff --git a/master/_images/sphx_glr_03-matrix-multiplication_thumb.png b/master/_images/sphx_glr_03-matrix-multiplication_thumb.png
index 526e99f62..b636f0ad5 100644
Binary files a/master/_images/sphx_glr_03-matrix-multiplication_thumb.png and b/master/_images/sphx_glr_03-matrix-multiplication_thumb.png differ
diff --git a/master/_images/sphx_glr_05-layer-norm_001.png b/master/_images/sphx_glr_05-layer-norm_001.png
index 1b41f3f5a..bffbb7c04 100644
Binary files a/master/_images/sphx_glr_05-layer-norm_001.png and b/master/_images/sphx_glr_05-layer-norm_001.png differ
diff --git a/master/_images/sphx_glr_05-layer-norm_thumb.png b/master/_images/sphx_glr_05-layer-norm_thumb.png
index aa1f521bf..14402fa86 100644
Binary files a/master/_images/sphx_glr_05-layer-norm_thumb.png and b/master/_images/sphx_glr_05-layer-norm_thumb.png differ
diff --git a/master/_sources/getting-started/tutorials/01-vector-add.rst.txt b/master/_sources/getting-started/tutorials/01-vector-add.rst.txt
index 7ec8ec44a..bfd0cb3a3 100644
--- a/master/_sources/getting-started/tutorials/01-vector-add.rst.txt
+++ b/master/_sources/getting-started/tutorials/01-vector-add.rst.txt
@@ -234,7 +234,7 @@ We can now run the decorated function above. Pass `print_data=True` to see the p
            size      Triton       Torch
     0       4096.0    9.600000    9.600000
     1       8192.0   19.200000   19.200000
-    2      16384.0   38.400001   38.400001
+    2      16384.0   31.999999   31.999999
     3      32768.0   76.800002   76.800002
     4      65536.0  127.999995  127.999995
     5     131072.0  219.428568  219.428568
@@ -255,7 +255,7 @@ We can now run the decorated function above. Pass `print_data=True` to see the p
 
 .. rst-class:: sphx-glr-timing
 
-   **Total running time of the script:** ( 1 minutes 42.248 seconds)
+   **Total running time of the script:** ( 1 minutes 38.087 seconds)
 
 
 .. _sphx_glr_download_getting-started_tutorials_01-vector-add.py:
diff --git a/master/_sources/getting-started/tutorials/02-fused-softmax.rst.txt b/master/_sources/getting-started/tutorials/02-fused-softmax.rst.txt
index 02a395730..2b764d1ee 100644
--- a/master/_sources/getting-started/tutorials/02-fused-softmax.rst.txt
+++ b/master/_sources/getting-started/tutorials/02-fused-softmax.rst.txt
@@ -278,17 +278,17 @@ We will then compare its performance against (1) :code:`torch.softmax` and (2) t
     softmax-performance:
              N      Triton  Torch (native)  Torch (jit)
-    0     256.0  512.000001      512.000001   186.181817
-    1     384.0  614.400016      585.142862   153.600004
+    0     256.0  512.000001      512.000001   190.511628
+    1     384.0  614.400016      558.545450   153.600004
     2     512.0  655.360017      585.142849   154.566038
-    3     640.0  682.666684      640.000002   158.759699
+    3     640.0  706.206879      640.000002   160.000000
     4     768.0  722.823517      664.216187   162.754967
     ..      ...         ...             ...          ...
-    93  12160.0  812.359066      406.179533   198.834951
-    94  12288.0  812.429770      415.222812   199.197579
-    95  12416.0  810.840807      412.149375   198.854847
-    96  12544.0  810.925276      412.546756   199.111113
-    97  12672.0  811.007961      412.097543   199.167004
+    93  12160.0  812.359066      406.179533   199.038365
+    94  12288.0  812.429770      415.222812   199.298541
+    95  12416.0  812.498981      412.149375   198.954424
+    96  12544.0  810.925276      412.546756   199.209928
+    97  12672.0  811.007961      412.097543   199.264875
 
     [98 rows x 4 columns]
@@ -306,7 +306,7 @@ In the above plot, we can see that:
 
 .. rst-class:: sphx-glr-timing
 
-   **Total running time of the script:** ( 3 minutes 25.192 seconds)
+   **Total running time of the script:** ( 3 minutes 26.158 seconds)
 
 
 .. _sphx_glr_download_getting-started_tutorials_02-fused-softmax.py:
diff --git a/master/_sources/getting-started/tutorials/03-matrix-multiplication.rst.txt b/master/_sources/getting-started/tutorials/03-matrix-multiplication.rst.txt
index 96286151a..4ca243d7c 100644
--- a/master/_sources/getting-started/tutorials/03-matrix-multiplication.rst.txt
+++ b/master/_sources/getting-started/tutorials/03-matrix-multiplication.rst.txt
@@ -262,8 +262,8 @@ Final Result
         b_ptrs += BLOCK_SIZE_K * stride_bk
         # you can fuse arbitrary activation functions here
         # while the accumulator is still in FP32!
-        if ACTIVATION:
-            accumulator = ACTIVATION(accumulator)
+        if ACTIVATION == "leaky_relu":
+            accumulator = leaky_relu(accumulator)
         c = accumulator.to(tl.float16)
 
         # -----------------------------------------------------------
@@ -300,7 +300,7 @@ and (1) checks any shape constraint; (2) allocates the output; (3) launches the
 
 
 
-    def matmul(a, b, activation=None):
+    def matmul(a, b, activation=""):
         # checks constraints
         assert a.shape[1] == b.shape[0], "incompatible dimensions"
         assert a.is_contiguous(), "matrix A must be contiguous"
@@ -436,7 +436,7 @@ We can now compare the performance of our kernel against that of cuBLAS. Here we
             )
         if provider == 'triton + relu':
             ms, min_ms, max_ms = triton.testing.do_bench(
-                lambda: matmul(a, b, activation=leaky_relu)
+                lambda: matmul(a, b, activation="leaky_relu")
             )
         perf = lambda ms: 2 * M * N * K * 1e-12 / (ms * 1e-3)
         return perf(ms), perf(max_ms), perf(min_ms)
@@ -459,37 +459,37 @@ We can now compare the performance of our kernel against that of cuBLAS. Here we
     matmul-performance:
          M     cuBLAS  ...     Triton  Triton (+ LeakyReLU)
-    0    256.0   2.730667  ...   2.978909   2.978909
-    1    384.0   7.372800  ...   7.899428   7.899428
+    0    256.0   2.978909  ...   2.978909   2.978909
+    1    384.0   7.372800  ...   8.507077   8.507077
     2    512.0  14.563555  ...  15.420235  15.420235
     3    640.0  22.260869  ...  24.380953  24.380953
     4    768.0  32.768000  ...  34.028308  34.028308
     5    896.0  39.025776  ...  40.140799  39.025776
     6   1024.0  49.932191  ...  53.773130  52.428801
-    7   1152.0  45.242181  ...  47.396572  47.396572
+    7   1152.0  45.242181  ...  48.161033  47.396572
     8   1280.0  51.200001  ...  57.690139  57.690139
     9   1408.0  64.138541  ...  68.147202  67.305878
-   10   1536.0  80.430545  ...  80.430545  79.526831
+   10   1536.0  80.430545  ...  81.355034  79.526831
    11   1664.0  63.372618  ...  63.372618  62.492442
    12   1792.0  72.983276  ...  73.460287  59.467852
-   13   1920.0  68.776119  ...  71.626943  71.257735
-   14   2048.0  73.908442  ...  78.398206  77.314362
-   15   2176.0  83.500614  ...  87.494120  85.998493
-   16   2304.0  68.446623  ...  78.064941  77.307030
-   17   2432.0  71.125224  ...  86.179335  85.653855
-   18   2560.0  77.833728  ...  82.331658  81.108913
-   19   2688.0  83.737433  ...  91.185232  89.888756
-   20   2816.0  83.233216  ...  84.441840  84.197315
-   21   2944.0  81.564701  ...  83.758038  82.373605
-   22   3072.0  82.540970  ...  89.593522  88.335577
-   23   3200.0  83.989503  ...  95.096582  89.012517
-   24   3328.0  82.464255  ...  82.939284  84.596116
-   25   3456.0  81.932484  ...  90.994998  91.200871
-   26   3584.0  87.127323  ...  99.354022  92.600816
-   27   3712.0  84.159518  ...  89.353616  83.247783
-   28   3840.0  85.136259  ...  93.484358  86.738820
-   29   3968.0  92.302520  ...  87.976885  90.926929
-   30   4096.0  91.741443  ...  90.933416  91.304576
+   13   1920.0  68.776119  ...  71.257735  70.892307
+   14   2048.0  73.584279  ...  78.033565  76.959706
+   15   2176.0  83.155572  ...  87.494120  85.998493
+   16   2304.0  68.446623  ...  78.320893  77.558029
+   17   2432.0  71.305746  ...  86.711310  75.320281
+   18   2560.0  77.833728  ...  82.747477  81.715711
+   19   2688.0  83.552988  ...  90.532356  89.676257
+   20   2816.0  83.552120  ...  84.035084  83.392363
+   21   2944.0  81.832567  ...  83.758038  81.967162
+   22   3072.0  82.540970  ...  89.877939  89.170242
+   23   3200.0  84.321474  ...  96.822991  95.380032
+   24   3328.0  83.034941  ...  85.806075  84.596116
+   25   3456.0  82.183044  ...  91.928814  87.632137
+   26   3584.0  87.381330  ...  92.696281  96.891584
+   27   3712.0  84.694652  ...  87.244203  88.092894
+   28   3840.0  85.136259  ...  88.900318  90.279183
+   29   3968.0  88.008611  ...  92.547541  84.268854
+   30   4096.0  93.368854  ...  87.781379  86.592080
 
     [31 rows x 5 columns]
@@ -499,7 +499,7 @@ We can now compare the performance of our kernel against that of cuBLAS. Here we
 
 .. rst-class:: sphx-glr-timing
 
-   **Total running time of the script:** ( 6 minutes 44.471 seconds)
+   **Total running time of the script:** ( 6 minutes 21.318 seconds)
 
 
 .. _sphx_glr_download_getting-started_tutorials_03-matrix-multiplication.py:
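Every TFLOPS entry in the table above comes from the script's `perf = lambda ms: 2 * M * N * K * 1e-12 / (ms * 1e-3)`, i.e. two floating-point operations per multiply-accumulate. A quick sanity check of the magnitude, with an assumed (not measured) runtime:

```python
# For the largest problem in the table (M = N = K = 4096), an assumed
# runtime of 1.5 ms gives roughly the scale reported above.
M = N = K = 4096
ms = 1.5                                      # illustrative runtime, assumed
tflops = 2 * M * N * K * 1e-12 / (ms * 1e-3)  # 2*M*N*K FLOP over ms*1e-3 s
print(f"{tflops:.1f} TFLOPS")                 # ~91.6
```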
diff --git a/master/_sources/getting-started/tutorials/04-low-memory-dropout.rst.txt b/master/_sources/getting-started/tutorials/04-low-memory-dropout.rst.txt
index f8c09a8d5..edbf188ee 100644
--- a/master/_sources/getting-started/tutorials/04-low-memory-dropout.rst.txt
+++ b/master/_sources/getting-started/tutorials/04-low-memory-dropout.rst.txt
@@ -240,7 +240,7 @@ References
 
 .. rst-class:: sphx-glr-timing
 
-   **Total running time of the script:** ( 0 minutes 0.275 seconds)
+   **Total running time of the script:** ( 0 minutes 0.279 seconds)
 
 
 .. _sphx_glr_download_getting-started_tutorials_04-low-memory-dropout.py:
diff --git a/master/_sources/getting-started/tutorials/05-layer-norm.rst.txt b/master/_sources/getting-started/tutorials/05-layer-norm.rst.txt
index 98a94cb66..b24ab5b49 100644
--- a/master/_sources/getting-started/tutorials/05-layer-norm.rst.txt
+++ b/master/_sources/getting-started/tutorials/05-layer-norm.rst.txt
@@ -42,31 +42,31 @@ Layer Normalization
     1    1536.0  630.153868  323.368435  511.999982
     2    2048.0  668.734716  334.367358  520.126988
     3    2560.0  694.237267  362.477870  512.000013
-    4    3072.0  712.347810  378.092307  501.551037
-    5    3584.0  725.873439  384.859062  458.751978
-    6    4096.0  728.177767  381.023256  458.293714
-    7    4608.0  670.254540  396.387087  426.173427
-    8    5120.0  688.403381  397.669909  426.666652
-    9    5632.0  698.542675  395.228063  413.357796
-   10    6144.0  697.191505  402.885254  411.313806
-   11    6656.0  700.631610  400.360920  398.861429
-   12    7168.0  690.891575  396.844306  388.772874
-   13    7680.0  678.895043  392.587863  387.634072
+    4    3072.0  712.347810  375.206126  496.484863
+    5    3584.0  725.873439  384.859062  451.527536
+    6    4096.0  728.177767  381.023256  455.111095
+    7    4608.0  670.254540  394.267384  421.302872
+    8    5120.0  688.403381  397.669909  424.455959
+    9    5632.0  698.542675  395.228063  411.470331
+   10    6144.0  697.191505  402.885254  409.600010
+   11    6656.0  700.631610  398.861429  398.861429
+   12    7168.0  690.891575  396.844306  387.459443
+   13    7680.0  678.895043  393.846167  386.415087
    14    8192.0  639.375598  393.609605  372.363633
    15    8704.0  624.502255  389.005597  380.502740
    16    9216.0  604.327881  407.337026  383.002605
-   17    9728.0  585.142883  409.599987  383.369452
-   18   10240.0  564.965524  408.578556  382.803739
+   17    9728.0  585.142883  409.599987  382.427505
+   18   10240.0  564.965524  408.578556  381.911416
    19   10752.0  546.133312  411.559798  381.445676
-   20   11264.0  533.207081  406.826188  373.134567
+   20   11264.0  531.634232  406.826188  373.134567
    21   11776.0  520.486200  409.599991  377.587162
-   22   12288.0  516.031509  413.911572  383.251457
+   22   12288.0  514.680630  413.911572  383.251457
    23   12800.0  504.433489  410.420828  376.470582
    24   13312.0  494.180982  405.699062  376.310952
-   25   13824.0  482.934503  411.888257  379.389355
-   26   14336.0  471.967074  406.695045  374.185964
+   25   13824.0  481.882350  411.122660  379.389355
+   26   14336.0  470.997935  406.695045  374.185964
    27   14848.0  461.297068  408.192434  375.304904
-   28   15360.0  454.269882  406.214870  378.092307
+   28   15360.0  454.269882  406.214870  377.511515
    29   15872.0  447.887117  406.974373  376.225175
@@ -393,7 +393,7 @@ Layer Normalization
 
 .. rst-class:: sphx-glr-timing
 
-   **Total running time of the script:** ( 5 minutes 28.983 seconds)
+   **Total running time of the script:** ( 5 minutes 33.112 seconds)
 
 
 .. _sphx_glr_download_getting-started_tutorials_05-layer-norm.py:
diff --git a/master/_sources/getting-started/tutorials/07-libdevice-function.rst.txt b/master/_sources/getting-started/tutorials/07-libdevice-function.rst.txt
index 490652a7f..3c939bd8b 100644
--- a/master/_sources/getting-started/tutorials/07-libdevice-function.rst.txt
+++ b/master/_sources/getting-started/tutorials/07-libdevice-function.rst.txt
@@ -152,7 +152,7 @@ We can also customize the libdevice library path by passing the path to the `lib
 
 .. rst-class:: sphx-glr-timing
 
-   **Total running time of the script:** ( 0 minutes 0.249 seconds)
+   **Total running time of the script:** ( 0 minutes 0.257 seconds)
 
 
 .. _sphx_glr_download_getting-started_tutorials_07-libdevice-function.py:
diff --git a/master/_sources/getting-started/tutorials/sg_execution_times.rst.txt b/master/_sources/getting-started/tutorials/sg_execution_times.rst.txt
index b99c083b0..a336bb554 100644
--- a/master/_sources/getting-started/tutorials/sg_execution_times.rst.txt
+++ b/master/_sources/getting-started/tutorials/sg_execution_times.rst.txt
@@ -5,20 +5,20 @@
 Computation times
 =================
-**17:21.490** total execution time for **getting-started_tutorials** files:
+**16:59.282** total execution time for **getting-started_tutorials** files:
 
 +---------------------------------------------------------------------------------------------------------+-----------+--------+
-| :ref:`sphx_glr_getting-started_tutorials_03-matrix-multiplication.py` (``03-matrix-multiplication.py``) | 06:44.471 | 0.0 MB |
+| :ref:`sphx_glr_getting-started_tutorials_03-matrix-multiplication.py` (``03-matrix-multiplication.py``) | 06:21.318 | 0.0 MB |
 +---------------------------------------------------------------------------------------------------------+-----------+--------+
-| :ref:`sphx_glr_getting-started_tutorials_05-layer-norm.py` (``05-layer-norm.py``)                       | 05:28.983 | 0.0 MB |
+| :ref:`sphx_glr_getting-started_tutorials_05-layer-norm.py` (``05-layer-norm.py``)                       | 05:33.112 | 0.0 MB |
 +---------------------------------------------------------------------------------------------------------+-----------+--------+
-| :ref:`sphx_glr_getting-started_tutorials_02-fused-softmax.py` (``02-fused-softmax.py``)                 | 03:25.192 | 0.0 MB |
+| :ref:`sphx_glr_getting-started_tutorials_02-fused-softmax.py` (``02-fused-softmax.py``)                 | 03:26.158 | 0.0 MB |
 +---------------------------------------------------------------------------------------------------------+-----------+--------+
-| :ref:`sphx_glr_getting-started_tutorials_01-vector-add.py` (``01-vector-add.py``)                       | 01:42.248 | 0.0 MB |
+| :ref:`sphx_glr_getting-started_tutorials_01-vector-add.py` (``01-vector-add.py``)                       | 01:38.087 | 0.0 MB |
 +---------------------------------------------------------------------------------------------------------+-----------+--------+
-| :ref:`sphx_glr_getting-started_tutorials_04-low-memory-dropout.py` (``04-low-memory-dropout.py``)       | 00:00.275 | 0.0 MB |
+| :ref:`sphx_glr_getting-started_tutorials_04-low-memory-dropout.py` (``04-low-memory-dropout.py``)       | 00:00.279 | 0.0 MB |
 +---------------------------------------------------------------------------------------------------------+-----------+--------+
-| :ref:`sphx_glr_getting-started_tutorials_07-libdevice-function.py` (``07-libdevice-function.py``)       | 00:00.249 | 0.0 MB |
+| :ref:`sphx_glr_getting-started_tutorials_07-libdevice-function.py` (``07-libdevice-function.py``)       | 00:00.257 | 0.0 MB |
 +---------------------------------------------------------------------------------------------------------+-----------+--------+
 | :ref:`sphx_glr_getting-started_tutorials_06-fused-attention.py` (``06-fused-attention.py``)             | 00:00.072 | 0.0 MB |
 +---------------------------------------------------------------------------------------------------------+-----------+--------+
diff --git a/master/getting-started/tutorials/01-vector-add.html b/master/getting-started/tutorials/01-vector-add.html
index 492365ad9..80ae794e2 100644
--- a/master/getting-started/tutorials/01-vector-add.html
+++ b/master/getting-started/tutorials/01-vector-add.html
@@ -326,7 +326,7 @@ for different problem sizes.
        size      Triton       Torch
 0     4096.0    9.600000    9.600000
 1     8192.0   19.200000   19.200000
-2    16384.0   38.400001   38.400001
+2    16384.0   31.999999   31.999999
 3    32768.0   76.800002   76.800002
 4    65536.0  127.999995  127.999995
 5   131072.0  219.428568  219.428568
@@ -342,7 +342,7 @@ for different problem sizes.
 15  134217728.0  849.737435  850.656574
-Total running time of the script: ( 1 minutes 42.248 seconds)
+Total running time of the script: ( 1 minutes 38.087 seconds)