[GH-PAGES] Updated documentation

This commit is contained in:
Philippe Tillet
2021-03-06 17:29:14 -05:00
parent 449387489c
commit 9d95317e80
10 changed files with 0 additions and 1060 deletions

View File

@@ -1,20 +0,0 @@
# Minimal makefile for Sphinx documentation
#
# You can set these variables from the command line.
SPHINXOPTS =
SPHINXBUILD = sphinx-build
SPHINXPROJ = Triton
SOURCEDIR = .
BUILDDIR = _build
# Put it first so that "make" without argument is like "make help".
help:
	@$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
.PHONY: help Makefile
# Catch-all target: route all unknown targets to Sphinx using the new
# "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS).
%: Makefile
	@$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)

View File

@@ -1,160 +0,0 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Triton documentation build configuration file, created by
# sphinx-quickstart on Mon Feb 10 01:19:09 2020.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['nbsphinx', 'sphinx.ext.autosectionlabel']
autosectionlabel_prefix_document = True
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'Triton'
copyright = '2020, Philippe Tillet'
author = 'Philippe Tillet'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = ''
# The full version, including alpha/beta/rc tags.
release = ''
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# These patterns also affect html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store', '**.ipynb_checkpoints']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# This is required for the alabaster theme
# refs: http://alabaster.readthedocs.io/en/latest/installation.html#sidebars
html_sidebars = {
    '**': [
        'relations.html',  # needs 'show_related': True theme option to display
        'searchbox.html',
    ]
}
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'Tritondoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'Triton.tex', 'Triton Documentation', 'Philippe Tillet', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [(master_doc, 'triton', 'Triton Documentation', [author], 1)]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(
master_doc, 'Triton', 'Triton Documentation', author, 'Triton', 'One line description of project.',
'Miscellaneous'
),
]

View File

@@ -1,21 +0,0 @@
.. Triton documentation master file, created by
   sphinx-quickstart on Mon Feb 10 01:01:37 2020.
   You can adapt this file completely to your liking, but it should at least
   contain the root `toctree` directive.

Welcome to Triton's documentation!
==================================

.. toctree::
   :maxdepth: 1
   :caption: Installation Instructions

   installation/packaged-binaries
   installation/from-source

.. toctree::
   :maxdepth: 1
   :caption: Tutorials

   Vector Addition <tutorials/01-vector-add.ipynb>
   Fused Softmax <tutorials/02-fused-softmax.ipynb>

View File

@@ -1,52 +0,0 @@
==============
From Source
==============

+++++++++++++++
Python Package
+++++++++++++++

You can install the Python package from source by running the following commands:

.. code-block:: bash

   sudo apt-get install llvm-10-dev
   git clone https://github.com/ptillet/triton.git;
   cd triton/python;
   pip install -e .

You can then test your installation by running the unit tests:

.. code-block:: bash

   pytest -vs .

and the benchmarks:

.. code-block:: bash

   cd bench/
   python -m run --with-plots --result-dir /tmp/triton-bench

+++++++++++++++
C++ Package
+++++++++++++++

Those not interested in Python integration may want to use the internals of Triton (i.e., runtime, parser, codegen, driver, intermediate representation) directly. This can be done by running the following commands:

.. code-block:: bash

   sudo apt-get install llvm-10-dev
   git clone https://github.com/ptillet/triton.git;
   cd triton;
   mkdir build;
   cd build;
   cmake ../;
   make -j8;

A custom llvm-config binary can also be provided:

.. code-block:: bash

   cmake ../ -DLLVM_CONFIG=/path/to/llvm-config

Note that while direct usage of the C++ API is not officially supported, a usage tutorial can be found `here <https://github.com/ptillet/triton/blob/master/tutorials/01-matmul.cc>`_.

View File

@@ -1,8 +0,0 @@
Packaged Binaries
=================

Triton can be installed directly from pip with the following command:

.. code-block:: bash

   pip install triton
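If the installation succeeded, the package should be importable. As a minimal sanity check (assuming you run the same Python environment the wheel was installed into):

.. code-block:: python

   import triton
   print(triton.__file__)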

View File

@@ -1 +0,0 @@
../../python/tutorials/01-vector-add.ipynb

View File

@@ -1 +0,0 @@
../../python/tutorials/02-fused-softmax.ipynb

View File

@@ -1,329 +0,0 @@
{
"cells": [
{
"cell_type": "markdown",
"id": "acute-possession",
"metadata": {},
"source": [
"# Vector Addition"
]
},
{
"cell_type": "markdown",
"id": "median-malaysia",
"metadata": {},
"source": [
"In this tutorial, we will see how to construct a simple, high-performance vector addition using Triton. You will learn:\n",
"* The basic syntax of the Triton programming language\n",
"* The best practices for creating PyTorch custom operators using the `triton.kernel` Python API\n",
"* The best practices for validating and benchmarking custom ops against native reference implementations"
]
},
{
"cell_type": "markdown",
"id": "identical-conditions",
"metadata": {},
"source": [
"## Writing the Compute Kernel"
]
},
{
"cell_type": "markdown",
"id": "collectible-belle",
"metadata": {},
"source": [
"Each compute kernel is declared using the `__global__` attribute, and executed many times in parallel on different chunks of data (See the [Single Program, Multiple Data](https://en.wikipedia.org/wiki/SPMD) programming model for more details).\n",
"\n",
"\n",
"```c\n",
"__global__ void add(float* z, float* x, float* y, int N){\n",
" // The `get_program_id(i)` returns the i-th coordinate\n",
" // of the program in the overaching SPMD context\n",
" // (a.k.a launch grid). This is what allows us to process\n",
" // different chunks of data in parallel.\n",
" // For those similar with CUDA, `get_program_id({0,1,2})`\n",
" // is similar to blockIdx.{x,y,z}\n",
" int pid = get_program_id(0);\n",
" // In Triton, arrays are first-class citizen. In other words,\n",
" // they are primitives data-types and are -- contrary to C and\n",
" // CUDA -- not implemented as pointers to contiguous chunks of\n",
" // memory.\n",
" // In the few lines below, we create an array of `BLOCK` pointers\n",
" // whose memory values are, e.g.:\n",
" // [z + pid*BLOCK + 0, z + pid*BLOCK + 1, ..., z + pid*BLOCK + BLOCK - 1]\n",
" // Note: here BLOCK is expected to be a pre-processor macro defined at compile-time\n",
" int offset[BLOCK] = pid * BLOCK + 0 ... BLOCK;\n",
" float* pz [BLOCK] = z + offset;\n",
" float* px [BLOCK] = x + offset;\n",
" float* py [BLOCK] = y + offset;\n",
" // Simple element-wise control-flow for load/store operations can\n",
" // be achieved using the the ternary operator `cond ? val_true : val_false`\n",
" // or the conditional dereferencing operator `*?(cond)ptr\n",
" // Here, we make sure that we do not access memory out-of-bounds when we\n",
" // write-back `z`\n",
" bool check[BLOCK] = offset < N;\n",
" *?(check)pz = *?(check)px + *?(check)py;\n",
"}\n",
"```\n",
"\n",
"The existence of arrays as a primitive data-type for Triton comes with a number of advantages that are highlighted in the [MAPL'2019 Triton paper](http://www.eecs.harvard.edu/~htk/publication/2019-mapl-tillet-kung-cox.pdf)."
]
},
{
"cell_type": "markdown",
"id": "forbidden-wednesday",
"metadata": {},
"source": [
"## Writing the Torch bindings"
]
},
{
"cell_type": "markdown",
"id": "numerical-agency",
"metadata": {},
"source": [
"The only thing that matters when it comes to Triton and Torch is the `triton.kernel` class. This allows you to transform the above C-like function into a callable python object that can be used to modify `torch.tensor` objects.\n",
"\n",
"To create a `triton.kernel`, you only need three things:\n",
"* `source: string`: the source-code of the kernel you want to create\n",
"* `device: torch.device`: the device you want to compile this code for\n",
"* `defines: dict`: the set of macros that you want the pre-processor to `#define` for you\n",
"\n",
"Note: The constructor of `triton.kernel` does some just-in-time compilation, so expect some overhead there. For this reason, I personally like to initialize kernels lazily in a cache (see `_kernels` variable below). This also makes it possible to choose the compilation device dynamically based on the type of the operator's inputs."
]
},
{
"cell_type": "code",
"execution_count": 10,
"id": "sporting-keyboard",
"metadata": {},
"outputs": [],
"source": [
"import torch\n",
"import triton\n",
"\n",
"# source-code for Triton compute kernel\n",
"# here we just copy-paste the above code without the extensive comments.\n",
"# you may prefer to store it in a .c file and load it from there instead.\n",
"_src = \"\"\"\n",
"__global__ void add(float* z, float* x, float* y, int N){\n",
" // program id\n",
" int pid = get_program_id(0);\n",
" // create arrays of pointers\n",
" int offset[BLOCK] = pid * BLOCK + 0 ... BLOCK;\n",
" float* pz[BLOCK] = z + offset;\n",
" float* px[BLOCK] = x + offset;\n",
" float* py[BLOCK] = y + offset;\n",
" // bounds checking\n",
" bool check[BLOCK] = offset < N;\n",
" // write-back\n",
" *?(check)pz = *?(check)px + *?(check)py;\n",
"}\n",
" \"\"\"\n",
"# This function returns a callable `triton.kernel` object\n",
"# created from the above source code.\n",
"# For portability, we maintain a cache of kernels for different `torch.device`\n",
"# We compile the kernel with -DBLOCK=1024\n",
"_kernels = dict()\n",
"def make_add_kernel(device):\n",
" if device not in _kernels:\n",
" defines = {'BLOCK': 1024}\n",
" _kernels[device] = triton.kernel(_src, device=device, defines=defines)\n",
" return _kernels[device]\n",
"\n",
"# This is a standard torch custom autograd Function\n",
"# The only difference is that we can now use the above kernel\n",
"# in the `forward` and `backward` functions.`\n",
"class _add(torch.autograd.Function):\n",
" \n",
" @staticmethod\n",
" def forward(ctx, x, y):\n",
" # constraints of the op\n",
" assert x.dtype == torch.float32\n",
" # *allocate output*\n",
" z = torch.empty_like(x)\n",
" # *create launch grid*:\n",
" # this is a function which takes compilation parameters `opt`\n",
" # as input and returns a tuple of int (i.e., launch grid) for the kernel.\n",
" # triton.cdiv is a shortcut for ceil division:\n",
" # triton.cdiv(a, b) = (a + b - 1) // b\n",
" N = z.shape[0]\n",
" grid = lambda opt: (triton.cdiv(N, opt.BLOCK), )\n",
" # *launch kernel*:\n",
" # pointer to the data of torch tensors can be retrieved with\n",
" # the `.data_ptr()` method\n",
" kernel = make_add_kernel(z.device)\n",
" kernel(z.data_ptr(), x.data_ptr(), y.data_ptr(), N, grid = grid)\n",
" return z\n",
"# Just like we standard PyTorch ops\n",
"# We use the `.apply` method to create a \n",
"# callable object for our function\n",
"add = _add.apply"
]
},
{
"cell_type": "markdown",
"id": "separated-polyester",
"metadata": {},
"source": [
"At this point `add(x, y)` is equivalent to `x + y` for contiguous tensors. Now let's test and benchmark it!"
]
},
{
"cell_type": "markdown",
"id": "exclusive-salvation",
"metadata": {},
"source": [
"## Writing a Unit Test"
]
},
{
"cell_type": "code",
"execution_count": 9,
"id": "supported-ribbon",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"tensor([1.3713, 1.3076, 0.4940, ..., 0.6682, 1.1984, 1.2696], device='cuda:0')\n",
"tensor([1.3713, 1.3076, 0.4940, ..., 0.6682, 1.1984, 1.2696], device='cuda:0')\n",
"The maximum difference between torch and triton is 0.0\n"
]
}
],
"source": [
"torch.manual_seed(0)\n",
"x = torch.rand(98432, device='cuda')\n",
"y = torch.rand(98432, device='cuda')\n",
"za = x + y\n",
"zb = add(x, y)\n",
"print(za)\n",
"print(zb)\n",
"print(f'The maximum difference between torch and triton is '\n",
" f'{torch.max(torch.abs(za - zb))}')"
]
},
{
"cell_type": "markdown",
"id": "otherwise-canadian",
"metadata": {},
"source": [
"Seems to work!"
]
},
{
"cell_type": "markdown",
"id": "polished-australia",
"metadata": {},
"source": [
"## Writing a Benchmark"
]
},
{
"cell_type": "markdown",
"id": "historic-glass",
"metadata": {},
"source": [
"The performance of our GPU code can be benchmark using the `torch.cuda.Event(enable_timing=True)` wrapper. Below is a simple function that benchmarks `rep` runs of our kernels after `warmup` \"cold\" runs."
]
},
{
"cell_type": "code",
"execution_count": 11,
"id": "strange-luxembourg",
"metadata": {},
"outputs": [],
"source": [
"# We now want to benchmark the performance of `add`\n",
"# Against that of PyTorch for increasing vector sizes\n",
"def do_bench(fn, warmup = 10, rep = 50):\n",
" start_event = torch.cuda.Event(enable_timing=True)\n",
" end_event = torch.cuda.Event(enable_timing=True)\n",
" ret = fn()\n",
" for i in range(warmup):\n",
" fn()\n",
" torch.cuda.synchronize()\n",
" start_event.record()\n",
" for i in range(rep):\n",
" fn()\n",
" end_event.record()\n",
" torch.cuda.synchronize()\n",
" time_ms = start_event.elapsed_time(end_event) / rep\n",
" return time_ms"
]
},
{
"cell_type": "markdown",
"id": "hairy-claim",
"metadata": {},
"source": [
"We can now benchmark our custom op for vectors of increasing sizes to get a sense of how it does"
]
},
{
"cell_type": "code",
"execution_count": 15,
"id": "pleasant-valley",
"metadata": {
"scrolled": true
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"131072 0.020 0.003\n",
"262144 0.019 0.004\n",
"524288 0.016 0.016\n",
"1048576 0.033 0.033\n",
"2097152 0.071 0.070\n",
"4194304 0.142 0.144\n",
"8388608 0.287 0.286\n",
"16777216 0.572 0.568\n",
"33554432 1.139 1.110\n"
]
}
],
"source": [
"for N in [2**i for i in range(17, 26, 1)]:\n",
" x = torch.rand(N, device='cuda')\n",
" y = torch.rand(N, device='cuda')\n",
" triton_ms = do_bench(lambda: add(x, y))\n",
" torch_ms = do_bench(lambda: x + y)\n",
" # print the performance of triton and torch as well as the achieved bandwidth\n",
" print(f'{N} {triton_ms:.3f} {torch_ms:.3f}')"
]
},
{
"cell_type": "markdown",
"id": "juvenile-supplement",
"metadata": {},
"source": [
"Our op is on-par with Torch's vectorized element-wise kernel when the vectors are large enough. One caveat is that the latency of PyTorch is much smaller for small vectors (3us vs 18-20us). This is something we are actively working on to reduce."
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.7.9"
}
},
"nbformat": 4,
"nbformat_minor": 5
}

View File

@@ -1,160 +0,0 @@
"""
Vector Addition
=================
In this tutorial, we will see how to construct a simple, high-performance vector addition using Triton. You will learn:
* The basic syntax of the Triton programming language
* The best practices for creating PyTorch custom operators using the `triton.kernel` Python API
* The best practices for validating and benchmarking custom ops against native reference implementations
"""
# %%
# Writing the Compute Kernel
# --------------------------
#
# Each compute kernel is declared using the `__global__` attribute, and executed many times in parallel
# on different chunks of data (see the `Single Program, Multiple Data <https://en.wikipedia.org/wiki/SPMD>`_
# programming model for more details).
#
# .. code-block:: c
#     :linenos:
#
#     __global__ void add(float* z, float* x, float* y, int N){
#         // The `get_program_id(i)` returns the i-th coordinate
#         // of the program in the overarching SPMD context
#         // (a.k.a launch grid). This is what allows us to process
#         // different chunks of data in parallel.
#         // For those familiar with CUDA, `get_program_id({0,1,2})`
#         // is similar to blockIdx.{x,y,z}
#         int pid = get_program_id(0);
#         // In Triton, arrays are first-class citizens. In other words,
#         // they are primitive data-types and are -- contrary to C and
#         // CUDA -- not implemented as pointers to contiguous chunks of
#         // memory.
#         // In the few lines below, we create an array of `BLOCK` pointers
#         // whose memory values are, e.g.:
#         // [z + pid*BLOCK + 0, z + pid*BLOCK + 1, ..., z + pid*BLOCK + BLOCK - 1]
#         // Note: here BLOCK is expected to be a pre-processor macro defined at compile-time
#         int offset[BLOCK] = pid * BLOCK + 0 ... BLOCK;
#         float* pz [BLOCK] = z + offset;
#         float* px [BLOCK] = x + offset;
#         float* py [BLOCK] = y + offset;
#         // Simple element-wise control-flow for load/store operations can
#         // be achieved using the ternary operator `cond ? val_true : val_false`
#         // or the conditional dereferencing operator `*?(cond)ptr`
#         // Here, we make sure that we do not access memory out-of-bounds when we
#         // write-back `z`
#         bool check[BLOCK] = offset < N;
#         *?(check)pz = *?(check)px + *?(check)py;
#     }
#
# The existence of arrays as a primitive data-type for Triton comes with a number of advantages that are highlighted in the `MAPL'2019 Triton paper <http://www.eecs.harvard.edu/~htk/publication/2019-mapl-tillet-kung-cox.pdf>`_.
# %%
# Writing the Torch bindings
# --------------------------
# The only thing that matters when it comes to Triton and Torch is the `triton.kernel` class. This allows you to transform the above C-like function into a callable python object that can be used to modify `torch.tensor` objects.
#
# To create a `triton.kernel`, you only need three things:
#
# - `source: string`: the source-code of the kernel you want to create
# - `device: torch.device`: the device you want to compile this code for
# - `defines: dict`: the set of macros that you want the pre-processor to `#define` for you
import torch
import triton
# %%
# source-code for Triton compute kernel
# here we just copy-paste the above code without the extensive comments.
# you may prefer to store it in a .c file and load it from there instead.
_src = """
__global__ void add(float* z, float* x, float* y, int N){
    // program id
    int pid = get_program_id(0);
    // create arrays of pointers
    int offset[BLOCK] = pid * BLOCK + 0 ... BLOCK;
    float* pz[BLOCK] = z + offset;
    float* px[BLOCK] = x + offset;
    float* py[BLOCK] = y + offset;
    // bounds checking
    bool check[BLOCK] = offset < N;
    // write-back
    *?(check)pz = *?(check)px + *?(check)py;
}
"""
# %%
# This function returns a callable `triton.kernel` object
# created from the above source code.
# For portability, we maintain a cache of kernels for different `torch.device`
# We compile the kernel with -DBLOCK=1024
def make_add_kernel(device):
    cache = make_add_kernel.cache
    if device not in cache:
        defines = {'BLOCK': 1024}
        cache[device] = triton.kernel(_src, device=device, defines=defines)
    return cache[device]
make_add_kernel.cache = dict()
# %%
# This is a standard torch custom autograd Function
# The only difference is that we can now use the above kernel
# in the `forward` and `backward` functions.
class _add(torch.autograd.Function):
    @staticmethod
    def forward(ctx, x, y):
        # constraints of the op
        assert x.dtype == torch.float32
        # *allocate output*
        z = torch.empty_like(x)
        # *create launch grid*:
        # this is a function which takes compilation parameters `opt`
        # as input and returns a tuple of int (i.e., launch grid) for the kernel.
        # triton.cdiv is a shortcut for ceil division:
        # triton.cdiv(a, b) = (a + b - 1) // b
        N = z.shape[0]
        grid = lambda opt: (triton.cdiv(N, opt.BLOCK), )
        # *launch kernel*:
        # pointers to the data of torch tensors can be retrieved with
        # the `.data_ptr()` method
        kernel = make_add_kernel(z.device)
        kernel(z.data_ptr(), x.data_ptr(), y.data_ptr(), N, grid=grid)
        return z
# %%
# Just like with standard PyTorch ops,
# we use the `.apply` method to create a
# callable object for our function
add = _add.apply
# %%
# Writing a Unit Test
# --------------------------
torch.manual_seed(0)
x = torch.rand(98432, device='cuda')
y = torch.rand(98432, device='cuda')
za = x + y
zb = add(x, y)
print(za)
print(zb)
print(f'The maximum difference between torch and triton is ' f'{torch.max(torch.abs(za - zb))}')
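# A stricter sanity check (illustrative): assert that the two results agree to
# within floating-point tolerance; for float32 addition they should match exactly.
assert torch.allclose(za, zb)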
# %%
# Writing a Benchmark
# --------------------------
# We can now benchmark our custom op for vectors of increasing sizes to get a sense of how it does
warmup = 10
rep = 200
for N in [2**i for i in range(17, 26, 1)]:
    x = torch.rand(N, device='cuda')
    y = torch.rand(N, device='cuda')
    triton_ms = triton.testing.do_bench(lambda: add(x, y), warmup=warmup, rep=rep)
    torch_ms = triton.testing.do_bench(lambda: x + y, warmup=warmup, rep=rep)
    # print the performance of triton and torch (in milliseconds)
    print(f'{N} {triton_ms:.3f} {torch_ms:.3f}')
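# As an illustrative sketch, the achieved memory bandwidth can be estimated from
# the timings of the last problem size above: `z = x + y` reads x and y and writes z,
# i.e. it moves 3 * N * 4 bytes of float32 data per call.
gbps = lambda ms: 3 * N * x.element_size() * 1e-9 / (ms * 1e-3)
print(f'{N}: triton {gbps(triton_ms):.2f} GB/s, torch {gbps(torch_ms):.2f} GB/s')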

File diff suppressed because one or more lines are too long