[GH-PAGES] Updated website
@@ -1,4 +1,4 @@
 # Sphinx build info version 1
 # This file hashes the configuration used when building these files. When it is not found, a full rebuild will be done.
-config: 732b7c9b5320d95c9044f60926224006
+config: 77765ce6577c8ba49ed171ea824b3394
 tags: 645f666f9bcd5a90fca523b33c5a78b7
@@ -24,11 +24,9 @@ def add_kernel(
     y_ptr,  # *Pointer* to second input vector
     output_ptr,  # *Pointer* to output vector
     n_elements,  # Size of the vector
-    time_start_ptr, time_end_ptr,
     BLOCK_SIZE: tl.constexpr,  # Number of elements each program should process
     # NOTE: `constexpr` so it can be used as a shape value
 ):
-    tl.atomic_min(time_start_ptr, tl.clock())
     # There are multiple 'program's processing different data. We identify which program
     # we are here
     pid = tl.program_id(axis=0)  # We use a 1D launch grid so axis is 0
@@ -47,7 +45,6 @@ def add_kernel(
     output = x + y
     # Write x + y back to DRAM
     tl.store(output_ptr + offsets, output, mask=mask)
-    tl.atomic_max(time_end_ptr, tl.clock())


 # %%
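The two hunks above remove a device-side timer from the kernel: every Triton program folded its start cycle count into `*time_start_ptr` via `tl.atomic_min` and its end cycle count into `*time_end_ptr` via `tl.atomic_max`, so that `time_end - time_start` approximated the kernel's span in GPU clock ticks. A hypothetical host-side readout for that instrumented kernel is sketched below; note that for `tl.atomic_min` to ever capture a start cycle, `time_start` would need to begin at a large sentinel rather than zero, and that `tl.clock()` reads per-SM cycle counters, not wall time:

```python
import torch

# Hypothetical readout for the instrumented kernel this diff removes.
# time_start needs a large initial value so tl.atomic_min can record the
# earliest start cycle; time_end can start at zero for tl.atomic_max.
time_start = torch.full((1,), torch.iinfo(torch.int64).max,
                        dtype=torch.int64, device='cuda')
time_end = torch.zeros(1, dtype=torch.int64, device='cuda')
# ... add_kernel[grid](x, y, output, n_elements, time_start, time_end,
#                      BLOCK_SIZE=1024) ...
torch.cuda.synchronize()  # the launch is asynchronous; wait before reading
elapsed_ticks = (time_end - time_start).item()  # GPU clock ticks, not seconds
```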
@@ -56,8 +53,6 @@ def add_kernel(


 def add(x: torch.Tensor, y: torch.Tensor):
-    time_start = torch.zeros(1, dtype=torch.int64, device='cuda')
-    time_end = torch.zeros(1, dtype=torch.int64, device='cuda')
     # We need to preallocate the output
     output = torch.empty_like(x)
     assert x.is_cuda and y.is_cuda and output.is_cuda
@@ -70,7 +65,7 @@ def add(x: torch.Tensor, y: torch.Tensor):
     # - each torch.tensor object is implicitly converted into a pointer to its first element.
     # - `triton.jit`'ed functions can be indexed with a launch grid to obtain a callable GPU kernel
     # - don't forget to pass meta-parameters as keyword arguments
-    add_kernel[grid](x, y, output, n_elements, time_start, time_end, BLOCK_SIZE=1024)
+    add_kernel[grid](x, y, output, n_elements, BLOCK_SIZE=1024)
     # We return a handle to z but, since `torch.cuda.synchronize()` hasn't been called, the kernel is still
     # running asynchronously at this point.
     return output
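With the in-kernel timers gone, timing happens on the host. A minimal sketch of host-side timing with PyTorch CUDA events (not part of this diff; the tutorial's own benchmark further down uses `triton.testing.do_bench` instead):

```python
import torch

def time_add_ms(x: torch.Tensor, y: torch.Tensor) -> float:
    # Wall time of one add() call in milliseconds, measured with CUDA events.
    # Assumes the add() helper from the diff above is in scope.
    start = torch.cuda.Event(enable_timing=True)
    end = torch.cuda.Event(enable_timing=True)
    torch.cuda.synchronize()  # drain pending work so only add() is timed
    start.record()
    add(x, y)                 # asynchronous launch of the Triton kernel
    end.record()
    torch.cuda.synchronize()  # wait for the kernel to finish
    return start.elapsed_time(end)
```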
@@ -33,7 +33,7 @@
 },
 "outputs": [],
 "source": [
-"import torch\n\nimport triton\nimport triton.language as tl\n\n\n@triton.jit\ndef add_kernel(\n    x_ptr,  # *Pointer* to first input vector\n    y_ptr,  # *Pointer* to second input vector\n    output_ptr,  # *Pointer* to output vector\n    n_elements,  # Size of the vector\n    time_start_ptr, time_end_ptr,\n    BLOCK_SIZE: tl.constexpr,  # Number of elements each program should process\n    # NOTE: `constexpr` so it can be used as a shape value\n):\n    tl.atomic_min(time_start_ptr, tl.clock())\n    # There are multiple 'program's processing different data. We identify which program\n    # we are here\n    pid = tl.program_id(axis=0)  # We use a 1D launch grid so axis is 0\n    # This program will process inputs that are offset from the initial data.\n    # for instance, if you had a vector of length 256 and block_size of 64, the programs\n    # would each access the elements [0:64, 64:128, 128:192, 192:256].\n    # Note that offsets is a list of pointers\n    block_start = pid * BLOCK_SIZE\n    offsets = block_start + tl.arange(0, BLOCK_SIZE)\n    # Create a mask to guard memory operations against out-of-bounds accesses\n    mask = offsets < n_elements\n    # Load x and y from DRAM, masking out any extra elements in case the input is not a\n    # multiple of the block size\n    x = tl.load(x_ptr + offsets, mask=mask)\n    y = tl.load(y_ptr + offsets, mask=mask)\n    output = x + y\n    # Write x + y back to DRAM\n    tl.store(output_ptr + offsets, output, mask=mask)\n    tl.atomic_max(time_end_ptr, tl.clock())"
+"import torch\n\nimport triton\nimport triton.language as tl\n\n\n@triton.jit\ndef add_kernel(\n    x_ptr,  # *Pointer* to first input vector\n    y_ptr,  # *Pointer* to second input vector\n    output_ptr,  # *Pointer* to output vector\n    n_elements,  # Size of the vector\n    BLOCK_SIZE: tl.constexpr,  # Number of elements each program should process\n    # NOTE: `constexpr` so it can be used as a shape value\n):\n    # There are multiple 'program's processing different data. We identify which program\n    # we are here\n    pid = tl.program_id(axis=0)  # We use a 1D launch grid so axis is 0\n    # This program will process inputs that are offset from the initial data.\n    # for instance, if you had a vector of length 256 and block_size of 64, the programs\n    # would each access the elements [0:64, 64:128, 128:192, 192:256].\n    # Note that offsets is a list of pointers\n    block_start = pid * BLOCK_SIZE\n    offsets = block_start + tl.arange(0, BLOCK_SIZE)\n    # Create a mask to guard memory operations against out-of-bounds accesses\n    mask = offsets < n_elements\n    # Load x and y from DRAM, masking out any extra elements in case the input is not a\n    # multiple of the block size\n    x = tl.load(x_ptr + offsets, mask=mask)\n    y = tl.load(y_ptr + offsets, mask=mask)\n    output = x + y\n    # Write x + y back to DRAM\n    tl.store(output_ptr + offsets, output, mask=mask)"
 ]
 },
 {
@@ -51,7 +51,7 @@
 },
 "outputs": [],
 "source": [
-"def add(x: torch.Tensor, y: torch.Tensor):\n    time_start = torch.zeros(1, dtype=torch.int64, device='cuda')\n    time_end = torch.zeros(1, dtype=torch.int64, device='cuda')\n    # We need to preallocate the output\n    output = torch.empty_like(x)\n    assert x.is_cuda and y.is_cuda and output.is_cuda\n    n_elements = output.numel()\n    # The SPMD launch grid denotes the number of kernel instances that run in parallel.\n    # It is analogous to CUDA launch grids. It can be either Tuple[int], or Callable(metaparameters) -> Tuple[int]\n    # In this case, we use a 1D grid where the size is the number of blocks\n    grid = lambda meta: (triton.cdiv(n_elements, meta['BLOCK_SIZE']),)\n    # NOTE:\n    # - each torch.tensor object is implicitly converted into a pointer to its first element.\n    # - `triton.jit`'ed functions can be indexed with a launch grid to obtain a callable GPU kernel\n    # - don't forget to pass meta-parameters as keyword arguments\n    add_kernel[grid](x, y, output, n_elements, time_start, time_end, BLOCK_SIZE=1024)\n    # We return a handle to z but, since `torch.cuda.synchronize()` hasn't been called, the kernel is still\n    # running asynchronously at this point.\n    return output"
+"def add(x: torch.Tensor, y: torch.Tensor):\n    # We need to preallocate the output\n    output = torch.empty_like(x)\n    assert x.is_cuda and y.is_cuda and output.is_cuda\n    n_elements = output.numel()\n    # The SPMD launch grid denotes the number of kernel instances that run in parallel.\n    # It is analogous to CUDA launch grids. It can be either Tuple[int], or Callable(metaparameters) -> Tuple[int]\n    # In this case, we use a 1D grid where the size is the number of blocks\n    grid = lambda meta: (triton.cdiv(n_elements, meta['BLOCK_SIZE']),)\n    # NOTE:\n    # - each torch.tensor object is implicitly converted into a pointer to its first element.\n    # - `triton.jit`'ed functions can be indexed with a launch grid to obtain a callable GPU kernel\n    # - don't forget to pass meta-parameters as keyword arguments\n    add_kernel[grid](x, y, output, n_elements, BLOCK_SIZE=1024)\n    # We return a handle to z but, since `torch.cuda.synchronize()` hasn't been called, the kernel is still\n    # running asynchronously at this point.\n    return output"
 ]
 },
 {
(binary) 8 generated plot images updated; before/after sizes: 29→24 KiB, 18→15 KiB, 37→36 KiB, 23→23 KiB, 53→58 KiB, 31→33 KiB, 33→33 KiB, 21→21 KiB
@@ -31,7 +31,7 @@ In this tutorial, you will write a simple vector addition using Triton and learn
 Compute Kernel
 --------------------------

-.. GENERATED FROM PYTHON SOURCE LINES 14-53
+.. GENERATED FROM PYTHON SOURCE LINES 14-50

 .. code-block:: default

@@ -48,11 +48,9 @@ Compute Kernel
     y_ptr,  # *Pointer* to second input vector
     output_ptr,  # *Pointer* to output vector
     n_elements,  # Size of the vector
-    time_start_ptr, time_end_ptr,
     BLOCK_SIZE: tl.constexpr,  # Number of elements each program should process
     # NOTE: `constexpr` so it can be used as a shape value
 ):
-    tl.atomic_min(time_start_ptr, tl.clock())
     # There are multiple 'program's processing different data. We identify which program
     # we are here
     pid = tl.program_id(axis=0)  # We use a 1D launch grid so axis is 0
@@ -71,7 +69,6 @@ Compute Kernel
     output = x + y
     # Write x + y back to DRAM
     tl.store(output_ptr + offsets, output, mask=mask)
-    tl.atomic_max(time_end_ptr, tl.clock())



@@ -81,20 +78,18 @@ Compute Kernel



-.. GENERATED FROM PYTHON SOURCE LINES 54-56
+.. GENERATED FROM PYTHON SOURCE LINES 51-53

 Let's also declare a helper function to (1) allocate the `z` tensor
 and (2) enqueue the above kernel with appropriate grid/block sizes.

-.. GENERATED FROM PYTHON SOURCE LINES 56-79
+.. GENERATED FROM PYTHON SOURCE LINES 53-74

 .. code-block:: default



 def add(x: torch.Tensor, y: torch.Tensor):
-    time_start = torch.zeros(1, dtype=torch.int64, device='cuda')
-    time_end = torch.zeros(1, dtype=torch.int64, device='cuda')
     # We need to preallocate the output
     output = torch.empty_like(x)
     assert x.is_cuda and y.is_cuda and output.is_cuda
@@ -107,7 +102,7 @@ and (2) enqueue the above kernel with appropriate grid/block sizes.
     # - each torch.tensor object is implicitly converted into a pointer to its first element.
     # - `triton.jit`'ed functions can be indexed with a launch grid to obtain a callable GPU kernel
     # - don't forget to pass meta-parameters as keyword arguments
-    add_kernel[grid](x, y, output, n_elements, time_start, time_end, BLOCK_SIZE=1024)
+    add_kernel[grid](x, y, output, n_elements, BLOCK_SIZE=1024)
     # We return a handle to z but, since `torch.cuda.synchronize()` hasn't been called, the kernel is still
     # running asynchronously at this point.
     return output
@@ -120,11 +115,11 @@ and (2) enqueue the above kernel with appropriate grid/block sizes.



-.. GENERATED FROM PYTHON SOURCE LINES 80-81
+.. GENERATED FROM PYTHON SOURCE LINES 75-76

 We can now use the above function to compute the element-wise sum of two `torch.tensor` objects and test its correctness:

-.. GENERATED FROM PYTHON SOURCE LINES 81-95
+.. GENERATED FROM PYTHON SOURCE LINES 76-90

 .. code-block:: default

@@ -159,11 +154,11 @@ We can now use the above function to compute the element-wise sum of two `torch.



-.. GENERATED FROM PYTHON SOURCE LINES 96-97
+.. GENERATED FROM PYTHON SOURCE LINES 91-92

 Seems like we're good to go!

-.. GENERATED FROM PYTHON SOURCE LINES 99-104
+.. GENERATED FROM PYTHON SOURCE LINES 94-99

 Benchmark
 -----------
@@ -171,7 +166,7 @@ We can now benchmark our custom op on vectors of increasing sizes to get a sense
 To make things easier, Triton has a set of built-in utilities that allow us to concisely plot the performance of your custom ops
 for different problem sizes.

-.. GENERATED FROM PYTHON SOURCE LINES 104-133
+.. GENERATED FROM PYTHON SOURCE LINES 99-128

 .. code-block:: default

@@ -211,12 +206,12 @@ for different problem sizes.



-.. GENERATED FROM PYTHON SOURCE LINES 134-136
+.. GENERATED FROM PYTHON SOURCE LINES 129-131

 We can now run the decorated function above. Pass `print_data=True` to see the performance numbers, `show_plots=True` to plot them, and/or
 `save_path='/path/to/results/'` to save them to disk along with raw CSV data

-.. GENERATED FROM PYTHON SOURCE LINES 136-137
+.. GENERATED FROM PYTHON SOURCE LINES 131-132

 .. code-block:: default

@@ -237,22 +232,22 @@ We can now run the decorated function above. Pass `print_data=True` to see the p

 vector-add-performance:
            size      Triton       Torch
-0        4096.0    4.800000    9.600000
-1        8192.0    9.600000   19.200000
-2       16384.0   19.200000   38.400001
-3       32768.0   34.909091   63.999998
-4       65536.0   69.818181  127.999995
-5      131072.0  139.636363  219.428568
-6      262144.0  219.428568  384.000001
-7      524288.0  361.411758  472.615390
-8     1048576.0  491.520012  614.400016
-9     2097152.0  599.414644  702.171410
-10    4194304.0  702.171410  780.190482
-11    8388608.0  774.047204  812.429770
-12   16777216.0  809.086412  833.084721
-13   33554432.0  829.569620  842.004273
-14   67108864.0  840.205105  848.362445
-15  134217728.0  846.080710  850.656574
+0        4096.0    9.600000    9.600000
+1        8192.0   19.200000   19.200000
+2       16384.0   38.400001   38.400001
+3       32768.0   63.999998   63.999998
+4       65536.0  127.999995  127.999995
+5      131072.0  219.428568  219.428568
+6      262144.0  341.333321  384.000001
+7      524288.0  472.615390  472.615390
+8     1048576.0  614.400016  614.400016
+9     2097152.0  722.823517  702.171410
+10    4194304.0  780.190482  780.190482
+11    8388608.0  812.429770  812.429770
+12   16777216.0  833.084721  833.084721
+13   33554432.0  842.004273  842.004273
+14   67108864.0  847.448255  848.362445
+15  134217728.0  849.737435  850.656574


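The columns are effective memory bandwidth in GB/s rather than FLOP/s: each output element reads `x` and `y` and writes `output`, i.e. 3 × 4 bytes per float32 element, so GB/s = 12 * size / (time in ms) * 1e-6. For reference, a sketch of the benchmark harness that produces this table, paraphrased from the tutorial (the exact `triton.testing.do_bench` return signature varies across Triton versions):

```python
import torch
import triton

@triton.testing.perf_report(
    triton.testing.Benchmark(
        x_names=['size'],                        # x-axis: vector length
        x_vals=[2 ** i for i in range(12, 28)],  # 4096 ... 134217728, as in the table
        x_log=True,
        line_arg='provider',
        line_vals=['triton', 'torch'],
        line_names=['Triton', 'Torch'],
        ylabel='GB/s',
        plot_name='vector-add-performance',
        args={},
    )
)
def benchmark(size, provider):
    x = torch.rand(size, device='cuda', dtype=torch.float32)
    y = torch.rand(size, device='cuda', dtype=torch.float32)
    if provider == 'torch':
        ms = triton.testing.do_bench(lambda: x + y)
    else:
        ms = triton.testing.do_bench(lambda: add(x, y))  # add() from the diff above
    # 2 loads + 1 store of 4 bytes per element = 12 * size bytes moved.
    return 12 * size / ms * 1e-6

benchmark.run(print_data=True, show_plots=True)
```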
@@ -260,7 +255,7 @@ We can now run the decorated function above. Pass `print_data=True` to see the p

 .. rst-class:: sphx-glr-timing

-   **Total running time of the script:** ( 1 minutes 42.289 seconds)
+   **Total running time of the script:** ( 1 minutes 49.775 seconds)


 .. _sphx_glr_download_getting-started_tutorials_01-vector-add.py:
@@ -278,17 +278,17 @@ We will then compare its performance against (1) :code:`torch.softmax` and (2) t

 softmax-performance:
          N      Triton  Torch (native)  Torch (jit)
-0     256.0  512.000001      546.133347   186.181817
-1     384.0  614.400016      558.545450   153.600004
-2     512.0  655.360017      585.142849   154.566038
-3     640.0  706.206879      640.000002   158.759699
-4     768.0  722.823517      664.216187   162.754967
+0     256.0  512.000001      546.133347   190.511628
+1     384.0  585.142862      585.142862   151.703707
+2     512.0  655.360017      585.142849   156.038096
+3     640.0  682.666684      640.000002   158.759699
+4     768.0  722.823517      646.736871   162.754967
 ..      ...         ...             ...          ...
-93  12160.0  815.765209      405.755985   199.038365
-94  12288.0  815.800825      416.101597   199.197579
-95  12416.0  814.163950      412.149375   198.854847
-96  12544.0  814.214963      413.183734   199.012395
-97  12672.0  814.265046      412.516771   199.167004
+93  12160.0  814.058574      405.755985   198.834951
+94  12288.0  815.800825      415.661740   198.995960
+95  12416.0  814.163950      412.149375   198.755369
+96  12544.0  814.214963      412.971190   198.864492
+97  12672.0  814.265046      412.097543   199.069228

 [98 rows x 4 columns]

@@ -306,7 +306,7 @@ In the above plot, we can see that:

 .. rst-class:: sphx-glr-timing

-   **Total running time of the script:** ( 3 minutes 28.192 seconds)
+   **Total running time of the script:** ( 3 minutes 28.816 seconds)


 .. _sphx_glr_download_getting-started_tutorials_02-fused-softmax.py:
@@ -459,36 +459,36 @@ We can now compare the performance of our kernel against that of cuBLAS. Here we
 matmul-performance:
         M     cuBLAS  ...     Triton  Triton (+ LeakyReLU)
 0    256.0   2.730667  ...   2.978909              2.978909
-1    384.0   7.372800  ...   8.507077              7.899428
+1    384.0   7.372800  ...   7.899428              7.899428
 2    512.0  14.563555  ...  15.420235             15.420235
 3    640.0  22.260869  ...  24.380953             24.380953
-4    768.0  31.597714  ...  34.028308             34.028308
+4    768.0  32.768000  ...  35.389441             34.028308
 5    896.0  37.971025  ...  40.140799             39.025776
-6   1024.0  49.932191  ...  53.773130             52.428801
-7   1152.0  43.911529  ...  47.396572             46.656000
-8   1280.0  50.567902  ...  56.888887             56.888887
-9   1408.0  63.392744  ...  68.147202             67.305878
-10  1536.0  79.526831  ...  79.526831             78.643199
-11  1664.0  62.061463  ...  62.492442             62.061463
-12  1792.0  71.588687  ...  62.096267             61.755076
-13  1920.0  67.764707  ...  69.818184             69.467336
-14  2048.0  72.005219  ...  76.608294             75.915006
-15  2176.0  81.472263  ...  85.632545             84.909907
-16  2304.0  67.289781  ...  76.319081             76.076024
-17  2432.0  69.713308  ...  83.119713             74.127872
-18  2560.0  76.382283  ...  80.709358             80.709358
-19  2688.0  82.823267  ...  89.254248             88.836198
-20  2816.0  82.602666  ...  82.759409             82.602666
-21  2944.0  81.034195  ...  82.373605             82.237674
-22  3072.0  81.005868  ...  88.335577             87.924073
-23  3200.0  83.769634  ...  95.665176             94.955488
-24  3328.0  82.275764  ...  84.695641             84.496824
-25  3456.0  80.864158  ...  90.994998             90.892410
-26  3584.0  86.291162  ...  98.808123             98.483450
-27  3712.0  84.658765  ...  88.876645             88.561477
-28  3840.0  83.781816  ...  92.313853             92.006659
-29  3968.0  92.302520  ...  91.472214             90.994735
-30  4096.0  93.271527  ...  92.563952             92.372834
+6   1024.0  49.932191  ...  53.773130             53.773130
+7   1152.0  45.242181  ...  48.161033             47.396572
+8   1280.0  51.200001  ...  57.690139             57.690139
+9   1408.0  64.138541  ...  69.009825             67.305878
+10  1536.0  80.430545  ...  80.430545             79.526831
+11  1664.0  62.929456  ...  63.372618             62.929456
+12  1792.0  72.983276  ...  63.142831             63.142831
+13  1920.0  68.776119  ...  71.257735             70.892307
+14  2048.0  73.584279  ...  78.398206             78.033565
+15  2176.0  83.155572  ...  87.115360             86.739860
+16  2304.0  68.446623  ...  77.558029             77.558029
+17  2432.0  71.305746  ...  75.930985             75.320281
+18  2560.0  77.833728  ...  82.125311             81.715711
+19  2688.0  83.552988  ...  90.966561             90.532356
+20  2816.0  81.369790  ...  83.873477             84.360174
+21  2944.0  81.832567  ...  83.617504             83.337844
+22  3072.0  79.750851  ...  89.451983             88.197981
+23  3200.0  79.503104  ...  96.822991             96.096095
+24  3328.0  83.034941  ...  82.558825             82.181847
+25  3456.0  81.849303  ...  91.407671             92.455926
+26  3584.0  87.211821  ...  91.750399             97.840469
+27  3712.0  85.896254  ...  86.341700             87.322855
+28  3840.0  80.313725  ...  85.597527             91.853823
+29  3968.0  86.174142  ...  84.154440             82.560175
+30  4096.0  89.240508  ...  84.201835             82.849652

 [31 rows x 5 columns]

@@ -498,7 +498,7 @@ We can now compare the performance of our kernel against that of cuBLAS. Here we

 .. rst-class:: sphx-glr-timing

-   **Total running time of the script:** ( 6 minutes 37.755 seconds)
+   **Total running time of the script:** ( 6 minutes 48.616 seconds)


 .. _sphx_glr_download_getting-started_tutorials_03-matrix-multiplication.py:
@@ -240,7 +240,7 @@ References

 .. rst-class:: sphx-glr-timing

-   **Total running time of the script:** ( 0 minutes 0.341 seconds)
+   **Total running time of the script:** ( 0 minutes 0.337 seconds)


 .. _sphx_glr_download_getting-started_tutorials_04-low-memory-dropout.py:
@@ -38,36 +38,36 @@ Layer Normalization

 layer-norm-backward:
           N      Triton       Torch        Apex
-0    1024.0  356.173905   98.303995  307.200008
-1    1536.0  396.387087  133.565214  341.333333
-2    2048.0  481.882362  160.627450  325.509933
-3    2560.0  451.764698  180.175950  321.675394
-4    3072.0  511.999982  189.046153  316.429186
-5    3584.0  547.872604  206.769233  308.301075
-6    4096.0  558.545450  218.939860  298.796351
-7    4608.0  491.520008  231.849059  286.507772
-8    5120.0  518.481012  240.469672  283.133649
-9    5632.0  532.157453  241.371422  288.204696
-10   6144.0  542.117638  249.502530  286.322318
-11   6656.0  532.479975  253.561895  284.242007
-12   7168.0  507.469040  254.109315  277.919225
-13   7680.0  486.332448  263.314295  280.547947
-14   8192.0  464.794337  263.903346  277.694924
-15   8704.0  406.412440  263.093202  280.774186
-16   9216.0  418.909088  270.065931  286.507772
-17   9728.0  427.604376  281.291575  289.667485
-18  10240.0  434.973455  284.115604  288.450695
-19  10752.0  423.724151  244.827326  289.291486
-20  11264.0  423.061049  242.019694  282.482755
-21  11776.0  417.465304  247.915800  287.219500
-22  12288.0  414.202242  252.601276  293.737063
-23  12800.0  410.146863  252.424003  288.993430
-24  13312.0  406.991092  252.759501  289.916513
-25  13824.0  404.112047  255.408777  291.031592
-26  14336.0  395.475867  251.692749  284.821192
-27  14848.0  383.174202  255.816222  287.612590
-28  15360.0  378.480483  259.058326  289.129401
-29  15872.0  369.832994  260.196726  288.800600
+0    1024.0  356.173905   99.497980  315.076934
+1    1536.0  409.599994  134.050910  344.523365
+2    2048.0  491.520012  159.067963  321.254900
+3    2560.0  461.954908  182.314537  325.079368
+4    3072.0  519.211251  191.501303  320.556515
+5    3584.0  554.941930  207.768111  309.410081
+6    4096.0  564.965515  220.907859  300.623865
+7    4608.0  500.416301  232.336141  287.251954
+8    5120.0  529.655159  243.809526  289.129408
+9    5632.0  540.671974  244.426754  291.310338
+10   6144.0  552.269672  251.202731  288.000001
+11   6656.0  534.260858  255.590406  286.279570
+12   7168.0  512.000004  253.734520  277.919225
+13   7680.0  487.619051  266.743841  284.884090
+14   8192.0  468.114289  258.354805  278.481578
+15   8704.0  415.300208  267.472468  285.377055
+16   9216.0  428.651187  272.394084  289.887291
+17   9728.0  438.033784  279.942444  288.950501
+18  10240.0  445.217381  287.102804  290.153487
+19  10752.0  427.231788  246.935876  289.941565
+20  11264.0  428.424741  245.536784  286.069848
+21  11776.0  418.702211  249.667843  288.981596
+22  12288.0  414.784810  254.453844  294.323369
+23  12800.0  410.695192  254.094291  288.180121
+24  13312.0  410.125805  252.559690  289.129403
+25  13824.0  404.604870  256.991469  291.799461
+26  14336.0  396.387109  255.809666  288.886653
+27  14848.0  386.498925  257.665934  288.777966
+28  15360.0  378.869469  258.513318  286.656296
+29  15872.0  372.000001  261.626369  290.562936


@@ -339,7 +339,7 @@ Layer Normalization

 .. rst-class:: sphx-glr-timing

-   **Total running time of the script:** ( 2 minutes 15.414 seconds)
+   **Total running time of the script:** ( 2 minutes 14.228 seconds)


 .. _sphx_glr_download_getting-started_tutorials_05-layer-norm.py:
@@ -5,16 +5,16 @@

 Computation times
 =================
-**14:03.991** total execution time for **getting-started_tutorials** files:
+**14:21.771** total execution time for **getting-started_tutorials** files:

 +---------------------------------------------------------------------------------------------------------+-----------+--------+
-| :ref:`sphx_glr_getting-started_tutorials_03-matrix-multiplication.py` (``03-matrix-multiplication.py``) | 06:37.755 | 0.0 MB |
+| :ref:`sphx_glr_getting-started_tutorials_03-matrix-multiplication.py` (``03-matrix-multiplication.py``) | 06:48.616 | 0.0 MB |
 +---------------------------------------------------------------------------------------------------------+-----------+--------+
-| :ref:`sphx_glr_getting-started_tutorials_02-fused-softmax.py` (``02-fused-softmax.py``)                 | 03:28.192 | 0.0 MB |
+| :ref:`sphx_glr_getting-started_tutorials_02-fused-softmax.py` (``02-fused-softmax.py``)                 | 03:28.816 | 0.0 MB |
 +---------------------------------------------------------------------------------------------------------+-----------+--------+
-| :ref:`sphx_glr_getting-started_tutorials_05-layer-norm.py` (``05-layer-norm.py``)                       | 02:15.414 | 0.0 MB |
+| :ref:`sphx_glr_getting-started_tutorials_05-layer-norm.py` (``05-layer-norm.py``)                       | 02:14.228 | 0.0 MB |
 +---------------------------------------------------------------------------------------------------------+-----------+--------+
-| :ref:`sphx_glr_getting-started_tutorials_01-vector-add.py` (``01-vector-add.py``)                       | 01:42.289 | 0.0 MB |
+| :ref:`sphx_glr_getting-started_tutorials_01-vector-add.py` (``01-vector-add.py``)                       | 01:49.775 | 0.0 MB |
 +---------------------------------------------------------------------------------------------------------+-----------+--------+
-| :ref:`sphx_glr_getting-started_tutorials_04-low-memory-dropout.py` (``04-low-memory-dropout.py``)       | 00:00.341 | 0.0 MB |
+| :ref:`sphx_glr_getting-started_tutorials_04-low-memory-dropout.py` (``04-low-memory-dropout.py``)       | 00:00.337 | 0.0 MB |
 +---------------------------------------------------------------------------------------------------------+-----------+--------+
@@ -214,11 +214,9 @@ to download the full example code</p>
 <span class="n">y_ptr</span><span class="p">,</span> <span class="c1"># *Pointer* to second input vector</span>
 <span class="n">output_ptr</span><span class="p">,</span> <span class="c1"># *Pointer* to output vector</span>
 <span class="n">n_elements</span><span class="p">,</span> <span class="c1"># Size of the vector</span>
-<span class="n">time_start_ptr</span><span class="p">,</span> <span class="n">time_end_ptr</span><span class="p">,</span>
 <span class="n">BLOCK_SIZE</span><span class="p">:</span> <span class="n">tl</span><span class="o">.</span><span class="n">constexpr</span><span class="p">,</span> <span class="c1"># Number of elements each program should process</span>
 <span class="c1"># NOTE: `constexpr` so it can be used as a shape value</span>
 <span class="p">):</span>
-<span class="n">tl</span><span class="o">.</span><span class="n">atomic_min</span><span class="p">(</span><span class="n">time_start_ptr</span><span class="p">,</span> <span class="n">tl</span><span class="o">.</span><span class="n">clock</span><span class="p">())</span>
 <span class="c1"># There are multiple 'program's processing different data. We identify which program</span>
 <span class="c1"># we are here</span>
 <span class="n">pid</span> <span class="o">=</span> <span class="n">tl</span><span class="o">.</span><span class="n">program_id</span><span class="p">(</span><span class="n">axis</span><span class="o">=</span><span class="mi">0</span><span class="p">)</span> <span class="c1"># We use a 1D launch grid so axis is 0</span>
@@ -237,14 +235,11 @@ to download the full example code</p>
 <span class="n">output</span> <span class="o">=</span> <span class="n">x</span> <span class="o">+</span> <span class="n">y</span>
 <span class="c1"># Write x + y back to DRAM</span>
 <span class="n">tl</span><span class="o">.</span><span class="n">store</span><span class="p">(</span><span class="n">output_ptr</span> <span class="o">+</span> <span class="n">offsets</span><span class="p">,</span> <span class="n">output</span><span class="p">,</span> <span class="n">mask</span><span class="o">=</span><span class="n">mask</span><span class="p">)</span>
-<span class="n">tl</span><span class="o">.</span><span class="n">atomic_max</span><span class="p">(</span><span class="n">time_end_ptr</span><span class="p">,</span> <span class="n">tl</span><span class="o">.</span><span class="n">clock</span><span class="p">())</span>
 </pre></div>
 </div>
 <p>Let’s also declare a helper function to (1) allocate the <cite>z</cite> tensor
 and (2) enqueue the above kernel with appropriate grid/block sizes.</p>
 <div class="highlight-default notranslate"><div class="highlight"><pre><span></span><span class="k">def</span> <span class="nf">add</span><span class="p">(</span><span class="n">x</span><span class="p">:</span> <span class="n">torch</span><span class="o">.</span><span class="n">Tensor</span><span class="p">,</span> <span class="n">y</span><span class="p">:</span> <span class="n">torch</span><span class="o">.</span><span class="n">Tensor</span><span class="p">):</span>
-<span class="n">time_start</span> <span class="o">=</span> <span class="n">torch</span><span class="o">.</span><span class="n">zeros</span><span class="p">(</span><span class="mi">1</span><span class="p">,</span> <span class="n">dtype</span><span class="o">=</span><span class="n">torch</span><span class="o">.</span><span class="n">int64</span><span class="p">,</span> <span class="n">device</span><span class="o">=</span><span class="s1">'cuda'</span><span class="p">)</span>
-<span class="n">time_end</span> <span class="o">=</span> <span class="n">torch</span><span class="o">.</span><span class="n">zeros</span><span class="p">(</span><span class="mi">1</span><span class="p">,</span> <span class="n">dtype</span><span class="o">=</span><span class="n">torch</span><span class="o">.</span><span class="n">int64</span><span class="p">,</span> <span class="n">device</span><span class="o">=</span><span class="s1">'cuda'</span><span class="p">)</span>
 <span class="c1"># We need to preallocate the output</span>
 <span class="n">output</span> <span class="o">=</span> <span class="n">torch</span><span class="o">.</span><span class="n">empty_like</span><span class="p">(</span><span class="n">x</span><span class="p">)</span>
 <span class="k">assert</span> <span class="n">x</span><span class="o">.</span><span class="n">is_cuda</span> <span class="ow">and</span> <span class="n">y</span><span class="o">.</span><span class="n">is_cuda</span> <span class="ow">and</span> <span class="n">output</span><span class="o">.</span><span class="n">is_cuda</span>
@@ -257,7 +252,7 @@ and (2) enqueue the above kernel with appropriate grid/block sizes.</p>
 <span class="c1"># - each torch.tensor object is implicitly converted into a pointer to its first element.</span>
 <span class="c1"># - `triton.jit`'ed functions can be indexed with a launch grid to obtain a callable GPU kernel</span>
 <span class="c1"># - don't forget to pass meta-parameters as keyword arguments</span>
-<span class="n">add_kernel</span><span class="p">[</span><span class="n">grid</span><span class="p">](</span><span class="n">x</span><span class="p">,</span> <span class="n">y</span><span class="p">,</span> <span class="n">output</span><span class="p">,</span> <span class="n">n_elements</span><span class="p">,</span> <span class="n">time_start</span><span class="p">,</span> <span class="n">time_end</span><span class="p">,</span> <span class="n">BLOCK_SIZE</span><span class="o">=</span><span class="mi">1024</span><span class="p">)</span>
+<span class="n">add_kernel</span><span class="p">[</span><span class="n">grid</span><span class="p">](</span><span class="n">x</span><span class="p">,</span> <span class="n">y</span><span class="p">,</span> <span class="n">output</span><span class="p">,</span> <span class="n">n_elements</span><span class="p">,</span> <span class="n">BLOCK_SIZE</span><span class="o">=</span><span class="mi">1024</span><span class="p">)</span>
 <span class="c1"># We return a handle to z but, since `torch.cuda.synchronize()` hasn't been called, the kernel is still</span>
 <span class="c1"># running asynchronously at this point.</span>
 <span class="k">return</span> <span class="n">output</span>
@@ -327,25 +322,25 @@ for different problem sizes.</p>
 <p class="sphx-glr-script-out">Out:</p>
 <div class="sphx-glr-script-out highlight-none notranslate"><div class="highlight"><pre><span></span>vector-add-performance:
            size      Triton       Torch
-0        4096.0    4.800000    9.600000
-1        8192.0    9.600000   19.200000
-2       16384.0   19.200000   38.400001
-3       32768.0   34.909091   63.999998
-4       65536.0   69.818181  127.999995
-5      131072.0  139.636363  219.428568
-6      262144.0  219.428568  384.000001
-7      524288.0  361.411758  472.615390
-8     1048576.0  491.520012  614.400016
-9     2097152.0  599.414644  702.171410
-10    4194304.0  702.171410  780.190482
-11    8388608.0  774.047204  812.429770
-12   16777216.0  809.086412  833.084721
-13   33554432.0  829.569620  842.004273
-14   67108864.0  840.205105  848.362445
-15  134217728.0  846.080710  850.656574
+0        4096.0    9.600000    9.600000
+1        8192.0   19.200000   19.200000
+2       16384.0   38.400001   38.400001
+3       32768.0   63.999998   63.999998
+4       65536.0  127.999995  127.999995
+5      131072.0  219.428568  219.428568
+6      262144.0  341.333321  384.000001
+7      524288.0  472.615390  472.615390
+8     1048576.0  614.400016  614.400016
+9     2097152.0  722.823517  702.171410
+10    4194304.0  780.190482  780.190482
+11    8388608.0  812.429770  812.429770
+12   16777216.0  833.084721  833.084721
+13   33554432.0  842.004273  842.004273
+14   67108864.0  847.448255  848.362445
+15  134217728.0  849.737435  850.656574
 </pre></div>
 </div>
-<p class="sphx-glr-timing"><strong>Total running time of the script:</strong> ( 1 minutes 42.289 seconds)</p>
+<p class="sphx-glr-timing"><strong>Total running time of the script:</strong> ( 1 minutes 49.775 seconds)</p>
 <div class="sphx-glr-footer class sphx-glr-footer-example docutils container" id="sphx-glr-download-getting-started-tutorials-01-vector-add-py">
 <div class="sphx-glr-download sphx-glr-download-python docutils container">
 <p><a class="reference download internal" download="" href="../../_downloads/62d97d49a32414049819dd8bb8378080/01-vector-add.py"><code class="xref download docutils literal notranslate"><span class="pre">Download</span> <span class="pre">Python</span> <span class="pre">source</span> <span class="pre">code:</span> <span class="pre">01-vector-add.py</span></code></a></p>
@@ -369,17 +369,17 @@ We will then compare its performance against (1) <code class="code docutils lite
 <p class="sphx-glr-script-out">Out:</p>
 <div class="sphx-glr-script-out highlight-none notranslate"><div class="highlight"><pre><span></span>softmax-performance:
          N      Triton  Torch (native)  Torch (jit)
-0     256.0  512.000001      546.133347   186.181817
-1     384.0  614.400016      558.545450   153.600004
-2     512.0  655.360017      585.142849   154.566038
-3     640.0  706.206879      640.000002   158.759699
-4     768.0  722.823517      664.216187   162.754967
+0     256.0  512.000001      546.133347   190.511628
+1     384.0  585.142862      585.142862   151.703707
+2     512.0  655.360017      585.142849   156.038096
+3     640.0  682.666684      640.000002   158.759699
+4     768.0  722.823517      646.736871   162.754967
 ..      ...         ...             ...          ...
-93  12160.0  815.765209      405.755985   199.038365
-94  12288.0  815.800825      416.101597   199.197579
-95  12416.0  814.163950      412.149375   198.854847
-96  12544.0  814.214963      413.183734   199.012395
-97  12672.0  814.265046      412.516771   199.167004
+93  12160.0  814.058574      405.755985   198.834951
+94  12288.0  815.800825      415.661740   198.995960
+95  12416.0  814.163950      412.149375   198.755369
+96  12544.0  814.214963      412.971190   198.864492
+97  12672.0  814.265046      412.097543   199.069228

 [98 rows x 4 columns]
 </pre></div>
@@ -392,7 +392,7 @@ We will then compare its performance against (1) <code class="code docutils lite
 Note however that the PyTorch <cite>softmax</cite> operation is more general and will work on tensors of any shape.</p></li>
 </ul>
 </div></blockquote>
-<p class="sphx-glr-timing"><strong>Total running time of the script:</strong> ( 3 minutes 28.192 seconds)</p>
+<p class="sphx-glr-timing"><strong>Total running time of the script:</strong> ( 3 minutes 28.816 seconds)</p>
 <div class="sphx-glr-footer class sphx-glr-footer-example docutils container" id="sphx-glr-download-getting-started-tutorials-02-fused-softmax-py">
 <div class="sphx-glr-download sphx-glr-download-python docutils container">
 <p><a class="reference download internal" download="" href="../../_downloads/d91442ac2982c4e0cc3ab0f43534afbc/02-fused-softmax.py"><code class="xref download docutils literal notranslate"><span class="pre">Download</span> <span class="pre">Python</span> <span class="pre">source</span> <span class="pre">code:</span> <span class="pre">02-fused-softmax.py</span></code></a></p>
@@ -565,41 +565,41 @@ torch_output=tensor([[ 1.1045, -36.9688, 31.4688, ..., -11.3906, 24.4531, -3
 <div class="sphx-glr-script-out highlight-none notranslate"><div class="highlight"><pre><span></span>matmul-performance:
         M     cuBLAS  ...     Triton  Triton (+ LeakyReLU)
 0    256.0   2.730667  ...   2.978909              2.978909
-1    384.0   7.372800  ...   8.507077              7.899428
+1    384.0   7.372800  ...   7.899428              7.899428
 2    512.0  14.563555  ...  15.420235             15.420235
 3    640.0  22.260869  ...  24.380953             24.380953
-4    768.0  31.597714  ...  34.028308             34.028308
+4    768.0  32.768000  ...  35.389441             34.028308
 5    896.0  37.971025  ...  40.140799             39.025776
-6   1024.0  49.932191  ...  53.773130             52.428801
-7   1152.0  43.911529  ...  47.396572             46.656000
-8   1280.0  50.567902  ...  56.888887             56.888887
-9   1408.0  63.392744  ...  68.147202             67.305878
-10  1536.0  79.526831  ...  79.526831             78.643199
-11  1664.0  62.061463  ...  62.492442             62.061463
-12  1792.0  71.588687  ...  62.096267             61.755076
-13  1920.0  67.764707  ...  69.818184             69.467336
-14  2048.0  72.005219  ...  76.608294             75.915006
-15  2176.0  81.472263  ...  85.632545             84.909907
-16  2304.0  67.289781  ...  76.319081             76.076024
-17  2432.0  69.713308  ...  83.119713             74.127872
-18  2560.0  76.382283  ...  80.709358             80.709358
-19  2688.0  82.823267  ...  89.254248             88.836198
-20  2816.0  82.602666  ...  82.759409             82.602666
-21  2944.0  81.034195  ...  82.373605             82.237674
-22  3072.0  81.005868  ...  88.335577             87.924073
-23  3200.0  83.769634  ...  95.665176             94.955488
-24  3328.0  82.275764  ...  84.695641             84.496824
-25  3456.0  80.864158  ...  90.994998             90.892410
-26  3584.0  86.291162  ...  98.808123             98.483450
-27  3712.0  84.658765  ...  88.876645             88.561477
-28  3840.0  83.781816  ...  92.313853             92.006659
-29  3968.0  92.302520  ...  91.472214             90.994735
-30  4096.0  93.271527  ...  92.563952             92.372834
+6   1024.0  49.932191  ...  53.773130             53.773130
+7   1152.0  45.242181  ...  48.161033             47.396572
+8   1280.0  51.200001  ...  57.690139             57.690139
+9   1408.0  64.138541  ...  69.009825             67.305878
+10  1536.0  80.430545  ...  80.430545             79.526831
+11  1664.0  62.929456  ...  63.372618             62.929456
+12  1792.0  72.983276  ...  63.142831             63.142831
+13  1920.0  68.776119  ...  71.257735             70.892307
+14  2048.0  73.584279  ...  78.398206             78.033565
+15  2176.0  83.155572  ...  87.115360             86.739860
+16  2304.0  68.446623  ...  77.558029             77.558029
+17  2432.0  71.305746  ...  75.930985             75.320281
+18  2560.0  77.833728  ...  82.125311             81.715711
+19  2688.0  83.552988  ...  90.966561             90.532356
+20  2816.0  81.369790  ...  83.873477             84.360174
+21  2944.0  81.832567  ...  83.617504             83.337844
+22  3072.0  79.750851  ...  89.451983             88.197981
+23  3200.0  79.503104  ...  96.822991             96.096095
+24  3328.0  83.034941  ...  82.558825             82.181847
+25  3456.0  81.849303  ...  91.407671             92.455926
+26  3584.0  87.211821  ...  91.750399             97.840469
+27  3712.0  85.896254  ...  86.341700             87.322855
+28  3840.0  80.313725  ...  85.597527             91.853823
+29  3968.0  86.174142  ...  84.154440             82.560175
+30  4096.0  89.240508  ...  84.201835             82.849652

 [31 rows x 5 columns]
 </pre></div>
 </div>
-<p class="sphx-glr-timing"><strong>Total running time of the script:</strong> ( 6 minutes 37.755 seconds)</p>
+<p class="sphx-glr-timing"><strong>Total running time of the script:</strong> ( 6 minutes 48.616 seconds)</p>
 <div class="sphx-glr-footer class sphx-glr-footer-example docutils container" id="sphx-glr-download-getting-started-tutorials-03-matrix-multiplication-py">
 <div class="sphx-glr-download sphx-glr-download-python docutils container">
 <p><a class="reference download internal" download="" href="../../_downloads/d5fee5b55a64e47f1b5724ec39adf171/03-matrix-multiplication.py"><code class="xref download docutils literal notranslate"><span class="pre">Download</span> <span class="pre">Python</span> <span class="pre">source</span> <span class="pre">code:</span> <span class="pre">03-matrix-multiplication.py</span></code></a></p>
@@ -372,7 +372,7 @@ to explore the <cite>triton/language/random</cite> folder!</p>
 <dd><p>Nitish Srivastava and Geoffrey Hinton and Alex Krizhevsky and Ilya Sutskever and Ruslan Salakhutdinov, “Dropout: A Simple Way to Prevent Neural Networks from Overfitting”, JMLR 2014</p>
 </dd>
 </dl>
-<p class="sphx-glr-timing"><strong>Total running time of the script:</strong> ( 0 minutes 0.341 seconds)</p>
+<p class="sphx-glr-timing"><strong>Total running time of the script:</strong> ( 0 minutes 0.337 seconds)</p>
 <div class="sphx-glr-footer class sphx-glr-footer-example docutils container" id="sphx-glr-download-getting-started-tutorials-04-low-memory-dropout-py">
 <div class="sphx-glr-download sphx-glr-download-python docutils container">
 <p><a class="reference download internal" download="" href="../../_downloads/c9aed78977a4c05741d675a38dde3d7d/04-low-memory-dropout.py"><code class="xref download docutils literal notranslate"><span class="pre">Download</span> <span class="pre">Python</span> <span class="pre">source</span> <span class="pre">code:</span> <span class="pre">04-low-memory-dropout.py</span></code></a></p>
@@ -194,36 +194,36 @@ to download the full example code</p>
 <p class="sphx-glr-script-out">Out:</p>
 <div class="sphx-glr-script-out highlight-none notranslate"><div class="highlight"><pre><span></span>layer-norm-backward:
           N      Triton       Torch        Apex
-0    1024.0  356.173905   98.303995  307.200008
-1    1536.0  396.387087  133.565214  341.333333
-2    2048.0  481.882362  160.627450  325.509933
-3    2560.0  451.764698  180.175950  321.675394
-4    3072.0  511.999982  189.046153  316.429186
-5    3584.0  547.872604  206.769233  308.301075
-6    4096.0  558.545450  218.939860  298.796351
-7    4608.0  491.520008  231.849059  286.507772
-8    5120.0  518.481012  240.469672  283.133649
-9    5632.0  532.157453  241.371422  288.204696
-10   6144.0  542.117638  249.502530  286.322318
-11   6656.0  532.479975  253.561895  284.242007
-12   7168.0  507.469040  254.109315  277.919225
-13   7680.0  486.332448  263.314295  280.547947
-14   8192.0  464.794337  263.903346  277.694924
-15   8704.0  406.412440  263.093202  280.774186
-16   9216.0  418.909088  270.065931  286.507772
-17   9728.0  427.604376  281.291575  289.667485
-18  10240.0  434.973455  284.115604  288.450695
-19  10752.0  423.724151  244.827326  289.291486
-20  11264.0  423.061049  242.019694  282.482755
-21  11776.0  417.465304  247.915800  287.219500
-22  12288.0  414.202242  252.601276  293.737063
-23  12800.0  410.146863  252.424003  288.993430
-24  13312.0  406.991092  252.759501  289.916513
-25  13824.0  404.112047  255.408777  291.031592
-26  14336.0  395.475867  251.692749  284.821192
-27  14848.0  383.174202  255.816222  287.612590
-28  15360.0  378.480483  259.058326  289.129401
-29  15872.0  369.832994  260.196726  288.800600
+0    1024.0  356.173905   99.497980  315.076934
+1    1536.0  409.599994  134.050910  344.523365
+2    2048.0  491.520012  159.067963  321.254900
+3    2560.0  461.954908  182.314537  325.079368
+4    3072.0  519.211251  191.501303  320.556515
+5    3584.0  554.941930  207.768111  309.410081
+6    4096.0  564.965515  220.907859  300.623865
+7    4608.0  500.416301  232.336141  287.251954
+8    5120.0  529.655159  243.809526  289.129408
+9    5632.0  540.671974  244.426754  291.310338
+10   6144.0  552.269672  251.202731  288.000001
+11   6656.0  534.260858  255.590406  286.279570
+12   7168.0  512.000004  253.734520  277.919225
+13   7680.0  487.619051  266.743841  284.884090
+14   8192.0  468.114289  258.354805  278.481578
+15   8704.0  415.300208  267.472468  285.377055
+16   9216.0  428.651187  272.394084  289.887291
+17   9728.0  438.033784  279.942444  288.950501
+18  10240.0  445.217381  287.102804  290.153487
+19  10752.0  427.231788  246.935876  289.941565
+20  11264.0  428.424741  245.536784  286.069848
+21  11776.0  418.702211  249.667843  288.981596
+22  12288.0  414.784810  254.453844  294.323369
+23  12800.0  410.695192  254.094291  288.180121
+24  13312.0  410.125805  252.559690  289.129403
+25  13824.0  404.604870  256.991469  291.799461
+26  14336.0  396.387109  255.809666  288.886653
+27  14848.0  386.498925  257.665934  288.777966
+28  15360.0  378.869469  258.513318  286.656296
+29  15872.0  372.000001  261.626369  290.562936
 </pre></div>
 </div>
 <div class="line-block">
@@ -487,7 +487,7 @@ to download the full example code</p>
|
|||||||
<span class="n">bench_layer_norm</span><span class="o">.</span><span class="n">run</span><span class="p">(</span><span class="n">save_path</span><span class="o">=</span><span class="s1">'.'</span><span class="p">,</span> <span class="n">print_data</span><span class="o">=</span><span class="kc">True</span><span class="p">)</span>
|
<span class="n">bench_layer_norm</span><span class="o">.</span><span class="n">run</span><span class="p">(</span><span class="n">save_path</span><span class="o">=</span><span class="s1">'.'</span><span class="p">,</span> <span class="n">print_data</span><span class="o">=</span><span class="kc">True</span><span class="p">)</span>
|
||||||
</pre></div>
|
</pre></div>
|
||||||
</div>
|
</div>
|
||||||
<p class="sphx-glr-timing"><strong>Total running time of the script:</strong> ( 2 minutes 15.414 seconds)</p>
|
<p class="sphx-glr-timing"><strong>Total running time of the script:</strong> ( 2 minutes 14.228 seconds)</p>
|
||||||
<div class="sphx-glr-footer class sphx-glr-footer-example docutils container" id="sphx-glr-download-getting-started-tutorials-05-layer-norm-py">
|
<div class="sphx-glr-footer class sphx-glr-footer-example docutils container" id="sphx-glr-download-getting-started-tutorials-05-layer-norm-py">
|
||||||
<div class="sphx-glr-download sphx-glr-download-python docutils container">
|
<div class="sphx-glr-download sphx-glr-download-python docutils container">
|
||||||
<p><a class="reference download internal" download="" href="../../_downloads/935c0dd0fbeb4b2e69588471cbb2d4b2/05-layer-norm.py"><code class="xref download docutils literal notranslate"><span class="pre">Download</span> <span class="pre">Python</span> <span class="pre">source</span> <span class="pre">code:</span> <span class="pre">05-layer-norm.py</span></code></a></p>
|
<p><a class="reference download internal" download="" href="../../_downloads/935c0dd0fbeb4b2e69588471cbb2d4b2/05-layer-norm.py"><code class="xref download docutils literal notranslate"><span class="pre">Download</span> <span class="pre">Python</span> <span class="pre">source</span> <span class="pre">code:</span> <span class="pre">05-layer-norm.py</span></code></a></p>
|
||||||
|
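The layer-norm-backward numbers in the hunk above (GB/s, higher is better) come from the `bench_layer_norm.run(save_path='.', print_data=True)` call visible in the same page: only the re-measured timings changed between builds, not the page structure. As a minimal sketch of how such a table is typically declared with `triton.testing.perf_report` — assuming the `triton.testing` API of this era, with `layer_norm` as a hypothetical stand-in for the tutorial's autograd-wrapped Triton kernel, and the Apex column omitted to keep the sketch self-contained:

import torch
import triton


def layer_norm(x, normalized_shape, w, b, eps):
    # HYPOTHETICAL stand-in: in the real tutorial this is a
    # torch.autograd.Function backed by Triton forward/backward kernels;
    # the PyTorch op is used here only so the sketch runs as-is.
    return torch.nn.functional.layer_norm(x, normalized_shape, w, b, eps)


@triton.testing.perf_report(
    triton.testing.Benchmark(
        x_names=['N'],                           # feature dimension swept on the x-axis
        x_vals=[512 * i for i in range(2, 32)],  # 1024, 1536, ..., 15872 — matching the rows above
        line_arg='provider',                     # one line (table column) per provider
        line_vals=['triton', 'torch'],
        line_names=['Triton', 'Torch'],
        styles=[('blue', '-'), ('green', '-')],
        ylabel='GB/s',
        plot_name='layer-norm-backward',
        args={'M': 4096, 'dtype': torch.float16},
    )
)
def bench_layer_norm(M, N, dtype, provider, eps=1e-5, device='cuda'):
    # One measurement per (N, provider) cell of the table.
    x = torch.randn(M, N, dtype=dtype, device=device, requires_grad=True)
    w = torch.randn(N, dtype=dtype, device=device, requires_grad=True)
    b = torch.randn(N, dtype=dtype, device=device, requires_grad=True)
    dy = torch.randn_like(x)
    fwd = layer_norm if provider == 'triton' else torch.nn.functional.layer_norm
    y = fwd(x, (N,), w, b, eps)
    # NOTE: do_bench's exact return value (mean ms vs. a quantile tuple)
    # varies across Triton versions; a single float is assumed here.
    ms = triton.testing.do_bench(lambda: y.backward(dy, retain_graph=True))
    # Rough effective bandwidth: ~3 tensors of M*N elements traverse DRAM
    # (dy read, x read, dx written); bytes / ms * 1e-6 gives GB/s.
    return 3 * x.numel() * x.element_size() / ms * 1e-6


bench_layer_norm.run(save_path='.', print_data=True)

With this shape of benchmark, `run(print_data=True)` prints exactly the pandas frame embedded in the HTML above, which is why consecutive gh-pages builds diff only in the measured values.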
@@ -174,7 +174,7 @@
 
 <div class="section" id="computation-times">
 <span id="sphx-glr-getting-started-tutorials-sg-execution-times"></span><h1>Computation times<a class="headerlink" href="#computation-times" title="Permalink to this headline">¶</a></h1>
-<p><strong>14:03.991</strong> total execution time for <strong>getting-started_tutorials</strong> files:</p>
+<p><strong>14:21.771</strong> total execution time for <strong>getting-started_tutorials</strong> files:</p>
 <table class="docutils align-default">
 <colgroup>
 <col style="width: 85%" />
@@ -183,23 +183,23 @@
 </colgroup>
 <tbody>
 <tr class="row-odd"><td><p><a class="reference internal" href="03-matrix-multiplication.html#sphx-glr-getting-started-tutorials-03-matrix-multiplication-py"><span class="std std-ref">Matrix Multiplication</span></a> (<code class="docutils literal notranslate"><span class="pre">03-matrix-multiplication.py</span></code>)</p></td>
-<td><p>06:37.755</p></td>
+<td><p>06:48.616</p></td>
 <td><p>0.0 MB</p></td>
 </tr>
 <tr class="row-even"><td><p><a class="reference internal" href="02-fused-softmax.html#sphx-glr-getting-started-tutorials-02-fused-softmax-py"><span class="std std-ref">Fused Softmax</span></a> (<code class="docutils literal notranslate"><span class="pre">02-fused-softmax.py</span></code>)</p></td>
-<td><p>03:28.192</p></td>
+<td><p>03:28.816</p></td>
 <td><p>0.0 MB</p></td>
 </tr>
 <tr class="row-odd"><td><p><a class="reference internal" href="05-layer-norm.html#sphx-glr-getting-started-tutorials-05-layer-norm-py"><span class="std std-ref">Layer Normalization</span></a> (<code class="docutils literal notranslate"><span class="pre">05-layer-norm.py</span></code>)</p></td>
-<td><p>02:15.414</p></td>
+<td><p>02:14.228</p></td>
 <td><p>0.0 MB</p></td>
 </tr>
 <tr class="row-even"><td><p><a class="reference internal" href="01-vector-add.html#sphx-glr-getting-started-tutorials-01-vector-add-py"><span class="std std-ref">Vector Addition</span></a> (<code class="docutils literal notranslate"><span class="pre">01-vector-add.py</span></code>)</p></td>
-<td><p>01:42.289</p></td>
+<td><p>01:49.775</p></td>
 <td><p>0.0 MB</p></td>
 </tr>
 <tr class="row-odd"><td><p><a class="reference internal" href="04-low-memory-dropout.html#sphx-glr-getting-started-tutorials-04-low-memory-dropout-py"><span class="std std-ref">Low-Memory Dropout</span></a> (<code class="docutils literal notranslate"><span class="pre">04-low-memory-dropout.py</span></code>)</p></td>
-<td><p>00:00.341</p></td>
+<td><p>00:00.337</p></td>
 <td><p>0.0 MB</p></td>
 </tr>
 </tbody>
@@ -1,4 +1,4 @@
 # Sphinx build info version 1
 # This file hashes the configuration used when building these files. When it is not found, a full rebuild will be done.
-config: c5a977b958bf2778dc2c7ac5e05e79b9
+config: f78bd8aa52783d67cb29c057c8d17265
 tags: 645f666f9bcd5a90fca523b33c5a78b7