[DOCS] Correct spelling (#665)

This PR corrects spelling errors, similar to #664, for Triton-MLIR. It should not break anything.
Author: Shintaro Iwasaki
Date: 2022-09-16 15:07:34 -07:00
Committer: GitHub
Parent: e9e1a4e682
Commit: 13669b46a6
13 changed files with 21 additions and 21 deletions

@@ -229,7 +229,7 @@ void parse_args(py::list &args, py::list do_not_specialize,
// copy param
std::memcpy(params_ptr, &value, 8);
params_ptr += 8;
- // udpate cache key
+ // update cache key
cache_key += dtype_cache_key_part(arg.attr("dtype"));
cache_key += "*";
cache_key += "[multipleof(";
@@ -330,7 +330,7 @@ void parse_args(py::list &args, py::list &arg_names, std::string &params,
// copy param
std::memcpy(params_ptr, &value, 8);
params_ptr += 8;
- // udpate cache key
+ // update cache key
continue;
}
// argument is `constexpr`

@@ -53,7 +53,7 @@ def mangle_ty(ty):
elt = mangle_ty(ty.scalar)
shape = '_'.join(map(str, ty.shape))
return f'{elt}S{shape}S'
- assert False, "Unsupport type"
+ assert False, "Unsupported type"
def mangle_fn(name, arg_tys, constants):
@@ -464,7 +464,7 @@ class CodeGenerator(ast.NodeVisitor):
with enter_sub_region(self) as sr:
liveins, insert_block = sr
- # condtion (the before region)
+ # condition (the before region)
cond_block = self.builder.create_block()
self.builder.set_insertion_point_to_start(cond_block)
cond = self.visit(node.test)

@@ -185,7 +185,7 @@ class dtype:
return builder.get_float_ty()
elif self.name == 'fp64':
return builder.get_double_ty()
- raise ValueError(f'fail to covert {self} to ir type')
+ raise ValueError(f'fail to convert {self} to ir type')
def __str__(self):
return self.name
@@ -895,7 +895,7 @@ def where(condition, x, y, _builder=None):
Note that :code:`x` and :code:`y` are always evaluated regardless of the value of :code:`condition`.
- If you want to avoid unintented memory operations, use the :code:`mask` arguments in `triton.load` and `triton.store` instead.
+ If you want to avoid unintended memory operations, use the :code:`mask` arguments in `triton.load` and `triton.store` instead.
The shapes of :code:`x` and :code:`y` are both broadcast to the shape of :code:`condition`.
:code:`x` and :code:`y` must have the same data type.
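
As a side note on the docstring in this hunk (not part of the diff): `tl.where` evaluates both operands and only selects between them, whereas the `mask`/`other` arguments of `tl.load` keep masked-off lanes from touching memory at all. A minimal sketch of the distinction; the kernel name, shapes, and launch parameters below are illustrative, not from this commit.

import torch
import triton
import triton.language as tl

@triton.jit
def relu_kernel(x_ptr, out_ptr, n_elements, BLOCK_SIZE: tl.constexpr):
    offsets = tl.program_id(0) * BLOCK_SIZE + tl.arange(0, BLOCK_SIZE)
    mask = offsets < n_elements
    # mask/other guard the memory access itself: out-of-bounds lanes are never read
    x = tl.load(x_ptr + offsets, mask=mask, other=0.0)
    # tl.where only selects between two already-computed values (both are evaluated)
    y = tl.where(x > 0, x, 0.0)
    tl.store(out_ptr + offsets, y, mask=mask)

x = torch.randn(10000, device='cuda')
out = torch.empty_like(x)
relu_kernel[(triton.cdiv(x.numel(), 1024),)](x, out, x.numel(), BLOCK_SIZE=1024)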

@@ -328,7 +328,7 @@ def dsd_lut(layout, block, step, trans, device):
# create increments
incs = torch.stack((B_incs, A_incs), dim=1).view(-1).contiguous()
# pad by a factor 2*MAX_NUM_STAGES
- # to accomodate pre-fetching inside the kernel
+ # to accommodate pre-fetching inside the kernel
pad = torch.zeros(20, device=incs.device, dtype=incs.dtype)
incs = torch.cat((incs, pad))
# create lut
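
As a side note on the hunk above (not part of the diff), the stack-then-flatten idiom is what interleaves the B and A increments before padding. A tiny standalone check with made-up values:

import torch

B_incs = torch.tensor([10, 20, 30])
A_incs = torch.tensor([1, 2, 3])
# stacking along dim=1 and flattening alternates B- and A-increments,
# which is the layout the padded LUT above is built from
incs = torch.stack((B_incs, A_incs), dim=1).view(-1).contiguous()
print(incs)  # tensor([10,  1, 20,  2, 30,  3])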

@@ -104,7 +104,7 @@ def extract(file_path, fun):
# peek the next line
line = sass_lines[line_idx].decode()
# Print sass
- # label naming convension: LBB#i
+ # label naming convention: LBB#i
for idx, (ctrl, asm) in enumerate(asm_buffer):
# Print label if this is BRA target
offset = idx * 16

@@ -78,7 +78,7 @@ def softmax_kernel(
input_ptrs = row_start_ptr + col_offsets
# Load the row into SRAM, using a mask since BLOCK_SIZE may be > n_cols
row = tl.load(input_ptrs, mask=col_offsets < n_cols, other=-float('inf'))
- # Substract maximum for numerical stability
+ # Subtract maximum for numerical stability
row_minus_max = row - tl.max(row, axis=0)
# Note that exponentials in Triton are fast but approximate (i.e., think __expf in CUDA)
numerator = tl.exp(row_minus_max)
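
The comment fixed in this hunk refers to the standard numerical-stability trick for softmax. A small standalone check (not part of the diff, values made up) showing why the row maximum is subtracted before exponentiating:

import torch

row = torch.tensor([1000.0, 1001.0, 1002.0])
# naive softmax overflows: exp(1000.) is inf in fp32, and inf/inf gives nan
naive = torch.exp(row) / torch.exp(row).sum()
# subtracting the row max is mathematically a no-op for softmax,
# but keeps every exponent <= 0 so nothing overflows
stable = torch.exp(row - row.max()) / torch.exp(row - row.max()).sum()
print(naive)   # tensor([nan, nan, nan])
print(stable)  # tensor([0.0900, 0.2447, 0.6652])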

@@ -18,7 +18,7 @@ You will specifically learn about:
# They are notoriously hard to optimize, hence their implementation is generally done by
# hardware vendors themselves as part of so-called "kernel libraries" (e.g., cuBLAS).
# Unfortunately, these libraries are often proprietary and cannot be easily customized
- # to accomodate the needs of modern deep learning workloads (e.g., fused activation functions).
+ # to accommodate the needs of modern deep learning workloads (e.g., fused activation functions).
# In this tutorial, you will learn how to implement efficient matrix multiplications by
# yourself with Triton, in a way that is easy to customize and extend.
#