[DOCS] Correct spelling (#665)

This PR corrects spelling like #664 for Triton-MLIR. It should not break anything.
This commit is contained in:
Shintaro Iwasaki
2022-09-16 15:07:34 -07:00
committed by GitHub
parent e9e1a4e682
commit 13669b46a6
13 changed files with 21 additions and 21 deletions

View File

@@ -25,7 +25,7 @@
# LLVM_VERSION_STRING - Full LLVM version string (e.g. 6.0.0svn).
# LLVM_VERSION_BASE_STRING - Base LLVM version string without git/svn suffix (e.g. 6.0.0).
#
# Note: The variable names were chosen in conformance with the offical CMake
# Note: The variable names were chosen in conformance with the official CMake
# guidelines, see ${CMAKE_ROOT}/Modules/readme.txt.
# Try suffixed versions to pick up the newest LLVM install available on Debian

View File

@@ -14,7 +14,7 @@ Traditional compilers typically rely on intermediate representations, such as LL
Program Representation
+++++++++++++++++++++++
Polyhedral compilation is a vast area of research. In this section we only outline the most basic aspects of this topic, but readers interested in the solid mathematical foundations underneath may refer to the ample litterature on linear and integer programming.
Polyhedral compilation is a vast area of research. In this section we only outline the most basic aspects of this topic, but readers interested in the solid mathematical foundations underneath may refer to the ample literature on linear and integer programming.
.. table::
:widths: 50 50

View File

@@ -44,7 +44,7 @@ class PTXInstrExecution;
//
// builder.getAllMlirArgs() // get {pVal, iVal, jVal, kVal}
//
// To get the string containing all the contraints with "," seperated,
// To get the string containing all the constraints with "," separated,
// builder.getConstraints() // get "=r,r,k"
//
// PTXBuilder can build a PTX asm with multiple instructions, sample code:
@@ -107,10 +107,10 @@ struct PTXBuilder {
// Create a new operand. It will not add to operand list.
// @value: the MLIR value bind to this operand.
// @constraint: ASM operand constraint, e.g. "=r"
// @formater: extra format to represent this operand in ASM code, default is
// "%{0}".format(operand.idx).
// @formatter: extra format to represent this operand in ASM code, default is
// "%{0}".format(operand.idx).
Operand *newOperand(mlir::Value value, StringRef constraint,
std::function<std::string(int idx)> formater = nullptr);
std::function<std::string(int idx)> formatter = nullptr);
// Create a new operand which is written to, that is, the constraint starts
// with "=", e.g. "=r".

View File

@@ -20,10 +20,10 @@ std::string strJoin(llvm::ArrayRef<std::string> strs,
PTXInstr::Operand *
PTXBuilder::newOperand(mlir::Value value, StringRef constraint,
std::function<std::string(int)> formater) {
std::function<std::string(int)> formatter) {
argArchive.emplace_back(std::make_unique<Operand>(value, constraint));
auto *opr = argArchive.back().get();
opr->repr = formater;
opr->repr = formatter;
opr->idx = oprCounter++;
return opr;
}

View File

@@ -25,7 +25,7 @@ class LoopPipeliner {
/// cache forOp we are working on
scf::ForOp forOp;
/// cahce YieldOp for this forOp
/// cache YieldOp for this forOp
scf::YieldOp yieldOp;
/// loads to be pipelined

View File

@@ -103,7 +103,7 @@ static bool find_and_replace(std::string &str, const std::string &begin,
std::string path_to_ptxas(int &version) {
std::vector<std::string> rets;
std::string ret;
// search pathes for ptxas
// search paths for ptxas
std::vector<std::string> ptxas_prefixes = {"", "/usr/local/cuda/bin/"};
std::string triton_ptxas = tools::getenv("TRITON_PTXAS_PATH");
if (!triton_ptxas.empty())

View File

@@ -229,7 +229,7 @@ void parse_args(py::list &args, py::list do_not_specialize,
// copy param
std::memcpy(params_ptr, &value, 8);
params_ptr += 8;
// udpate cache key
// update cache key
cache_key += dtype_cache_key_part(arg.attr("dtype"));
cache_key += "*";
cache_key += "[multipleof(";
@@ -330,7 +330,7 @@ void parse_args(py::list &args, py::list &arg_names, std::string &params,
// copy param
std::memcpy(params_ptr, &value, 8);
params_ptr += 8;
// udpate cache key
// update cache key
continue;
}
// argument is `constexpr`

View File

@@ -53,7 +53,7 @@ def mangle_ty(ty):
elt = mangle_ty(ty.scalar)
shape = '_'.join(map(str, ty.shape))
return f'{elt}S{shape}S'
assert False, "Unsupport type"
assert False, "Unsupported type"
def mangle_fn(name, arg_tys, constants):
@@ -464,7 +464,7 @@ class CodeGenerator(ast.NodeVisitor):
with enter_sub_region(self) as sr:
liveins, insert_block = sr
# condtion (the before region)
# condition (the before region)
cond_block = self.builder.create_block()
self.builder.set_insertion_point_to_start(cond_block)
cond = self.visit(node.test)

View File

@@ -185,7 +185,7 @@ class dtype:
return builder.get_float_ty()
elif self.name == 'fp64':
return builder.get_double_ty()
raise ValueError(f'fail to covert {self} to ir type')
raise ValueError(f'fail to convert {self} to ir type')
def __str__(self):
return self.name
@@ -895,7 +895,7 @@ def where(condition, x, y, _builder=None):
Note that :code:`x` and :code:`y` are always evaluated regardless of the value of :code:`condition`.
If you want to avoid unintented memory operations, use the :code:`mask` arguments in `triton.load` and `triton.store` instead.
If you want to avoid unintended memory operations, use the :code:`mask` arguments in `triton.load` and `triton.store` instead.
The shape of :code:`x` and :code:`y` are both broadcast to the shape of :code:`condition`.
:code:`x` and :code:`y` must have the same data type.

View File

@@ -328,7 +328,7 @@ def dsd_lut(layout, block, step, trans, device):
# create increments
incs = torch.stack((B_incs, A_incs), dim=1).view(-1).contiguous()
# pad by a factor 2*MAX_NUM_STAGES
# to accomodate pre-fetching inside the kernel
# to accommodate pre-fetching inside the kernel
pad = torch.zeros(20, device=incs.device, dtype=incs.dtype)
incs = torch.cat((incs, pad))
# create lut

View File

@@ -104,7 +104,7 @@ def extract(file_path, fun):
# peek the next line
line = sass_lines[line_idx].decode()
# Print sass
# label naming convension: LBB#i
# label naming convention: LBB#i
for idx, (ctrl, asm) in enumerate(asm_buffer):
# Print label if this is BRA target
offset = idx * 16

View File

@@ -78,7 +78,7 @@ def softmax_kernel(
input_ptrs = row_start_ptr + col_offsets
# Load the row into SRAM, using a mask since BLOCK_SIZE may be greater than n_cols
row = tl.load(input_ptrs, mask=col_offsets < n_cols, other=-float('inf'))
# Substract maximum for numerical stability
# Subtract maximum for numerical stability
row_minus_max = row - tl.max(row, axis=0)
# Note that exponentials in Triton are fast but approximate (i.e., think __expf in CUDA)
numerator = tl.exp(row_minus_max)

View File

@@ -18,7 +18,7 @@ You will specifically learn about:
# They are notoriously hard to optimize, hence their implementation is generally done by
# hardware vendors themselves as part of so-called "kernel libraries" (e.g., cuBLAS).
# Unfortunately, these libraries are often proprietary and cannot be easily customized
# to accomodate the needs of modern deep learning workloads (e.g., fused activation functions).
# to accommodate the needs of modern deep learning workloads (e.g., fused activation functions).
# In this tutorial, you will learn how to implement efficient matrix multiplications by
# yourself with Triton, in a way that is easy to customize and extend.
#