This PR merges the `triton-mlir` branch, in which we have been quietly rewriting the Triton backend from scratch to increase maintainability, stability and ultimately performance. Changes to the runtime are minimal, and this new version aims to remain backward-compatible with the previous commit. The legacy backend is now officially deprecated, but can still be accessed via the `legacy-backend` tag. Co-authored-by: Keren Zhou <kerenzhou@openai.com> Co-authored-by: Yan Chunwei <yanchunwei@outlook.com> Co-authored-by: goostavz <109190422+goostavz@users.noreply.github.com> Co-authored-by: Shintaro Iwasaki <siwasaki@fb.com> Co-authored-by: Yan Da <dyanab@connect.ust.hk> Co-authored-by: Jun Yang <yangjunpro@gmail.com> Co-authored-by: Ian Bearman <ianb@microsoft.com> Co-authored-by: Jason Ansel <jansel@jansel.net> Co-authored-by: Qingyi Liu <qingyil@nvidia.com> Co-authored-by: ben-zhang-609 <110140741+ben-zhang-609@users.noreply.github.com> Co-authored-by: Chenggang Zhao <lyricz@yeah.net> Co-authored-by: ben-zhang-609 <benzh609@gmail.com> Co-authored-by: dongdongl <dongdongl@nvidia.com>
88 lines
2.4 KiB
TableGen
88 lines
2.4 KiB
TableGen
// Declarative (TableGen) definitions of the TritonGPU dialect transformation
// passes. Each `def` below registers a pass with mlir-tblgen's PassBase
// machinery; the corresponding C++ constructors (e.g.
// mlir::createTritonGPUPipelinePass) are declared elsewhere in the project.
#ifndef TRITONGPU_PASSES
#define TRITONGPU_PASSES

include "mlir/Pass/PassBase.td"
// Loop pipelining pass, registered as `tritongpu-pipeline` on ModuleOp.
def TritonGPUPipeline : Pass<"tritongpu-pipeline", "mlir::ModuleOp"> {
  let summary = "pipeline";

  let description = [{
    Unroll loops to hide global memory -> shared memory latency.
  }];

  let constructor = "mlir::createTritonGPUPipelinePass()";

  // The pass creates/rewrites ops from these dialects, so they must be
  // loaded before it runs.
  let dependentDialects = ["mlir::triton::gpu::TritonGPUDialect",
                           "mlir::scf::SCFDialect",
                           "mlir::arith::ArithmeticDialect"];

  let options = [
    // Depth of the software pipeline; default of 2 means double buffering.
    Option<"numStages", "num-stages",
           "int32_t", /*default*/"2",
           "number of pipeline stages">
  ];
}
// Operand prefetching pass, registered as `tritongpu-prefetch` on ModuleOp.
def TritonGPUPrefetch : Pass<"tritongpu-prefetch", "mlir::ModuleOp"> {
  let summary = "prefetch";

  let description = [{
    Prefetch operands (a and b) of tt.dot into shared memory to hide shared memory -> register latency.
  }];

  let constructor = "mlir::createTritonGPUPrefetchPass()";

  // Dialects whose ops this pass may create; must be preloaded.
  let dependentDialects = ["mlir::triton::gpu::TritonGPUDialect",
                           "mlir::scf::SCFDialect",
                           "mlir::arith::ArithmeticDialect"];
}
// Memory-access coalescing pass, registered as `tritongpu-coalesce` on
// ModuleOp. NOTE(review): the description below is still a placeholder in
// the original source — fill it in once the pass semantics are finalized.
def TritonGPUCoalesce: Pass<"tritongpu-coalesce", "mlir::ModuleOp"> {
  let summary = "coalesce";

  let description = [{
    TODO
  }];

  let constructor = "mlir::createTritonGPUCoalescePass()";

  let dependentDialects = ["mlir::triton::gpu::TritonGPUDialect"];
}
// Layout-conversion combining pass, registered as `tritongpu-combine` on
// ModuleOp. Simplifies chains of convert_layout ops per the rewrite rules
// shown in the description.
def TritonGPUCombineOps : Pass<"tritongpu-combine", "mlir::ModuleOp"> {
  let summary = "combine triton gpu ops";

  let description = [{
    convert_layout(convert_layout(%src, #LAYOUT_0), #LAYOUT_1) =>
    convert_layout(%src, #LAYOUT_1)

    convert_layout(%src, #LAYOUT) => %src if %src.layout() == #LAYOUT
  }];

  let constructor = "mlir::createTritonGPUCombineOpsPass()";

  let dependentDialects = ["mlir::triton::gpu::TritonGPUDialect",
                           "mlir::triton::TritonDialect"];

  let options = [
    // Target SM architecture (e.g. 80 == Ampere); rewrites may be gated on it.
    Option<"computeCapability", "compute-capability",
           "int32_t", /*default*/"80",
           "device compute capability">
  ];
}
// Loop canonicalization pass, registered as `tritongpu-canonicalize-loops`
// on ModuleOp. Supplements the upstream scf.ForOp canonicalizer.
def TritonGPUCanonicalizeLoops: Pass<"tritongpu-canonicalize-loops", "mlir::ModuleOp"> {
  let summary = "canonicalize scf.ForOp ops";

  let description = [{
    This implements some optimizations that are missing in the standard scf.ForOp
    canonicalizer.
  }];

  let constructor = "mlir::createTritonGPUCanonicalizeLoopsPass()";

  let dependentDialects = ["mlir::triton::gpu::TritonGPUDialect"];
}
#endif // TRITONGPU_PASSES