Merge the `triton-mlir` branch — a complete rewrite of the backend from scratch (#1004)
This PR merges the `triton-mlir` branch, in which we have been quietly rewriting the Triton backend from scratch to increase maintainability, stability and ultimately performance. Changes to the runtime are minimal, and this new version aims to remain backward-compatible with the previous commit. The legacy backend is now officially deprecated, but can still be accessed via the `legacy-backend` tag. Co-authored-by: Keren Zhou <kerenzhou@openai.com> Co-authored-by: Yan Chunwei <yanchunwei@outlook.com> Co-authored-by: goostavz <109190422+goostavz@users.noreply.github.com> Co-authored-by: Shintaro Iwasaki <siwasaki@fb.com> Co-authored-by: Yan Da <dyanab@connect.ust.hk> Co-authored-by: Jun Yang <yangjunpro@gmail.com> Co-authored-by: Ian Bearman <ianb@microsoft.com> Co-authored-by: Jason Ansel <jansel@jansel.net> Co-authored-by: Qingyi Liu <qingyil@nvidia.com> Co-authored-by: ben-zhang-609 <110140741+ben-zhang-609@users.noreply.github.com> Co-authored-by: Chenggang Zhao <lyricz@yeah.net> Co-authored-by: ben-zhang-609 <benzh609@gmail.com> Co-authored-by: dongdongl <dongdongl@nvidia.com>
This commit is contained in:
67
lib/Analysis/Alias.cpp
Normal file
67
lib/Analysis/Alias.cpp
Normal file
@@ -0,0 +1,67 @@
|
||||
#include "triton/Analysis/Alias.h"
|
||||
#include "mlir/Dialect/Tensor/IR/Tensor.h"
|
||||
#include "triton/Analysis/Utility.h"
|
||||
#include "triton/Dialect/TritonGPU/IR/Dialect.h"
|
||||
|
||||
namespace mlir {
|
||||
|
||||
/// Lattice join: the union of the two operands' alias sets.
/// Joining equal values is the identity, so that case short-circuits.
AliasInfo AliasInfo::join(const AliasInfo &lhs, const AliasInfo &rhs) {
  if (lhs == rhs)
    return lhs;
  AliasInfo merged;
  // Accumulate the allocations tracked by both sides into the result.
  for (const AliasInfo *side : {&lhs, &rhs})
    for (auto alloc : side->allocs)
      merged.insert(alloc);
  return merged;
}
|
||||
|
||||
/// Transfer function of the alias analysis: derives the alias set of an
/// operation's results from the alias sets of its operands.
/// Falls back to the pessimistic fixpoint for any op we cannot reason about.
ChangeResult SharedMemoryAliasAnalysis::visitOperation(
    Operation *op, ArrayRef<LatticeElement<AliasInfo> *> operands) {
  AliasInfo aliasInfo;
  // Set to true once we have derived a precise (non-pessimistic) alias fact.
  bool resolved = false;

  if (maybeSharedAllocationOp(op)) {
    // These ops may allocate a new shared memory buffer.
    auto result = op->getResult(0);
    // FIXME(Keren): extract and insert are always alias for now
    if (isa<tensor::ExtractSliceOp, triton::TransOp>(op)) {
      // extract_slice %src — the result views the source buffer.
      aliasInfo = AliasInfo(operands[0]->getValue());
      resolved = true;
    } else if (isa<tensor::InsertSliceOp, triton::gpu::InsertSliceAsyncOp>(
                   op)) {
      // insert_slice_async %src, %dst, %index
      // insert_slice %src into %dst[%offsets]
      // Either way the result aliases the destination (operand 1).
      aliasInfo = AliasInfo(operands[1]->getValue());
      resolved = true;
    } else if (isSharedEncoding(result)) {
      // A shared-encoded result that is not a view: it aliases only itself.
      aliasInfo.insert(result);
      resolved = true;
    }
  }

  // Nothing precise could be derived — mark every result pessimistic.
  if (!resolved)
    return markAllPessimisticFixpoint(op->getResults());

  // Join the derived alias info into the lattice of each result.
  ChangeResult status = ChangeResult::NoChange;
  for (Value res : op->getResults())
    status |= getLatticeElement(res).join(aliasInfo);
  return status;
}
|
||||
|
||||
/// Queries whether two values may reference the same buffer.
/// TODO: implement — until then, conservatively report MayAlias for every
/// pair, which is always a sound (if imprecise) answer.
AliasResult SharedMemoryAliasAnalysis::alias(Value lhs, Value rhs) {
  return AliasResult::MayAlias;
}
|
||||
|
||||
/// Queries whether `op` reads and/or writes `location`.
/// TODO: implement — until then, conservatively report that the op both
/// modifies and references the location, which is always sound.
ModRefResult SharedMemoryAliasAnalysis::getModRef(Operation *op,
                                                  Value location) {
  return ModRefResult::getModAndRef();
}
|
||||
|
||||
} // namespace mlir
|
Reference in New Issue
Block a user