[BACKEND] Memory allocation (#33)

Keren Zhou, 2022-08-04 11:22:49 -07:00 (committed by GitHub)
parent b988bae813
commit a7b49b3227
7 changed files with 514 additions and 1 deletions


@@ -1,5 +1,6 @@
add_mlir_library(TritonTestAnalysis
  TestAxisInfo.cpp
  TestAllocation.cpp

  LINK_LIBS PUBLIC
  TritonAnalysis


@@ -0,0 +1,49 @@
#include "mlir/Pass/Pass.h"
#include "triton/Analysis/Allocation.h"
using namespace mlir;
namespace {
struct TestAllocationPass
: public PassWrapper<TestAllocationPass, OperationPass<FuncOp>> {
// LLVM15+
// MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(TestAllocationPass);
StringRef getArgument() const final { return "test-print-allocation"; }
StringRef getDescription() const final {
return "print the result of the allocation pass";
}
void runOnOperation() override {
Operation *operation = getOperation();
auto &os = llvm::errs();
os << "Testing: " << operation->getName() << "\n";
AllocationAnalysis analysis(operation);
operation->walk([&](Operation *op) {
if (op->getNumResults() < 1)
return;
for (Value result : op->getResults()) {
Type type = result.getType();
if (auto tensorType = type.dyn_cast<RankedTensorType>()) {
Attribute encoding = tensorType.getEncoding();
if (encoding.isa<triton::gpu::TritonGPUSharedEncodingAttr>()) {
size_t offset = analysis.getOffset(result);
size_t size = analysis.getAllocatedSize(result);
os << "offset = " << offset << ", size = " << size << "\n";
}
}
}
});
os << "size = " << analysis.getSharedMemorySize() << "\n";
}
};
} // namespace
namespace mlir {
namespace test {
void registerTestAllocationPass() { PassRegistration<TestAllocationPass>(); }
} // namespace test
} // namespace mlir
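
The pass above only becomes reachable from the command line once some opt-style driver calls `registerTestAllocationPass()`. The sketch below shows how such a driver might hook it up; it is a minimal illustration, not part of this diff, and the file layout, tool name, include path (which differs between LLVM releases), and dialect registration details are assumptions.

```cpp
// Hypothetical opt-style test driver wiring up the new pass.
#include "mlir/IR/Dialect.h"
#include "mlir/Support/MlirOptMain.h" // "mlir/Tools/mlir-opt/MlirOptMain.h" on newer LLVM

namespace mlir {
namespace test {
// Defined in TestAllocation.cpp above.
void registerTestAllocationPass();
} // namespace test
} // namespace mlir

int main(int argc, char **argv) {
  // Expose -test-print-allocation on the tool's command line.
  mlir::test::registerTestAllocationPass();

  mlir::DialectRegistry registry;
  // ... Triton and upstream dialects would be registered here ...

  return mlir::failed(mlir::MlirOptMain(argc, argv,
                                        "Triton test driver\n", registry))
             ? 1
             : 0;
}
```

With a driver like this, running the tool on a TritonGPU IR file with `-test-print-allocation` would print one `offset = ..., size = ...` line per shared-memory buffer and a final `size = ...` line with the total shared memory. Note that the output goes to stderr (`llvm::errs()`), so a FileCheck-based test would need to redirect it (e.g. `2>&1`).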