[Triton-MLIR] Add compute capability (#902)

Add the compute capability, passed from the Python frontend, to the backend compilation passes.

Co-authored-by: Keren Zhou <kerenzhou@openai.com>
This commit is contained in:
ben-zhang-609
2022-11-23 03:08:23 +08:00
committed by GitHub
parent 2afebcd79b
commit 07786dc932
7 changed files with 123 additions and 83 deletions

View File

@@ -1279,8 +1279,9 @@ void init_triton_ir(py::module &&m) {
self.addPass(mlir::createTritonGPUPrefetchPass());
})
.def("add_triton_gpu_combine_pass",
[](mlir::PassManager &self) {
self.addPass(mlir::createTritonGPUCombineOpsPass());
[](mlir::PassManager &self, int computeCapability) {
self.addPass(
mlir::createTritonGPUCombineOpsPass(computeCapability));
})
.def("add_triton_gpu_to_llvm",
[](mlir::PassManager &self) {
@@ -1301,7 +1302,7 @@ void init_triton_translation(py::module &m) {
m.def(
"translate_triton_gpu_to_llvmir",
[](mlir::ModuleOp op) {
[](mlir::ModuleOp op, int computeCapability) {
llvm::LLVMContext llvmContext;
auto llvmModule =
::mlir::triton::translateTritonGPUToLLVMIR(&llvmContext, op);