#include "triton/Dialect/TritonGPU/Transforms/TritonGPUConversion.h" #include "triton/Dialect/Triton/IR/Dialect.h" #include "triton/Dialect/TritonGPU/IR/Dialect.h" #include using namespace mlir; // // TypeConverter // TritonGPUTypeConverter::TritonGPUTypeConverter(MLIRContext *context, int numThreads) : context(context), numThreads(numThreads) { // TODO: how does MLIR pick the right conversion? addConversion([](Type type) { return type; }); addConversion([this](RankedTensorType tensorType) -> RankedTensorType { MLIRContext *context = this->context; int numThreads = this->numThreads; llvm::ArrayRef shape = tensorType.getShape(); Type elementType = tensorType.getElementType(); int64_t rank = tensorType.getRank(); int64_t numElements = tensorType.getNumElements(); // TODO: are there any better ways to raise this error? if (!(numElements >= numThreads)) { SmallVector buffer; llvm::raw_svector_ostream os(buffer); os << tensorType << " has " << numElements << " numElements " << " smaller than numThreads (" << numThreads << ")\n" << "consider using smaller num-warps\n"; llvm::report_fatal_error(os.str()); } assert(numElements % numThreads == 0); // or assert no encoding? // Now we assume: // contiguous = 1, order = 0, 1, 2, ..., llvm::SmallVector threadTileSize(rank, 1); // naive layout llvm::SmallVector warpTileSize(rank, 1); llvm::SmallVector blockTileSize(rank); llvm::SmallVector order(rank); int remainingThreads = numThreads; int remainingLanes = /*warp size*/32; for (int64_t dim = 0; dim < rank; ++dim) { blockTileSize[dim] = std::clamp(remainingThreads, 1, int(shape[dim])); warpTileSize[dim] = std::clamp(remainingLanes, 1, int(shape[dim])); order[dim] = dim; remainingThreads /= blockTileSize[dim]; remainingLanes /= warpTileSize[dim]; // TODO: will we need repetition? } Attribute encoding = triton::gpu::TritonGPUBlockedEncodingAttr::get( context, threadTileSize, warpTileSize, blockTileSize, order); return RankedTensorType::get(shape, elementType, encoding); }); // // materailizations // // This will be called when (newArgType != origArgType) // This will create newArg, and map(origArg, newArg) addArgumentMaterialization([&](OpBuilder &builder, RankedTensorType tensorType, ValueRange inputs, Location loc) { llvm_unreachable("Not implemented"); return llvm::None; }); // If the origValue still has live user(s), use this to // convert origValue to newValue addSourceMaterialization([&](OpBuilder &builder, RankedTensorType tensorType, ValueRange inputs, Location loc) { llvm_unreachable("Not implemented"); return llvm::None; }); // This will be called when (desiredType != newOperandType) // where, desiredType = typeConverter->convertType(origType) // NOTE: only for remapped values. 
  addTargetMaterialization([&](OpBuilder &builder, RankedTensorType tensorType,
                               ValueRange inputs, Location loc) {
    assert(inputs.size() == 1);
    llvm_unreachable("Not implemented");
    return llvm::None;
  });
}

//
// TritonGPUConversion
//
TritonGPUConversionTarget::TritonGPUConversionTarget(MLIRContext &context)
    : ConversionTarget(context) {
  // TODO: we should also verify ops of TritonGPUDialect
  addLegalDialect<triton::gpu::TritonGPUDialect>();

  // Some ops from SCF are illegal
  addIllegalOp<scf::ExecuteRegionOp, scf::ParallelOp, scf::ReduceOp,
               scf::ReduceReturnOp>();

  // Ops from these dialects are legal only once every ranked-tensor
  // operand/result carries a TritonGPU encoding
  addDynamicallyLegalDialect<arith::ArithmeticDialect, triton::TritonDialect,
                             StandardOpsDialect, scf::SCFDialect>(
      [&](Operation *op) {
        auto isLegal = [](Value v) -> bool {
          Type type = v.getType();
          if (auto tensorType = type.dyn_cast<RankedTensorType>())
            return tensorType.getEncoding() != nullptr;
          return true;
        };
        return llvm::all_of(op->getOperands(), isLegal) &&
               llvm::all_of(op->getResults(), isLegal);
      });

  // We have requirements for the data layouts
  addDynamicallyLegalOp<triton::DotOp>([this](triton::DotOp dotOp) -> bool {
    Attribute aEncoding =
        dotOp.a().getType().cast<RankedTensorType>().getEncoding();
    Attribute bEncoding =
        dotOp.b().getType().cast<RankedTensorType>().getEncoding();
    return aEncoding &&
           aEncoding.isa<triton::gpu::TritonGPUSharedEncodingAttr>() &&
           bEncoding &&
           bEncoding.isa<triton::gpu::TritonGPUSharedEncodingAttr>();
  });
}
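
// Example usage (illustrative sketch only; the pass body below is
// hypothetical and not defined in this file): a conversion pass would
// typically wire the type converter and conversion target together like so,
// with numThreads derived from num-warps and the 32-lane warp size assumed
// above.
//
//   void runOnOperation() {
//     MLIRContext *context = &getContext();
//     ModuleOp mod = getOperation();
//     int numThreads = numWarps * 32;
//     TritonGPUTypeConverter typeConverter(context, numThreads);
//     TritonGPUConversionTarget target(*context);
//     RewritePatternSet patterns(context);
//     // ... populate patterns that rewrite ops onto encoded tensor types ...
//     if (failed(applyPartialConversion(mod, target, std::move(patterns))))
//       return signalPassFailure();
//   }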