@@ -205,6 +205,20 @@ auto wrapAsStructAttrs(OpBuilder &b, ArrayAttr attrs) {
      b.getContext(), b.getNamedAttr(LLVM::getStructAttrsAttrName(), attrs));
}

/// Helper function to get strides from a given shape and its order
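/// e.g. shape = [16, 32] with order = [1, 0] (dim 1 contiguous) yields
/// strides = [i32_val(32), i32_val(1)].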
auto getStridesFromShapeAndOrder(ArrayRef<int64_t> shape,
                                 ArrayRef<unsigned> order, Location loc,
                                 ConversionPatternRewriter &rewriter) {
  auto rank = shape.size();
  SmallVector<Value> strides(rank);
  auto stride = 1;
  for (auto idx : order) {
    strides[idx] = i32_val(stride);
    stride *= shape[idx];
  }
  return strides;
}

struct FuncOpConversionBase : public ConvertOpToLLVMPattern<FuncOp> {
protected:
  using ConvertOpToLLVMPattern<FuncOp>::ConvertOpToLLVMPattern;
@@ -452,13 +466,10 @@ struct SharedMemoryObject {
                     ArrayRef<unsigned> order, Location loc,
                     ConversionPatternRewriter &rewriter)
      : base(base) {
    auto rank = shape.size();
    auto stride = 1;
    strides.resize(rank);
    strides = getStridesFromShapeAndOrder(shape, order, loc, rewriter);

    for (auto idx : order) {
      strides[idx] = i32_val(stride);
      offsets.emplace_back(i32_val(0));
      stride *= shape[idx];
    }
  }

@@ -2835,6 +2846,112 @@ public:
    return failure();
  }
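
  // Swizzle and store the per-thread values of a blocked-layout tensor
  // (`llSrc`, anchored at the base indices `srcIndices`) into the
  // shared-memory buffer rooted at `smemBase`, vectorizing stores by
  // `minVec` elements.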
  static void storeBlockedToShared(Value src, Value llSrc,
                                   ArrayRef<Value> srcStrides,
                                   ArrayRef<Value> srcIndices, Value dst,
                                   Value smemBase, Type elemPtrTy, Location loc,
                                   ConversionPatternRewriter &rewriter) {
    auto srcTy = src.getType().cast<RankedTensorType>();
    auto srcShape = srcTy.getShape();
    assert(srcShape.size() == 2 && "Unexpected rank of insertSlice");

    auto elemTy = srcTy.getElementType();
    auto dstTy = dst.getType().cast<RankedTensorType>();
    auto srcBlockedLayout = srcTy.getEncoding().cast<BlockedEncodingAttr>();
    auto dstSharedLayout = dstTy.getEncoding().cast<SharedEncodingAttr>();
    auto inOrd = srcBlockedLayout.getOrder();
    auto outOrd = dstSharedLayout.getOrder();
    unsigned inVec =
        inOrd == outOrd ? srcBlockedLayout.getSizePerThread()[inOrd[0]] : 1;
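    // inVec falls back to 1 above because vectorized register-to-shared
    // copies are only possible when the two layouts share the same order.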
    unsigned outVec = dstSharedLayout.getVec();
    unsigned minVec = std::min(outVec, inVec);
    unsigned perPhase = dstSharedLayout.getPerPhase();
    unsigned maxPhase = dstSharedLayout.getMaxPhase();
    unsigned numElems = getElemsPerThread(srcTy);
    auto inVals = getElementsFromStruct(loc, llSrc, rewriter);
    auto srcAccumSizeInThreads =
        product<unsigned>(srcBlockedLayout.getSizePerThread());
    auto wordTy = vec_ty(elemTy, minVec);

    // TODO: [goostavz] We should cache the result of
    // emitBaseIndexForBlockedLayout in case the backend compiler is not able
    // to optimize it.
    SmallVector<unsigned> srcShapePerCTA = getShapePerCTA(srcBlockedLayout);
    SmallVector<unsigned> reps{ceil<unsigned>(srcShape[0], srcShapePerCTA[0]),
                               ceil<unsigned>(srcShape[1], srcShapePerCTA[1])};

    // Visit each input value in the order they are placed in inVals.
    //
    // Note that this order does not take srcBlockedLayout.getOrder() into
    // account, so adjacent elements may not belong to the same word. This
    // could be improved by reordering the elements with
    // emitIndicesForBlockedLayout().
    SmallVector<unsigned> wordsInEachRep(2);
    wordsInEachRep[0] = inOrd[0] == 0
                            ? srcBlockedLayout.getSizePerThread()[0] / minVec
                            : srcBlockedLayout.getSizePerThread()[0];
    wordsInEachRep[1] = inOrd[0] == 0
                            ? srcBlockedLayout.getSizePerThread()[1]
                            : srcBlockedLayout.getSizePerThread()[1] / minVec;
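    // e.g. with sizePerThread = [1, 8], inOrd = [1, 0] (dim 1 contiguous) and
    // minVec = 4, each rep holds 8 contiguous elements along dim 1, packed
    // into 8 / 4 = 2 words: wordsInEachRep = [1, 2].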
    Value outVecVal = i32_val(outVec);
    Value minVecVal = i32_val(minVec);
    auto numWordsEachRep = product<unsigned>(wordsInEachRep);
    SmallVector<Value> wordVecs(numWordsEachRep);
    for (unsigned i = 0; i < numElems; ++i) {
      if (i % srcAccumSizeInThreads == 0) {
        // start of a replication
        for (unsigned w = 0; w < numWordsEachRep; ++w) {
          wordVecs[w] = undef(wordTy);
        }
      }
      unsigned linearIdxInNanoTile = i % srcAccumSizeInThreads;
      auto multiDimIdxInNanoTile = getMultiDimIndex<unsigned>(
          linearIdxInNanoTile, srcBlockedLayout.getSizePerThread(), inOrd);
      unsigned pos = multiDimIdxInNanoTile[inOrd[0]] % minVec;
      multiDimIdxInNanoTile[inOrd[0]] /= minVec;
      auto wordVecIdx = getLinearIndex<unsigned>(multiDimIdxInNanoTile,
                                                 wordsInEachRep, inOrd);
      wordVecs[wordVecIdx] =
          insert_element(wordTy, wordVecs[wordVecIdx], inVals[i], i32_val(pos));

      if (i % srcAccumSizeInThreads == srcAccumSizeInThreads - 1) {
        // end of a replication: store the vectors into shared memory
        unsigned linearRepIdx = i / srcAccumSizeInThreads;
        auto multiDimRepIdx =
            getMultiDimIndex<unsigned>(linearRepIdx, reps, inOrd);
        for (unsigned linearWordIdx = 0; linearWordIdx < numWordsEachRep;
             ++linearWordIdx) {
          // step 1: recover the multi-dim word index from the linear word
          // index
          auto multiDimWordIdx =
              getMultiDimIndex<unsigned>(linearWordIdx, wordsInEachRep, inOrd);
          SmallVector<Value> multiDimIdx(2);
          auto wordOffset0 = multiDimRepIdx[0] * srcShapePerCTA[0] +
                             multiDimWordIdx[0] * (inOrd[0] == 0 ? minVec : 1);
          auto wordOffset1 = multiDimRepIdx[1] * srcShapePerCTA[1] +
                             multiDimWordIdx[1] * (inOrd[0] == 1 ? minVec : 1);
          multiDimIdx[0] = add(srcIndices[0], i32_val(wordOffset0));
          multiDimIdx[1] = add(srcIndices[1], i32_val(wordOffset1));

          // step 2: do swizzling
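          // Every `perPhase` rows form a phase; XORing the vector-column
          // index with the phase id spreads consecutive rows across
          // different shared-memory banks.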
          Value remained = urem(multiDimIdx[outOrd[0]], outVecVal);
          multiDimIdx[outOrd[0]] = udiv(multiDimIdx[outOrd[0]], outVecVal);
          Value off_1 = mul(multiDimIdx[outOrd[1]], srcStrides[outOrd[1]]);
          Value phaseId = udiv(multiDimIdx[outOrd[1]], i32_val(perPhase));
          phaseId = urem(phaseId, i32_val(maxPhase));
          Value off_0 = xor_(multiDimIdx[outOrd[0]], phaseId);
          off_0 = mul(off_0, outVecVal);
          remained = udiv(remained, minVecVal);
          off_0 = add(off_0, mul(remained, minVecVal));
          Value offset = add(off_1, off_0);

          // step 3: store
          Value smemAddr = gep(elemPtrTy, smemBase, offset);
          smemAddr = bitcast(smemAddr, ptr_ty(wordTy, 3));
          store(wordVecs[linearWordIdx], smemAddr);
        }
      }
    }
  }

private:
  SmallVector<Value> getMultiDimOffset(Attribute layout, Location loc,
                                       ConversionPatternRewriter &rewriter,
@@ -3129,110 +3246,91 @@ LogicalResult ConvertLayoutOpConversion::lowerBlockedToShared(
  auto dstSharedLayout = dstTy.getEncoding().cast<SharedEncodingAttr>();
  auto inOrd = srcBlockedLayout.getOrder();
  auto outOrd = dstSharedLayout.getOrder();
  unsigned inVec =
      inOrd == outOrd ? srcBlockedLayout.getSizePerThread()[inOrd[0]] : 1;
  unsigned outVec = dstSharedLayout.getVec();
  unsigned minVec = std::min(outVec, inVec);
  unsigned perPhase = dstSharedLayout.getPerPhase();
  unsigned maxPhase = dstSharedLayout.getMaxPhase();
  unsigned numElems = getElemsPerThread(srcTy);
  auto inVals = getElementsFromStruct(loc, adaptor.src(), rewriter);
  auto srcAccumSizeInThreads =
      product<unsigned>(srcBlockedLayout.getSizePerThread());
  auto elemTy = srcTy.getElementType();
  auto wordTy = vec_ty(elemTy, minVec);

  // TODO: [goostavz] We should cache the result of
  // emitBaseIndexForBlockedLayout in case the backend compiler is not able
  // to optimize it.
  SmallVector<Value> multiDimOffsetFirstElem =
      emitBaseIndexForBlockedLayout(loc, rewriter, srcBlockedLayout, srcShape);
  SmallVector<unsigned> srcShapePerCTA = getShapePerCTA(srcBlockedLayout);
  SmallVector<unsigned> reps{ceil<unsigned>(srcShape[0], srcShapePerCTA[0]),
                             ceil<unsigned>(srcShape[1], srcShapePerCTA[1])};

  // Visit each input value in the order they are placed in inVals.
  //
  // Note that this order does not take srcBlockedLayout.getOrder() into
  // account, so adjacent elements may not belong to the same word. This could
  // be improved by reordering the elements with emitIndicesForBlockedLayout().
  SmallVector<unsigned> wordsInEachRep(2);
  wordsInEachRep[0] = inOrd[0] == 0
                          ? srcBlockedLayout.getSizePerThread()[0] / minVec
                          : srcBlockedLayout.getSizePerThread()[0];
  wordsInEachRep[1] = inOrd[0] == 0
                          ? srcBlockedLayout.getSizePerThread()[1]
                          : srcBlockedLayout.getSizePerThread()[1] / minVec;
  Value outVecVal = idx_val(outVec);
  Value minVecVal = idx_val(minVec);
  Value smemBase = getSharedMemoryBase(loc, rewriter, dst);
  auto elemTy = getTypeConverter()->convertType(srcTy.getElementType());
  auto elemPtrTy = ptr_ty(getTypeConverter()->convertType(elemTy), 3);
  smemBase = bitcast(smemBase, elemPtrTy);
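
  // Delegate the register-to-shared copy to storeBlockedToShared, which is
  // shared with InsertSliceOpConversion below.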
  auto srcStrides = getStridesFromShapeAndOrder(srcShape, inOrd, loc, rewriter);
  auto srcIndices =
      emitBaseIndexForBlockedLayout(loc, rewriter, srcBlockedLayout, srcShape);
  storeBlockedToShared(src, adaptor.src(), srcStrides, srcIndices, dst,
                       smemBase, elemPtrTy, loc, rewriter);

  auto smemObj = SharedMemoryObject(smemBase, dstShape, outOrd, loc, rewriter);
  auto retVal = getStructFromSharedMemoryObject(loc, smemObj, rewriter);
  auto numWordsEachRep = product<unsigned>(wordsInEachRep);
  SmallVector<Value> wordVecs(numWordsEachRep);
  for (unsigned i = 0; i < numElems; ++i) {
    if (i % srcAccumSizeInThreads == 0) {
      // start of a replication
      for (unsigned w = 0; w < numWordsEachRep; ++w) {
        wordVecs[w] = undef(wordTy);
      }
    }
    unsigned linearIdxInNanoTile = i % srcAccumSizeInThreads;
    auto multiDimIdxInNanoTile = getMultiDimIndex<unsigned>(
        linearIdxInNanoTile, srcBlockedLayout.getSizePerThread(), inOrd);
    unsigned pos = multiDimIdxInNanoTile[inOrd[0]] % minVec;
    multiDimIdxInNanoTile[inOrd[0]] /= minVec;
    auto wordVecIdx =
        getLinearIndex<unsigned>(multiDimIdxInNanoTile, wordsInEachRep, inOrd);
    wordVecs[wordVecIdx] =
        insert_element(wordTy, wordVecs[wordVecIdx], inVals[i], idx_val(pos));

    if (i % srcAccumSizeInThreads == srcAccumSizeInThreads - 1) {
      // end of a replication: store the vectors into shared memory
      unsigned linearRepIdx = i / srcAccumSizeInThreads;
      auto multiDimRepIdx =
          getMultiDimIndex<unsigned>(linearRepIdx, reps, inOrd);
      for (unsigned linearWordIdx = 0; linearWordIdx < numWordsEachRep;
           ++linearWordIdx) {
        // step 1: recover the multi-dim word index from the linear word index
        auto multiDimWordIdx =
            getMultiDimIndex<unsigned>(linearWordIdx, wordsInEachRep, inOrd);
        SmallVector<Value> multiDimIdx(2);
        auto wordOffset0 = multiDimRepIdx[0] * srcShapePerCTA[0] +
                           multiDimWordIdx[0] * (inOrd[0] == 0 ? minVec : 1);
        auto wordOffset1 = multiDimRepIdx[1] * srcShapePerCTA[1] +
                           multiDimWordIdx[1] * (inOrd[0] == 1 ? minVec : 1);
        multiDimIdx[0] = add(multiDimOffsetFirstElem[0], idx_val(wordOffset0));
        multiDimIdx[1] = add(multiDimOffsetFirstElem[1], idx_val(wordOffset1));

        // step 2: do swizzling
        Value remained = urem(multiDimIdx[outOrd[0]], outVecVal);
        multiDimIdx[outOrd[0]] = udiv(multiDimIdx[outOrd[0]], outVecVal);
        Value off_1 = mul(multiDimIdx[outOrd[1]], idx_val(srcShape[outOrd[0]]));
        Value phaseId = udiv(multiDimIdx[outOrd[1]], idx_val(perPhase));
        phaseId = urem(phaseId, idx_val(maxPhase));
        Value off_0 = xor_(multiDimIdx[outOrd[0]], phaseId);
        off_0 = mul(off_0, outVecVal);
        remained = udiv(remained, minVecVal);
        off_0 = add(off_0, mul(remained, minVecVal));
        Value offset = add(off_1, off_0);

        // step 3: store
        Value smemAddr = gep(elemPtrTy, smemBase, offset);
        smemAddr = bitcast(smemAddr, ptr_ty(wordTy, 3));
        store(wordVecs[linearWordIdx], smemAddr);
      }
    }
  }
  // Barrier is not necessary.
  // The membar pass knows that it writes to shared memory and will handle it
  // properly.
  rewriter.replaceOp(op, retVal);
  return success();
}
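
// Lowers tensor.insert_slice when the destination tensor lives in shared
// memory: the base pointer of the slice is computed from the static/dynamic
// offsets, and the blocked-layout source is copied into it in place.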
struct InsertSliceOpConversion
    : public ConvertTritonGPUOpToLLVMPattern<tensor::InsertSliceOp> {
  using ConvertTritonGPUOpToLLVMPattern<
      tensor::InsertSliceOp>::ConvertTritonGPUOpToLLVMPattern;

  LogicalResult
  matchAndRewrite(tensor::InsertSliceOp op, OpAdaptor adaptor,
                  ConversionPatternRewriter &rewriter) const override {
    // %dst = insert_slice %src into %dst[%offsets]
    Location loc = op->getLoc();
    Value dst = op.dest();
    Value src = op.source();
    Value res = op.result();
    assert(allocation->getBufferId(res) == Allocation::InvalidBufferId &&
           "Only support in-place insert_slice for now");

    auto srcTy = src.getType().dyn_cast<RankedTensorType>();
    auto srcLayout = srcTy.getEncoding().dyn_cast<BlockedEncodingAttr>();
    auto srcShape = srcTy.getShape();
    assert(srcLayout && "Unexpected srcLayout in InsertSliceOpConversion");

    auto dstTy = dst.getType().dyn_cast<RankedTensorType>();
    auto dstLayout = dstTy.getEncoding().dyn_cast<SharedEncodingAttr>();
    auto llDst = adaptor.dest();
    assert(dstLayout && "Unexpected dstLayout in InsertSliceOpConversion");
    assert(op.hasUnitStride() &&
           "Only unit stride supported by InsertSliceOpConversion");

    // newBase = base + offset
    // Triton supports both static and dynamic offsets
    auto smemObj = getSharedMemoryObjectFromStruct(loc, llDst, rewriter);
    SmallVector<Value, 4> offsets;
    SmallVector<Value, 4> srcStrides;
    auto mixedOffsets = op.getMixedOffsets();
    for (auto i = 0; i < mixedOffsets.size(); ++i) {
      if (op.isDynamicOffset(i)) {
        offsets.emplace_back(adaptor.offsets()[i]);
      } else {
        offsets.emplace_back(i32_val(op.getStaticOffset(i)));
      }
      // Like insert_slice_async, we only support slicing along one dimension,
      // and that dimension must have a slice size of 1
      if (op.getStaticSize(i) != 1) {
        srcStrides.emplace_back(smemObj.strides[i]);
      }
    }

    // Compute the offset based on the original strides of the shared memory
    // object
    auto offset = dot(rewriter, loc, offsets, smemObj.strides);
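    // i.e. offset = sum_i offsets[i] * strides[i], the linearized offset of
    // the slice origin within the shared-memory buffer.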
    auto llvmElemTy = getTypeConverter()->convertType(dstTy.getElementType());
    auto elemPtrTy = ptr_ty(llvmElemTy, 3);
    auto smemBase = gep(elemPtrTy, smemObj.base, offset);

    auto llSrc = adaptor.source();
    auto srcIndices =
        emitBaseIndexForBlockedLayout(loc, rewriter, srcLayout, srcShape);
    ConvertLayoutOpConversion::storeBlockedToShared(src, llSrc, srcStrides,
                                                    srcIndices, dst, smemBase,
                                                    elemPtrTy, loc, rewriter);
    // Barrier is not necessary.
    // The membar pass knows that it writes to shared memory and will handle it
    // properly.
    rewriter.replaceOp(op, llDst);
    return success();
  }
};

/// ====================== dot codegen begin ==========================

// Data loader for mma.16816 instruction.
@@ -5972,7 +6070,7 @@ struct AtomicRMWOpConversion
    auto valElements = getElementsFromStruct(loc, llVal, rewriter);
    auto ptrElements = getElementsFromStruct(loc, llPtr, rewriter);
    auto maskElements = getElementsFromStruct(loc, llMask, rewriter);


    auto valueTy = op.getResult().getType().dyn_cast<RankedTensorType>();
    Type valueElemTy =
        valueTy ? getTypeConverter()->convertType(valueTy.getElementType())
@@ -6166,11 +6264,14 @@ void populateTritonToLLVMPatterns(mlir::LLVMTypeConverter &typeConverter,
  patterns.add<ReduceOpConversion>(typeConverter, allocation, smem, benefit);
  patterns.add<ConvertLayoutOpConversion>(typeConverter, allocation, smem,
                                          benefit);
  patterns.add<AtomicRMWOpConversion>(typeConverter, allocation, smem, axisInfoAnalysis, benefit);
  patterns.add<AtomicRMWOpConversion>(typeConverter, allocation, smem,
                                      axisInfoAnalysis, benefit);
  patterns.add<ExtractSliceOpConversion>(typeConverter, allocation, smem,
                                         benefit);
  patterns.add<GetProgramIdOpConversion>(typeConverter, benefit);
  patterns.add<GetNumProgramsOpConversion>(typeConverter, benefit);
  patterns.add<InsertSliceOpConversion>(typeConverter, allocation, smem,
                                        benefit);
  patterns.add<InsertSliceAsyncOpConversion>(typeConverter, allocation, smem,
                                             axisInfoAnalysis, benefit);
  patterns.add<LoadOpConversion>(typeConverter, axisInfoAnalysis, benefit);
@@ -6216,8 +6317,57 @@ private:
    });
  }

  void decomposeInsertSliceAsyncOp(ModuleOp mod,
                                   TritonGPUToLLVMTypeConverter &converter) {
    // cp.async is supported in Ampere and later
    if (computeCapability >= 80)
      return;

    // insert_slice_async %src, %dst, %idx, %mask, %other
    // =>
    // %tmp = load %src, %mask, %other
    // %res = insert_slice %tmp into %dst[%idx]
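    // Note that the decomposed form is synchronous: the load completes before
    // insert_slice runs, which is why the async waits can be erased below.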
    mod.walk([&](triton::gpu::InsertSliceAsyncOp insertSliceAsyncOp) -> void {
      OpBuilder builder(insertSliceAsyncOp);
      // load
      auto srcTy = insertSliceAsyncOp.src().getType().cast<RankedTensorType>();
      auto dstTy = insertSliceAsyncOp.getType().cast<RankedTensorType>();
      auto srcBlocked =
          srcTy.getEncoding().dyn_cast<triton::gpu::BlockedEncodingAttr>();
      auto elemTy = converter.convertType(dstTy.getElementType());
      auto tmpTy = RankedTensorType::get(srcTy.getShape(), elemTy, srcBlocked);
      auto loadOp = builder.create<triton::LoadOp>(
          insertSliceAsyncOp.getLoc(), tmpTy, insertSliceAsyncOp.src(),
          insertSliceAsyncOp.mask(), insertSliceAsyncOp.other(),
          insertSliceAsyncOp.cache(), insertSliceAsyncOp.evict(),
          insertSliceAsyncOp.isVolatile());
      // insert_slice
      auto axis = insertSliceAsyncOp.axis();
      auto intAttr = [&](int64_t v) { return builder.getI64IntegerAttr(v); };
      auto offsets = SmallVector<OpFoldResult>(dstTy.getRank(), intAttr(0));
      auto sizes = SmallVector<OpFoldResult>(dstTy.getRank(), intAttr(1));
      auto strides = SmallVector<OpFoldResult>(dstTy.getRank(), intAttr(1));
      offsets[axis] = insertSliceAsyncOp.index();
      for (size_t i = 0; i < dstTy.getRank(); i++) {
        if (i != axis)
          sizes[i] = intAttr(dstTy.getShape()[i]);
      }
      auto insertSliceOp = builder.create<tensor::InsertSliceOp>(
          insertSliceAsyncOp.getLoc(), loadOp, insertSliceAsyncOp.dst(),
          offsets, sizes, strides);
      // Replace the async op with the decomposed pair and erase it
      insertSliceAsyncOp.replaceAllUsesWith(insertSliceOp.getResult());
      insertSliceAsyncOp.erase();
    });
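
    // With no cp.async ops left, there is nothing in flight to wait on, so
    // the async waits are dropped as well.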
    mod.walk([&](triton::gpu::AsyncWaitOp asyncWaitOp) -> void {
      asyncWaitOp.erase();
    });
  }

public:
  ConvertTritonGPUToLLVM() = default;
  explicit ConvertTritonGPUToLLVM(int computeCapability)
      : computeCapability(computeCapability) {}

  void runOnOperation() override {
    MLIRContext *context = &getContext();
@@ -6233,18 +6383,22 @@ public:
    int numWarps = triton::gpu::TritonGPUDialect::getNumWarps(mod);

    // step 1: Decompose unoptimized layout conversions to use shared memory
    // step 2: Allocate shared memories and insert barriers
    // step 3: Convert SCF to CFG
    // step 4: Convert FuncOp to LLVMFuncOp via partial conversion
    // step 5: Convert the rest of ops via partial conversion
    // The reason for putting step 1 before step 2 is that the membar analysis
    // currently only supports SCF but not CFG.
    // The reason for a separation between 1/4 is that, step 3 is out of
    // the scope of Dialect Conversion, thus we need to make sure the smem
    // is not revised during the conversion of step 4.
    // step 2: Decompose insert_slice_async to use load + insert_slice for
    //         pre-Ampere architectures
    // step 3: Allocate shared memories and insert barriers
    // step 4: Convert SCF to CFG
    // step 5: Convert FuncOp to LLVMFuncOp via partial conversion
    // step 6: Convert the rest of ops via partial conversion
    // The reason for putting steps 1 and 2 before step 3 is that the membar
    // analysis currently only supports SCF but not CFG. The reason for a
    // separation between 3/5 is that step 4 is out of the scope of Dialect
    // Conversion, thus we need to make sure the smem is not revised during
    // the conversion of step 5.

    decomposeBlockedToDotOperand(mod);

    decomposeInsertSliceAsyncOp(mod, typeConverter);

    Allocation allocation(mod);
    MembarAnalysis membar(&allocation);

@@ -6303,6 +6457,8 @@ protected:
      TritonGPUToLLVMTypeConverter &typeConverter);

  Value smem;

  int computeCapability{};
};

void ConvertTritonGPUToLLVM::initSharedMemory(
@@ -6365,8 +6521,9 @@ TritonLLVMFunctionConversionTarget::TritonLLVMFunctionConversionTarget(

namespace triton {

std::unique_ptr<OperationPass<ModuleOp>> createConvertTritonGPUToLLVMPass() {
  return std::make_unique<::ConvertTritonGPUToLLVM>();
std::unique_ptr<OperationPass<ModuleOp>>
createConvertTritonGPUToLLVMPass(int computeCapability) {
  return std::make_unique<::ConvertTritonGPUToLLVM>(computeCapability);
}

} // namespace triton