Commit 3b6ca2a

bump llvm

1 parent d5e6a56 commit 3b6ca2a

38 files changed: +87 −87 lines

Diff for: cmake/llvm-version-imex.txt

+1 −1

@@ -1 +1 @@
-add6b2f35f2bcf1f59a2ab2d5b3dab124fe0895a
+7842374103b26933d71a8fe354cd4d8715d55b1c

Diff for: cmake/llvm-version.txt

+1 −1

@@ -1 +1 @@
-add6b2f35f2bcf1f59a2ab2d5b3dab124fe0895a
+7842374103b26933d71a8fe354cd4d8715d55b1c

Diff for: include/gc/Dialect/LLVMIR/XeVMOps.td

+3 −3

@@ -70,7 +70,7 @@ def XeVM_L1StoreCacheControl : XeVM_StoreCacheControl<"L1">;
 def XeVM_L3StoreCacheControl : XeVM_StoreCacheControl<"L3">;

 def XeVM_BlockLoad2dOp : XeVM_Op<"blockload2d">,
-  Results<(outs FixedVectorOf<[XeVM_ElemType]>:$res)>,
+  Results<(outs FixedVectorOfRankAndType<[1,2,3], [XeVM_ElemType]>:$res)>,
   Arguments<(ins
     Arg<LLVM_AnyPointer, "", [MemRead]>:$ptr,
     I32:$base_width,
@@ -137,7 +137,7 @@ def XeVM_BlockStore2dOp : XeVM_Op<"blockstore2d">,
     I32Attr:$tile_width,
     I32Attr:$tile_height,
     I32Attr:$v_blocks,
-    FixedVectorOf<[XeVM_ElemType]>:$stored_val,
+    FixedVectorOfRankAndType<[1, 2, 3], [XeVM_ElemType]>:$stored_val,
     DefaultValuedAttr<XeVM_L1StoreCacheControl, "::mlir::xevm::L1StoreCacheControl::DEFAULT">:$l1_cache_control,
     DefaultValuedAttr<XeVM_L3StoreCacheControl, "::mlir::xevm::L3StoreCacheControl::DEFAULT">:$l3_cache_control
   )> {
@@ -243,7 +243,7 @@ def XeVM_PrecisionTypeAttr : I32EnumAttr<"PrecisionType",
 }

 def XeVM_DPASOp : XeVM_Op<"dpas">,
-  Results<(outs FixedVectorOf<[XeVM_MatrixElemType]>:$d)>,
+  Results<(outs FixedVectorOfRankAndType<[1], [XeVM_MatrixElemType]>:$d)>,
   Arguments<(ins
     FixedVectorOfRankAndType<[1], [XeVM_MatrixElemType]>:$c,
     FixedVectorOfRankAndType<[1], [XeVM_MatrixElemType]>:$a,

Diff for: include/gc/Transforms/Microkernel/BrgemmRuntimeUtils.h

+4 −4

@@ -27,13 +27,13 @@ static inline int64_t getDnnlDataTypeVal(RewriterBase &rewriter,
   auto context = rewriter.getContext();
   auto tattr = dyn_cast_or_null<TypeAttr>(attr);
   assert(tattr);
-  if (tattr == TypeAttr::get(FloatType::getF32(context))) {
+  if (tattr == TypeAttr::get(Float32Type::get(context))) {
     return static_cast<int64_t>(dnnl_f32);
-  } else if (tattr == TypeAttr::get(FloatType::getF64(context))) {
+  } else if (tattr == TypeAttr::get(Float64Type::get(context))) {
     return static_cast<int64_t>(dnnl_f64);
-  } else if (tattr == TypeAttr::get(FloatType::getBF16(context))) {
+  } else if (tattr == TypeAttr::get(BFloat16Type::get(context))) {
     return static_cast<int64_t>(dnnl_bf16);
-  } else if (tattr == TypeAttr::get(FloatType::getF16(context))) {
+  } else if (tattr == TypeAttr::get(Float16Type::get(context))) {
     return static_cast<int64_t>(dnnl_f16);
   } else if (tattr == TypeAttr::get(
                  IntegerType::get(context, 32, IntegerType::Signed))) {
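Note: several files in this commit apply the same upstream MLIR migration, where the FloatType::getF32/getF64/getBF16/getF16 convenience constructors were removed in favor of the concrete type classes. A minimal before/after sketch, assuming only a valid MLIRContext (illustration, not code from this repository):

#include "mlir/IR/BuiltinTypes.h"
#include "mlir/IR/MLIRContext.h"

// Builds a few builtin float types; `ctx` must outlive the returned types.
static void buildFloatTypes(mlir::MLIRContext *ctx) {
  // Old API (removed): mlir::FloatType f32 = mlir::FloatType::getF32(ctx);
  mlir::FloatType f32 = mlir::Float32Type::get(ctx);   // new spelling
  mlir::FloatType bf16 = mlir::BFloat16Type::get(ctx); // likewise for bf16
  (void)f32;
  (void)bf16;
}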

Diff for: include/gc/Transforms/Utils/StructuredOpMatcher.h

+3 −2

@@ -163,7 +163,7 @@ struct HasStaticStrides
     SmallVector<int64_t> strides;
     if (auto memRefType = dyn_cast_or_null<MemRefType>(operandType)) {
       int64_t offset;
-      if (failed(getStridesAndOffset(memRefType, strides, offset)))
+      if (failed(memRefType.getStridesAndOffset(strides, offset)))
         return false;
       if (llvm::any_of(strides, [](int64_t stride) {
             return stride == ShapedType::kDynamic;
@@ -244,7 +244,8 @@ struct NumDpsInits
 // Callable object to validate number of input operands for `op`.
 struct NumDpsInputs {
   NumDpsInputs() = delete;
-  explicit NumDpsInputs(std::function<bool(size_t)> fun) : fun(std::move(fun)){};
+  explicit NumDpsInputs(std::function<bool(size_t)> fun)
+      : fun(std::move(fun)){};

   bool operator()(Operation *op) {
     if (auto linalgOp = dyn_cast_or_null<linalg::LinalgOp>(op))
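Another recurring change in this commit: the free function getStridesAndOffset(memrefType, strides, offset) is now a member of MemRefType. A small sketch of the new call shape, assuming a strided memref type (illustrative only):

#include "mlir/IR/BuiltinTypes.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"

// Returns true if every stride of `memrefType` is static.
static bool hasStaticStrides(mlir::MemRefType memrefType) {
  llvm::SmallVector<int64_t> strides;
  int64_t offset;
  // Old: if (failed(getStridesAndOffset(memrefType, strides, offset)))
  if (mlir::failed(memrefType.getStridesAndOffset(strides, offset)))
    return false;
  return !llvm::any_of(strides, [](int64_t stride) {
    return stride == mlir::ShapedType::kDynamic;
  });
}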

Diff for: lib/gc/Dialect/Linalgx/Utils.cpp

+5 −3

@@ -385,7 +385,7 @@ bool isGenericAttrEquivalent(linalg::GenericOp op, ShapedType shapeA,
   DenseMap<AffineExpr, AffineExpr> replaceMap;
   std::map<unsigned, utils::IteratorType> iterMap;
   // get shape-to-loop map
-  AffineMap inverse = inversePermutation(concatAffineMaps(inMaps));
+  AffineMap inverse = inversePermutation(concatAffineMaps(inMaps, context));
   assert(inverse && "shape-to-loops map to be non-null");
   assert(dimSize == inverse.getResults().size());
   // renumber the dim id based on shape-to-loop map
@@ -492,8 +492,10 @@ bool isGenericPackedMatmulOpImpl(linalg::GenericOp genericOp,
     return false;
   }
   // Check for packing
-  ValueRange inputs = genericOp.getDpsInputs();
-  ValueRange outputs = genericOp.getDpsInits();
+  auto inputsVec = genericOp.getDpsInputs();
+  ValueRange inputs = inputsVec;
+  auto outputsVec = genericOp.getDpsInits();
+  ValueRange outputs = outputsVec;
   auto shapeA = cast<ShapedType>(inputs.front().getType());
   auto shapeB = cast<ShapedType>(inputs.back().getType());
   auto shapeC = cast<ShapedType>(outputs.back().getType());
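The two-step initialization above appears to be a lifetime fix: getDpsInputs()/getDpsInits() return their operand values by value (a SmallVector), while ValueRange is only a non-owning view. A short sketch of the hazard, assuming any linalg.generic op (illustrative only):

#include "mlir/Dialect/Linalg/IR/Linalg.h"

// Shows why the vector must be kept alive in a named local before taking a
// ValueRange view of it.
static void inspectInputs(mlir::linalg::GenericOp genericOp) {
  // Dangling: the temporary vector dies at the end of the full expression.
  //   mlir::ValueRange inputs = genericOp.getDpsInputs();
  auto inputsVec = genericOp.getDpsInputs(); // owns the storage
  mlir::ValueRange inputs = inputsVec;       // safe, non-owning view
  (void)inputs;
}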

Diff for: lib/gc/Dialect/Microkernel/MicrokernelOps.cpp

+3 −3

@@ -551,11 +551,11 @@ static LogicalResult verifyBrgemmDataTypes(ArrayAttr dtypes,

   auto context = op.getContext();

-#define FTAttr(t) TypeAttr::get(FloatType::get##t(context))
+#define FTAttr(t) TypeAttr::get(t::get(context))
 #define ITAttr(s, w) TypeAttr::get(IntegerType::get(context, w, IntegerType::s))
   SmallVector<std::pair<TypeAttr, TypeAttr>> validDataTypes = {
-      {FTAttr(F32), FTAttr(F32)},
-      {FTAttr(BF16), FTAttr(BF16)},
+      {FTAttr(Float32Type), FTAttr(Float32Type)},
+      {FTAttr(BFloat16Type), FTAttr(BFloat16Type)},
       {ITAttr(Unsigned, 8), ITAttr(Signed, 8)},
       {ITAttr(Signed, 8), ITAttr(Unsigned, 8)},
       {ITAttr(Unsigned, 8), ITAttr(Unsigned, 8)},

Diff for: lib/gc/ExecutionEngine/GPURuntime/ocl/GpuOclRuntime.cpp

+4 −3

@@ -718,7 +718,7 @@ StringRef createStaticMain(OpBuilder &builder, ModuleOp &module,
     auto offsetPtr = constArgs.end();
     constArgs.emplace_back(0);
     constArgs.append(shape.begin(), shape.end());
-    if (failed(getStridesAndOffset(type, constArgs, *offsetPtr))) {
+    if (failed(type.getStridesAndOffset(constArgs, *offsetPtr))) {
       gcLogD("Failed to get strides and offset of arg", i,
              " of the function ", funcName.begin());
       return {};
@@ -929,8 +929,9 @@ OclModuleBuilder::build(const OclRuntime::Ext &ext) {
                    builder.getI64IntegerAttr(static_cast<int64_t>(wgSize)));
   TargetDeviceSpecInterface devSpec =
       TargetDeviceSpecAttr::get(ctx, dltiAttrs);
-  auto sysSpec =
-      TargetSystemSpecAttr::get(ctx, ArrayRef(std::pair(devStr, devSpec)));
+  DataLayoutEntryInterface dl =
+      DataLayoutEntryAttr::get(ctx, devStr, devSpec);
+  auto sysSpec = TargetSystemSpecAttr::get(ctx, ArrayRef(dl));
   mod = mlirModule.clone();
   mod.getOperation()->setAttr("#dlti.sys_spec", sysSpec);
   PassManager pm{ctx};

Diff for: lib/gc/Transforms/DecomposeAggregatedOps.cpp

+1 −1

@@ -42,7 +42,7 @@ struct DecomposeAggregatedOps
   void runOnOperation() override {
     RewritePatternSet patterns(getOperation().getContext());
     patterns.add<DecomposeAggregateOpsImpl>(patterns.getContext());
-    (void)applyPatternsAndFoldGreedily(getOperation(), std::move(patterns));
+    (void)applyPatternsGreedily(getOperation(), std::move(patterns));
   }
 };
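Most of the remaining hunks are the same mechanical rename: upstream MLIR renamed applyPatternsAndFoldGreedily to applyPatternsGreedily (the greedy driver still folds). A minimal sketch of a pass body using the new name (illustrative only; the warning text is not from this repository):

#include "mlir/IR/PatternMatch.h"
#include "mlir/Transforms/GreedyPatternRewriteDriver.h"

// Applies `patterns` to `root` with the greedy driver under its new name.
static void runPatterns(mlir::Operation *root,
                        mlir::RewritePatternSet patterns) {
  // Old: (void)applyPatternsAndFoldGreedily(root, std::move(patterns));
  if (mlir::failed(mlir::applyPatternsGreedily(root, std::move(patterns))))
    root->emitWarning("greedy pattern application did not converge");
}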

Diff for: lib/gc/Transforms/DecomposeTensorOperation.cpp

+1 −2

@@ -170,8 +170,7 @@ struct DecomposeTensorOperationPass
     patterns.add<DecomposeGatherOp>(patterns.getContext());
     tensor::populateDecomposeTensorConcatPatterns(patterns);

-    if (failed(applyPatternsAndFoldGreedily(getOperation(),
-                                            std::move(patterns)))) {
+    if (failed(applyPatternsGreedily(getOperation(), std::move(patterns)))) {
       return signalPassFailure();
     }
   }

Diff for: lib/gc/Transforms/DeepTileContractionOp.cpp

+3 −4

@@ -405,7 +405,7 @@ generateOuterLoop(RewriterBase &b, linalg::LinalgOp linalgOp,
       // the extra copy generated by bufferization. So remove the dummy loop
       // at this early stage.
       if (!isDummyLoop(tilingResult->loops.back())) {
-        b.replaceOp(currentOp, tilingResult->replacements);
+        b.replaceOp(currentOp, tilingResult->mergeResult.replacements);
         currentOp = dyn_cast<linalg::LinalgOp>(tilingResult->tiledOps.back());
         if (iteratorTypes[d] == mlir::utils::IteratorType::reduction)
           result.reductionLoops.push_back(tilingResult->loops.back());
@@ -477,7 +477,7 @@ generateOuterLoop(RewriterBase &b, linalg::LinalgOp linalgOp,
           b, cast<TilingInterface>(currentOp.getOperation()), tileOption);
       if (failed(tilingResult))
         return failure();
-      b.replaceOp(currentOp, tilingResult->replacements);
+      b.replaceOp(currentOp, tilingResult->mergeResult.replacements);
       currentOp = dyn_cast<linalg::LinalgOp>(tilingResult->tiledOps.back());
     }
   }
@@ -1029,8 +1029,7 @@ struct DeepTileContractionOp
     dialect->getCanonicalizationPatterns(patterns);
     for (RegisteredOperationName op : ctx.getRegisteredOperations())
       op.getCanonicalizationPatterns(patterns, &ctx);
-    if (failed(
-            applyPatternsAndFoldGreedily(getOperation(), std::move(patterns))))
+    if (failed(applyPatternsGreedily(getOperation(), std::move(patterns))))
       return signalPassFailure();
   }
 };
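The replacements → mergeResult.replacements change reflects the reorganized scf::SCFTilingResult, where the final replacement values now sit under the nested merge result. A small sketch of tiling an op and wiring in the replacements under that layout (assuming the post-bump upstream struct; illustrative only):

#include "mlir/Dialect/SCF/Transforms/TileUsingInterface.h"

// Tiles `op` with scf.for loops and replaces it with the tiled results.
static mlir::LogicalResult
tileAndReplace(mlir::RewriterBase &rewriter, mlir::TilingInterface op,
               const mlir::scf::SCFTilingOptions &options) {
  mlir::FailureOr<mlir::scf::SCFTilingResult> tilingResult =
      mlir::scf::tileUsingSCF(rewriter, op, options);
  if (mlir::failed(tilingResult))
    return mlir::failure();
  // Old: tilingResult->replacements
  rewriter.replaceOp(op, tilingResult->mergeResult.replacements);
  return mlir::success();
}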

Diff for: lib/gc/Transforms/FoldTensorOperation.cpp

+1 −2

@@ -44,8 +44,7 @@ struct FoldTensorOperationPass
     // Use to remove useless tensor operation like extract or
     // insert slice.
     config.strictMode = GreedyRewriteStrictness::ExistingOps;
-    (void)applyPatternsAndFoldGreedily(getOperation(), std::move(pattern),
-                                       config);
+    (void)applyPatternsGreedily(getOperation(), std::move(pattern), config);
   }
 };
 } // namespace

Diff for: lib/gc/Transforms/GPU/AllocsToSLM.cpp

+1 −1

@@ -152,7 +152,7 @@ struct AllocsToSLM : public gc::impl::AllocsToSLMBase<AllocsToSLM> {

     RewritePatternSet patterns(ctx);
     patterns.add<ConvertAlloc>(patterns.getContext());
-    (void)applyPatternsAndFoldGreedily(getOperation(), std::move(patterns));
+    (void)applyPatternsGreedily(getOperation(), std::move(patterns));
   }
 };

Diff for: lib/gc/Transforms/GPU/IMEX/LinalgToXeGPU.cpp

+3-3
Original file line numberDiff line numberDiff line change
@@ -2124,17 +2124,17 @@ struct LinalgToXeGPU : public gc::impl::LinalgToXeGPUBase<LinalgToXeGPU> {
21242124
// Run GEMM pattern first to allow fusion with its consumers.
21252125
RewritePatternSet gemmPatterns(&getContext());
21262126
populateLinalgGemmToXeGPUPatterns(gemmPatterns, options);
2127-
(void)applyPatternsAndFoldGreedily(getOperation(), std::move(gemmPatterns));
2127+
(void)applyPatternsGreedily(getOperation(), std::move(gemmPatterns));
21282128

21292129
// Convert memory fill ops.
21302130
RewritePatternSet fillPatterns(&getContext());
21312131
populateLinalgMemoryFillToXeGPUPatterns(fillPatterns, options);
2132-
(void)applyPatternsAndFoldGreedily(getOperation(), std::move(fillPatterns));
2132+
(void)applyPatternsGreedily(getOperation(), std::move(fillPatterns));
21332133

21342134
// Convert other remaining ops.
21352135
RewritePatternSet patterns(&getContext());
21362136
populateLinalgEltwiseToXeGPUPatterns(patterns, options);
2137-
(void)applyPatternsAndFoldGreedily(getOperation(), std::move(patterns));
2137+
(void)applyPatternsGreedily(getOperation(), std::move(patterns));
21382138
}
21392139
};
21402140

Diff for: lib/gc/Transforms/GPU/Pipeline.cpp

+2 −1

@@ -154,7 +154,8 @@ void populateGPUPipeline(OpPassManager &pm,
   pm.addPass(createGpuKernelOutliningPass());
   pm.addPass(createConvertXeVMToLLVMPass());
   pm.addPass(createGpuXeVMAttachTarget());
-  pm.addNestedPass<gpu::GPUModuleOp>(createConvertGpuOpsToLLVMSPVOps());
+  pm.addNestedPass<gpu::GPUModuleOp>(
+      createConvertGpuOpsToLLVMSPVOps({.use64bitIndex = true}));
   pm.addNestedPass<gpu::GPUModuleOp>(createConvertIndexToLLVMPass());
   pm.addNestedPass<gpu::GPUModuleOp>(createArithToLLVMConversionPass());
   pm.addPass(createReconcileUnrealizedCastsPass());
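The GPU-to-LLVM-SPV lowering is now created with an explicit options struct so index values lower to i64. A minimal sketch of configuring that pass in a pipeline; the only option field assumed here is use64bitIndex, taken from the hunk above (illustrative only):

#include "mlir/Conversion/Passes.h"
#include "mlir/Dialect/GPU/IR/GPUDialect.h"
#include "mlir/Pass/PassManager.h"

// Adds the GPU-ops-to-LLVM-SPV lowering with 64-bit index lowering enabled.
static void addGpuToLLVMSPV(mlir::OpPassManager &pm) {
  pm.addNestedPass<mlir::gpu::GPUModuleOp>(
      mlir::createConvertGpuOpsToLLVMSPVOps({.use64bitIndex = true}));
}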

Diff for: lib/gc/Transforms/IterativeTilingAndFusion.cpp

+1 −1

@@ -813,7 +813,7 @@ void iterativeTilingAndFusionUntilExhaustion(
             defaultTilingOfType(rewriter, op, isaOpTy, cfg);
         if (succeeded(tilingResult)) {
           tiledOps.insert(tilingResult->tiledOps[0]);
-          rewriter.replaceOp(op, tilingResult->replacements);
+          rewriter.replaceOp(op, tilingResult->mergeResult.replacements);
           break;
         }
       }

Diff for: lib/gc/Transforms/LowerToTileVector.cpp

+5 −6

@@ -614,17 +614,16 @@ struct LowerToTileVectorPass
     // Init patterns use to remove useless tensor operation like extract or
     // insert slice.
     configInit.strictMode = GreedyRewriteStrictness::ExistingOps;
-    (void)applyPatternsAndFoldGreedily(funcOp, std::move(patternsInit),
-                                       configInit);
+    (void)applyPatternsGreedily(funcOp, std::move(patternsInit), configInit);

     RewritePatternSet firstPatterns(ctx);
     // All the dynamic shape will reject to lower.
     populateLowerToTileVectorPatterns(firstPatterns);
     GreedyRewriteConfig configFirstPn;
     // We only apply the lowering pattern on existing operations
     configFirstPn.strictMode = GreedyRewriteStrictness::ExistingOps;
-    (void)applyPatternsAndFoldGreedily(funcOp, std::move(firstPatterns),
-                                       configFirstPn);
+    (void)applyPatternsGreedily(funcOp, std::move(firstPatterns),
+                                configFirstPn);
     // Error case:
     // ```
     // linalg.copy : <1x32xf32>
@@ -649,10 +648,10 @@ struct LowerToTileVectorPass
     vector::populateVectorTransferPermutationMapLoweringPatterns(secondPattern);
     // Remove unnessary broadcast operation
     vector::populateSinkVectorOpsPatterns(secondPattern);
-    // Second fold (with the help of the `applyPatternsAndFoldGreedily`
+    // Second fold (with the help of the `applyPatternsGreedily`
     // function) can help us to eliminate redundant operation like consecutive
     // read and write.
-    (void)applyPatternsAndFoldGreedily(funcOp, std::move(secondPattern));
+    (void)applyPatternsGreedily(funcOp, std::move(secondPattern));
     // may need other patterns to reduce redundant operations
   }
 };

Diff for: lib/gc/Transforms/MemRefToCPURuntime.cpp

+1 −1

@@ -51,7 +51,7 @@ uint64_t getMemRefSizeInBytes(MemRefType memrefType) {
   if (!layout.isIdentity()) {
     int64_t offset;
     SmallVector<int64_t, 4> strides;
-    if (failed(getStridesAndOffset(memrefType, strides, offset))) {
+    if (failed(memrefType.getStridesAndOffset(strides, offset))) {
       return UINT64_MAX;
     }

Diff for: lib/gc/Transforms/MergeNestedForall.cpp

+1 −2

@@ -82,8 +82,7 @@ struct MergeNestedForall

     patterns.add<MergeNestedForallLoops>(patterns.getContext());

-    if (failed(
-            applyPatternsAndFoldGreedily(getOperation(), std::move(patterns))))
+    if (failed(applyPatternsGreedily(getOperation(), std::move(patterns))))
       return signalPassFailure();
   }
 };

Diff for: lib/gc/Transforms/Microkernel/ConvertLinalgToMicrokernel.cpp

+1 −1

@@ -391,7 +391,7 @@ class ConvertLinalgToMicrokernel
     patterns.add<ConvertContractionOpToBrgemmRewriter<linalg::GenericOp>>(
         &getContext());
     FrozenRewritePatternSet patternSet(std::move(patterns));
-    if (failed(applyPatternsAndFoldGreedily(getOperation(), patternSet)))
+    if (failed(applyPatternsGreedily(getOperation(), patternSet)))
       signalPassFailure();
   }
 };

Diff for: lib/gc/Transforms/Microkernel/ConvertMicrokernelToDnnlFunc.cpp

+2 −2

@@ -63,7 +63,7 @@ class ConvertBrgemmDispatchOpRewriter
     SmallVector<Value, 10> operands;
     SmallVector<Type, 10> operandTypes;
     IntegerType integer64 = IntegerType::get(rewriter.getContext(), 64);
-    FloatType float32 = FloatType::getF32(rewriter.getContext());
+    FloatType float32 = Float32Type::get(rewriter.getContext());

     // M, N, K, LDA, LDB, LDC, stride_a, stride_b
     // they are in the same order with BrgemmDispatchOp inputs
@@ -215,7 +215,7 @@ class ConvertMicrokernelToDnnlFunc
         &getContext());

     FrozenRewritePatternSet patternSet(std::move(patterns));
-    if (failed(applyPatternsAndFoldGreedily(getOperation(), patternSet)))
+    if (failed(applyPatternsGreedily(getOperation(), patternSet)))
       signalPassFailure();
   }
 };

Diff for: lib/gc/Transforms/Microkernel/EarlyDispatchMicrokernel.cpp

+1 −2

@@ -205,8 +205,7 @@ class EarlyDispatchMicrokernel
     // Ignore newly created Ops
     GreedyRewriteConfig config;
     config.strictMode = GreedyRewriteStrictness::ExistingOps;
-    if (failed(
-            applyPatternsAndFoldGreedily(getOperation(), patternSet, config)))
+    if (failed(applyPatternsGreedily(getOperation(), patternSet, config)))
       signalPassFailure();
   }
 };

Diff for: lib/gc/Transforms/Microkernel/ExpandMicrokernel.cpp

+1 −1

@@ -275,7 +275,7 @@ class ExpandMicrokernel
     patterns.add<ExpandMicrokernelBrgemmRewriter>(&getContext());

     FrozenRewritePatternSet patternSet(std::move(patterns));
-    if (failed(applyPatternsAndFoldGreedily(getOperation(), patternSet)))
+    if (failed(applyPatternsGreedily(getOperation(), patternSet)))
       signalPassFailure();
   }
 };

Diff for: lib/gc/Transforms/Microkernel/MergeBranchMicrokernelContext.cpp

+1 −1

@@ -296,7 +296,7 @@ class MergeBranchMicrokernelContext
     patterns.add<ScfIndexSwitchRewriter>(&getContext(), dispatchAnalysis);
     FrozenRewritePatternSet patternSet(std::move(patterns));

-    if (failed(applyPatternsAndFoldGreedily(getOperation(), patternSet))) {
+    if (failed(applyPatternsGreedily(getOperation(), patternSet))) {
       signalPassFailure();
     }
   }

Diff for: lib/gc/Transforms/Microkernel/MicrokernelInvariantCodeMotion.cpp

+1 −2

@@ -421,8 +421,7 @@ class MicrokernelInvariantCodeMotion
     // Ignore newly created Ops
     GreedyRewriteConfig config;
     config.strictMode = GreedyRewriteStrictness::ExistingOps;
-    if (failed(
-            applyPatternsAndFoldGreedily(getOperation(), patternSet, config))) {
+    if (failed(applyPatternsGreedily(getOperation(), patternSet, config))) {
       signalPassFailure();
     }
   }

Diff for: lib/gc/Transforms/OneDNNGraphToLinalg.cpp

+1 −2

@@ -515,8 +515,7 @@ struct ConvertOneDNNGraphToLinalg
                      MatMulOpBatchFlatten
                      // clang-format on
                      >(ctx);
-    if (failed(applyPatternsAndFoldGreedily(getOperation(),
-                                            std::move(patternsPre)))) {
+    if (failed(applyPatternsGreedily(getOperation(), std::move(patternsPre)))) {
       signalPassFailure();
     }
     // ==========================================

Diff for: lib/gc/Transforms/Utils/ValueUtils.cpp

+1 −1

@@ -110,7 +110,7 @@ FailureOr<SmallVector<int64_t>> getStrides(Value value) {
   auto memrefType = cast<MemRefType>(valueType);
   SmallVector<int64_t> strides;
   int64_t offset;
-  if (failed(getStridesAndOffset(memrefType, strides, offset)))
+  if (failed(memrefType.getStridesAndOffset(strides, offset)))
     return failure();
   return strides;
 }

Diff for: src/dnnl/JsonParser.h

+3 −4

@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2024 Intel Corporation
+ * Copyright (C) 2025 Intel Corporation
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
@@ -12,7 +12,6 @@
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions
 * and limitations under the License.
- *
 * SPDX-License-Identifier: Apache-2.0
 */

@@ -179,8 +178,8 @@ class JsonParser {
       GC_DTYPE("u8", b.getIntegerType(8, true)),
       GC_DTYPE("f64", b.getF64Type()),
       GC_DTYPE("boolean", b.getI1Type()),
-      GC_DTYPE("f8_e5m2", b.getFloat8E5M2Type()),
-      GC_DTYPE("f8_e4m3", b.getFloat8E4M3FNType()),
+      GC_DTYPE("f8_e5m2", mlir::Float8E5M2Type::get(b.getContext())),
+      GC_DTYPE("f8_e4m3", mlir::Float8E4M3Type::get(b.getContext())),
       GC_DTYPE("s4", b.getIntegerType(4, false)),
       GC_DTYPE("u4", b.getIntegerType(4, true)),
   };
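The FP8 entries switch from the removed Builder convenience getters to the concrete type classes, and "f8_e4m3" now maps to Float8E4M3Type rather than Float8E4M3FNType. A small sketch of the new spelling, assuming an OpBuilder `b` (illustrative only):

#include <utility>
#include "mlir/IR/Builders.h"
#include "mlir/IR/BuiltinTypes.h"

// Returns the two FP8 element types via the concrete builtin type classes.
static std::pair<mlir::Type, mlir::Type> getFp8Types(mlir::OpBuilder &b) {
  // Old: b.getFloat8E5M2Type() / b.getFloat8E4M3FNType()
  return {mlir::Float8E5M2Type::get(b.getContext()),
          mlir::Float8E4M3Type::get(b.getContext())};
}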
