Integrate LLVM at llvm/llvm-project@13c761789753
Updates LLVM usage to match
[13c761789753](llvm/llvm-project@13c761789753)

PiperOrigin-RevId: 717293402
Google-ML-Automation committed Jan 20, 2025
1 parent 50b222c commit 57ed743
Showing 16 changed files with 4,172 additions and 700 deletions.
1,337 changes: 1,143 additions & 194 deletions third_party/llvm/generated.patch

Large diffs are not rendered by default.

4 changes: 2 additions & 2 deletions third_party/llvm/workspace.bzl
@@ -4,8 +4,8 @@ load("//third_party:repo.bzl", "tf_http_archive")

 def repo(name):
     """Imports LLVM."""
-    LLVM_COMMIT = "bf17016a92bc8a23d2cdd2b51355dd4eb5019c68"
-    LLVM_SHA256 = "ba09f12e5019f5aca531b1733275f0a10b181d6f894deb1a4610e017f76b172a"
+    LLVM_COMMIT = "13c761789753862a7cc31a2a26f23010afa668b9"
+    LLVM_SHA256 = "587f3eda6d00d751cbfc69fa5a15475ae4232e191ace04031b343e4e8ae16355"

     tf_http_archive(
         name = name,
1,898 changes: 1,675 additions & 223 deletions third_party/shardy/temporary.patch

Large diffs are not rendered by default.

4 changes: 2 additions & 2 deletions third_party/shardy/workspace.bzl
@@ -3,8 +3,8 @@
load("//third_party:repo.bzl", "tf_http_archive", "tf_mirror_urls")

def repo():
SHARDY_COMMIT = "293e28a2b7c745c82fc5de99dad207e29340e7e0"
SHARDY_SHA256 = "36e38a2a7d23ba3c5385a4dc8651d682269c6a8dcf71b9a4cca5522cc32b7216"
SHARDY_COMMIT = "a45b0ae83803b4edb0602f3f5b342571a41b8e91"
SHARDY_SHA256 = "29f97d1838f463a6985f255fc29c80aa0517780a6b08fe1d01e3083a7f573942"

tf_http_archive(
name = "shardy",
12 changes: 12 additions & 0 deletions third_party/stablehlo/temporary.patch
@@ -1,3 +1,15 @@
diff --ruN a/stablehlo/examples/c++/ExampleAdd.cpp b/stablehlo/examples/c++/ExampleAdd.cpp
--- stablehlo/examples/c++/ExampleAdd.cpp
+++ stablehlo/examples/c++/ExampleAdd.cpp
@@ -49,7 +49,7 @@
/** create function **/
// create function argument and result types.
auto tensorType =
- mlir::RankedTensorType::get({3, 4}, mlir::FloatType::getF32(&context));
+ mlir::RankedTensorType::get({3, 4}, mlir::Float32Type::get(&context));
auto func_type =
mlir::FunctionType::get(&context, {tensorType, tensorType}, {tensorType});

diff --ruN a/stablehlo/stablehlo/conversions/tosa/tests/nullary.mlir b/stablehlo/stablehlo/conversions/tosa/tests/nullary.mlir
--- stablehlo/stablehlo/conversions/tosa/tests/nullary.mlir
+++ stablehlo/stablehlo/conversions/tosa/tests/nullary.mlir
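Note: the ExampleAdd.cpp hunk above tracks MLIR's move away from the FloatType::getF32-style builders toward per-type builders such as Float32Type::get; the same substitution recurs throughout the Triton patch below. A minimal sketch of the updated call, assuming an already-constructed MLIRContext (illustrative only, not part of the patch):

#include "mlir/IR/BuiltinTypes.h"
#include "mlir/IR/MLIRContext.h"

// Builds the 3x4 f32 tensor type the same way the patched example now does.
mlir::RankedTensorType makeTensorType(mlir::MLIRContext &context) {
  // Previously: mlir::FloatType::getF32(&context)
  return mlir::RankedTensorType::get({3, 4}, mlir::Float32Type::get(&context));
}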
127 changes: 127 additions & 0 deletions third_party/triton/llvm_integration/cl717293402.patch
@@ -0,0 +1,127 @@

--- a/include/triton/Conversion/MLIRTypes.h 2024-07-03 07:14:55.000000000 -0700
+++ b/include/triton/Conversion/MLIRTypes.h 2025-01-19 13:19:21.000000000 -0800
@@ -21,10 +21,10 @@
}

// Float types
-inline Type f16Ty(MLIRContext *ctx) { return FloatType::getF16(ctx); }
-inline Type f32Ty(MLIRContext *ctx) { return FloatType::getF32(ctx); }
-inline Type f64Ty(MLIRContext *ctx) { return FloatType::getF64(ctx); }
-inline Type bf16Ty(MLIRContext *ctx) { return FloatType::getBF16(ctx); }
+inline Type f16Ty(MLIRContext *ctx) { return Float16Type::get(ctx); }
+inline Type f32Ty(MLIRContext *ctx) { return Float32Type::get(ctx); }
+inline Type f64Ty(MLIRContext *ctx) { return Float64Type::get(ctx); }
+inline Type bf16Ty(MLIRContext *ctx) { return BFloat16Type::get(ctx); }

inline bool isFloat(Type type) {
return type.isF32() || type.isF64() || type.isF16() || type.isF128() ||

--- a/lib/Dialect/TritonGPU/IR/Ops.cpp 2025-01-15 12:52:52.000000000 -0800
+++ b/lib/Dialect/TritonGPU/IR/Ops.cpp 2025-01-19 13:19:21.000000000 -0800
@@ -15,7 +15,7 @@
auto xTy = getSrc().getType();
auto scaleTy = getScale().getType();

- if (xTy.getElementType() != FloatType::getBF16(getContext()) &&
+ if (xTy.getElementType() != BFloat16Type::get(getContext()) &&
xTy.getElementType() != IntegerType::get(getContext(), 8)) {
return emitOpError("element type of the first operand must be bf16 or i8");
}
@@ -111,7 +111,7 @@
auto newShape = SmallVector<int64_t>(xShape);
if (!encoding) {
newShape.back() *= 2;
- retTy = RankedTensorType::get(xShape, FloatType::getBF16(ctx));
+ retTy = RankedTensorType::get(xShape, BFloat16Type::get(ctx));
} else {
auto oldEncoding = cast<DotOperandEncodingAttr>(encoding);
auto newVEncoding = DotOperandEncodingAttr::get(
@@ -123,7 +123,7 @@
const bool hasBatch = xShape.size() == 3;
const int kIdx = (opIdx == 0 ? 1 : 0) + hasBatch;
newShape[kIdx] *= 2;
- retTy = RankedTensorType::get(newShape, FloatType::getBF16(ctx),
+ retTy = RankedTensorType::get(newShape, BFloat16Type::get(ctx),
newVEncoding);
}
inferredReturnTypes.push_back(retTy);

--- a/third_party/nvidia/lib/NVGPUToLLVM/NVGPUToLLVMPass.cpp 2025-01-15 12:52:52.000000000 -0800
+++ b/third_party/nvidia/lib/NVGPUToLLVM/NVGPUToLLVMPass.cpp 2025-01-19 13:19:22.000000000 -0800
@@ -56,9 +56,9 @@
else if (constraint == 'l')
ty = IntegerType::get(rewriter.getContext(), 64);
else if (constraint == 'f')
- ty = FloatType::getF32(rewriter.getContext());
+ ty = Float32Type::get(rewriter.getContext());
else if (constraint == 'd')
- ty = FloatType::getF64(rewriter.getContext());
+ ty = Float64Type::get(rewriter.getContext());
else {
assert(false && "Unsupported constraint");
}

--- a/unittest/Dialect/TritonGPU/DialectTest.cpp 2025-01-15 12:52:52.000000000 -0800
+++ b/unittest/Dialect/TritonGPU/DialectTest.cpp 2025-01-19 13:19:23.000000000 -0800
@@ -492,10 +492,10 @@
llvm::to_vector(llvm::reverse(llvm::seq<unsigned>(rank))));

auto srcTy = RankedTensorType::get(
- srcShape, FloatType::getF32(&ctx),
+ srcShape, Float32Type::get(&ctx),
BlockedEncodingAttr::get(&ctx, sizePerThread, threadsPerWarp,
warpsPerCTA, order, ctaLayout));
- auto dstTy = RankedTensorType::get(dstShape, FloatType::getF32(&ctx));
+ auto dstTy = RankedTensorType::get(dstShape, Float32Type::get(&ctx));

bool couldReshape = false;
testReshape(srcTy, dstTy, /*expectedDstEnc=*/std::nullopt,
@@ -526,7 +526,7 @@
ctx.getOrLoadDialect<TritonGPUDialect>();
ctaLayout =
triton::gpu::CTALayoutAttr::get(&ctx, ctaPerCGA, ctaSplit, ctaOrder);
- f16Ty = FloatType::getF16(&ctx);
+ f16Ty = Float16Type::get(&ctx);
}

triton::gpu::AMDMfmaEncodingAttr createMFMA(int mDim, int nDim,
@@ -692,7 +692,7 @@
ASSERT_EQ(linearLayout, expandedLL);

// Test that methods of DistributedEncoding return the same values
- Type eltTy = FloatType::getF32(&ctx);
+ Type eltTy = Float32Type::get(&ctx);

ASSERT_EQ(getOrder(distributedEncoding), linearEncoding.getRepOrder());
ASSERT_EQ(cast<triton::gpu::TritonGPU_AttrTrait>(distributedEncoding)

--- a/unittest/Dialect/TritonGPU/DumpLayoutTest.cpp 2024-10-31 04:36:20.000000000 -0700
+++ b/unittest/Dialect/TritonGPU/DumpLayoutTest.cpp 2025-01-19 13:19:23.000000000 -0800
@@ -182,7 +182,7 @@
{1}, /* ord, row-major */
{1}); /* cOrd */

- auto elemTy = FloatType::getF16(sharedLayout.getContext());
+ auto elemTy = Float16Type::get(sharedLayout.getContext());
auto tensorType = RankedTensorType::get({32}, elemTy, sharedLayout);
std::string layout = getLayoutStr(tensorType, /*useHWPointOfView=*/false);
assertSameStr(refStr, layout);
@@ -237,7 +237,7 @@
{1, 0}, /* ord, row-major */
{1, 0}); /* cOrd */

- auto elemTy = FloatType::getF16(sharedLayout.getContext());
+ auto elemTy = Float16Type::get(sharedLayout.getContext());
auto tensorType = RankedTensorType::get({8, 32}, elemTy, sharedLayout);
std::string layout = getLayoutStr(tensorType, /*useHWPointOfView=*/false);
assertSameStr(refStr, layout);
@@ -510,7 +510,7 @@
{1, 0}, /* ord, row-major */
{1, 0}); /* cOrd */

- auto elemTyHW = FloatType::getF16(sharedLayoutHW.getContext());
+ auto elemTyHW = Float16Type::get(sharedLayoutHW.getContext());
auto tensorTypeHW = RankedTensorType::get({8, 32}, elemTyHW, sharedLayoutHW);

std::string layoutHW = getLayoutStr(tensorTypeHW, /*useHWPointOfView=*/true);
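For reference, the NVGPUToLLVMPass.cpp hunk above maps inline-asm constraint letters to builtin types using the new per-type builders. A hedged sketch of that mapping, covering only the constraints visible in the hunk (illustrative helper, not the actual pass code):

#include "mlir/IR/BuiltinTypes.h"
#include "mlir/IR/MLIRContext.h"

// Maps an inline-asm constraint letter to an MLIR type, mirroring the
// patched branch: 'l' -> i64, 'f' -> f32, 'd' -> f64.
mlir::Type constraintToType(char constraint, mlir::MLIRContext *ctx) {
  switch (constraint) {
  case 'l':
    return mlir::IntegerType::get(ctx, 64);
  case 'f':
    return mlir::Float32Type::get(ctx);  // was FloatType::getF32(ctx)
  case 'd':
    return mlir::Float64Type::get(ctx);  // was FloatType::getF64(ctx)
  default:
    return mlir::Type();  // unsupported constraint in this sketch
  }
}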
1 change: 1 addition & 0 deletions third_party/triton/llvm_integration/series.bzl
@@ -8,5 +8,6 @@ LLVM nor MLIR integrator, please do not add any patches to this list.
"""

llvm_patch_list = [
"//third_party/triton:llvm_integration/cl717293402.patch",
# Add new patches just above this line
]