From dc343810ee616ee58a97787840c91ca8c71471d5 Mon Sep 17 00:00:00 2001 From: Tim Harvey Date: Mon, 16 Oct 2023 21:03:05 +0000 Subject: [PATCH] Changed all instances of "sparse compiler" to "sparsifier" in comments. --- .../Linalg/Transforms/ElementwiseOpFusion.cpp | 2 +- .../Transforms/BufferizableOpInterfaceImpl.cpp | 2 +- .../SparseTensor/Transforms/SparseGPUCodegen.cpp | 4 ++-- .../Transforms/SparseTensorDescriptor.cpp | 2 +- .../Transforms/SparseTensorRewriting.cpp | 6 +++--- .../Transforms/SparseVectorization.cpp | 16 ++++++++-------- .../SparseTensor/Transforms/Sparsification.cpp | 10 +++++----- mlir/lib/Dialect/SparseTensor/Utils/Merger.cpp | 4 ++-- .../SparseTensor/one_shot_bufferize_invalid.mlir | 2 +- mlir/test/Dialect/SparseTensor/rejected.mlir | 4 ++-- .../CPU/sparse_quantized_matmul.mlir | 2 +- .../SparseTensor/CPU/sparse_reductions_min.mlir | 2 +- .../Dialect/SparseTensor/CPU/sparse_storage.mlir | 2 +- .../Dialect/SparseTensor/CPU/sparse_unary.mlir | 2 +- .../SparseTensor/python/tools/sparse_compiler.py | 2 +- 15 files changed, 31 insertions(+), 31 deletions(-) diff --git a/mlir/lib/Dialect/Linalg/Transforms/ElementwiseOpFusion.cpp b/mlir/lib/Dialect/Linalg/Transforms/ElementwiseOpFusion.cpp index 17346607fa9cd7..cebf9fdfe98bf6 100644 --- a/mlir/lib/Dialect/Linalg/Transforms/ElementwiseOpFusion.cpp +++ b/mlir/lib/Dialect/Linalg/Transforms/ElementwiseOpFusion.cpp @@ -1770,7 +1770,7 @@ struct RemoveOutsDependency : public OpRewritePattern { if (!operandType) continue; - // If outs is sparse, leave it to the sparse compiler. + // If outs is sparse, leave it to the sparsifier. 
if (sparse_tensor::getSparseTensorEncoding(operandVal.getType())) continue; diff --git a/mlir/lib/Dialect/SparseTensor/Transforms/BufferizableOpInterfaceImpl.cpp b/mlir/lib/Dialect/SparseTensor/Transforms/BufferizableOpInterfaceImpl.cpp index d54cd9ad8cdbe7..3f4ae1f67de150 100644 --- a/mlir/lib/Dialect/SparseTensor/Transforms/BufferizableOpInterfaceImpl.cpp +++ b/mlir/lib/Dialect/SparseTensor/Transforms/BufferizableOpInterfaceImpl.cpp @@ -32,7 +32,7 @@ struct SparseBufferizableOpInterfaceExternalModel LogicalResult bufferize(Operation *op, RewriterBase &rewriter, const BufferizationOptions &options) const { return op->emitError( - "sparse_tensor ops must be bufferized with the sparse compiler"); + "sparse_tensor ops must be bufferized with the sparsifier"); } }; diff --git a/mlir/lib/Dialect/SparseTensor/Transforms/SparseGPUCodegen.cpp b/mlir/lib/Dialect/SparseTensor/Transforms/SparseGPUCodegen.cpp index a6e963181816f7..9d20cb9138c07f 100644 --- a/mlir/lib/Dialect/SparseTensor/Transforms/SparseGPUCodegen.cpp +++ b/mlir/lib/Dialect/SparseTensor/Transforms/SparseGPUCodegen.cpp @@ -6,7 +6,7 @@ // //===----------------------------------------------------------------------===// // -// This is a prototype GPU codegenerator for the sparse compiler. +// This is a prototype GPU codegenerator for the sparsifier. // The objective is to eventually use the right combination of // direct code generation and libary calls into vendor-specific // highly optimized sparse libraries (e.g. cuSparse for CUDA). @@ -1196,7 +1196,7 @@ rewriteSDDMM(PatternRewriter &rewriter, linalg::GenericOp op, bool enableRT, //===----------------------------------------------------------------------===// /// Proof-of-concept rewriter. This rule generates a GPU implementation -/// for each outermost forall loop generated by the sparse compiler. +/// for each outermost forall loop generated by the sparsifier. 
/// TODO: right now works with parallelization-strategy=dense-outer-loop /// but give this its own flags in the future struct ForallRewriter : public OpRewritePattern { diff --git a/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorDescriptor.cpp b/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorDescriptor.cpp index 5c363b0c781d5b..c175f39e3bee65 100644 --- a/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorDescriptor.cpp +++ b/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorDescriptor.cpp @@ -65,7 +65,7 @@ SparseTensorTypeToBufferConverter::SparseTensorTypeToBufferConverter() { if (!getSparseTensorEncoding(tp)) // Not a sparse tensor. return std::nullopt; - // Sparse compiler knows how to cancel out these casts. + // The sparsifier knows how to cancel out these casts. return genTuple(builder, loc, tp, inputs); }); } diff --git a/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorRewriting.cpp b/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorRewriting.cpp index 277903dc55b743..34b690f09584f0 100644 --- a/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorRewriting.cpp +++ b/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorRewriting.cpp @@ -393,8 +393,8 @@ struct FuseTensorCast : public OpRewritePattern { }; /// Rewrites a sequence of operations for sparse tensor selections in to -/// semi-ring operations such that they can be compiled correctly by the sparse -/// compiler. E.g., transforming the following sequence +/// semi-ring operations such that they can be compiled correctly by the +/// sparsifier. 
E.g., transforming the following sequence /// /// %sel = arith.select %cond, %sp1, %sp2 /// @@ -1053,7 +1053,7 @@ struct ConvertRewriter : public OpRewritePattern { const RankedTensorType bufferTp = getBufferType(dstTp, !dstTp.isIdentity() && !fromSparseConst); // Only imposes foreach order on dense constant (which will be statically - // sorted by the sparse compiler), otherwise the rotated loop sequence + // sorted by the sparsifier), otherwise the rotated loop sequence // results to bad cache locality. const AffineMapAttr foreachOrder = (!dstTp.isIdentity() && fromSparseConst) diff --git a/mlir/lib/Dialect/SparseTensor/Transforms/SparseVectorization.cpp b/mlir/lib/Dialect/SparseTensor/Transforms/SparseVectorization.cpp index 93ee0647b7b5a6..01f32054b390fc 100644 --- a/mlir/lib/Dialect/SparseTensor/Transforms/SparseVectorization.cpp +++ b/mlir/lib/Dialect/SparseTensor/Transforms/SparseVectorization.cpp @@ -6,9 +6,9 @@ // //===----------------------------------------------------------------------===// // -// A pass that converts loops generated by the sparse compiler into a form that +// A pass that converts loops generated by the sparsifier into a form that // can exploit SIMD instructions of the target architecture. Note that this pass -// ensures the sparse compiler can generate efficient SIMD (including ArmSVE +// ensures the sparsifier can generate efficient SIMD (including ArmSVE // support) with proper separation of concerns as far as sparsification and // vectorization is concerned. However, this pass is not the final abstraction // level we want, and not the general vectorizer we want either. It forms a good @@ -105,7 +105,7 @@ static Value genVectorInvariantValue(PatternRewriter &rewriter, VL vl, /// Generates a vectorized load lhs = a[ind[lo:hi]] or lhs = a[lo:hi], /// where 'lo' denotes the current index and 'hi = lo + vl - 1'. 
Note -/// that the sparse compiler can only generate indirect loads in +/// that the sparsifier can only generate indirect loads in /// the last index, i.e. back(). static Value genVectorLoad(PatternRewriter &rewriter, Location loc, VL vl, Value mem, ArrayRef idxs, Value vmask) { @@ -124,7 +124,7 @@ static Value genVectorLoad(PatternRewriter &rewriter, Location loc, VL vl, /// Generates a vectorized store a[ind[lo:hi]] = rhs or a[lo:hi] = rhs /// where 'lo' denotes the current index and 'hi = lo + vl - 1'. Note -/// that the sparse compiler can only generate indirect stores in +/// that the sparsifier can only generate indirect stores in /// the last index, i.e. back(). static void genVectorStore(PatternRewriter &rewriter, Location loc, Value mem, ArrayRef idxs, Value vmask, Value rhs) { @@ -219,8 +219,8 @@ static Value genVectorReducInit(PatternRewriter &rewriter, Location loc, /// The first call (!codegen) does the analysis. Then, on success, the second /// call (codegen) yields the proper vector form in the output parameter /// vector 'idxs'. This mechanism ensures that analysis and rewriting code -/// stay in sync. Note that the analyis part is simple because the sparse -/// compiler only generates relatively simple subscript expressions. +/// stay in sync. Note that the analyis part is simple because the +/// sparsifier only generates relatively simple subscript expressions. /// /// See https://llvm.org/docs/GetElementPtr.html for some background on /// the complications described below. @@ -359,7 +359,7 @@ static bool vectorizeSubscripts(PatternRewriter &rewriter, scf::ForOp forOp, /// The first call (!codegen) does the analysis. Then, on success, the second /// call (codegen) yields the proper vector form in the output parameter 'vexp'. /// This mechanism ensures that analysis and rewriting code stay in sync. 
Note -/// that the analyis part is simple because the sparse compiler only generates +/// that the analyis part is simple because the sparsifier only generates /// relatively simple expressions inside the for-loops. static bool vectorizeExpr(PatternRewriter &rewriter, scf::ForOp forOp, VL vl, Value exp, bool codegen, Value vmask, Value &vexp) { @@ -616,7 +616,7 @@ struct ForOpRewriter : public OpRewritePattern { LogicalResult matchAndRewrite(scf::ForOp op, PatternRewriter &rewriter) const override { // Check for single block, unit-stride for-loop that is generated by - // sparse compiler, which means no data dependence analysis is required, + // sparsifier, which means no data dependence analysis is required, // and its loop-body is very restricted in form. if (!op.getRegion().hasOneBlock() || !isConstantIntValue(op.getStep(), 1) || !op->hasAttr(LoopEmitter::getLoopEmitterLoopAttrName())) diff --git a/mlir/lib/Dialect/SparseTensor/Transforms/Sparsification.cpp b/mlir/lib/Dialect/SparseTensor/Transforms/Sparsification.cpp index fee32a5717f62a..08848595f610a7 100644 --- a/mlir/lib/Dialect/SparseTensor/Transforms/Sparsification.cpp +++ b/mlir/lib/Dialect/SparseTensor/Transforms/Sparsification.cpp @@ -124,7 +124,7 @@ struct AffineDimCollector : public AffineExprVisitor { } // namespace //===----------------------------------------------------------------------===// -// Sparse compiler analysis methods. +// Sparsifier analysis methods. //===----------------------------------------------------------------------===// // TODO: the "idx"-vs-"ldx" naming convention is not self-explanatory, @@ -840,7 +840,7 @@ static bool computeIterationGraph(CodegenEnv &env, SortMask mask, } //===----------------------------------------------------------------------===// -// Sparse compiler synthesis methods (statements and expressions). +// Sparsifier synthesis methods (statements and expressions). 
//===----------------------------------------------------------------------===// /// Local bufferization of all dense and sparse data structures. @@ -1139,7 +1139,7 @@ inline static Value genInvariantValue(CodegenEnv &env, ExprId exp) { return env.exp(exp).val; } -/// Semi-ring branches are simply inlined by the sparse compiler. Prior +/// Semi-ring branches are simply inlined by the sparsifier. Prior /// analysis has verified that all computations are "local" to the inlined /// branch or otherwise invariantly defined outside the loop nest, with the /// exception of index computations, which need to be relinked to actual @@ -1562,7 +1562,7 @@ static void endIf(CodegenEnv &env, OpBuilder &builder, scf::IfOp ifOp, } //===----------------------------------------------------------------------===// -// Sparse compiler synthesis methods (loop sequence). +// Sparsifier synthesis methods (loop sequence). //===----------------------------------------------------------------------===// /// Starts a loop sequence at given level. Returns true if @@ -1926,7 +1926,7 @@ static void genResult(CodegenEnv &env, RewriterBase &rewriter) { } //===----------------------------------------------------------------------===// -// Sparse compiler rewriting methods. +// Sparsifier rewriting methods. //===----------------------------------------------------------------------===// namespace { diff --git a/mlir/lib/Dialect/SparseTensor/Utils/Merger.cpp b/mlir/lib/Dialect/SparseTensor/Utils/Merger.cpp index 4143efbd0ab28e..50d1efb6aafd9e 100644 --- a/mlir/lib/Dialect/SparseTensor/Utils/Merger.cpp +++ b/mlir/lib/Dialect/SparseTensor/Utils/Merger.cpp @@ -1219,7 +1219,7 @@ Type Merger::inferType(ExprId e, Value src) const { return dtp; } -/// Ensures that sparse compiler can generate code for expression. +/// Ensures that sparsifier can generate code for expression. static bool isAdmissibleBranchExp(Operation *op, Block *block, Value v) { // Arguments are always admissible. 
if (isa(v)) return true; @@ -1239,7 +1239,7 @@ static bool isAdmissibleBranchExp(Operation *op, Block *block, Value v) { return true; } -/// Ensures that sparse compiler can generate code for branch. +/// Ensures that sparsifier can generate code for branch. static bool isAdmissibleBranch(Operation *op, Region &region) { if (region.empty()) return true; diff --git a/mlir/test/Dialect/SparseTensor/one_shot_bufferize_invalid.mlir b/mlir/test/Dialect/SparseTensor/one_shot_bufferize_invalid.mlir index 1540d1876d7f06..342d59ab45cdef 100644 --- a/mlir/test/Dialect/SparseTensor/one_shot_bufferize_invalid.mlir +++ b/mlir/test/Dialect/SparseTensor/one_shot_bufferize_invalid.mlir @@ -5,7 +5,7 @@ }> func.func @sparse_tensor_op(%arg0: tensor<64xf32, #SparseVector>) -> tensor<64xf32, #SparseVector> { - // expected-error @below{{sparse_tensor ops must be bufferized with the sparse compiler}} + // expected-error @below{{sparse_tensor ops must be bufferized with the sparsifier}} // expected-error @below{{failed to bufferize op}} %0 = sparse_tensor.convert %arg0 : tensor<64xf32, #SparseVector> to tensor<64xf32, #SparseVector> return %0 : tensor<64xf32, #SparseVector> diff --git a/mlir/test/Dialect/SparseTensor/rejected.mlir b/mlir/test/Dialect/SparseTensor/rejected.mlir index 285766fec04e70..c68c576766b856 100644 --- a/mlir/test/Dialect/SparseTensor/rejected.mlir +++ b/mlir/test/Dialect/SparseTensor/rejected.mlir @@ -1,7 +1,7 @@ // RUN: mlir-opt %s -sparsification | FileCheck %s -// The file contains examples that will be rejected by sparse compiler +// The file contains examples that will be rejected by sparsifier // (we expect the linalg.generic unchanged). #SparseVector = #sparse_tensor.encoding<{map = (d0) -> (d0 : compressed)}> @@ -29,7 +29,7 @@ func.func @sparse_reduction_subi(%argx: tensor, ins(%arga: tensor) outs(%argx: tensor) { ^bb(%a: i32, %x: i32): - // NOTE: `subi %a, %x` is the reason why the program is rejected by the sparse compiler. 
+ // NOTE: `subi %a, %x` is the reason why the program is rejected by the sparsifier. // It is because we do not allow `-outTensor` in reduction loops as it creates cyclic // dependences. %t = arith.subi %a, %x: i32 diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_quantized_matmul.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_quantized_matmul.mlir index 36e0ba888bd81a..8efd5230b60418 100644 --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_quantized_matmul.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_quantized_matmul.mlir @@ -33,7 +33,7 @@ #DCSR = #sparse_tensor.encoding<{ map = (d0, d1) -> (d0 : compressed, d1 : compressed) }> // An example of a quantized sparse matmul. With the zero offset for the -// sparse input, the sparse compiler generates very efficient code for the +// sparse input, the sparsifier generates very efficient code for the // x(i,j) += (ext(a(i,k)) - 2) * ext(b(k,j)) // operation. module { diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_reductions_min.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_reductions_min.mlir index 3fe597100f37ae..06bff6d29972f2 100644 --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_reductions_min.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_reductions_min.mlir @@ -63,7 +63,7 @@ module { // Regular MIN reduction: stored i32 elements AND implicit zeros. // Note that dealing with the implicit zeros is taken care of - // by the sparse compiler to preserve semantics of the "original". + // by the sparsifier to preserve semantics of the "original". 
func.func @min2(%arga: tensor<32xi32, #SV>, %argx: tensor) -> tensor { %c = tensor.extract %argx[] : tensor %0 = linalg.generic #trait_reduction diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_storage.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_storage.mlir index aeb6c8cabb7d08..807e9be9836cae 100644 --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_storage.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_storage.mlir @@ -68,7 +68,7 @@ module { // // Main driver that initializes a sparse tensor and inspects the sparse - // storage schemes in detail. Note that users of the MLIR sparse compiler + // storage schemes in detail. Note that users of the MLIR sparsifier // are typically not concerned with such details, but the test ensures // everything is working "under the hood". // diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_unary.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_unary.mlir index e5f5d22b698325..8f956fb0532e7c 100644 --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_unary.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_unary.mlir @@ -74,7 +74,7 @@ module { } // Invert the structure of a sparse vector, where missing values are - // filled with 1. For a dense output, the sparse compiler initializes + // filled with 1. For a dense output, the sparsifier initializes // the buffer to all zero at all other places. 
func.func @vector_complement_dense(%arga: tensor) -> tensor { %c = arith.constant 0 : index diff --git a/mlir/test/Integration/Dialect/SparseTensor/python/tools/sparse_compiler.py b/mlir/test/Integration/Dialect/SparseTensor/python/tools/sparse_compiler.py index d549a9a0954c6c..72d325b2b6f473 100644 --- a/mlir/test/Integration/Dialect/SparseTensor/python/tools/sparse_compiler.py +++ b/mlir/test/Integration/Dialect/SparseTensor/python/tools/sparse_compiler.py @@ -2,7 +2,7 @@ # See https://llvm.org/LICENSE.txt for license information. # SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -# This file contains the sparse compiler class. +# This file contains the SparseCompiler class. from mlir import execution_engine from mlir import ir