Commit

Changed all instances of "sparse compiler" to "sparsifier" in comments.
t-harvey committed Oct 16, 2023
1 parent 2b2d79f commit dc34381
Showing 15 changed files with 31 additions and 31 deletions.
2 changes: 1 addition & 1 deletion mlir/lib/Dialect/Linalg/Transforms/ElementwiseOpFusion.cpp
@@ -1770,7 +1770,7 @@ struct RemoveOutsDependency : public OpRewritePattern<GenericOp> {
if (!operandType)
continue;

// If outs is sparse, leave it to the sparse compiler.
// If outs is sparse, leave it to the sparsifier.
if (sparse_tensor::getSparseTensorEncoding(operandVal.getType()))
continue;

@@ -32,7 +32,7 @@ struct SparseBufferizableOpInterfaceExternalModel
LogicalResult bufferize(Operation *op, RewriterBase &rewriter,
const BufferizationOptions &options) const {
return op->emitError(
"sparse_tensor ops must be bufferized with the sparse compiler");
"sparse_tensor ops must be bufferized with the sparsifier");
}
};

4 changes: 2 additions & 2 deletions mlir/lib/Dialect/SparseTensor/Transforms/SparseGPUCodegen.cpp
@@ -6,7 +6,7 @@
//
//===----------------------------------------------------------------------===//
//
// This is a prototype GPU codegenerator for the sparse compiler.
// This is a prototype GPU code generator for the sparsifier.
// The objective is to eventually use the right combination of
// direct code generation and library calls into vendor-specific
// highly optimized sparse libraries (e.g. cuSparse for CUDA).
@@ -1196,7 +1196,7 @@ rewriteSDDMM(PatternRewriter &rewriter, linalg::GenericOp op, bool enableRT,
//===----------------------------------------------------------------------===//

/// Proof-of-concept rewriter. This rule generates a GPU implementation
/// for each outermost forall loop generated by the sparse compiler.
/// for each outermost forall loop generated by the sparsifier.
/// TODO: right now works with parallelization-strategy=dense-outer-loop
/// but give this its own flags in the future
struct ForallRewriter : public OpRewritePattern<scf::ParallelOp> {
@@ -65,7 +65,7 @@ SparseTensorTypeToBufferConverter::SparseTensorTypeToBufferConverter() {
if (!getSparseTensorEncoding(tp))
// Not a sparse tensor.
return std::nullopt;
// Sparse compiler knows how to cancel out these casts.
// The sparsifier knows how to cancel out these casts.
return genTuple(builder, loc, tp, inputs);
});
}
@@ -393,8 +393,8 @@ struct FuseTensorCast : public OpRewritePattern<tensor::CastOp> {
};

/// Rewrites a sequence of operations for sparse tensor selections in to
/// semi-ring operations such that they can be compiled correctly by the sparse
/// compiler. E.g., transforming the following sequence
/// semi-ring operations such that they can be compiled correctly by the
/// sparsifier. E.g., transforming the following sequence
///
/// %sel = arith.select %cond, %sp1, %sp2
///
@@ -1053,7 +1053,7 @@ struct ConvertRewriter : public OpRewritePattern<ConvertOp> {
const RankedTensorType bufferTp =
getBufferType(dstTp, !dstTp.isIdentity() && !fromSparseConst);
// Only imposes foreach order on dense constant (which will be statically
// sorted by the sparse compiler), otherwise the rotated loop sequence
// sorted by the sparsifier), otherwise the rotated loop sequence
// results in bad cache locality.
const AffineMapAttr foreachOrder =
(!dstTp.isIdentity() && fromSparseConst)
16 changes: 8 additions & 8 deletions mlir/lib/Dialect/SparseTensor/Transforms/SparseVectorization.cpp
@@ -6,9 +6,9 @@
//
//===----------------------------------------------------------------------===//
//
// A pass that converts loops generated by the sparse compiler into a form that
// A pass that converts loops generated by the sparsifier into a form that
// can exploit SIMD instructions of the target architecture. Note that this pass
// ensures the sparse compiler can generate efficient SIMD (including ArmSVE
// ensures the sparsifier can generate efficient SIMD (including ArmSVE
// support) with proper separation of concerns as far as sparsification and
// vectorization are concerned. However, this pass is not the final abstraction
// level we want, and not the general vectorizer we want either. It forms a good
@@ -105,7 +105,7 @@ static Value genVectorInvariantValue(PatternRewriter &rewriter, VL vl,

/// Generates a vectorized load lhs = a[ind[lo:hi]] or lhs = a[lo:hi],
/// where 'lo' denotes the current index and 'hi = lo + vl - 1'. Note
/// that the sparse compiler can only generate indirect loads in
/// that the sparsifier can only generate indirect loads in
/// the last index, i.e. back().
static Value genVectorLoad(PatternRewriter &rewriter, Location loc, VL vl,
Value mem, ArrayRef<Value> idxs, Value vmask) {
@@ -124,7 +124,7 @@ static Value genVectorLoad(PatternRewriter &rewriter, Location loc, VL vl,

/// Generates a vectorized store a[ind[lo:hi]] = rhs or a[lo:hi] = rhs
/// where 'lo' denotes the current index and 'hi = lo + vl - 1'. Note
/// that the sparse compiler can only generate indirect stores in
/// that the sparsifier can only generate indirect stores in
/// the last index, i.e. back().
static void genVectorStore(PatternRewriter &rewriter, Location loc, Value mem,
ArrayRef<Value> idxs, Value vmask, Value rhs) {
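For illustration, a small Python model of the masked load/store semantics the two comments above describe; the helper names are invented for this sketch, and it is not the MLIR implementation.

# Masked vector load/store over a window of length vl starting at lo.
# When 'ind' is given, the access is indirect (gather/scatter through ind),
# which the sparsifier only emits for the last, innermost index.
def masked_load(a, lo, vl, vmask, ind=None):
    out = [0] * vl
    for lane in range(vl):
        if vmask[lane]:
            idx = ind[lo + lane] if ind is not None else lo + lane
            out[lane] = a[idx]
    return out

def masked_store(a, lo, vl, vmask, rhs, ind=None):
    for lane in range(vl):
        if vmask[lane]:
            idx = ind[lo + lane] if ind is not None else lo + lane
            a[idx] = rhs[lane]

a = [10, 20, 30, 40, 50, 60]
print(masked_load(a, lo=1, vl=4, vmask=[True, True, True, False]))  # [20, 30, 40, 0]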
@@ -219,8 +219,8 @@ static Value genVectorReducInit(PatternRewriter &rewriter, Location loc,
/// The first call (!codegen) does the analysis. Then, on success, the second
/// call (codegen) yields the proper vector form in the output parameter
/// vector 'idxs'. This mechanism ensures that analysis and rewriting code
/// stay in sync. Note that the analyis part is simple because the sparse
/// compiler only generates relatively simple subscript expressions.
/// stay in sync. Note that the analysis part is simple because the
/// sparsifier only generates relatively simple subscript expressions.
///
/// See https://llvm.org/docs/GetElementPtr.html for some background on
/// the complications described below.
@@ -359,7 +359,7 @@ static bool vectorizeSubscripts(PatternRewriter &rewriter, scf::ForOp forOp,
/// The first call (!codegen) does the analysis. Then, on success, the second
/// call (codegen) yields the proper vector form in the output parameter 'vexp'.
/// This mechanism ensures that analysis and rewriting code stay in sync. Note
/// that the analyis part is simple because the sparse compiler only generates
/// that the analysis part is simple because the sparsifier only generates
/// relatively simple expressions inside the for-loops.
static bool vectorizeExpr(PatternRewriter &rewriter, scf::ForOp forOp, VL vl,
Value exp, bool codegen, Value vmask, Value &vexp) {
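The "!codegen analyzes, codegen rewrites" mechanism described in the comment above can be sketched in Python as follows; the names are hypothetical, and only the control flow is meant to mirror the comment.

# Two-phase idiom: the same routine is first called with codegen=False to
# analyze, and only on success called again with codegen=True to rewrite,
# which keeps the analysis and rewriting logic in one place.
def vectorize_expr(expr, codegen, out):
    if not isinstance(expr, int):      # "analysis": reject unsupported shapes
        return False
    if codegen:                        # "rewrite": emit the vector form
        out.append(("vectorized", expr))
    return True

def rewrite(expr):
    out = []
    if not vectorize_expr(expr, codegen=False, out=out):
        return None                    # analysis failed, leave the IR untouched
    vectorize_expr(expr, codegen=True, out=out)
    return out

print(rewrite(42))      # [('vectorized', 42)]
print(rewrite("x+y"))   # None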
@@ -616,7 +616,7 @@ struct ForOpRewriter : public OpRewritePattern<scf::ForOp> {
LogicalResult matchAndRewrite(scf::ForOp op,
PatternRewriter &rewriter) const override {
// Check for single block, unit-stride for-loop that is generated by
// sparse compiler, which means no data dependence analysis is required,
// the sparsifier, which means no data dependence analysis is required,
// and its loop-body is very restricted in form.
if (!op.getRegion().hasOneBlock() || !isConstantIntValue(op.getStep(), 1) ||
!op->hasAttr(LoopEmitter::getLoopEmitterLoopAttrName()))
10 changes: 5 additions & 5 deletions mlir/lib/Dialect/SparseTensor/Transforms/Sparsification.cpp
@@ -124,7 +124,7 @@ struct AffineDimCollector : public AffineExprVisitor<AffineDimCollector> {
} // namespace

//===----------------------------------------------------------------------===//
// Sparse compiler analysis methods.
// Sparsifier analysis methods.
//===----------------------------------------------------------------------===//

// TODO: the "idx"-vs-"ldx" naming convention is not self-explanatory,
@@ -840,7 +840,7 @@ static bool computeIterationGraph(CodegenEnv &env, SortMask mask,
}

//===----------------------------------------------------------------------===//
// Sparse compiler synthesis methods (statements and expressions).
// Sparsifier synthesis methods (statements and expressions).
//===----------------------------------------------------------------------===//

/// Local bufferization of all dense and sparse data structures.
@@ -1139,7 +1139,7 @@ inline static Value genInvariantValue(CodegenEnv &env, ExprId exp) {
return env.exp(exp).val;
}

/// Semi-ring branches are simply inlined by the sparse compiler. Prior
/// Semi-ring branches are simply inlined by the sparsifier. Prior
/// analysis has verified that all computations are "local" to the inlined
/// branch or otherwise invariantly defined outside the loop nest, with the
/// exception of index computations, which need to be relinked to actual
@@ -1562,7 +1562,7 @@ static void endIf(CodegenEnv &env, OpBuilder &builder, scf::IfOp ifOp,
}

//===----------------------------------------------------------------------===//
// Sparse compiler synthesis methods (loop sequence).
// Sparsifier synthesis methods (loop sequence).
//===----------------------------------------------------------------------===//

/// Starts a loop sequence at given level. Returns true if
@@ -1926,7 +1926,7 @@ static void genResult(CodegenEnv &env, RewriterBase &rewriter) {
}

//===----------------------------------------------------------------------===//
// Sparse compiler rewriting methods.
// Sparsifier rewriting methods.
//===----------------------------------------------------------------------===//

namespace {
4 changes: 2 additions & 2 deletions mlir/lib/Dialect/SparseTensor/Utils/Merger.cpp
@@ -1219,7 +1219,7 @@ Type Merger::inferType(ExprId e, Value src) const {
return dtp;
}

/// Ensures that sparse compiler can generate code for expression.
/// Ensures that the sparsifier can generate code for the expression.
static bool isAdmissibleBranchExp(Operation *op, Block *block, Value v) {
// Arguments are always admissible.
if (isa<BlockArgument>(v))
@@ -1239,7 +1239,7 @@ static bool isAdmissibleBranchExp(Operation *op, Block *block, Value v) {
return true;
}

/// Ensures that sparse compiler can generate code for branch.
/// Ensures that the sparsifier can generate code for the branch.
static bool isAdmissibleBranch(Operation *op, Region &region) {
if (region.empty())
return true;
@@ -5,7 +5,7 @@
}>

func.func @sparse_tensor_op(%arg0: tensor<64xf32, #SparseVector>) -> tensor<64xf32, #SparseVector> {
// expected-error @below{{sparse_tensor ops must be bufferized with the sparse compiler}}
// expected-error @below{{sparse_tensor ops must be bufferized with the sparsifier}}
// expected-error @below{{failed to bufferize op}}
%0 = sparse_tensor.convert %arg0 : tensor<64xf32, #SparseVector> to tensor<64xf32, #SparseVector>
return %0 : tensor<64xf32, #SparseVector>
4 changes: 2 additions & 2 deletions mlir/test/Dialect/SparseTensor/rejected.mlir
@@ -1,7 +1,7 @@
// RUN: mlir-opt %s -sparsification | FileCheck %s


// The file contains examples that will be rejected by sparse compiler
// The file contains examples that will be rejected by the sparsifier
// (we expect the linalg.generic unchanged).
#SparseVector = #sparse_tensor.encoding<{map = (d0) -> (d0 : compressed)}>

@@ -29,7 +29,7 @@ func.func @sparse_reduction_subi(%argx: tensor<i32>,
ins(%arga: tensor<?xi32, #SparseVector>)
outs(%argx: tensor<i32>) {
^bb(%a: i32, %x: i32):
// NOTE: `subi %a, %x` is the reason why the program is rejected by the sparse compiler.
// NOTE: `subi %a, %x` is the reason why the program is rejected by the sparsifier.
// It is because we do not allow `-outTensor` in reduction loops as it creates cyclic
// dependences.
%t = arith.subi %a, %x: i32
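One way to see why the negated accumulator is rejected is the following minimal Python illustration (not what the pass does): a reduction of the form x = x - a is insensitive to the visiting order, while x = a - x is not, so it cannot be treated as an ordinary reduction over the stored entries.

# Order sensitivity of the rejected reduction shape, illustrative only.
def reduce_accepted(values, x0):
    x = x0
    for a in values:
        x = x - a          # accumulator enters positively: order does not matter
    return x

def reduce_rejected(values, x0):
    x = x0
    for a in values:
        x = a - x          # accumulator enters negated: order matters
    return x

print(reduce_accepted([5, 2], 10), reduce_accepted([2, 5], 10))  # 3 3
print(reduce_rejected([5, 2], 10), reduce_rejected([2, 5], 10))  # 7 13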
@@ -33,7 +33,7 @@
#DCSR = #sparse_tensor.encoding<{ map = (d0, d1) -> (d0 : compressed, d1 : compressed) }>

// An example of a quantized sparse matmul. With the zero offset for the
// sparse input, the sparse compiler generates very efficient code for the
// sparse input, the sparsifier generates very efficient code for the
// x(i,j) += (ext(a(i,k)) - 2) * ext(b(k,j))
// operation.
module {
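For reference, the computation named in the comment above, written as a plain Python loop nest; this is only a dense model of the semantics, not the sparse code the test exercises, and the input values are made up.

# Dense reference for: x(i,j) += (ext(a(i,k)) - 2) * ext(b(k,j))
# 'ext' is just integer widening; the constant 2 is the zero offset applied
# to the sparse operand a.
def quantized_matmul(a, b, x, zero_offset=2):
    for i in range(len(a)):
        for j in range(len(b[0])):
            for k in range(len(b)):
                x[i][j] += (int(a[i][k]) - zero_offset) * int(b[k][j])
    return x

a = [[2, 0], [3, 4]]
b = [[1, 2], [3, 4]]
x = [[0, 0], [0, 0]]
print(quantized_matmul(a, b, x))   # [[-6, -8], [7, 10]]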
@@ -63,7 +63,7 @@ module {

// Regular MIN reduction: stored i32 elements AND implicit zeros.
// Note that dealing with the implicit zeros is taken care of
// by the sparse compiler to preserve semantics of the "original".
// by the sparsifier to preserve semantics of the "original".
func.func @min2(%arga: tensor<32xi32, #SV>, %argx: tensor<i32>) -> tensor<i32> {
%c = tensor.extract %argx[] : tensor<i32>
%0 = linalg.generic #trait_reduction
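A small Python model of the semantics described in the comment above: the MIN is taken over the stored i32 elements and, if any position is unstored, over an implicit zero as well. The stored entries below are made up for illustration.

def sparse_min(stored, size, init):
    # 'stored' maps position -> value for the explicitly stored elements.
    result = init
    for v in stored.values():
        result = min(result, v)
    if len(stored) < size:          # at least one implicit zero participates
        result = min(result, 0)
    return result

print(sparse_min({3: 7, 10: 4, 21: 9}, size=32, init=2**31 - 1))   # 0: the implicit zeros win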
@@ -68,7 +68,7 @@
module {
//
// Main driver that initializes a sparse tensor and inspects the sparse
// storage schemes in detail. Note that users of the MLIR sparse compiler
// storage schemes in detail. Note that users of the MLIR sparsifier
// are typically not concerned with such details, but the test ensures
// everything is working "under the hood".
//
@@ -74,7 +74,7 @@ module {
}

// Invert the structure of a sparse vector, where missing values are
// filled with 1. For a dense output, the sparse compiler initializes
// filled with 1. For a dense output, the sparsifier initializes
// the buffer to all zero at all other places.
func.func @vector_complement_dense(%arga: tensor<?xf64, #SparseVector>) -> tensor<?xi32> {
%c = arith.constant 0 : index
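The semantics the comment above describes, as a short illustrative Python sketch (the stored positions are made up; this is not the generated code):

def vector_complement_dense(stored_positions, size):
    out = [0] * size                 # dense output is zero-initialized
    for i in range(size):
        if i not in stored_positions:
            out[i] = 1               # missing values are filled with 1
    return out

print(vector_complement_dense({1, 4}, size=6))   # [1, 0, 1, 1, 0, 1]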
@@ -2,7 +2,7 @@
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception

# This file contains the sparse compiler class.
# This file contains the SparseCompiler class.

from mlir import execution_engine
from mlir import ir
