From 6bae28e89915bc2b2e1423eed65ea8b854ad2051 Mon Sep 17 00:00:00 2001
From: Anders Leino
Date: Tue, 14 Jan 2025 14:44:27 +0200
Subject: [PATCH] Add validation for atomic operations

Diagnose an error if the destination of an atomic operation is not
appropriate, where appropriate means it is either:
- 'groupshared'
- from a device buffer

This closes #5989.
---
 source/slang/slang-diagnostic-defs.h |  6 ++
 source/slang/slang-emit.cpp          |  2 +
 source/slang/slang-ir-insts.h        |  7 +++
 source/slang/slang-ir-validate.cpp   | 92 ++++++++++++++++++++++++++++
 source/slang/slang-ir-validate.h     |  2 +
 5 files changed, 109 insertions(+)

diff --git a/source/slang/slang-diagnostic-defs.h b/source/slang/slang-diagnostic-defs.h
index 7eddc16a93..e82164b717 100644
--- a/source/slang/slang-diagnostic-defs.h
+++ b/source/slang/slang-diagnostic-defs.h
@@ -2251,6 +2251,12 @@ DIAGNOSTIC(
     multiSampledTextureDoesNotAllowWrites,
     "cannot write to a multisampled texture with target '$0'.")
 
+DIAGNOSTIC(
+    41403,
+    Error,
+    invalidAtomicDestinationPointer,
+    "cannot perform atomic operation because the destination is neither groupshared nor from a device buffer.")
+
 //
 // 5xxxx - Target code generation.
 //
diff --git a/source/slang/slang-emit.cpp b/source/slang/slang-emit.cpp
index 4fee5c4407..86a976495a 100644
--- a/source/slang/slang-emit.cpp
+++ b/source/slang/slang-emit.cpp
@@ -1322,6 +1322,8 @@ Result linkAndOptimizeIR(
             byteAddressBufferOptions);
     }
 
+    validateAtomicOperations(sink, irModule->getModuleInst());
+
     // For CUDA targets only, we will need to turn operations
     // that implicitly reference the "active mask" into ones
     // that use (and pass around) an explicit mask instead.
diff --git a/source/slang/slang-ir-insts.h b/source/slang/slang-ir-insts.h
index f46586aa2b..7ba3ecde0d 100644
--- a/source/slang/slang-ir-insts.h
+++ b/source/slang/slang-ir-insts.h
@@ -2491,6 +2491,13 @@ struct IRGetElementPtr : IRInst
     IRInst* getIndex() { return getOperand(1); }
 };
 
+struct IRGetOffsetPtr : IRInst
+{
+    IR_LEAF_ISA(GetOffsetPtr);
+    IRInst* getBase() { return getOperand(0); }
+    IRInst* getOffset() { return getOperand(1); }
+};
+
 struct IRRWStructuredBufferGetElementPtr : IRInst
 {
     IR_LEAF_ISA(RWStructuredBufferGetElementPtr);
diff --git a/source/slang/slang-ir-validate.cpp b/source/slang/slang-ir-validate.cpp
index 4652b011fe..32c740fe4c 100644
--- a/source/slang/slang-ir-validate.cpp
+++ b/source/slang/slang-ir-validate.cpp
@@ -410,4 +410,96 @@ void validateIRModuleIfEnabled(CodeGenContext* codeGenContext, IRModule* module)
         validateIRModule(module, sink);
 }
 
+// Returns whether 'dst' is a valid destination for atomic operations, meaning
+// it points to either 'groupshared' or 'device buffer' memory.
+static bool isValidAtomicDest(IRInst* dst)
+{
+    bool isGroupShared = as<IRGroupSharedRate>(dst->getRate());
+    if (isGroupShared)
+        return true;
+
+    if (as<IRRWStructuredBufferGetElementPtr>(dst))
+        return true;
+    if (as<IRImageSubscript>(dst))
+        return true;
+
+    if (auto ptrType = as<IRPtrTypeBase>(dst->getDataType()))
+    {
+        switch (ptrType->getAddressSpace())
+        {
+        case AddressSpace::Global:
+        case AddressSpace::GroupShared:
+        case AddressSpace::StorageBuffer:
+        case AddressSpace::UserPointer:
+            return true;
+        default:
+            break;
+        }
+    }
+
+    if (as<IRGlobalParam>(dst))
+    {
+        switch (dst->getDataType()->getOp())
+        {
+        case kIROp_GLSLShaderStorageBufferType:
+        case kIROp_TextureType:
+            return true;
+        default:
+            return false;
+        }
+    }
+
+    if (auto param = as<IRParam>(dst))
+        if (auto outType = as<IROutTypeBase>(param->getDataType()))
+            if (outType->getAddressSpace() == AddressSpace::GroupShared)
+                return true;
+    if (auto getElementPtr = as<IRGetElementPtr>(dst))
+        return isValidAtomicDest(getElementPtr->getBase());
+    if (auto getOffsetPtr = as<IRGetOffsetPtr>(dst))
+        return isValidAtomicDest(getOffsetPtr->getBase());
+    if (auto fieldAddress = as<IRFieldAddress>(dst))
+        return isValidAtomicDest(fieldAddress->getBase());
+
+    return false;
+}
+
+void validateAtomicOperations(DiagnosticSink* sink, IRInst* inst)
+{
+    // There may be unused functions containing violations after address space specialization.
+    if (auto func = as<IRFunc>(inst))
+        if (!(func->hasUses() || func->findDecoration<IREntryPointDecoration>()))
+            return;
+
+    switch (inst->getOp())
+    {
+    case kIROp_AtomicLoad:
+    case kIROp_AtomicStore:
+    case kIROp_AtomicExchange:
+    case kIROp_AtomicCompareExchange:
+    case kIROp_AtomicAdd:
+    case kIROp_AtomicSub:
+    case kIROp_AtomicAnd:
+    case kIROp_AtomicOr:
+    case kIROp_AtomicXor:
+    case kIROp_AtomicMin:
+    case kIROp_AtomicMax:
+    case kIROp_AtomicInc:
+    case kIROp_AtomicDec:
+        {
+            IRInst* destinationPtr = inst->getOperand(0);
+            if (!isValidAtomicDest(destinationPtr))
+                sink->diagnose(inst->sourceLoc, Diagnostics::invalidAtomicDestinationPointer);
+        }
+        break;
+
+    default:
+        break;
+    }
+
+    for (auto child : inst->getModifiableChildren())
+    {
+        validateAtomicOperations(sink, child);
+    }
+}
+
 } // namespace Slang
diff --git a/source/slang/slang-ir-validate.h b/source/slang/slang-ir-validate.h
index d10aff7a00..420fad94dd 100644
--- a/source/slang/slang-ir-validate.h
+++ b/source/slang/slang-ir-validate.h
@@ -38,4 +38,6 @@ void validateIRModuleIfEnabled(CodeGenContext* codeGenContext, IRModule* module)
 void disableIRValidationAtInsert();
 void enableIRValidationAtInsert();
 
+void validateAtomicOperations(DiagnosticSink* sink, IRInst* inst);
+
 } // namespace Slang
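
For illustration, here is a hypothetical Slang compute kernel (not part of this
patch; the names deviceCounter, sharedCounter, and computeMain are invented for
the example) showing the kinds of destinations the new check accepts and rejects:

    // Element of a device buffer: a valid atomic destination.
    RWStructuredBuffer<uint> deviceCounter;

    // Groupshared memory: a valid atomic destination.
    groupshared uint sharedCounter;

    [shader("compute")]
    [numthreads(64, 1, 1)]
    void computeMain(uint3 tid: SV_DispatchThreadID)
    {
        uint localCounter = 0;
        InterlockedAdd(deviceCounter[0], 1); // OK: destination is from a device buffer
        InterlockedAdd(sharedCounter, 1);    // OK: destination is groupshared
        InterlockedAdd(localCounter, 1);     // expected to trigger error 41403
    }

In the first two calls the destination lowers to an RWStructuredBuffer element
pointer or a groupshared address, which isValidAtomicDest accepts; the address
of the function-local variable matches none of the cases and falls through to
the final 'return false', producing the new diagnostic.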