Merge branch 'jeffbolznv-memory_scope_semantics'

John Kessenich 2018-09-07 08:56:46 -06:00
commit ddfb4a8022
28 changed files with 5544 additions and 4242 deletions

View file

@ -39,5 +39,6 @@ static const char* const E_SPV_KHR_16bit_storage = "SPV_KHR_16bit
static const char* const E_SPV_KHR_8bit_storage = "SPV_KHR_8bit_storage";
static const char* const E_SPV_KHR_storage_buffer_storage_class = "SPV_KHR_storage_buffer_storage_class";
static const char* const E_SPV_KHR_post_depth_coverage = "SPV_KHR_post_depth_coverage";
static const char* const E_SPV_KHR_vulkan_memory_model = "SPV_KHR_vulkan_memory_model";
#endif // #ifndef GLSLextKHR_H

View file

@ -129,6 +129,10 @@ protected:
spv::Decoration TranslateInterpolationDecoration(const glslang::TQualifier& qualifier);
spv::Decoration TranslateAuxiliaryStorageDecoration(const glslang::TQualifier& qualifier);
spv::Decoration TranslateNonUniformDecoration(const glslang::TQualifier& qualifier);
spv::Builder::AccessChain::CoherentFlags TranslateCoherent(const glslang::TType& type);
spv::MemoryAccessMask TranslateMemoryAccess(const spv::Builder::AccessChain::CoherentFlags &coherentFlags);
spv::ImageOperandsMask TranslateImageOperands(const spv::Builder::AccessChain::CoherentFlags &coherentFlags);
spv::Scope TranslateMemoryScope(const spv::Builder::AccessChain::CoherentFlags &coherentFlags);
spv::BuiltIn TranslateBuiltInDecoration(glslang::TBuiltInVariable, bool memberDeclaration);
spv::ImageFormat TranslateImageFormat(const glslang::TType& type);
spv::SelectionControlMask TranslateSelectionControl(const glslang::TIntermSelection&) const;
@ -327,13 +331,15 @@ spv::Decoration TranslateBlockDecoration(const glslang::TType& type, bool useSto
}
// Translate glslang type to SPIR-V memory decorations.
void TranslateMemoryDecoration(const glslang::TQualifier& qualifier, std::vector<spv::Decoration>& memory)
void TranslateMemoryDecoration(const glslang::TQualifier& qualifier, std::vector<spv::Decoration>& memory, bool useVulkanMemoryModel)
{
if (qualifier.coherent)
memory.push_back(spv::DecorationCoherent);
if (qualifier.volatil) {
memory.push_back(spv::DecorationVolatile);
memory.push_back(spv::DecorationCoherent);
if (!useVulkanMemoryModel) {
if (qualifier.coherent)
memory.push_back(spv::DecorationCoherent);
if (qualifier.volatil) {
memory.push_back(spv::DecorationVolatile);
memory.push_back(spv::DecorationCoherent);
}
}
if (qualifier.restrict)
memory.push_back(spv::DecorationRestrict);
@ -450,6 +456,105 @@ spv::Decoration TGlslangToSpvTraverser::TranslateNonUniformDecoration(const glsl
return spv::DecorationMax;
}
spv::MemoryAccessMask TGlslangToSpvTraverser::TranslateMemoryAccess(const spv::Builder::AccessChain::CoherentFlags &coherentFlags)
{
if (!glslangIntermediate->usingVulkanMemoryModel() || coherentFlags.isImage) {
return spv::MemoryAccessMaskNone;
}
spv::MemoryAccessMask mask = spv::MemoryAccessMaskNone;
if (coherentFlags.volatil ||
coherentFlags.coherent ||
coherentFlags.devicecoherent ||
coherentFlags.queuefamilycoherent ||
coherentFlags.workgroupcoherent ||
coherentFlags.subgroupcoherent) {
mask = mask | spv::MemoryAccessMakePointerAvailableKHRMask |
spv::MemoryAccessMakePointerVisibleKHRMask;
}
if (coherentFlags.nonprivate) {
mask = mask | spv::MemoryAccessNonPrivatePointerKHRMask;
}
if (coherentFlags.volatil) {
mask = mask | spv::MemoryAccessVolatileMask;
}
if (mask != spv::MemoryAccessMaskNone) {
builder.addCapability(spv::CapabilityVulkanMemoryModelKHR);
}
return mask;
}
spv::ImageOperandsMask TGlslangToSpvTraverser::TranslateImageOperands(const spv::Builder::AccessChain::CoherentFlags &coherentFlags)
{
if (!glslangIntermediate->usingVulkanMemoryModel()) {
return spv::ImageOperandsMaskNone;
}
spv::ImageOperandsMask mask = spv::ImageOperandsMaskNone;
if (coherentFlags.volatil ||
coherentFlags.coherent ||
coherentFlags.devicecoherent ||
coherentFlags.queuefamilycoherent ||
coherentFlags.workgroupcoherent ||
coherentFlags.subgroupcoherent) {
mask = mask | spv::ImageOperandsMakeTexelAvailableKHRMask |
spv::ImageOperandsMakeTexelVisibleKHRMask;
}
if (coherentFlags.nonprivate) {
mask = mask | spv::ImageOperandsNonPrivateTexelKHRMask;
}
if (coherentFlags.volatil) {
mask = mask | spv::ImageOperandsVolatileTexelKHRMask;
}
if (mask != spv::ImageOperandsMaskNone) {
builder.addCapability(spv::CapabilityVulkanMemoryModelKHR);
}
return mask;
}
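Editor's note (not part of the diff): the image-operand masks produced here are later trimmed per access direction — image loads drop MakeTexelAvailableKHR and image stores drop MakeTexelVisibleKHR (see the createImageTextureFunctionCall hunks below). A minimal standalone sketch, using the mask values added to spirv.hpp in this commit; all names here are illustrative scaffolding, not code from the change:

#include <cassert>
#include <cstdint>

int main() {
    // Image-operand bits added to spirv.hpp in this commit.
    const uint32_t MakeTexelAvailableKHR = 0x00000100;
    const uint32_t MakeTexelVisibleKHR   = 0x00000200;
    const uint32_t NonPrivateTexelKHR    = 0x00000400;

    // A coherent image access starts with availability, visibility, and non-private.
    uint32_t mask = MakeTexelAvailableKHR | MakeTexelVisibleKHR | NonPrivateTexelKHR;

    // imageLoad only needs the texel made visible; imageStore only needs it made
    // available, so each path strips the bit it does not need.
    uint32_t loadMask  = mask & ~MakeTexelAvailableKHR;
    uint32_t storeMask = mask & ~MakeTexelVisibleKHR;
    assert(loadMask  == (MakeTexelVisibleKHR   | NonPrivateTexelKHR));
    assert(storeMask == (MakeTexelAvailableKHR | NonPrivateTexelKHR));
    return 0;
}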
spv::Builder::AccessChain::CoherentFlags TGlslangToSpvTraverser::TranslateCoherent(const glslang::TType& type)
{
spv::Builder::AccessChain::CoherentFlags flags;
flags.coherent = type.getQualifier().coherent;
flags.devicecoherent = type.getQualifier().devicecoherent;
flags.queuefamilycoherent = type.getQualifier().queuefamilycoherent;
// shared variables are implicitly workgroupcoherent in GLSL.
flags.workgroupcoherent = type.getQualifier().workgroupcoherent ||
type.getQualifier().storage == glslang::EvqShared;
flags.subgroupcoherent = type.getQualifier().subgroupcoherent;
// *coherent variables are implicitly nonprivate in GLSL
flags.nonprivate = type.getQualifier().nonprivate ||
type.getQualifier().subgroupcoherent ||
type.getQualifier().workgroupcoherent ||
type.getQualifier().queuefamilycoherent ||
type.getQualifier().devicecoherent ||
type.getQualifier().coherent;
flags.volatil = type.getQualifier().volatil;
flags.isImage = type.getBasicType() == glslang::EbtSampler;
return flags;
}
spv::Scope TGlslangToSpvTraverser::TranslateMemoryScope(const spv::Builder::AccessChain::CoherentFlags &coherentFlags)
{
spv::Scope scope;
if (coherentFlags.coherent) {
// coherent defaults to Device scope in the old model, QueueFamilyKHR scope in the new model
scope = glslangIntermediate->usingVulkanMemoryModel() ? spv::ScopeQueueFamilyKHR : spv::ScopeDevice;
} else if (coherentFlags.devicecoherent) {
scope = spv::ScopeDevice;
} else if (coherentFlags.queuefamilycoherent) {
scope = spv::ScopeQueueFamilyKHR;
} else if (coherentFlags.workgroupcoherent) {
scope = spv::ScopeWorkgroup;
} else if (coherentFlags.subgroupcoherent) {
scope = spv::ScopeSubgroup;
} else {
scope = spv::ScopeMax;
}
if (glslangIntermediate->usingVulkanMemoryModel() && scope == spv::ScopeDevice) {
builder.addCapability(spv::CapabilityVulkanMemoryModelDeviceScopeKHR);
}
return scope;
}
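Editor's note (not part of the diff): the scope chosen above follows a broadest-qualifier-wins ladder, and plain "coherent" resolves differently under the two memory models. A minimal, self-contained sketch of that precedence; the Flags struct and scopeFor helper are illustrative assumptions, not code from this change:

#include <cassert>

enum class Scope { QueueFamily, Device, Workgroup, Subgroup, None };

struct Flags {
    bool coherent = false, devicecoherent = false, queuefamilycoherent = false,
         workgroupcoherent = false, subgroupcoherent = false;
};

// Mirrors the precedence ladder in TranslateMemoryScope above.
Scope scopeFor(const Flags& f, bool vulkanMemoryModel) {
    if (f.coherent)            return vulkanMemoryModel ? Scope::QueueFamily : Scope::Device;
    if (f.devicecoherent)      return Scope::Device;
    if (f.queuefamilycoherent) return Scope::QueueFamily;
    if (f.workgroupcoherent)   return Scope::Workgroup;   // also set for "shared" variables
    if (f.subgroupcoherent)    return Scope::Subgroup;
    return Scope::None;                                    // ScopeMax in the real code
}

int main() {
    Flags sharedVar;                    // "shared" is implicitly workgroupcoherent
    sharedVar.workgroupcoherent = true;
    assert(scopeFor(sharedVar, true) == Scope::Workgroup);

    Flags coherentBuffer;               // plain "coherent" depends on the memory model
    coherentBuffer.coherent = true;
    assert(scopeFor(coherentBuffer, true)  == Scope::QueueFamily);
    assert(scopeFor(coherentBuffer, false) == Scope::Device);
    return 0;
}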
// Translate a glslang built-in variable to a SPIR-V built in decoration. Also generate
// associated capabilities when required. For some built-in variables, a capability
// is generated only when using the variable in an executable instruction, but not when
@ -979,6 +1084,16 @@ void InheritQualifiers(glslang::TQualifier& child, const glslang::TQualifier& pa
child.sample = true;
if (parent.coherent)
child.coherent = true;
if (parent.devicecoherent)
child.devicecoherent = true;
if (parent.queuefamilycoherent)
child.queuefamilycoherent = true;
if (parent.workgroupcoherent)
child.workgroupcoherent = true;
if (parent.subgroupcoherent)
child.subgroupcoherent = true;
if (parent.nonprivate)
child.nonprivate = true;
if (parent.volatil)
child.volatil = true;
if (parent.restrict)
@ -1045,7 +1160,12 @@ TGlslangToSpvTraverser::TGlslangToSpvTraverser(unsigned int spvVersion, const gl
builder.setSourceText(text);
}
stdBuiltins = builder.import("GLSL.std.450");
builder.setMemoryModel(spv::AddressingModelLogical, spv::MemoryModelGLSL450);
if (glslangIntermediate->usingVulkanMemoryModel()) {
builder.setMemoryModel(spv::AddressingModelLogical, spv::MemoryModelVulkanKHR);
builder.addExtension(spv::E_SPV_KHR_vulkan_memory_model);
} else {
builder.setMemoryModel(spv::AddressingModelLogical, spv::MemoryModelGLSL450);
}
shaderEntry = builder.makeEntryPoint(glslangIntermediate->getEntryPointName().c_str());
entryPoint = builder.addEntryPoint(executionModel, shaderEntry, glslangIntermediate->getEntryPointName().c_str());
@ -1351,7 +1471,7 @@ bool TGlslangToSpvTraverser::visitBinary(glslang::TVisit /* visit */, glslang::T
// store the result
builder.setAccessChain(lValue);
multiTypeStore(node->getType(), rValue);
multiTypeStore(node->getLeft()->getType(), rValue);
// assignments are expressions having an rValue after they are evaluated...
builder.clearAccessChain();
@ -1388,7 +1508,7 @@ bool TGlslangToSpvTraverser::visitBinary(glslang::TVisit /* visit */, glslang::T
}
// normal case for indexing array or structure or block
builder.accessChainPush(builder.makeIntConstant(spvIndex));
builder.accessChainPush(builder.makeIntConstant(spvIndex), TranslateCoherent(node->getLeft()->getType()));
// Add capabilities here for accessing PointSize and clip/cull distance.
// We have deferred generation of associated capabilities until now.
@ -1424,7 +1544,7 @@ bool TGlslangToSpvTraverser::visitBinary(glslang::TVisit /* visit */, glslang::T
if (! node->getLeft()->getType().isArray() && node->getLeft()->getType().isVector())
builder.accessChainPushComponent(index, convertGlslangToSpvType(node->getLeft()->getType()));
else
builder.accessChainPush(index);
builder.accessChainPush(index, TranslateCoherent(node->getLeft()->getType()));
}
return false;
case glslang::EOpVectorSwizzle:
@ -1658,11 +1778,11 @@ bool TGlslangToSpvTraverser::visitAggregate(glslang::TVisit visit, glslang::TInt
builder.setAccessChainRValue(result);
return false;
} else if (node->getOp() == glslang::EOpImageStore ||
#ifdef AMD_EXTENSIONS
} else if (node->getOp() == glslang::EOpImageStore || node->getOp() == glslang::EOpImageStoreLod) {
#else
} else if (node->getOp() == glslang::EOpImageStore) {
node->getOp() == glslang::EOpImageStoreLod ||
#endif
node->getOp() == glslang::EOpImageAtomicStore) {
// "imageStore" is a special case, which has no result
return false;
}
@ -1952,6 +2072,10 @@ bool TGlslangToSpvTraverser::visitAggregate(glslang::TVisit visit, glslang::TInt
// These all have 0 operands and will naturally finish up in the code below for 0 operands
break;
case glslang::EOpAtomicStore:
noReturnValue = true;
// fallthrough
case glslang::EOpAtomicLoad:
case glslang::EOpAtomicAdd:
case glslang::EOpAtomicMin:
case glslang::EOpAtomicMax:
@ -2050,6 +2174,8 @@ bool TGlslangToSpvTraverser::visitAggregate(glslang::TVisit visit, glslang::TInt
case glslang::EOpAtomicXor:
case glslang::EOpAtomicExchange:
case glslang::EOpAtomicCompSwap:
case glslang::EOpAtomicLoad:
case glslang::EOpAtomicStore:
case glslang::EOpAtomicCounterAdd:
case glslang::EOpAtomicCounterSubtract:
case glslang::EOpAtomicCounterMin:
@ -2876,7 +3002,7 @@ void TGlslangToSpvTraverser::decorateStructType(const glslang::TType& type,
qualifier.storage == glslang::EvqBuffer) {
// Add memory decorations only to top-level members of shader storage block
std::vector<spv::Decoration> memory;
TranslateMemoryDecoration(memberQualifier, memory);
TranslateMemoryDecoration(memberQualifier, memory, glslangIntermediate->usingVulkanMemoryModel());
for (unsigned int i = 0; i < memory.size(); ++i)
builder.addMemberDecoration(spvType, member, memory[i]);
}
@ -2987,8 +3113,15 @@ spv::Id TGlslangToSpvTraverser::makeArraySizeId(const glslang::TArraySizes& arra
spv::Id TGlslangToSpvTraverser::accessChainLoad(const glslang::TType& type)
{
spv::Id nominalTypeId = builder.accessChainGetInferredType();
spv::Builder::AccessChain::CoherentFlags coherentFlags = builder.getAccessChain().coherentFlags;
coherentFlags |= TranslateCoherent(type);
spv::Id loadedId = builder.accessChainLoad(TranslatePrecisionDecoration(type),
TranslateNonUniformDecoration(type.getQualifier()), nominalTypeId);
TranslateNonUniformDecoration(type.getQualifier()),
nominalTypeId,
spv::MemoryAccessMask(TranslateMemoryAccess(coherentFlags) & ~spv::MemoryAccessMakePointerAvailableKHRMask),
TranslateMemoryScope(coherentFlags));
// Need to convert to abstract types when necessary
if (type.getBasicType() == glslang::EbtBool) {
@ -3044,7 +3177,12 @@ void TGlslangToSpvTraverser::accessChainStore(const glslang::TType& type, spv::I
}
}
builder.accessChainStore(rvalue);
spv::Builder::AccessChain::CoherentFlags coherentFlags = builder.getAccessChain().coherentFlags;
coherentFlags |= TranslateCoherent(type);
builder.accessChainStore(rvalue,
spv::MemoryAccessMask(TranslateMemoryAccess(coherentFlags) & ~spv::MemoryAccessMakePointerVisibleKHRMask),
TranslateMemoryScope(coherentFlags));
}
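Editor's note (not part of the diff): the asymmetry between the two hunks above is easy to miss — accessChainLoad masks off MakePointerAvailableKHR (a load only needs visibility) while accessChainStore masks off MakePointerVisibleKHR (a store only needs availability). A minimal standalone sketch of the resulting masks for a coherent access, using the constants added to spirv.hpp in this commit; all names are illustrative:

#include <cassert>
#include <cstdint>

int main() {
    // MemoryAccess mask bits added to spirv.hpp in this commit.
    const uint32_t MakePointerAvailableKHR = 0x00000008;
    const uint32_t MakePointerVisibleKHR   = 0x00000010;
    const uint32_t NonPrivatePointerKHR    = 0x00000020;

    // A coherent (and therefore nonprivate) access starts with all three bits.
    uint32_t mask = MakePointerAvailableKHR | MakePointerVisibleKHR | NonPrivatePointerKHR;

    // Each side strips the bit it does not need, as in the hunks above.
    uint32_t loadMask  = mask & ~MakePointerAvailableKHR;
    uint32_t storeMask = mask & ~MakePointerVisibleKHR;
    assert(loadMask  == (MakePointerVisibleKHR   | NonPrivatePointerKHR));
    assert(storeMask == (MakePointerAvailableKHR | NonPrivatePointerKHR));
    return 0;
}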
// For storing when types match at the glslang level, but might not match at the
@ -3090,7 +3228,7 @@ void TGlslangToSpvTraverser::multiTypeStore(const glslang::TType& type, spv::Id
// set up the target storage
builder.clearAccessChain();
builder.setAccessChainLValue(lValue);
builder.accessChainPush(builder.makeIntConstant(index));
builder.accessChainPush(builder.makeIntConstant(index), TranslateCoherent(type));
// store the member
multiTypeStore(glslangElementType, elementRValue);
@ -3110,7 +3248,7 @@ void TGlslangToSpvTraverser::multiTypeStore(const glslang::TType& type, spv::Id
// set up the target storage
builder.clearAccessChain();
builder.setAccessChainLValue(lValue);
builder.accessChainPush(builder.makeIntConstant(m));
builder.accessChainPush(builder.makeIntConstant(m), TranslateCoherent(type));
// store the member
multiTypeStore(glslangMemberType, memberRValue);
@ -3287,11 +3425,11 @@ bool TGlslangToSpvTraverser::originalParam(glslang::TStorageQualifier qualifier,
// Make all the functions, skeletally, without actually visiting their bodies.
void TGlslangToSpvTraverser::makeFunctions(const glslang::TIntermSequence& glslFunctions)
{
const auto getParamDecorations = [](std::vector<spv::Decoration>& decorations, const glslang::TType& type) {
const auto getParamDecorations = [](std::vector<spv::Decoration>& decorations, const glslang::TType& type, bool useVulkanMemoryModel) {
spv::Decoration paramPrecision = TranslatePrecisionDecoration(type);
if (paramPrecision != spv::NoPrecision)
decorations.push_back(paramPrecision);
TranslateMemoryDecoration(type.getQualifier(), decorations);
TranslateMemoryDecoration(type.getQualifier(), decorations, useVulkanMemoryModel);
};
for (int f = 0; f < (int)glslFunctions.size(); ++f) {
@ -3330,7 +3468,7 @@ void TGlslangToSpvTraverser::makeFunctions(const glslang::TIntermSequence& glslF
typeId = builder.makePointer(spv::StorageClassFunction, typeId);
else
rValueParameters.insert(parameters[p]->getAsSymbolNode()->getId());
getParamDecorations(paramDecorations[p], paramType);
getParamDecorations(paramDecorations[p], paramType, glslangIntermediate->usingVulkanMemoryModel());
paramTypes.push_back(typeId);
}
@ -3420,6 +3558,8 @@ void TGlslangToSpvTraverser::translateArguments(const glslang::TIntermAggregate&
case glslang::EOpImageAtomicXor:
case glslang::EOpImageAtomicExchange:
case glslang::EOpImageAtomicCompSwap:
case glslang::EOpImageAtomicLoad:
case glslang::EOpImageAtomicStore:
if (i == 0)
lvalue = true;
break;
@ -3547,8 +3687,10 @@ spv::Id TGlslangToSpvTraverser::createImageTextureFunctionCall(glslang::TIntermO
builder.setLine(node->getLoc().line);
// Process a GLSL texturing op (will be SPV image)
const glslang::TSampler sampler = node->getAsAggregate() ? node->getAsAggregate()->getSequence()[0]->getAsTyped()->getType().getSampler()
: node->getAsUnaryNode()->getOperand()->getAsTyped()->getType().getSampler();
const glslang::TType &imageType = node->getAsAggregate() ? node->getAsAggregate()->getSequence()[0]->getAsTyped()->getType()
: node->getAsUnaryNode()->getOperand()->getAsTyped()->getType();
const glslang::TSampler sampler = imageType.getSampler();
#ifdef AMD_EXTENSIONS
bool f16ShadowCompare = (sampler.shadow && node->getAsAggregate())
? node->getAsAggregate()->getSequence()[1]->getAsTyped()->getType().getBasicType() == glslang::EbtFloat16
@ -3651,22 +3793,38 @@ spv::Id TGlslangToSpvTraverser::createImageTextureFunctionCall(glslang::TIntermO
#else
if (node->getOp() == glslang::EOpImageLoad) {
#endif
spv::ImageOperandsMask mask = spv::ImageOperandsMaskNone;
if (sampler.ms) {
spv::IdImmediate imageOperands = { false, spv::ImageOperandsSampleMask };
operands.push_back(imageOperands);
spv::IdImmediate imageOperand = { true, *opIt };
operands.push_back(imageOperand);
mask = mask | spv::ImageOperandsSampleMask;
}
#ifdef AMD_EXTENSIONS
} else if (cracked.lod) {
if (cracked.lod) {
builder.addExtension(spv::E_SPV_AMD_shader_image_load_store_lod);
builder.addCapability(spv::CapabilityImageReadWriteLodAMD);
spv::IdImmediate imageOperands = { false, spv::ImageOperandsLodMask };
operands.push_back(imageOperands);
spv::IdImmediate imageOperand = { true, *opIt };
operands.push_back(imageOperand);
#endif
mask = mask | spv::ImageOperandsLodMask;
}
#endif
mask = mask | TranslateImageOperands(TranslateCoherent(imageType));
mask = (spv::ImageOperandsMask)(mask & ~spv::ImageOperandsMakeTexelAvailableKHRMask);
if (mask) {
spv::IdImmediate imageOperands = { false, (unsigned int)mask };
operands.push_back(imageOperands);
}
if (mask & spv::ImageOperandsSampleMask) {
spv::IdImmediate imageOperand = { true, *opIt++ };
operands.push_back(imageOperand);
}
#ifdef AMD_EXTENSIONS
if (mask & spv::ImageOperandsLodMask) {
spv::IdImmediate imageOperand = { true, *opIt++ };
operands.push_back(imageOperand);
}
#endif
if (mask & spv::ImageOperandsMakeTexelVisibleKHRMask) {
spv::IdImmediate imageOperand = { true, builder.makeUintConstant(TranslateMemoryScope(TranslateCoherent(imageType))) };
operands.push_back(imageOperand);
}
if (builder.getImageTypeFormat(builder.getImageType(operands.front().word)) == spv::ImageFormatUnknown)
builder.addCapability(spv::CapabilityStorageImageReadWithoutFormat);
@ -3683,29 +3841,52 @@ spv::Id TGlslangToSpvTraverser::createImageTextureFunctionCall(glslang::TIntermO
#else
} else if (node->getOp() == glslang::EOpImageStore) {
#endif
if (sampler.ms) {
spv::IdImmediate texel = { true, *(opIt + 1) };
operands.push_back(texel);
spv::IdImmediate imageOperands = { false, spv::ImageOperandsSampleMask };
operands.push_back(imageOperands);
spv::IdImmediate imageOperand = { true, *opIt };
operands.push_back(imageOperand);
#ifdef AMD_EXTENSIONS
} else if (cracked.lod) {
builder.addExtension(spv::E_SPV_AMD_shader_image_load_store_lod);
builder.addCapability(spv::CapabilityImageReadWriteLodAMD);
// Push the texel value before the operands
#ifdef AMD_EXTENSIONS
if (sampler.ms || cracked.lod) {
#else
if (sampler.ms) {
#endif
spv::IdImmediate texel = { true, *(opIt + 1) };
operands.push_back(texel);
spv::IdImmediate imageOperands = { false, spv::ImageOperandsLodMask };
operands.push_back(imageOperands);
spv::IdImmediate imageOperand = { true, *opIt };
operands.push_back(imageOperand);
#endif
} else {
spv::IdImmediate texel = { true, *opIt };
operands.push_back(texel);
}
spv::ImageOperandsMask mask = spv::ImageOperandsMaskNone;
if (sampler.ms) {
mask = mask | spv::ImageOperandsSampleMask;
}
#ifdef AMD_EXTENSIONS
if (cracked.lod) {
builder.addExtension(spv::E_SPV_AMD_shader_image_load_store_lod);
builder.addCapability(spv::CapabilityImageReadWriteLodAMD);
mask = mask | spv::ImageOperandsLodMask;
}
#endif
mask = mask | TranslateImageOperands(TranslateCoherent(imageType));
mask = (spv::ImageOperandsMask)(mask & ~spv::ImageOperandsMakeTexelVisibleKHRMask);
if (mask) {
spv::IdImmediate imageOperands = { false, (unsigned int)mask };
operands.push_back(imageOperands);
}
if (mask & spv::ImageOperandsSampleMask) {
spv::IdImmediate imageOperand = { true, *opIt++ };
operands.push_back(imageOperand);
}
#ifdef AMD_EXTENSIONS
if (mask & spv::ImageOperandsLodMask) {
spv::IdImmediate imageOperand = { true, *opIt++ };
operands.push_back(imageOperand);
}
#endif
if (mask & spv::ImageOperandsMakeTexelAvailableKHRMask) {
spv::IdImmediate imageOperand = { true, builder.makeUintConstant(TranslateMemoryScope(TranslateCoherent(imageType))) };
operands.push_back(imageOperand);
}
builder.createNoResultOp(spv::OpImageWrite, operands);
if (builder.getImageTypeFormat(builder.getImageType(operands.front().word)) == spv::ImageFormatUnknown)
builder.addCapability(spv::CapabilityStorageImageWriteWithoutFormat);
@ -3719,21 +3900,37 @@ spv::Id TGlslangToSpvTraverser::createImageTextureFunctionCall(glslang::TIntermO
if (builder.getImageTypeFormat(builder.getImageType(operands.front().word)) == spv::ImageFormatUnknown)
builder.addCapability(spv::CapabilityStorageImageReadWithoutFormat);
spv::ImageOperandsMask mask = spv::ImageOperandsMaskNone;
if (sampler.ms) {
spv::IdImmediate imageOperands = { false, spv::ImageOperandsSampleMask };
operands.push_back(imageOperands);
spv::IdImmediate imageOperand = { true, *opIt++ };
operands.push_back(imageOperand);
mask = mask | spv::ImageOperandsSampleMask;
}
#ifdef AMD_EXTENSIONS
} else if (cracked.lod) {
if (cracked.lod) {
builder.addExtension(spv::E_SPV_AMD_shader_image_load_store_lod);
builder.addCapability(spv::CapabilityImageReadWriteLodAMD);
spv::IdImmediate imageOperands = { false, spv::ImageOperandsLodMask };
mask = mask | spv::ImageOperandsLodMask;
}
#endif
mask = mask | TranslateImageOperands(TranslateCoherent(imageType));
mask = (spv::ImageOperandsMask)(mask & ~spv::ImageOperandsMakeTexelAvailableKHRMask);
if (mask) {
spv::IdImmediate imageOperands = { false, (unsigned int)mask };
operands.push_back(imageOperands);
}
if (mask & spv::ImageOperandsSampleMask) {
spv::IdImmediate imageOperand = { true, *opIt++ };
operands.push_back(imageOperand);
}
#ifdef AMD_EXTENSIONS
if (mask & spv::ImageOperandsLodMask) {
spv::IdImmediate imageOperand = { true, *opIt++ };
operands.push_back(imageOperand);
}
#endif
if (mask & spv::ImageOperandsMakeTexelVisibleKHRMask) {
spv::IdImmediate imageOperand = { true, builder.makeUintConstant(TranslateMemoryScope(TranslateCoherent(imageType))) };
operands.push_back(imageOperand);
}
// Create the return type that was a special structure
@ -3756,7 +3953,14 @@ spv::Id TGlslangToSpvTraverser::createImageTextureFunctionCall(glslang::TIntermO
spv::IdImmediate sample = { true, sampler.ms ? *(opIt++) : builder.makeUintConstant(0) };
operands.push_back(sample);
spv::Id resultTypeId = builder.makePointer(spv::StorageClassImage, resultType());
spv::Id resultTypeId;
// imageAtomicStore has a void return type so base the pointer type on
// the type of the value operand.
if (node->getOp() == glslang::EOpImageAtomicStore) {
resultTypeId = builder.makePointer(spv::StorageClassImage, builder.getTypeId(operands[2].word));
} else {
resultTypeId = builder.makePointer(spv::StorageClassImage, resultType());
}
spv::Id pointer = builder.createOp(spv::OpImageTexelPointer, resultTypeId, operands);
std::vector<spv::Id> operands;
@ -3972,6 +4176,16 @@ spv::Id TGlslangToSpvTraverser::createImageTextureFunctionCall(glslang::TIntermO
}
}
// nonprivate
if (imageType.getQualifier().nonprivate) {
params.nonprivate = true;
}
// volatile
if (imageType.getQualifier().volatil) {
params.volatil = true;
}
std::vector<spv::Id> result( 1,
builder.createTextureCall(precision, resultType(), sparse, cracked.fetch, cracked.proj, cracked.gather, noImplicitLod, params)
);
@ -5340,8 +5554,14 @@ spv::Id TGlslangToSpvTraverser::createAtomicOperation(glslang::TOperator op, spv
opCode = spv::OpAtomicIDecrement;
break;
case glslang::EOpAtomicCounter:
case glslang::EOpImageAtomicLoad:
case glslang::EOpAtomicLoad:
opCode = spv::OpAtomicLoad;
break;
case glslang::EOpAtomicStore:
case glslang::EOpImageAtomicStore:
opCode = spv::OpAtomicStore;
break;
default:
assert(0);
break;
@ -5352,36 +5572,82 @@ spv::Id TGlslangToSpvTraverser::createAtomicOperation(glslang::TOperator op, spv
// Sort out the operands
// - mapping from glslang -> SPV
// - there are extra SPV operands with no glslang source
// - there are extra SPV operands that are optional in glslang
// - compare-exchange swaps the value and comparator
// - compare-exchange has an extra memory semantics
// - EOpAtomicCounterDecrement needs a post decrement
std::vector<spv::Id> spvAtomicOperands; // hold the spv operands
auto opIt = operands.begin(); // walk the glslang operands
spvAtomicOperands.push_back(*(opIt++));
spvAtomicOperands.push_back(builder.makeUintConstant(spv::ScopeDevice)); // TBD: what is the correct scope?
spvAtomicOperands.push_back(builder.makeUintConstant(spv::MemorySemanticsMaskNone)); // TBD: what are the correct memory semantics?
if (opCode == spv::OpAtomicCompareExchange) {
// There are 2 memory semantics for compare-exchange. And the operand order of "comparator" and "new value" in GLSL
// differs from that in SPIR-V. Hence, special processing is required.
spvAtomicOperands.push_back(builder.makeUintConstant(spv::MemorySemanticsMaskNone));
spvAtomicOperands.push_back(*(opIt + 1));
spvAtomicOperands.push_back(*opIt);
opIt += 2;
spv::Id pointerId = 0, compareId = 0, valueId = 0;
// scope defaults to Device in the old model, QueueFamilyKHR in the new model
spv::Id scopeId;
if (glslangIntermediate->usingVulkanMemoryModel()) {
scopeId = builder.makeUintConstant(spv::ScopeQueueFamilyKHR);
} else {
scopeId = builder.makeUintConstant(spv::ScopeDevice);
}
// semantics default to relaxed
spv::Id semanticsId = builder.makeUintConstant(spv::MemorySemanticsMaskNone);
spv::Id semanticsId2 = semanticsId;
pointerId = operands[0];
if (opCode == spv::OpAtomicIIncrement || opCode == spv::OpAtomicIDecrement) {
// no additional operands
} else if (opCode == spv::OpAtomicCompareExchange) {
compareId = operands[1];
valueId = operands[2];
if (operands.size() > 3) {
scopeId = operands[3];
semanticsId = builder.makeUintConstant(builder.getConstantScalar(operands[4]) | builder.getConstantScalar(operands[5]));
semanticsId2 = builder.makeUintConstant(builder.getConstantScalar(operands[6]) | builder.getConstantScalar(operands[7]));
}
} else if (opCode == spv::OpAtomicLoad) {
if (operands.size() > 1) {
scopeId = operands[1];
semanticsId = builder.makeUintConstant(builder.getConstantScalar(operands[2]) | builder.getConstantScalar(operands[3]));
}
} else {
// atomic store or RMW
valueId = operands[1];
if (operands.size() > 2) {
scopeId = operands[2];
semanticsId = builder.makeUintConstant(builder.getConstantScalar(operands[3]) | builder.getConstantScalar(operands[4]));
}
}
// Add the rest of the operands, skipping any that were dealt with above.
for (; opIt != operands.end(); ++opIt)
spvAtomicOperands.push_back(*opIt);
// Check for capabilities
unsigned semanticsImmediate = builder.getConstantScalar(semanticsId) | builder.getConstantScalar(semanticsId2);
if (semanticsImmediate & (spv::MemorySemanticsMakeAvailableKHRMask | spv::MemorySemanticsMakeVisibleKHRMask | spv::MemorySemanticsOutputMemoryKHRMask)) {
builder.addCapability(spv::CapabilityVulkanMemoryModelKHR);
}
spv::Id resultId = builder.createOp(opCode, typeId, spvAtomicOperands);
if (glslangIntermediate->usingVulkanMemoryModel() && builder.getConstantScalar(scopeId) == spv::ScopeDevice) {
builder.addCapability(spv::CapabilityVulkanMemoryModelDeviceScopeKHR);
}
// GLSL and HLSL atomic-counter decrement return post-decrement value,
// while SPIR-V returns pre-decrement value. Translate between these semantics.
if (op == glslang::EOpAtomicCounterDecrement)
resultId = builder.createBinOp(spv::OpISub, typeId, resultId, builder.makeIntConstant(1));
std::vector<spv::Id> spvAtomicOperands; // hold the spv operands
spvAtomicOperands.push_back(pointerId);
spvAtomicOperands.push_back(scopeId);
spvAtomicOperands.push_back(semanticsId);
if (opCode == spv::OpAtomicCompareExchange) {
spvAtomicOperands.push_back(semanticsId2);
spvAtomicOperands.push_back(valueId);
spvAtomicOperands.push_back(compareId);
} else if (opCode != spv::OpAtomicLoad && opCode != spv::OpAtomicIIncrement && opCode != spv::OpAtomicIDecrement) {
spvAtomicOperands.push_back(valueId);
}
return resultId;
if (opCode == spv::OpAtomicStore) {
builder.createNoResultOp(opCode, spvAtomicOperands);
return 0;
} else {
spv::Id resultId = builder.createOp(opCode, typeId, spvAtomicOperands);
// GLSL and HLSL atomic-counter decrement return post-decrement value,
// while SPIR-V returns pre-decrement value. Translate between these semantics.
if (op == glslang::EOpAtomicCounterDecrement)
resultId = builder.createBinOp(spv::OpISub, typeId, resultId, builder.makeIntConstant(1));
return resultId;
}
}
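Editor's note (not part of the diff): two details of createAtomicOperation above are worth spelling out — the optional glslang operands carry the storage-class semantics and the ordering semantics as two separate constants that get OR'd into the single SPIR-V Memory Semantics operand, and compare-exchange swaps the value/comparator order between glslang and SPIR-V. A small standalone sketch; the mask values are standard SPIR-V values and the variable names are illustrative:

#include <cassert>

int main() {
    // Standard SPIR-V memory-semantics mask values.
    const unsigned AcquireRelease = 0x0008;
    const unsigned UniformMemory  = 0x0040;

    // Two glslang scalar constants fold into one SPIR-V semantics operand.
    unsigned storageSemantics = UniformMemory;   // e.g. buffer memory
    unsigned orderSemantics   = AcquireRelease;
    unsigned spvSemantics     = storageSemantics | orderSemantics;
    assert(spvSemantics == (UniformMemory | AcquireRelease));

    // For OpAtomicCompareExchange the emitted operand order is:
    //   pointer, scope, semanticsEqual, semanticsUnequal, value, comparator
    // which swaps "value" and "comparator" relative to the glslang order
    // (mem, compare, data, ...).
    return 0;
}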
// Create group invocation operations.
@ -6282,7 +6548,41 @@ spv::Id TGlslangToSpvTraverser::createMiscOperation(glslang::TOperator op, spv::
libCall = spv::InterpolateAtVertexAMD;
break;
#endif
case glslang::EOpBarrier:
{
// This is for the extended controlBarrier function, with four operands.
// The unextended barrier() goes through createNoArgOperation.
assert(operands.size() == 4);
unsigned int executionScope = builder.getConstantScalar(operands[0]);
unsigned int memoryScope = builder.getConstantScalar(operands[1]);
unsigned int semantics = builder.getConstantScalar(operands[2]) | builder.getConstantScalar(operands[3]);
builder.createControlBarrier((spv::Scope)executionScope, (spv::Scope)memoryScope, (spv::MemorySemanticsMask)semantics);
if (semantics & (spv::MemorySemanticsMakeAvailableKHRMask | spv::MemorySemanticsMakeVisibleKHRMask | spv::MemorySemanticsOutputMemoryKHRMask)) {
builder.addCapability(spv::CapabilityVulkanMemoryModelKHR);
}
if (glslangIntermediate->usingVulkanMemoryModel() && (executionScope == spv::ScopeDevice || memoryScope == spv::ScopeDevice)) {
builder.addCapability(spv::CapabilityVulkanMemoryModelDeviceScopeKHR);
}
return 0;
}
break;
case glslang::EOpMemoryBarrier:
{
// This is for the extended memoryBarrier function, with three operands.
// The unextended memoryBarrier() goes through createNoArgOperation.
assert(operands.size() == 3);
unsigned int memoryScope = builder.getConstantScalar(operands[0]);
unsigned int semantics = builder.getConstantScalar(operands[1]) | builder.getConstantScalar(operands[2]);
builder.createMemoryBarrier((spv::Scope)memoryScope, (spv::MemorySemanticsMask)semantics);
if (semantics & (spv::MemorySemanticsMakeAvailableKHRMask | spv::MemorySemanticsMakeVisibleKHRMask | spv::MemorySemanticsOutputMemoryKHRMask)) {
builder.addCapability(spv::CapabilityVulkanMemoryModelKHR);
}
if (glslangIntermediate->usingVulkanMemoryModel() && memoryScope == spv::ScopeDevice) {
builder.addCapability(spv::CapabilityVulkanMemoryModelDeviceScopeKHR);
}
return 0;
}
break;
default:
return 0;
}
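Editor's note (not part of the diff): the capability checks in the extended barrier cases above trigger only when the combined semantics request availability, visibility, or output-memory ordering. A minimal standalone sketch of that check, using the MemorySemantics*KHR values added to spirv.hpp in this commit plus standard SPIR-V masks; names are illustrative:

#include <cassert>

int main() {
    const unsigned AcquireRelease   = 0x0008;
    const unsigned WorkgroupMemory  = 0x0100;
    const unsigned OutputMemoryKHR  = 0x1000;
    const unsigned MakeAvailableKHR = 0x2000;
    const unsigned MakeVisibleKHR   = 0x4000;

    // A plain workgroup barrier does not require the Vulkan memory model capability...
    unsigned semantics = WorkgroupMemory | AcquireRelease;
    bool needsVmmCap = (semantics & (MakeAvailableKHR | MakeVisibleKHR | OutputMemoryKHR)) != 0;
    assert(!needsVmmCap);

    // ...but an extended barrier that requests availability does, as checked above.
    semantics |= MakeAvailableKHR;
    needsVmmCap = (semantics & (MakeAvailableKHR | MakeVisibleKHR | OutputMemoryKHR)) != 0;
    assert(needsVmmCap);
    return 0;
}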
@ -6351,7 +6651,8 @@ spv::Id TGlslangToSpvTraverser::createMiscOperation(glslang::TOperator op, spv::
// Intrinsics with no arguments (or no return value, and no precision).
spv::Id TGlslangToSpvTraverser::createNoArgOperation(glslang::TOperator op, spv::Decoration precision, spv::Id typeId)
{
// TODO: get the barrier operands correct
// GLSL memory barriers use queuefamily scope in new model, device scope in old model
spv::Scope memoryBarrierScope = glslangIntermediate->usingVulkanMemoryModel() ? spv::ScopeQueueFamilyKHR : spv::ScopeDevice;
switch (op) {
case glslang::EOpEmitVertex:
@ -6362,11 +6663,14 @@ spv::Id TGlslangToSpvTraverser::createNoArgOperation(glslang::TOperator op, spv:
return 0;
case glslang::EOpBarrier:
if (glslangIntermediate->getStage() == EShLangTessControl) {
builder.createControlBarrier(spv::ScopeWorkgroup, spv::ScopeInvocation, spv::MemorySemanticsMaskNone);
// TODO: prefer the following, when available:
// builder.createControlBarrier(spv::ScopePatch, spv::ScopePatch,
// spv::MemorySemanticsPatchMask |
// spv::MemorySemanticsAcquireReleaseMask);
if (glslangIntermediate->usingVulkanMemoryModel()) {
builder.createControlBarrier(spv::ScopeWorkgroup, spv::ScopeWorkgroup,
spv::MemorySemanticsOutputMemoryKHRMask |
spv::MemorySemanticsAcquireReleaseMask);
builder.addCapability(spv::CapabilityVulkanMemoryModelKHR);
} else {
builder.createControlBarrier(spv::ScopeWorkgroup, spv::ScopeInvocation, spv::MemorySemanticsMaskNone);
}
} else {
builder.createControlBarrier(spv::ScopeWorkgroup, spv::ScopeWorkgroup,
spv::MemorySemanticsWorkgroupMemoryMask |
@ -6374,24 +6678,24 @@ spv::Id TGlslangToSpvTraverser::createNoArgOperation(glslang::TOperator op, spv:
}
return 0;
case glslang::EOpMemoryBarrier:
builder.createMemoryBarrier(spv::ScopeDevice, spv::MemorySemanticsAllMemory |
spv::MemorySemanticsAcquireReleaseMask);
builder.createMemoryBarrier(memoryBarrierScope, spv::MemorySemanticsAllMemory |
spv::MemorySemanticsAcquireReleaseMask);
return 0;
case glslang::EOpMemoryBarrierAtomicCounter:
builder.createMemoryBarrier(spv::ScopeDevice, spv::MemorySemanticsAtomicCounterMemoryMask |
spv::MemorySemanticsAcquireReleaseMask);
builder.createMemoryBarrier(memoryBarrierScope, spv::MemorySemanticsAtomicCounterMemoryMask |
spv::MemorySemanticsAcquireReleaseMask);
return 0;
case glslang::EOpMemoryBarrierBuffer:
builder.createMemoryBarrier(spv::ScopeDevice, spv::MemorySemanticsUniformMemoryMask |
spv::MemorySemanticsAcquireReleaseMask);
builder.createMemoryBarrier(memoryBarrierScope, spv::MemorySemanticsUniformMemoryMask |
spv::MemorySemanticsAcquireReleaseMask);
return 0;
case glslang::EOpMemoryBarrierImage:
builder.createMemoryBarrier(spv::ScopeDevice, spv::MemorySemanticsImageMemoryMask |
spv::MemorySemanticsAcquireReleaseMask);
builder.createMemoryBarrier(memoryBarrierScope, spv::MemorySemanticsImageMemoryMask |
spv::MemorySemanticsAcquireReleaseMask);
return 0;
case glslang::EOpMemoryBarrierShared:
builder.createMemoryBarrier(spv::ScopeDevice, spv::MemorySemanticsWorkgroupMemoryMask |
spv::MemorySemanticsAcquireReleaseMask);
builder.createMemoryBarrier(memoryBarrierScope, spv::MemorySemanticsWorkgroupMemoryMask |
spv::MemorySemanticsAcquireReleaseMask);
return 0;
case glslang::EOpGroupMemoryBarrier:
builder.createMemoryBarrier(spv::ScopeWorkgroup, spv::MemorySemanticsAllMemory |
@ -6520,7 +6824,7 @@ spv::Id TGlslangToSpvTraverser::getSymbolId(const glslang::TIntermSymbol* symbol
if (symbol->getType().isImage()) {
std::vector<spv::Decoration> memory;
TranslateMemoryDecoration(symbol->getType().getQualifier(), memory);
TranslateMemoryDecoration(symbol->getType().getQualifier(), memory, glslangIntermediate->usingVulkanMemoryModel());
for (unsigned int i = 0; i < memory.size(); ++i)
builder.addDecoration(id, memory[i]);
}

View file

@ -1216,19 +1216,35 @@ Id Builder::createUndefined(Id type)
}
// Comments in header
void Builder::createStore(Id rValue, Id lValue)
void Builder::createStore(Id rValue, Id lValue, spv::MemoryAccessMask memoryAccess, spv::Scope scope)
{
Instruction* store = new Instruction(OpStore);
store->addIdOperand(lValue);
store->addIdOperand(rValue);
if (memoryAccess != MemoryAccessMaskNone) {
store->addImmediateOperand(memoryAccess);
if (memoryAccess & spv::MemoryAccessMakePointerAvailableKHRMask) {
store->addIdOperand(makeUintConstant(scope));
}
}
buildPoint->addInstruction(std::unique_ptr<Instruction>(store));
}
// Comments in header
Id Builder::createLoad(Id lValue)
Id Builder::createLoad(Id lValue, spv::MemoryAccessMask memoryAccess, spv::Scope scope)
{
Instruction* load = new Instruction(getUniqueId(), getDerefTypeId(lValue), OpLoad);
load->addIdOperand(lValue);
if (memoryAccess != MemoryAccessMaskNone) {
load->addImmediateOperand(memoryAccess);
if (memoryAccess & spv::MemoryAccessMakePointerVisibleKHRMask) {
load->addIdOperand(makeUintConstant(scope));
}
}
buildPoint->addInstruction(std::unique_ptr<Instruction>(load));
return load->getResultId();
@ -1361,6 +1377,16 @@ void Builder::createNoResultOp(Op opCode, Id operand)
buildPoint->addInstruction(std::unique_ptr<Instruction>(op));
}
// An opcode that has multiple operands, no result id, and no type
void Builder::createNoResultOp(Op opCode, const std::vector<Id>& operands)
{
Instruction* op = new Instruction(opCode);
for (auto it = operands.cbegin(); it != operands.cend(); ++it) {
op->addIdOperand(*it);
}
buildPoint->addInstruction(std::unique_ptr<Instruction>(op));
}
// An opcode that has multiple operands, no result id, and no type
void Builder::createNoResultOp(Op opCode, const std::vector<IdImmediate>& operands)
{
@ -1679,6 +1705,12 @@ Id Builder::createTextureCall(Decoration precision, Id resultType, bool sparse,
mask = (ImageOperandsMask)(mask | ImageOperandsMinLodMask);
texArgs[numArgs++] = parameters.lodClamp;
}
if (parameters.nonprivate) {
mask = mask | ImageOperandsNonPrivateTexelKHRMask;
}
if (parameters.volatil) {
mask = mask | ImageOperandsVolatileTexelKHRMask;
}
if (mask == ImageOperandsMaskNone)
--numArgs; // undo speculative reservation for the mask argument
else
@ -2352,6 +2384,7 @@ void Builder::clearAccessChain()
accessChain.component = NoResult;
accessChain.preSwizzleBaseType = NoType;
accessChain.isRValue = false;
accessChain.coherentFlags.clear();
}
// Comments in header
@ -2378,7 +2411,7 @@ void Builder::accessChainPushSwizzle(std::vector<unsigned>& swizzle, Id preSwizz
}
// Comments in header
void Builder::accessChainStore(Id rvalue)
void Builder::accessChainStore(Id rvalue, spv::MemoryAccessMask memoryAccess, spv::Scope scope)
{
assert(accessChain.isRValue == false);
@ -2396,11 +2429,11 @@ void Builder::accessChainStore(Id rvalue)
source = createLvalueSwizzle(getTypeId(tempBaseId), tempBaseId, source, accessChain.swizzle);
}
createStore(source, base);
createStore(source, base, memoryAccess, scope);
}
// Comments in header
Id Builder::accessChainLoad(Decoration precision, Decoration nonUniform, Id resultType)
Id Builder::accessChainLoad(Decoration precision, Decoration nonUniform, Id resultType, spv::MemoryAccessMask memoryAccess, spv::Scope scope)
{
Id id;
@ -2444,7 +2477,7 @@ Id Builder::accessChainLoad(Decoration precision, Decoration nonUniform, Id resu
} else {
transferAccessChainSwizzle(true);
// load through the access chain
id = createLoad(collapseAccessChain());
id = createLoad(collapseAccessChain(), memoryAccess, scope);
setPrecision(id, precision);
addDecoration(id, nonUniform);
}

View file

@ -274,10 +274,10 @@ public:
Id createUndefined(Id type);
// Store into an Id and return the l-value
void createStore(Id rValue, Id lValue);
void createStore(Id rValue, Id lValue, spv::MemoryAccessMask memoryAccess = spv::MemoryAccessMaskNone, spv::Scope scope = spv::ScopeMax);
// Load from an Id and return it
Id createLoad(Id lValue);
Id createLoad(Id lValue, spv::MemoryAccessMask memoryAccess = spv::MemoryAccessMaskNone, spv::Scope scope = spv::ScopeMax);
// Create an OpAccessChain instruction
Id createAccessChain(StorageClass, Id base, const std::vector<Id>& offsets);
@ -296,6 +296,7 @@ public:
void createNoResultOp(Op);
void createNoResultOp(Op, Id operand);
void createNoResultOp(Op, const std::vector<Id>& operands);
void createNoResultOp(Op, const std::vector<IdImmediate>& operands);
void createControlBarrier(Scope execution, Scope memory, MemorySemanticsMask);
void createMemoryBarrier(unsigned executionScope, unsigned memorySemantics);
@ -365,6 +366,8 @@ public:
Id component;
Id texelOut;
Id lodClamp;
bool nonprivate;
bool volatil;
};
// Select the correct texture operation based on all inputs, and emit the correct instruction
@ -504,6 +507,43 @@ public:
Id component; // a dynamic component index, can coexist with a swizzle, done after the swizzle, NoResult if not present
Id preSwizzleBaseType; // dereferenced type, before swizzle or component is applied; NoType unless a swizzle or component is present
bool isRValue; // true if 'base' is an r-value, otherwise, base is an l-value
// Accumulate whether anything in the chain of structures has coherent decorations.
struct CoherentFlags {
unsigned coherent : 1;
unsigned devicecoherent : 1;
unsigned queuefamilycoherent : 1;
unsigned workgroupcoherent : 1;
unsigned subgroupcoherent : 1;
unsigned nonprivate : 1;
unsigned volatil : 1;
unsigned isImage : 1;
void clear() {
coherent = 0;
devicecoherent = 0;
queuefamilycoherent = 0;
workgroupcoherent = 0;
subgroupcoherent = 0;
nonprivate = 0;
volatil = 0;
isImage = 0;
}
CoherentFlags() { clear(); }
CoherentFlags operator |=(const CoherentFlags &other) {
coherent |= other.coherent;
devicecoherent |= other.devicecoherent;
queuefamilycoherent |= other.queuefamilycoherent;
workgroupcoherent |= other.workgroupcoherent;
subgroupcoherent |= other.subgroupcoherent;
nonprivate |= other.nonprivate;
volatil |= other.volatil;
isImage |= other.isImage;
return *this;
}
};
CoherentFlags coherentFlags;
};
//
@ -533,9 +573,10 @@ public:
}
// push offset onto the end of the chain
void accessChainPush(Id offset)
void accessChainPush(Id offset, AccessChain::CoherentFlags coherentFlags)
{
accessChain.indexChain.push_back(offset);
accessChain.coherentFlags |= coherentFlags;
}
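Editor's note (not part of the diff): the point of accumulating CoherentFlags in accessChainPush is that coherence discovered on an outer block is never lost when indexing into an unqualified member. A small standalone sketch of that behavior, using a cut-down copy of the struct above purely for illustration:

#include <cassert>

// Cut-down copy of the CoherentFlags bitfield for illustration.
struct CoherentFlags {
    unsigned coherent : 1;
    unsigned nonprivate : 1;
    void clear() { coherent = 0; nonprivate = 0; }
    CoherentFlags() { clear(); }
    CoherentFlags operator|=(const CoherentFlags& other) {
        coherent   |= other.coherent;
        nonprivate |= other.nonprivate;
        return *this;
    }
};

int main() {
    CoherentFlags chain;    // accessChain.coherentFlags after clearAccessChain()
    CoherentFlags block;    // flags of a coherent block being indexed
    block.coherent = 1;
    block.nonprivate = 1;

    chain |= block;         // accessChainPush(offset, TranslateCoherent(blockType))

    CoherentFlags member;   // an unqualified member pushed deeper in the chain
    chain |= member;        // pushing deeper never loses coherence already seen

    assert(chain.coherent && chain.nonprivate);
    return 0;
}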
// push new swizzle onto the end of any existing swizzle, merging into a single swizzle
@ -553,10 +594,10 @@ public:
}
// use accessChain and swizzle to store value
void accessChainStore(Id rvalue);
void accessChainStore(Id rvalue, spv::MemoryAccessMask memoryAccess = spv::MemoryAccessMaskNone, spv::Scope scope = spv::ScopeMax);
// use accessChain and swizzle to load an r-value
Id accessChainLoad(Decoration precision, Decoration nonUniform, Id ResultType);
Id accessChainLoad(Decoration precision, Decoration nonUniform, Id ResultType, spv::MemoryAccessMask memoryAccess = spv::MemoryAccessMaskNone, spv::Scope scope = spv::ScopeMax);
// get the direct pointer for an l-value
Id accessChainGetLValue();

View file

@ -535,6 +535,11 @@ void SpirvStream::disassembleInstruction(Id resultId, Id /*typeId*/, Op opCode,
case OperandLiteralString:
numOperands -= disassembleString();
break;
case OperandMemoryAccess:
outputMask(OperandMemoryAccess, stream[word++]);
--numOperands;
disassembleIds(numOperands);
return;
default:
assert(operandClass >= OperandSource && operandClass < OperandOpcode);

View file

@ -117,9 +117,10 @@ const char* AddressingString(int addr)
const char* MemoryString(int mem)
{
switch (mem) {
case 0: return "Simple";
case 1: return "GLSL450";
case 2: return "OpenCL";
case MemoryModelSimple: return "Simple";
case MemoryModelGLSL450: return "GLSL450";
case MemoryModelOpenCL: return "OpenCL";
case MemoryModelVulkanKHR: return "VulkanKHR";
default: return "Bad";
}
@ -499,19 +500,23 @@ const char* ImageChannelDataTypeString(int type)
}
}
const int ImageOperandsCeiling = 8;
const int ImageOperandsCeiling = 12;
const char* ImageOperandsString(int format)
{
switch (format) {
case 0: return "Bias";
case 1: return "Lod";
case 2: return "Grad";
case 3: return "ConstOffset";
case 4: return "Offset";
case 5: return "ConstOffsets";
case 6: return "Sample";
case 7: return "MinLod";
case ImageOperandsBiasShift: return "Bias";
case ImageOperandsLodShift: return "Lod";
case ImageOperandsGradShift: return "Grad";
case ImageOperandsConstOffsetShift: return "ConstOffset";
case ImageOperandsOffsetShift: return "Offset";
case ImageOperandsConstOffsetsShift: return "ConstOffsets";
case ImageOperandsSampleShift: return "Sample";
case ImageOperandsMinLodShift: return "MinLod";
case ImageOperandsMakeTexelAvailableKHRShift: return "MakeTexelAvailableKHR";
case ImageOperandsMakeTexelVisibleKHRShift: return "MakeTexelVisibleKHR";
case ImageOperandsNonPrivateTexelKHRShift: return "NonPrivateTexelKHR";
case ImageOperandsVolatileTexelKHRShift: return "VolatileTexelKHR";
case ImageOperandsCeiling:
default:
@ -645,12 +650,17 @@ const char* MemorySemanticsString(int mem)
}
}
const int MemoryAccessCeiling = 6;
const char* MemoryAccessString(int mem)
{
switch (mem) {
case 0: return "Volatile";
case 1: return "Aligned";
case 2: return "Nontemporal";
case MemoryAccessVolatileShift: return "Volatile";
case MemoryAccessAlignedShift: return "Aligned";
case MemoryAccessNontemporalShift: return "Nontemporal";
case MemoryAccessMakePointerAvailableKHRShift: return "MakePointerAvailableKHR";
case MemoryAccessMakePointerVisibleKHRShift: return "MakePointerVisibleKHR";
case MemoryAccessNonPrivatePointerKHRShift: return "NonPrivatePointerKHR";
default: return "Bad";
}
@ -833,6 +843,9 @@ const char* CapabilityString(int info)
case CapabilityUniformTexelBufferArrayNonUniformIndexingEXT: return "CapabilityUniformTexelBufferArrayNonUniformIndexingEXT";
case CapabilityStorageTexelBufferArrayNonUniformIndexingEXT: return "CapabilityStorageTexelBufferArrayNonUniformIndexingEXT";
case CapabilityVulkanMemoryModelKHR: return "CapabilityVulkanMemoryModelKHR";
case CapabilityVulkanMemoryModelDeviceScopeKHR: return "CapabilityVulkanMemoryModelDeviceScopeKHR";
default: return "Bad";
}
}
@ -1245,6 +1258,7 @@ EnumParameters DecorationParams[DecorationCeiling];
EnumParameters LoopControlParams[FunctionControlCeiling];
EnumParameters SelectionControlParams[SelectControlCeiling];
EnumParameters FunctionControlParams[FunctionControlCeiling];
EnumParameters MemoryAccessParams[MemoryAccessCeiling];
// Set up all the parameterizing descriptions of the opcodes, operands, etc.
void Parameterize()
@ -1400,7 +1414,7 @@ void Parameterize()
OperandClassParams[OperandLoop].set(LoopControlCeiling, LoopControlString, LoopControlParams, true);
OperandClassParams[OperandFunction].set(FunctionControlCeiling, FunctionControlString, FunctionControlParams, true);
OperandClassParams[OperandMemorySemantics].set(0, MemorySemanticsString, nullptr, true);
OperandClassParams[OperandMemoryAccess].set(0, MemoryAccessString, nullptr, true);
OperandClassParams[OperandMemoryAccess].set(MemoryAccessCeiling, MemoryAccessString, MemoryAccessParams, true);
OperandClassParams[OperandScope].set(0, ScopeString, nullptr);
OperandClassParams[OperandGroupOperation].set(0, GroupOperationString, nullptr);
OperandClassParams[OperandKernelEnqueueFlags].set(0, KernelEnqueueFlagsString, nullptr);
@ -1522,10 +1536,14 @@ void Parameterize()
InstructionDesc[OpLoad].operands.push(OperandId, "'Pointer'");
InstructionDesc[OpLoad].operands.push(OperandMemoryAccess, "", true);
InstructionDesc[OpLoad].operands.push(OperandLiteralNumber, "", true);
InstructionDesc[OpLoad].operands.push(OperandId, "", true);
InstructionDesc[OpStore].operands.push(OperandId, "'Pointer'");
InstructionDesc[OpStore].operands.push(OperandId, "'Object'");
InstructionDesc[OpStore].operands.push(OperandMemoryAccess, "", true);
InstructionDesc[OpStore].operands.push(OperandLiteralNumber, "", true);
InstructionDesc[OpStore].operands.push(OperandId, "", true);
InstructionDesc[OpPhi].operands.push(OperandVariableIds, "'Variable, Parent, ...'");

View file

@ -87,6 +87,7 @@ enum MemoryModel {
MemoryModelSimple = 0,
MemoryModelGLSL450 = 1,
MemoryModelOpenCL = 2,
MemoryModelVulkanKHR = 3,
MemoryModelMax = 0x7fffffff,
};
@ -275,6 +276,10 @@ enum ImageOperandsShift {
ImageOperandsConstOffsetsShift = 5,
ImageOperandsSampleShift = 6,
ImageOperandsMinLodShift = 7,
ImageOperandsMakeTexelAvailableKHRShift = 8,
ImageOperandsMakeTexelVisibleKHRShift = 9,
ImageOperandsNonPrivateTexelKHRShift = 10,
ImageOperandsVolatileTexelKHRShift = 11,
ImageOperandsMax = 0x7fffffff,
};
@ -288,6 +293,10 @@ enum ImageOperandsMask {
ImageOperandsConstOffsetsMask = 0x00000020,
ImageOperandsSampleMask = 0x00000040,
ImageOperandsMinLodMask = 0x00000080,
ImageOperandsMakeTexelAvailableKHRMask = 0x00000100,
ImageOperandsMakeTexelVisibleKHRMask = 0x00000200,
ImageOperandsNonPrivateTexelKHRMask = 0x00000400,
ImageOperandsVolatileTexelKHRMask = 0x00000800,
};
enum FPFastMathModeShift {
@ -528,6 +537,9 @@ enum MemorySemanticsShift {
MemorySemanticsCrossWorkgroupMemoryShift = 9,
MemorySemanticsAtomicCounterMemoryShift = 10,
MemorySemanticsImageMemoryShift = 11,
MemorySemanticsOutputMemoryKHRShift = 12,
MemorySemanticsMakeAvailableKHRShift = 13,
MemorySemanticsMakeVisibleKHRShift = 14,
MemorySemanticsMax = 0x7fffffff,
};
@ -543,12 +555,18 @@ enum MemorySemanticsMask {
MemorySemanticsCrossWorkgroupMemoryMask = 0x00000200,
MemorySemanticsAtomicCounterMemoryMask = 0x00000400,
MemorySemanticsImageMemoryMask = 0x00000800,
MemorySemanticsOutputMemoryKHRMask = 0x00001000,
MemorySemanticsMakeAvailableKHRMask = 0x00002000,
MemorySemanticsMakeVisibleKHRMask = 0x00004000,
};
enum MemoryAccessShift {
MemoryAccessVolatileShift = 0,
MemoryAccessAlignedShift = 1,
MemoryAccessNontemporalShift = 2,
MemoryAccessMakePointerAvailableKHRShift = 3,
MemoryAccessMakePointerVisibleKHRShift = 4,
MemoryAccessNonPrivatePointerKHRShift = 5,
MemoryAccessMax = 0x7fffffff,
};
@ -557,6 +575,9 @@ enum MemoryAccessMask {
MemoryAccessVolatileMask = 0x00000001,
MemoryAccessAlignedMask = 0x00000002,
MemoryAccessNontemporalMask = 0x00000004,
MemoryAccessMakePointerAvailableKHRMask = 0x00000008,
MemoryAccessMakePointerVisibleKHRMask = 0x00000010,
MemoryAccessNonPrivatePointerKHRMask = 0x00000020,
};
enum Scope {
@ -565,6 +586,7 @@ enum Scope {
ScopeWorkgroup = 2,
ScopeSubgroup = 3,
ScopeInvocation = 4,
ScopeQueueFamilyKHR = 5,
ScopeMax = 0x7fffffff,
};
@ -708,6 +730,8 @@ enum Capability {
CapabilityInputAttachmentArrayNonUniformIndexingEXT = 5310,
CapabilityUniformTexelBufferArrayNonUniformIndexingEXT = 5311,
CapabilityStorageTexelBufferArrayNonUniformIndexingEXT = 5312,
CapabilityVulkanMemoryModelKHR = 5345,
CapabilityVulkanMemoryModelDeviceScopeKHR = 5346,
CapabilitySubgroupShuffleINTEL = 5568,
CapabilitySubgroupBufferBlockIOINTEL = 5569,
CapabilitySubgroupImageBlockIOINTEL = 5570,