Reserve unused std140 uniform block in reflection, and fix uniform block matrix layout (#2041)
According to GLSL 4.60.7, section 4.4.5 "Uniform and Shader Storage Block Layout Qualifiers": "The packed qualifier overrides only std140, std430, and shared; other qualifiers are inherited. When packed is used, no shareable layout is guaranteed. The compiler and linker can optimize memory use based on what variables actively get used and on other criteria. Offsets must be queried, as there is no other way of guaranteeing where (and which) variables reside within the block." Unlike packed blocks, std140 and shared blocks have a guaranteed layout, so reflection should reserve them even when they are unused.

The same section also says: "The row_major and column_major qualifiers only affect the layout of matrices, including all matrices contained in structures and arrays they are applied to, to all depths of nesting. These qualifiers can be applied to other types, but will have no effect." We therefore need to make sure every matrix block member is affected, at all depths of nesting.

Support EShMsgKeepUncalled in reflection. EShMsgKeepUncalled is a link message, and it should be the single option controlling uncalled-function optimization: when it is false at link time, the linker drops uncalled function sequences from the AST; when it is true, the linker keeps all of them. At reflection time we then simply traverse every function sequence that remains. This keeps EShMsgKeepUncalled a linker-only option that still determines what reflection sees.

Recursively apply layout packing to block members. Layout packing was not set recursively on nested struct members, which caused TReflection::getOffsets to compute incorrect offsets.
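As a sketch of how these pieces fit together from the API side: the snippet below is not part of this change; it assumes the public TShader/TProgram API, a caller-supplied resource-limits table, and the EShReflectionSharedStd140Blocks option name used in this commit. It compiles a shader whose std140 block is never referenced and asks reflection to reserve it anyway.

#include <glslang/Public/ShaderLang.h>

// A std140 block that no code references: previously dropped from reflection,
// now reserved when EShReflectionSharedStd140Blocks is requested.
static const char* vertexSource = R"(
    #version 450
    layout(std140) uniform UnusedBlock { mat4 transform; };
    void main() { gl_Position = vec4(0.0); }
)";

bool reflectUnusedBlock(const TBuiltInResource& resources)
{
    glslang::InitializeProcess();

    glslang::TShader shader(EShLangVertex);
    shader.setStrings(&vertexSource, 1);

    // EShMsgKeepUncalled is a link-time message: with it set, uncalled
    // functions (and the uniforms they reference) survive into the linked AST.
    const EShMessages messages = static_cast<EShMessages>(EShMsgDefault | EShMsgKeepUncalled);
    if (!shader.parse(&resources, 450, false, messages))
        return false;

    glslang::TProgram program;
    program.addShader(&shader);
    if (!program.link(messages))
        return false;

    // Request reservation of unused std140/shared uniform blocks.
    if (!program.buildReflection(EShReflectionSharedStd140Blocks))
        return false;

    const int blocks = program.getNumUniformBlocks(); // UnusedBlock is now reported
    glslang::FinalizeProcess();
    return blocks == 1;
}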
This commit is contained in:
parent
c6874320de
commit
24dcbd1b1f
9 changed files with 315 additions and 101 deletions
@@ -7593,6 +7593,8 @@ void TParseContext::declareBlock(const TSourceLoc& loc, TTypeList& typeList, con
     fixBlockLocations(loc, currentBlockQualifier, typeList, memberWithLocation, memberWithoutLocation);
     fixXfbOffsets(currentBlockQualifier, typeList);
     fixBlockUniformOffsets(currentBlockQualifier, typeList);
+    fixBlockUniformLayoutMatrix(currentBlockQualifier, &typeList, nullptr);
+    fixBlockUniformLayoutPacking(currentBlockQualifier, &typeList, nullptr);
     for (unsigned int member = 0; member < typeList.size(); ++member)
         layoutTypeCheck(typeList[member].loc, *typeList[member].type);
@@ -7963,6 +7965,101 @@ void TParseContext::fixBlockUniformOffsets(TQualifier& qualifier, TTypeList& typ
     }
 }
 
+//
+// Spread the block's LayoutMatrix to its members; if a member is a struct,
+// spread it to that struct's members too, recursively.
+//
+void TParseContext::fixBlockUniformLayoutMatrix(TQualifier& qualifier, TTypeList* originTypeList,
+                                                TTypeList* tmpTypeList)
+{
+    assert(tmpTypeList == nullptr || originTypeList->size() == tmpTypeList->size());
+    for (unsigned int member = 0; member < originTypeList->size(); ++member) {
+        if (qualifier.layoutPacking != ElpNone) {
+            if (tmpTypeList == nullptr) {
+                if (((*originTypeList)[member].type->isMatrix() ||
+                     (*originTypeList)[member].type->getBasicType() == EbtStruct) &&
+                    (*originTypeList)[member].type->getQualifier().layoutMatrix == ElmNone) {
+                    (*originTypeList)[member].type->getQualifier().layoutMatrix = qualifier.layoutMatrix;
+                }
+            } else {
+                if (((*tmpTypeList)[member].type->isMatrix() ||
+                     (*tmpTypeList)[member].type->getBasicType() == EbtStruct) &&
+                    (*tmpTypeList)[member].type->getQualifier().layoutMatrix == ElmNone) {
+                    (*tmpTypeList)[member].type->getQualifier().layoutMatrix = qualifier.layoutMatrix;
+                }
+            }
+        }
+
+        if ((*originTypeList)[member].type->getBasicType() == EbtStruct) {
+            TQualifier* memberQualifier = nullptr;
+            // a block member may declare its own matrix layout; if so, recurse with the member's qualifier
+            if ((*originTypeList)[member].type->getQualifier().layoutMatrix == ElmNone) {
+                memberQualifier = &qualifier;
+            } else {
+                memberQualifier = &((*originTypeList)[member].type->getQualifier());
+            }
+
+            const TType* tmpType = tmpTypeList == nullptr ?
+                (*originTypeList)[member].type->clone() : (*tmpTypeList)[member].type;
+
+            fixBlockUniformLayoutMatrix(*memberQualifier, (*originTypeList)[member].type->getWritableStruct(),
+                                        tmpType->getWritableStruct());
+
+            const TTypeList* structure = recordStructCopy(matrixFixRecord, (*originTypeList)[member].type, tmpType);
+
+            if (tmpTypeList == nullptr) {
+                (*originTypeList)[member].type->setStruct(const_cast<TTypeList*>(structure));
+            }
+            if (tmpTypeList != nullptr) {
+                (*tmpTypeList)[member].type->setStruct(const_cast<TTypeList*>(structure));
+            }
+        }
+    }
+}
+
+//
+// Spread the block's LayoutPacking to its members; if a member is a struct,
+// spread it to that struct's members too, recursively.
+//
+void TParseContext::fixBlockUniformLayoutPacking(TQualifier& qualifier, TTypeList* originTypeList,
+                                                 TTypeList* tmpTypeList)
+{
+    assert(tmpTypeList == nullptr || originTypeList->size() == tmpTypeList->size());
+    for (unsigned int member = 0; member < originTypeList->size(); ++member) {
+        if (qualifier.layoutPacking != ElpNone) {
+            if (tmpTypeList == nullptr) {
+                if ((*originTypeList)[member].type->getQualifier().layoutPacking == ElpNone) {
+                    (*originTypeList)[member].type->getQualifier().layoutPacking = qualifier.layoutPacking;
+                }
+            } else {
+                if ((*tmpTypeList)[member].type->getQualifier().layoutPacking == ElpNone) {
+                    (*tmpTypeList)[member].type->getQualifier().layoutPacking = qualifier.layoutPacking;
+                }
+            }
+        }
+
+        if ((*originTypeList)[member].type->getBasicType() == EbtStruct) {
+            // Deep copy the type into the pool: the same struct used in different blocks
+            // may end up with different layout qualifiers, so each use needs its own object.
+            const TType* tmpType = tmpTypeList == nullptr ?
+                (*originTypeList)[member].type->clone() : (*tmpTypeList)[member].type;
+
+            fixBlockUniformLayoutPacking(qualifier, (*originTypeList)[member].type->getWritableStruct(),
+                                         tmpType->getWritableStruct());
+
+            const TTypeList* structure = recordStructCopy(packingFixRecord, (*originTypeList)[member].type, tmpType);
+
+            if (tmpTypeList == nullptr) {
+                (*originTypeList)[member].type->setStruct(const_cast<TTypeList*>(structure));
+            }
+            if (tmpTypeList != nullptr) {
+                (*tmpTypeList)[member].type->setStruct(const_cast<TTypeList*>(structure));
+            }
+        }
+    }
+}
+
 // For an identifier that is already declared, add more qualification to it.
 void TParseContext::addQualifierToExisting(const TSourceLoc& loc, TQualifier qualifier, const TString& identifier)
 {
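Both functions above implement the same inheritance rule. The following standalone sketch (not glslang code; the types are simplified stand-ins for TTypeList members, and the matrix/struct member filter is omitted) models how a block-level qualifier flows into nested struct members without overriding explicit member qualifiers:

#include <cassert>
#include <vector>

enum LayoutMatrix { ElmNoneSketch, ElmRowMajorSketch, ElmColumnMajorSketch };

struct Member {
    LayoutMatrix matrix = ElmNoneSketch;
    std::vector<Member> structMembers;   // non-empty => this member is a struct
};

void spreadMatrixLayout(LayoutMatrix blockValue, std::vector<Member>& members)
{
    for (Member& m : members) {
        if (m.matrix == ElmNoneSketch)
            m.matrix = blockValue;       // inherit the enclosing qualifier
        // the member's own qualifier (explicit or inherited) governs deeper levels
        spreadMatrixLayout(m.matrix, m.structMembers);
    }
}

int main()
{
    // layout(column_major) uniform Block { S s; row_major mat4 m; }, where S holds a mat4
    std::vector<Member> block(2);
    block[0].structMembers.resize(1);     // S { mat4 inner; }
    block[1].matrix = ElmRowMajorSketch;  // explicit member qualifier wins

    spreadMatrixLayout(ElmColumnMajorSketch, block);
    assert(block[0].structMembers[0].matrix == ElmColumnMajorSketch);
    assert(block[1].matrix == ElmRowMajorSketch);
    return 0;
}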
@@ -8421,5 +8518,43 @@ TIntermNode* TParseContext::addSwitch(const TSourceLoc& loc, TIntermTyped* expre
     return switchNode;
 }
 
+//
+// When a struct used in a block picks up its own layout packing or layout matrix,
+// record the original structure in the map and the modified copy in the copy table.
+//
+const TTypeList* TParseContext::recordStructCopy(TStructRecord& record, const TType* originType, const TType* tmpType)
+{
+    size_t memberCount = tmpType->getStruct()->size();
+    size_t originHash = 0, tmpHash = 0;
+    std::hash<size_t> hasher;
+    for (uint32_t i = 0; i < memberCount; i++) {
+        size_t originMemberHash = hasher(originType->getStruct()->at(i).type->getQualifier().layoutPacking +
+                                         originType->getStruct()->at(i).type->getQualifier().layoutMatrix);
+        size_t tmpMemberHash = hasher(tmpType->getStruct()->at(i).type->getQualifier().layoutPacking +
+                                      tmpType->getStruct()->at(i).type->getQualifier().layoutMatrix);
+        originHash = hasher((originHash ^ originMemberHash) << 1);
+        tmpHash = hasher((tmpHash ^ tmpMemberHash) << 1);
+    }
+    const TTypeList* originStruct = originType->getStruct();
+    const TTypeList* tmpStruct = tmpType->getStruct();
+    if (originHash != tmpHash) {
+        auto fixRecords = record.find(originStruct);
+        if (fixRecords != record.end()) {
+            auto fixRecord = fixRecords->second.find(tmpHash);
+            if (fixRecord != fixRecords->second.end()) {
+                return fixRecord->second;
+            } else {
+                record[originStruct][tmpHash] = tmpStruct;
+                return tmpStruct;
+            }
+        } else {
+            record[originStruct] = std::map<size_t, const TTypeList*>();
+            record[originStruct][tmpHash] = tmpStruct;
+            return tmpStruct;
+        }
+    }
+    return originStruct;
+}
+
 } // end namespace glslang
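For clarity, here is a standalone sketch (not glslang code; the types are simplified stand-ins) of the duplicate-detection hash recordStructCopy uses: per member, hash the combined layout qualifiers, then fold the member hashes together. Equal hashes mean the copy's qualifiers match the original's and the copy is redundant.

#include <cstddef>
#include <functional>
#include <vector>

struct MemberQualifiers { int layoutPacking; int layoutMatrix; };

std::size_t hashQualifiers(const std::vector<MemberQualifiers>& members)
{
    std::hash<std::size_t> hasher;
    std::size_t h = 0;
    for (const MemberQualifiers& q : members)
        h = hasher((h ^ hasher(std::size_t(q.layoutPacking + q.layoutMatrix))) << 1);
    return h;
}

int main()
{
    // Same struct, but the copy inherited a packing value on its first member.
    std::vector<MemberQualifiers> origin{{0, 0}, {0, 1}};
    std::vector<MemberQualifiers> copy{{1, 0}, {0, 1}};
    // Differing hashes: recordStructCopy caches the copy under (origin, hash)
    // and reuses it the next time the same qualifier combination appears.
    return hashQualifiers(origin) == hashQualifiers(copy) ? 1 : 0;
}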
@@ -68,6 +68,7 @@ class TScanContext;
 class TPpContext;
 
 typedef std::set<int> TIdSetType;
+typedef std::map<const TTypeList*, std::map<size_t, const TTypeList*>> TStructRecord;
 
 //
 // Sharable code (as well as what's in TParseVersions) across
@@ -418,12 +419,15 @@ public:
     void fixBlockLocations(const TSourceLoc&, TQualifier&, TTypeList&, bool memberWithLocation, bool memberWithoutLocation);
     void fixXfbOffsets(TQualifier&, TTypeList&);
     void fixBlockUniformOffsets(TQualifier&, TTypeList&);
+    void fixBlockUniformLayoutMatrix(TQualifier&, TTypeList*, TTypeList*);
+    void fixBlockUniformLayoutPacking(TQualifier&, TTypeList*, TTypeList*);
     void addQualifierToExisting(const TSourceLoc&, TQualifier, const TString& identifier);
     void addQualifierToExisting(const TSourceLoc&, TQualifier, TIdentifierList&);
     void invariantCheck(const TSourceLoc&, const TQualifier&);
     void updateStandaloneQualifierDefaults(const TSourceLoc&, const TPublicType&);
     void wrapupSwitchSubsequence(TIntermAggregate* statements, TIntermNode* branchNode);
     TIntermNode* addSwitch(const TSourceLoc&, TIntermTyped* expression, TIntermAggregate* body);
+    const TTypeList* recordStructCopy(TStructRecord&, const TType*, const TType*);
 
 #ifndef GLSLANG_WEB
     TAttributeType attributeFromName(const TString& name) const;
@@ -484,6 +488,8 @@ protected:
     bool anyIndexLimits;
     TIdSetType inductiveLoopIds;
     TVector<TIntermTyped*> needsIndexLimitationChecking;
+    TStructRecord matrixFixRecord;
+    TStructRecord packingFixRecord;
 
     //
     // Geometry shader input arrays:
@@ -77,10 +77,10 @@ namespace glslang {
 // This is in the glslang namespace directly so it can be a friend of TReflection.
 //
 
-class TReflectionTraverser : public TLiveTraverser {
+class TReflectionTraverser : public TIntermTraverser {
 public:
     TReflectionTraverser(const TIntermediate& i, TReflection& r) :
-        TLiveTraverser(i), reflection(r) { }
+        TIntermTraverser(), intermediate(i), reflection(r), updateStageMasks(true) { }
 
     virtual bool visitBinary(TVisit, TIntermBinary* node);
     virtual void visitSymbol(TIntermSymbol* base);
@@ -92,11 +92,37 @@ public:
         if (processedDerefs.find(&base) == processedDerefs.end()) {
             processedDerefs.insert(&base);
 
+            uint32_t blockIndex = -1;
+            uint32_t offset = -1;
+            TList<TIntermBinary*> derefs;
+            TString baseName = base.getName();
+
+            if (base.getType().getBasicType() == EbtBlock) {
+                offset = 0;
+                bool anonymous = IsAnonymous(baseName);
+                const TString& blockName = base.getType().getTypeName();
+
+                if (!anonymous)
+                    baseName = blockName;
+                else
+                    baseName = "";
+
+                if (base.getType().isArray()) {
+                    TType derefType(base.getType(), 0);
+
+                    assert(!anonymous);
+                    for (int e = 0; e < base.getType().getCumulativeArraySize(); ++e)
+                        blockIndex = addBlockName(blockName + "[" + String(e) + "]", derefType,
+                                                  intermediate.getBlockSize(base.getType()));
+                }
+                else
+                    blockIndex = addBlockName(blockName, base.getType(), intermediate.getBlockSize(base.getType()));
+            }
+
             // Use a degenerate (empty) set of dereferences to immediately put us at the end of
             // the dereference chain expected by blowUpActiveAggregate.
-            TList<TIntermBinary*> derefs;
-            blowUpActiveAggregate(base.getType(), base.getName(), derefs, derefs.end(), -1, -1, 0, 0,
-                                  base.getQualifier().storage, true);
+            blowUpActiveAggregate(base.getType(), baseName, derefs, derefs.end(), offset, blockIndex, 0, 0,
+                                  base.getQualifier().storage, updateStageMasks);
         }
     }
@@ -155,9 +181,9 @@ public:
     void getOffsets(const TType& type, TVector<int>& offsets)
    {
        const TTypeList& memberList = *type.getStruct();

        int memberSize = 0;
        int offset = 0;

        for (size_t m = 0; m < offsets.size(); ++m) {
            // if the user supplied an offset, snap to it now
            if (memberList[m].type->getQualifier().hasOffset())
@@ -334,7 +360,8 @@ public:
 
         for (int i = 0; i < arrayIterateSize; ++i) {
             TString newBaseName = name;
-            newBaseName.append(TString("[") + String(i) + "]");
+            if (terminalType->getBasicType() != EbtBlock)
+                newBaseName.append(TString("[") + String(i) + "]");
             TType derefType(*terminalType, 0);
             if (offset >= 0)
                 offset = baseOffset + stride * i;
@@ -643,13 +670,17 @@ public:
 
             blocks.back().numMembers = countAggregateMembers(type);
 
-            EShLanguageMask& stages = blocks.back().stages;
-            stages = static_cast<EShLanguageMask>(stages | 1 << intermediate.getStage());
+            if (updateStageMasks) {
+                EShLanguageMask& stages = blocks.back().stages;
+                stages = static_cast<EShLanguageMask>(stages | 1 << intermediate.getStage());
+            }
         } else {
             blockIndex = it->second;
 
-            EShLanguageMask& stages = blocks[blockIndex].stages;
-            stages = static_cast<EShLanguageMask>(stages | 1 << intermediate.getStage());
+            if (updateStageMasks) {
+                EShLanguageMask& stages = blocks[blockIndex].stages;
+                stages = static_cast<EShLanguageMask>(stages | 1 << intermediate.getStage());
+            }
         }
 
         return blockIndex;
@@ -995,8 +1026,10 @@ public:
         return type.isArray() ? type.getOuterArraySize() : 1;
     }
 
+    const TIntermediate& intermediate;
     TReflection& reflection;
     std::set<const TIntermNode*> processedDerefs;
+    bool updateStageMasks;
 
 protected:
     TReflectionTraverser(TReflectionTraverser&);
@@ -1029,8 +1062,15 @@ bool TReflectionTraverser::visitBinary(TVisit /* visit */, TIntermBinary* node)
 // To reflect non-dereferenced objects.
 void TReflectionTraverser::visitSymbol(TIntermSymbol* base)
 {
-    if (base->getQualifier().storage == EvqUniform)
-        addUniform(*base);
+    if (base->getQualifier().storage == EvqUniform) {
+        if (base->getBasicType() == EbtBlock) {
+            if (reflection.options & EShReflectionSharedStd140Blocks) {
+                addUniform(*base);
+            }
+        } else {
+            addUniform(*base);
+        }
+    }
 
     if ((intermediate.getStage() == reflection.firstStage && base->getQualifier().isPipeInput()) ||
         (intermediate.getStage() == reflection.lastStage && base->getQualifier().isPipeOutput()))
@@ -1135,15 +1175,39 @@ bool TReflection::addStage(EShLanguage stage, const TIntermediate& intermediate)
 
     TReflectionTraverser it(intermediate, *this);
 
-    // put the entry point on the list of functions to process
-    it.pushFunction(intermediate.getEntryPointMangledName().c_str());
-
-    // process all the functions
-    while (! it.functions.empty()) {
-        TIntermNode* function = it.functions.back();
-        it.functions.pop_back();
-        function->traverse(&it);
+    for (auto& sequnence : intermediate.getTreeRoot()->getAsAggregate()->getSequence()) {
+        if (sequnence->getAsAggregate() != nullptr) {
+            if (sequnence->getAsAggregate()->getOp() == glslang::EOpLinkerObjects) {
+                it.updateStageMasks = false;
+                TIntermAggregate* linkerObjects = sequnence->getAsAggregate();
+                for (auto& sequnence : linkerObjects->getSequence()) {
+                    auto pNode = sequnence->getAsSymbolNode();
+                    if (pNode != nullptr && pNode->getQualifier().storage == EvqUniform &&
+                        (options & EShReflectionSharedStd140Blocks)) {
+                        if (pNode->getBasicType() == EbtBlock) {
+                            // collect std140 and shared uniform blocks from the AST
+                            if (pNode->getQualifier().layoutPacking == ElpStd140 ||
+                                pNode->getQualifier().layoutPacking == ElpShared) {
+                                pNode->traverse(&it);
+                            }
+                        }
+                    }
+                }
+            } else {
+                // This traverser visits every function in the AST.
+                // To reflect uncalled functions as well, set the link message EShMsgKeepUncalled:
+                // the linker then keeps every function in the AST, even uncalled ones, so uniforms
+                // referenced only by uncalled functions still show up in reflection.
+                //
+                // To reflect only live nodes, use the default link messages (or leave
+                // EShMsgKeepUncalled unset); the linker then drops uncalled functions from the
+                // AST, so traversing all remaining functions is equivalent to traversing only
+                // live ones.
+                it.updateStageMasks = true;
+                sequnence->getAsAggregate()->traverse(&it);
+            }
+        }
+    }
+    it.updateStageMasks = true;
 
     buildCounterIndices(intermediate);
     buildUniformStageMask(intermediate);
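To illustrate the traversal change, a hypothetical shader (not part of this commit) showing how the link-time flag alone now decides what reflection sees, since addStage traverses whatever functions the linker left in the AST:

// A uniform referenced only from an uncalled function:
static const char* fragSource = R"(
    #version 450
    uniform float onlyInDead;
    layout(location = 0) out vec4 color;
    float dead() { return onlyInDead; }  // never called
    void main() { color = vec4(1.0); }
)";
// link(EShMsgDefault):      the linker strips dead(), so onlyInDead is not reflected.
// link(EShMsgKeepUncalled): dead() stays in the AST, so onlyInDead is reflected.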