Diffstat (limited to '3rdparty/bgfx/3rdparty/glslang/hlsl/hlslParseHelper.cpp')
-rw-r--r--  3rdparty/bgfx/3rdparty/glslang/hlsl/hlslParseHelper.cpp  2180
1 file changed, 1662 insertions(+), 518 deletions(-)
diff --git a/3rdparty/bgfx/3rdparty/glslang/hlsl/hlslParseHelper.cpp b/3rdparty/bgfx/3rdparty/glslang/hlsl/hlslParseHelper.cpp
index fca8452ab04..48b94b62c4f 100644
--- a/3rdparty/bgfx/3rdparty/glslang/hlsl/hlslParseHelper.cpp
+++ b/3rdparty/bgfx/3rdparty/glslang/hlsl/hlslParseHelper.cpp
@@ -48,6 +48,7 @@
#include <functional>
#include <cctype>
#include <array>
+#include <set>
namespace glslang {
@@ -58,14 +59,14 @@ HlslParseContext::HlslParseContext(TSymbolTable& symbolTable, TIntermediate& int
TParseContextBase(symbolTable, interm, parsingBuiltins, version, profile, spvVersion, language, infoSink, forwardCompatible, messages),
contextPragma(true, false),
loopNestingLevel(0), annotationNestingLevel(0), structNestingLevel(0), controlFlowNestingLevel(0),
- inEntryPoint(false),
postEntryPointReturn(false),
limits(resources.limits),
- entryPointOutput(nullptr),
builtInIoIndex(nullptr),
builtInIoBase(nullptr),
nextInLocation(0), nextOutLocation(0),
- sourceEntryPointName(sourceEntryPointName)
+ sourceEntryPointName(sourceEntryPointName),
+ entryPointFunction(nullptr),
+ entryPointFunctionBody(nullptr)
{
globalUniformDefaults.clear();
globalUniformDefaults.layoutMatrix = ElmRowMajor;
@@ -161,6 +162,18 @@ bool HlslParseContext::shouldConvertLValue(const TIntermNode* node) const
return false;
}
+void HlslParseContext::growGlobalUniformBlock(TSourceLoc& loc, TType& memberType, TString& memberName, TTypeList* newTypeList)
+{
+ newTypeList = nullptr;
+ correctUniform(memberType.getQualifier());
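+ // If a uniform-sanitized version of this struct was registered in ioTypeMap, use its member list instead.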
+ if (memberType.isStruct()) {
+ auto it = ioTypeMap.find(memberType.getStruct());
+ if (it != ioTypeMap.end() && it->second.uniform)
+ newTypeList = it->second.uniform;
+ }
+ TParseContextBase::growGlobalUniformBlock(loc, memberType, memberName, newTypeList);
+}
+
//
// Return a TLayoutFormat corresponding to the given texture type.
//
@@ -530,7 +543,7 @@ bool HlslParseContext::parseMatrixSwizzleSelector(const TSourceLoc& loc, const T
error(loc, "matrix component swizzle missing", compString.c_str(), "");
return false;
}
- startPos[numComps++] = c + 1;
+ startPos[numComps++] = (int)c + 1;
}
}
@@ -588,10 +601,10 @@ int HlslParseContext::getMatrixComponentsColumn(int rows, const TSwizzleSelector
//
// Handle seeing a variable identifier in the grammar.
//
-TIntermTyped* HlslParseContext::handleVariable(const TSourceLoc& loc, TSymbol* symbol, const TString* string)
+TIntermTyped* HlslParseContext::handleVariable(const TSourceLoc& loc, const TString* string)
{
- if (symbol == nullptr)
- symbol = symbolTable.find(*string);
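+ // Look the name up in the symbol table; thisDepth reports how many implicit-'this' scopes deep it was found.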
+ int thisDepth;
+ TSymbol* symbol = symbolTable.find(*string, thisDepth);
if (symbol && symbol->getAsVariable() && symbol->getAsVariable()->isUserType()) {
error(loc, "expected symbol, not user-defined type", string->c_str(), "");
return nullptr;
@@ -601,14 +614,21 @@ TIntermTyped* HlslParseContext::handleVariable(const TSourceLoc& loc, TSymbol* s
if (symbol && symbol->getNumExtensions())
requireExtensions(loc, symbol->getNumExtensions(), symbol->getExtensions(), symbol->getName().c_str());
- const TVariable* variable;
+ const TVariable* variable = nullptr;
const TAnonMember* anon = symbol ? symbol->getAsAnonMember() : nullptr;
TIntermTyped* node = nullptr;
if (anon) {
- // It was a member of an anonymous container.
+ // It was a member of an anonymous container, which could be a 'this' structure.
// Create a subtree for its dereference.
- variable = anon->getAnonContainer().getAsVariable();
+ if (thisDepth > 0) {
+ variable = getImplicitThis(thisDepth);
+ if (variable == nullptr)
+ error(loc, "cannot access member variables (static member function?)", "this", "");
+ }
+ if (variable == nullptr)
+ variable = anon->getAnonContainer().getAsVariable();
+
TIntermTyped* container = intermediate.addSymbol(*variable, loc);
TIntermTyped* constNode = intermediate.addConstantUnion(anon->getMemberNumber(), loc);
node = intermediate.addIndex(EOpIndexDirectStruct, container, constNode, loc);
@@ -678,6 +698,22 @@ TIntermTyped* HlslParseContext::handleBracketOperator(const TSourceLoc& loc, TIn
}
}
+ // Handle operator[] on structured buffers: this indexes into the buffer's content array.
+ // indexStructBufferContent returns nullptr if the base isn't a structured buffer (SSBO).
+ TIntermTyped* sbArray = indexStructBufferContent(loc, base);
+ if (sbArray != nullptr) {
+
+ // Now we'll apply the [] index to that array
+ const TOperator idxOp = (index->getQualifier().storage == EvqConst) ? EOpIndexDirect : EOpIndexIndirect;
+
+ TIntermTyped* element = intermediate.addIndex(idxOp, sbArray, index, loc);
+ const TType derefType(sbArray->getType(), 0);
+ element->setType(derefType);
+ return element;
+ }
+
return nullptr;
}
@@ -709,7 +745,7 @@ TIntermTyped* HlslParseContext::handleBracketDereference(const TSourceLoc& loc,
else {
// at least one of base and index is variable...
- if (base->getAsSymbolNode() && (wasFlattened(base) || shouldFlatten(base->getType()))) {
+ if (base->getAsSymbolNode() && (wasFlattened(base) || shouldFlattenUniform(base->getType()))) {
if (index->getQualifier().storage != EvqConst)
error(loc, "Invalid variable index to flattened array", base->getAsSymbolNode()->getName().c_str(), "");
@@ -776,68 +812,45 @@ TIntermTyped* HlslParseContext::handleUnaryMath(const TSourceLoc& loc, const cha
return childNode;
}
+//
+// Return true if the name is a struct buffer method
+//
+bool HlslParseContext::isStructBufferMethod(const TString& name) const
+{
+ return
+ name == "GetDimensions" ||
+ name == "Load" ||
+ name == "Load2" ||
+ name == "Load3" ||
+ name == "Load4" ||
+ name == "Store" ||
+ name == "Store2" ||
+ name == "Store3" ||
+ name == "Store4" ||
+ name == "InterlockedAdd" ||
+ name == "InterlockedAnd" ||
+ name == "InterlockedCompareExchange" ||
+ name == "InterlockedCompareStore" ||
+ name == "InterlockedExchange" ||
+ name == "InterlockedMax" ||
+ name == "InterlockedMin" ||
+ name == "InterlockedOr" ||
+ name == "InterlockedXor";
+}
//
-// Handle seeing a base.field dereference in the grammar.
+// Handle seeing a base.field dereference in the grammar, where 'field' is a
+// swizzle or member variable.
//
TIntermTyped* HlslParseContext::handleDotDereference(const TSourceLoc& loc, TIntermTyped* base, const TString& field)
{
variableCheck(base);
- //
- // methods can't be resolved until we later see the function-calling syntax.
- // Save away the name in the AST for now. Processing is completed in
- // handleLengthMethod(), etc.
- //
- if (field == "length") {
- return intermediate.addMethod(base, TType(EbtInt), &field, loc);
- } else if (field == "CalculateLevelOfDetail" ||
- field == "CalculateLevelOfDetailUnclamped" ||
- field == "Gather" ||
- field == "GatherRed" ||
- field == "GatherGreen" ||
- field == "GatherBlue" ||
- field == "GatherAlpha" ||
- field == "GatherCmp" ||
- field == "GatherCmpRed" ||
- field == "GatherCmpGreen" ||
- field == "GatherCmpBlue" ||
- field == "GatherCmpAlpha" ||
- field == "GetDimensions" ||
- field == "GetSamplePosition" ||
- field == "Load" ||
- field == "Sample" ||
- field == "SampleBias" ||
- field == "SampleCmp" ||
- field == "SampleCmpLevelZero" ||
- field == "SampleGrad" ||
- field == "SampleLevel") {
- // If it's not a method on a sampler object, we fall through in case it is a struct member.
- if (base->getType().getBasicType() == EbtSampler) {
- const TSampler& sampler = base->getType().getSampler();
- if (! sampler.isPureSampler()) {
- const int vecSize = sampler.isShadow() ? 1 : 4; // TODO: handle arbitrary sample return sizes
- return intermediate.addMethod(base, TType(sampler.type, EvqTemporary, vecSize), &field, loc);
- }
- }
- } else if (field == "Append" ||
- field == "RestartStrip") {
- // We cannot check the type here: it may be sanitized if we're not compiling a geometry shader, but
- // the code is around in the shader source.
- return intermediate.addMethod(base, TType(EbtVoid), &field, loc);
- }
-
- // It's not .length() if we get to here.
-
if (base->isArray()) {
error(loc, "cannot apply to an array:", ".", field.c_str());
-
return base;
}
- // It's neither an array nor .length() if we get here,
- // leaving swizzles and struct/block dereferences.
-
TIntermTyped* result = base;
if (base->isVector() || base->isScalar()) {
TSwizzleSelectors<TVectorSelector> selectors;
@@ -921,7 +934,7 @@ TIntermTyped* HlslParseContext::handleDotDereference(const TSourceLoc& loc, TInt
}
}
if (fieldFound) {
- if (base->getAsSymbolNode() && (wasFlattened(base) || shouldFlatten(base->getType()))) {
+ if (base->getAsSymbolNode() && (wasFlattened(base) || shouldFlattenUniform(base->getType()))) {
result = flattenAccess(base, member);
} else {
// Update the base and member to access if this was a split structure.
@@ -946,17 +959,28 @@ TIntermTyped* HlslParseContext::handleDotDereference(const TSourceLoc& loc, TInt
return result;
}
-// Determine whether we should split this type
-bool HlslParseContext::shouldSplit(const TType& type)
+//
+// Return true if the field should be treated as a built-in method.
+// Return false otherwise.
+//
+bool HlslParseContext::isBuiltInMethod(const TSourceLoc&, TIntermTyped* base, const TString& field)
{
- if (! inEntryPoint)
+ if (base == nullptr)
return false;
- const TStorageQualifier qualifier = type.getQualifier().storage;
+ variableCheck(base);
- // If it contains interstage IO, but not ONLY interstage IO, split the struct.
- return type.isStruct() && type.containsBuiltInInterstageIO(language) &&
- (qualifier == EvqVaryingIn || qualifier == EvqVaryingOut);
+ if (base->getType().getBasicType() == EbtSampler) {
+ return true;
+ } else if (isStructBufferType(base->getType()) && isStructBufferMethod(field)) {
+ return true;
+ } else if (field == "Append" ||
+ field == "RestartStrip") {
+ // We cannot check the type here: it may be sanitized if we're not compiling a geometry shader, but
+ // the code is around in the shader source.
+ return true;
+ } else
+ return false;
}
// Split the type of the given node into two structs:
@@ -984,7 +1008,7 @@ void HlslParseContext::split(const TVariable& variable)
{
const TType& type = variable.getType();
- TString name = (&variable == entryPointOutput) ? "" : variable.getName();
+ TString name = variable.getName();
// Create a new variable:
TType& splitType = split(*type.clone(), name);
@@ -1010,7 +1034,7 @@ TType& HlslParseContext::split(TType& type, TString name, const TType* outerStru
if (type.isStruct()) {
TTypeList* userStructure = type.getWritableStruct();
- // Get iterator to (now at end) set of builtin iterstage IO members
+ // Get iterator to (now at end) set of builtin interstage IO members
const auto firstIo = std::stable_partition(userStructure->begin(), userStructure->end(),
[this](const TTypeLoc& t) {return !t.type->isBuiltInInterstageIO(language);});
@@ -1042,35 +1066,13 @@ TType& HlslParseContext::split(TType& type, TString name, const TType* outerStru
return type;
}
-// Determine whether we should flatten an arbitrary type.
-bool HlslParseContext::shouldFlatten(const TType& type) const
-{
- return shouldFlattenIO(type) || shouldFlattenUniform(type);
-}
-
-// Is this an IO variable that can't be passed down the stack?
-// E.g., pipeline inputs to the vertex stage and outputs from the fragment stage.
-bool HlslParseContext::shouldFlattenIO(const TType& type) const
-{
- if (! inEntryPoint)
- return false;
-
- const TStorageQualifier qualifier = type.getQualifier().storage;
-
- if (!type.isStruct())
- return false;
-
- return ((language == EShLangVertex && qualifier == EvqVaryingIn) ||
- (language == EShLangFragment && qualifier == EvqVaryingOut));
-}
-
// Is this a uniform array which should be flattened?
bool HlslParseContext::shouldFlattenUniform(const TType& type) const
{
const TStorageQualifier qualifier = type.getQualifier().storage;
- return ((type.isArray() && intermediate.getFlattenUniformArrays()) || type.isStruct()) &&
- qualifier == EvqUniform &&
+ return qualifier == EvqUniform &&
+ ((type.isArray() && intermediate.getFlattenUniformArrays()) || type.isStruct()) &&
type.containsOpaque();
}
@@ -1131,8 +1133,8 @@ int HlslParseContext::flatten(const TSourceLoc& loc, const TVariable& variable,
// Add a single flattened member to the flattened data being tracked for the composite
// Returns true for the final flattening level.
int HlslParseContext::addFlattenedMember(const TSourceLoc& loc,
- const TVariable& variable, const TType& type, TFlattenData& flattenData,
- const TString& memberName, bool track)
+ const TVariable& variable, const TType& type, TFlattenData& flattenData,
+ const TString& memberName, bool track)
{
if (isFinalFlattening(type)) {
// This is as far as we flatten. Insert the variable.
@@ -1146,7 +1148,7 @@ int HlslParseContext::addFlattenedMember(const TSourceLoc& loc,
flattenData.members.push_back(memberVariable);
if (track)
- trackLinkageDeferred(*memberVariable);
+ trackLinkage(*memberVariable);
return static_cast<int>(flattenData.offsets.size())-1; // location of the member reference
} else {
@@ -1158,12 +1160,9 @@ int HlslParseContext::addFlattenedMember(const TSourceLoc& loc,
// Figure out the mapping between an aggregate's top members and an
// equivalent set of individual variables.
//
-// N.B. Erases memory of I/O-related annotations in the original type's member,
-// effecting a transfer of this information to the flattened variable form.
-//
// Assumes shouldFlatten() or equivalent was called first.
int HlslParseContext::flattenStruct(const TSourceLoc& loc, const TVariable& variable, const TType& type,
- TFlattenData& flattenData, TString name)
+ TFlattenData& flattenData, TString name)
{
assert(type.isStruct());
@@ -1180,9 +1179,6 @@ int HlslParseContext::flattenStruct(const TSourceLoc& loc, const TVariable& vari
const int mpos = addFlattenedMember(loc, variable, dereferencedType, flattenData, memberName, false);
flattenData.offsets[pos++] = mpos;
-
- // N.B. Erase I/O-related annotations from the source-type member.
- dereferencedType.getQualifier().makeTemporary();
}
return start;
@@ -1374,6 +1370,17 @@ TIntermTyped* HlslParseContext::splitAccessStruct(const TSourceLoc& loc, TInterm
}
}
+// Pass through to base class after remembering builtin mappings.
+void HlslParseContext::trackLinkage(TSymbol& symbol)
+{
+ TBuiltInVariable biType = symbol.getType().getQualifier().builtIn;
+ if (biType != EbvNone)
+ builtInLinkageSymbols[biType] = symbol.clone();
+
+ TParseContextBase::trackLinkage(symbol);
+}
+
+
// Variables that correspond to the user-interface in and out of a stage
// (not the built-in interface) are assigned locations and
// registered as a linkage node (part of the stage's external interface).
@@ -1393,6 +1400,7 @@ void HlslParseContext::assignLocations(TVariable& variable)
nextOutLocation += intermediate.computeTypeLocationSize(variable.getType());
}
}
+
trackLinkage(variable);
}
};
@@ -1403,12 +1411,7 @@ void HlslParseContext::assignLocations(TVariable& variable)
assignLocation(**member);
} else if (wasSplit(variable.getUniqueId())) {
TVariable* splitIoVar = getSplitIoVar(&variable);
- const TTypeList* structure = splitIoVar->getType().getStruct();
- // Struct splitting can produce empty structures if the only members of the
- // struct were builtin interstage IO types. Only assign locations if it
- // isn't a struct, or is a non-empty struct.
- if (structure == nullptr || !structure->empty())
- assignLocation(*splitIoVar);
+ assignLocation(*splitIoVar);
} else {
assignLocation(variable);
}
@@ -1418,7 +1421,7 @@ void HlslParseContext::assignLocations(TVariable& variable)
// Handle seeing a function declarator in the grammar. This is the precursor
// to recognizing a function prototype or function definition.
//
-TFunction& HlslParseContext::handleFunctionDeclarator(const TSourceLoc& loc, TFunction& function, bool prototype)
+void HlslParseContext::handleFunctionDeclarator(const TSourceLoc& loc, TFunction& function, bool prototype)
{
//
// Multiple declarations of the same function name are allowed.
@@ -1446,13 +1449,6 @@ TFunction& HlslParseContext::handleFunctionDeclarator(const TSourceLoc& loc, TFu
// other forms of name collisions.
if (! symbolTable.insert(function))
error(loc, "function name is redeclaration of existing name", function.getName().c_str(), "");
-
- //
- // If this is a redeclaration, it could also be a definition,
- // in which case, we need to use the parameter names from this one, and not the one that's
- // being redeclared. So, pass back this declaration, not the one in the symbol table.
- //
- return function;
}
// Add interstage IO variables to the linkage in canonical order.
@@ -1472,72 +1468,12 @@ void HlslParseContext::addInterstageIoToLinkage()
// We have to (potentially) track two IO blocks, one in, one out. E.g., a GS may have a
// PerVertex block in both directions, possibly with different members.
- static const TStorageQualifier ioType[2] = { EvqVaryingIn, EvqVaryingOut };
- static const char* blockName[2] = { "PerVertex_in", "PerVertex_out" };
-
- TTypeList* ioBlockTypes[2] = { nullptr, nullptr };
- TArraySizes* ioBlockArray[2] = { nullptr, nullptr };
-
for (int idx = 0; idx < int(io.size()); ++idx) {
TVariable* var = interstageBuiltInIo[io[idx]];
// Add the loose interstage IO to the linkage
if (var->getType().isLooseAndBuiltIn(language))
- trackLinkageDeferred(*var);
-
- // Add the PerVertex interstage IO to the IO block
- if (var->getType().isPerVertexAndBuiltIn(language)) {
- int blockId = 0;
- switch (var->getType().getQualifier().storage) {
- case EvqVaryingIn: blockId = 0; break;
- case EvqVaryingOut: blockId = 1; break;
- default: assert(0 && "Invalid storage qualifier");
- }
-
- // Lazy creation of type list only if we end up needing it.
- if (ioBlockTypes[blockId] == nullptr)
- ioBlockTypes[blockId] = new TTypeList();
-
- TTypeLoc member = { new TType(EbtVoid), loc };
- member.type->shallowCopy(var->getType());
- member.type->setFieldName(var->getName());
-
- // We may have collected these from different parts of different structures. If their
- // array dimensions are not the same, we don't know what to do, so issue an error.
- if (member.type->isArray()) {
- if (ioBlockArray[blockId] == nullptr) {
- ioBlockArray[blockId] = &member.type->getArraySizes();
- } else {
- if (*ioBlockArray[blockId] != member.type->getArraySizes())
- error(loc, "PerVertex block array dimension mismatch", "", "");
- }
- member.type->clearArraySizes();
- }
-
- ioBlockTypes[blockId]->push_back(member);
- }
- }
-
- // If there were PerVertex items, add the block to the linkage. Handle in and out separately.
- for (int blockId = 0; blockId <= 1; ++blockId) {
- if (ioBlockTypes[blockId] != nullptr) {
- const TString* instanceName = NewPoolTString(blockName[blockId]);
- TQualifier blockQualifier;
-
- blockQualifier.clear();
- blockQualifier.storage = ioType[blockId];
-
- TType blockType(ioBlockTypes[blockId], *instanceName, blockQualifier);
-
- if (ioBlockArray[blockId] != nullptr)
- blockType.newArraySizes(*ioBlockArray[blockId]);
-
- TVariable* ioBlock = new TVariable(instanceName, blockType);
- if (!symbolTable.insert(*ioBlock))
- error(loc, "block instance name redefinition", ioBlock->getName().c_str(), "");
- else
- trackLinkageDeferred(*ioBlock);
- }
+ trackLinkage(*var);
}
}
@@ -1545,8 +1481,10 @@ void HlslParseContext::addInterstageIoToLinkage()
// Handle seeing the function prototype in front of a function definition in the grammar.
// The body is handled after this function returns.
//
+// Returns an aggregate of parameter-symbol nodes.
+//
TIntermAggregate* HlslParseContext::handleFunctionDefinition(const TSourceLoc& loc, TFunction& function,
- const TAttributeMap& attributes)
+ const TAttributeMap& attributes, TIntermNode*& entryPointTree)
{
currentCaller = function.getMangledName();
TSymbol* symbol = symbolTable.find(function.getMangledName());
@@ -1573,12 +1511,7 @@ TIntermAggregate* HlslParseContext::handleFunctionDefinition(const TSourceLoc& l
// Entry points need different I/O and other handling, transform it so the
// rest of this function doesn't care.
- transformEntryPoint(loc, function, attributes);
-
- // Insert the $Global constant buffer.
- // TODO: this design fails if new members are declared between function definitions.
- if (! insertGlobalUniformBlock())
- error(loc, "failed to insert the global constant buffer", "uniform", "");
+ entryPointTree = transformEntryPoint(loc, function, attributes);
//
// New symbol table scope for body of function plus its arguments
@@ -1597,40 +1530,26 @@ TIntermAggregate* HlslParseContext::handleFunctionDefinition(const TSourceLoc& l
for (int i = 0; i < function.getParamCount(); i++) {
TParameter& param = function[i];
if (param.name != nullptr) {
- TType* sanitizedType;
-
- // If we're not in the entry point, parameters are sanitized types.
- if (inEntryPoint)
- sanitizedType = param.type;
- else
- sanitizedType = sanitizeType(param.type);
-
- TVariable *variable = new TVariable(param.name, *sanitizedType);
+ TVariable *variable = new TVariable(param.name, *param.type);
+ if (i == 0 && function.hasImplicitThis()) {
+ // Anonymous 'this' members are already in a symbol-table level,
+ // and we need to know what function parameter to map them to.
+ symbolTable.makeInternalVariable(*variable);
+ pushImplicitThis(variable);
+ }
// Insert the parameters with name in the symbol table.
if (! symbolTable.insert(*variable))
error(loc, "redefinition", variable->getName().c_str(), "");
- else {
- // get IO straightened out
- if (inEntryPoint) {
- if (shouldFlatten(*param.type))
- flatten(loc, *variable);
- if (shouldSplit(*param.type))
- split(*variable);
- assignLocations(*variable);
- }
-
- // Transfer ownership of name pointer to symbol table.
- param.name = nullptr;
-
- // Add the parameter to the AST
- paramNodes = intermediate.growAggregate(paramNodes,
- intermediate.addSymbol(*variable, loc),
- loc);
- }
+ // Add the parameter to the AST
+ paramNodes = intermediate.growAggregate(paramNodes,
+ intermediate.addSymbol(*variable, loc),
+ loc);
} else
paramNodes = intermediate.growAggregate(paramNodes, intermediate.addSymbol(*param.type, loc), loc);
}
+ if (function.hasIllegalImplicitThis())
+ pushImplicitThis(nullptr);
intermediate.setAggregateOperator(paramNodes, EOpParameters, TType(EbtVoid), loc);
loopNestingLevel = 0;
@@ -1641,33 +1560,48 @@ TIntermAggregate* HlslParseContext::handleFunctionDefinition(const TSourceLoc& l
}
//
-// Do all special handling for the entry point.
+// Do all special handling for the entry point, including wrapping
+// the shader's entry point with the official entry point that will call it.
+//
+// The following:
//
-void HlslParseContext::transformEntryPoint(const TSourceLoc& loc, TFunction& function, const TAttributeMap& attributes)
+// retType shaderEntryPoint(args...) // shader declared entry point
+// { body }
+//
+// Becomes
+//
+// out retType ret;
+// in iargs<that are input>...;
+// out oargs<that are output> ...;
+//
+// void shaderEntryPoint() // synthesized, but official, entry point
+// {
+// args<that are input> = iargs...;
+// ret = @shaderEntryPoint(args...);
+// oargs = args<that are output>...;
+// }
+//
+// The symbol table will still map the original entry point name to the
+// modified function and its new name:
+//
+// symbol table: shaderEntryPoint -> @shaderEntryPoint
+//
+// Returns nullptr if no entry-point tree was built; otherwise, returns
+// a subtree that creates the entry point.
+//
+TIntermNode* HlslParseContext::transformEntryPoint(const TSourceLoc& loc, TFunction& userFunction, const TAttributeMap& attributes)
{
- inEntryPoint = function.getName().compare(intermediate.getEntryPointName().c_str()) == 0;
-
- if (!inEntryPoint) {
- remapNonEntryPointIO(function);
- return;
+ // if we aren't in the entry point, fix the IO as such and exit
+ if (userFunction.getName().compare(intermediate.getEntryPointName().c_str()) != 0) {
+ remapNonEntryPointIO(userFunction);
+ return nullptr;
}
- // entry point logic...
-
- intermediate.setEntryPointMangledName(function.getMangledName().c_str());
- intermediate.incrementEntryPointCount();
+ entryPointFunction = &userFunction; // needed in finish()
- // Handle parameters and return value
- remapEntryPointIO(function);
- if (entryPointOutput) {
- if (shouldFlatten(entryPointOutput->getType()))
- flatten(loc, *entryPointOutput);
- if (shouldSplit(entryPointOutput->getType()))
- split(*entryPointOutput);
- assignLocations(*entryPointOutput);
- }
+ // entry point logic...
- // Handle function attributes
+ // Handle entry-point function attributes
const TIntermAggregate* numThreads = attributes[EatNumThreads];
if (numThreads != nullptr) {
const TIntermSequence& sequence = numThreads->getSequence();
@@ -1675,9 +1609,223 @@ void HlslParseContext::transformEntryPoint(const TSourceLoc& loc, TFunction& fun
for (int lid = 0; lid < int(sequence.size()); ++lid)
intermediate.setLocalSize(lid, sequence[lid]->getAsConstantUnion()->getConstArray()[0].getIConst());
}
+
+ // MaxVertexCount
const TIntermAggregate* maxVertexCount = attributes[EatMaxVertexCount];
- if (maxVertexCount != nullptr)
- intermediate.setVertices(maxVertexCount->getSequence()[0]->getAsConstantUnion()->getConstArray()[0].getIConst());
+ if (maxVertexCount != nullptr) {
+ if (! intermediate.setVertices(maxVertexCount->getSequence()[0]->getAsConstantUnion()->getConstArray()[0].getIConst())) {
+ error(loc, "cannot change previously set maxvertexcount attribute", "", "");
+ }
+ }
+
+ // Handle [patchconstantfunction("...")]
+ const TIntermAggregate* pcfAttr = attributes[EatPatchConstantFunc];
+ if (pcfAttr != nullptr) {
+ const TConstUnion& pcfName = pcfAttr->getSequence()[0]->getAsConstantUnion()->getConstArray()[0];
+
+ if (pcfName.getType() != EbtString) {
+ error(loc, "invalid patch constant function", "", "");
+ } else {
+ patchConstantFunctionName = *pcfName.getSConst();
+ }
+ }
+
+ // Handle [domain("...")]
+ const TIntermAggregate* domainAttr = attributes[EatDomain];
+ if (domainAttr != nullptr) {
+ const TConstUnion& domainType = domainAttr->getSequence()[0]->getAsConstantUnion()->getConstArray()[0];
+ if (domainType.getType() != EbtString) {
+ error(loc, "invalid domain", "", "");
+ } else {
+ TString domainStr = *domainType.getSConst();
+ std::transform(domainStr.begin(), domainStr.end(), domainStr.begin(), ::tolower);
+
+ TLayoutGeometry domain = ElgNone;
+
+ if (domainStr == "tri") {
+ domain = ElgTriangles;
+ } else if (domainStr == "quad") {
+ domain = ElgQuads;
+ } else if (domainStr == "isoline") {
+ domain = ElgIsolines;
+ } else {
+ error(loc, "unsupported domain type", domainStr.c_str(), "");
+ }
+
+ if (! intermediate.setInputPrimitive(domain)) {
+ error(loc, "cannot change previously set domain", TQualifier::getGeometryString(domain), "");
+ }
+ }
+ }
+
+ // Handle [outputtopology("...")]
+ const TIntermAggregate* topologyAttr = attributes[EatOutputTopology];
+ if (topologyAttr != nullptr) {
+ const TConstUnion& topoType = topologyAttr->getSequence()[0]->getAsConstantUnion()->getConstArray()[0];
+ if (topoType.getType() != EbtString) {
+ error(loc, "invalid outputtoplogy", "", "");
+ } else {
+ TString topologyStr = *topoType.getSConst();
+ std::transform(topologyStr.begin(), topologyStr.end(), topologyStr.begin(), ::tolower);
+
+ TVertexOrder topology = EvoNone;
+
+ if (topologyStr == "point") {
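+ // "point" and "line" have no winding order; they intentionally stay EvoNone, which sets nothing below.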
+ topology = EvoNone;
+ } else if (topologyStr == "line") {
+ topology = EvoNone;
+ } else if (topologyStr == "triangle_cw") {
+ topology = EvoCw;
+ } else if (topologyStr == "triangle_ccw") {
+ topology = EvoCcw;
+ } else {
+ error(loc, "unsupported outputtoplogy type", topologyStr.c_str(), "");
+ }
+
+ if (topology != EvoNone) {
+ if (! intermediate.setVertexOrder(topology)) {
+ error(loc, "cannot change previously set outputtopology", TQualifier::getVertexOrderString(topology), "");
+ }
+ }
+ }
+ }
+
+ // Handle [partitioning("...")]
+ const TIntermAggregate* partitionAttr = attributes[EatPartitioning];
+ if (partitionAttr != nullptr) {
+ const TConstUnion& partType = partitionAttr->getSequence()[0]->getAsConstantUnion()->getConstArray()[0];
+ if (partType.getType() != EbtString) {
+ error(loc, "invalid partitioning", "", "");
+ } else {
+ TString partitionStr = *partType.getSConst();
+ std::transform(partitionStr.begin(), partitionStr.end(), partitionStr.begin(), ::tolower);
+
+ TVertexSpacing partitioning = EvsNone;
+
+ if (partitionStr == "integer") {
+ partitioning = EvsEqual;
+ } else if (partitionStr == "fractional_even") {
+ partitioning = EvsFractionalEven;
+ } else if (partitionStr == "fractional_odd") {
+ partitioning = EvsFractionalOdd;
+ //} else if (partitionStr == "pow2") { // TODO: currently nothing to map this to.
+ } else {
+ error(loc, "unsupported partitioning type", partitionStr.c_str(), "");
+ }
+
+ if (! intermediate.setVertexSpacing(partitioning))
+ error(loc, "cannot change previously set partitioning", TQualifier::getVertexSpacingString(partitioning), "");
+ }
+ }
+
+ // Handle [outputcontrolpoints("...")]
+ const TIntermAggregate* outputControlPoints = attributes[EatOutputControlPoints];
+ if (outputControlPoints != nullptr) {
+ const TConstUnion& ctrlPointConst = outputControlPoints->getSequence()[0]->getAsConstantUnion()->getConstArray()[0];
+ if (ctrlPointConst.getType() != EbtInt) {
+ error(loc, "invalid outputcontrolpoints", "", "");
+ } else {
+ const int ctrlPoints = ctrlPointConst.getIConst();
+ if (! intermediate.setVertices(ctrlPoints)) {
+ error(loc, "cannot change previously set outputcontrolpoints attribute", "", "");
+ }
+ }
+ }
+
+ // Move parameters and return value to shader in/out
+ TVariable* entryPointOutput; // gets created in remapEntryPointIO
+ TVector<TVariable*> inputs;
+ TVector<TVariable*> outputs;
+ remapEntryPointIO(userFunction, entryPointOutput, inputs, outputs);
+
+ // Further this return/in/out transform by flattening, splitting, and assigning locations
+ const auto makeVariableInOut = [&](TVariable& variable) {
+ if (variable.getType().isStruct()) {
+ const TStorageQualifier qualifier = variable.getType().getQualifier().storage;
+ // struct inputs to the vertex stage and outputs from the fragment stage must be flattened
+ if ((language == EShLangVertex && qualifier == EvqVaryingIn) ||
+ (language == EShLangFragment && qualifier == EvqVaryingOut))
+ flatten(loc, variable);
+ // Mixture of IO and non-IO must be split
+ else if (variable.getType().containsBuiltInInterstageIO(language))
+ split(variable);
+ }
+ assignLocations(variable);
+ };
+ if (entryPointOutput)
+ makeVariableInOut(*entryPointOutput);
+ for (auto it = inputs.begin(); it != inputs.end(); ++it)
+ makeVariableInOut(*(*it));
+ for (auto it = outputs.begin(); it != outputs.end(); ++it)
+ makeVariableInOut(*(*it));
+
+ // Synthesize the call
+
+ pushScope(); // matches the one in handleFunctionBody()
+
+ // new signature
+ TType voidType(EbtVoid);
+ TFunction synthEntryPoint(&userFunction.getName(), voidType);
+ TIntermAggregate* synthParams = new TIntermAggregate();
+ intermediate.setAggregateOperator(synthParams, EOpParameters, voidType, loc);
+ intermediate.setEntryPointMangledName(synthEntryPoint.getMangledName().c_str());
+ intermediate.incrementEntryPointCount();
+ TFunction callee(&userFunction.getName(), voidType); // call based on old name, which is still in the symbol table
+
+ // change original name
+ userFunction.addPrefix("@"); // change the name in the function, but not in the symbol table
+
+ // Copy inputs (shader-in -> calling arg), while building up the call node
+ TVector<TVariable*> argVars;
+ TIntermAggregate* synthBody = new TIntermAggregate();
+ auto inputIt = inputs.begin();
+ TIntermTyped* callingArgs = nullptr;
+ for (int i = 0; i < userFunction.getParamCount(); i++) {
+ TParameter& param = userFunction[i];
+ argVars.push_back(makeInternalVariable(*param.name, *param.type));
+ argVars.back()->getWritableType().getQualifier().makeTemporary();
+ TIntermSymbol* arg = intermediate.addSymbol(*argVars.back());
+ handleFunctionArgument(&callee, callingArgs, arg);
+ if (param.type->getQualifier().isParamInput()) {
+ intermediate.growAggregate(synthBody, handleAssign(loc, EOpAssign, arg,
+ intermediate.addSymbol(**inputIt)));
+ inputIt++;
+ }
+ }
+
+ // Call
+ currentCaller = synthEntryPoint.getMangledName();
+ TIntermTyped* callReturn = handleFunctionCall(loc, &callee, callingArgs);
+ currentCaller = userFunction.getMangledName();
+
+ // Return value
+ if (entryPointOutput)
+ intermediate.growAggregate(synthBody, handleAssign(loc, EOpAssign,
+ intermediate.addSymbol(*entryPointOutput), callReturn));
+ else
+ intermediate.growAggregate(synthBody, callReturn);
+
+ // Output copies
+ auto outputIt = outputs.begin();
+ for (int i = 0; i < userFunction.getParamCount(); i++) {
+ TParameter& param = userFunction[i];
+ if (param.type->getQualifier().isParamOutput()) {
+ intermediate.growAggregate(synthBody, handleAssign(loc, EOpAssign,
+ intermediate.addSymbol(**outputIt),
+ intermediate.addSymbol(*argVars[i])));
+ outputIt++;
+ }
+ }
+
+ // Put the pieces together to form a full function subtree
+ // for the synthesized entry point.
+ synthBody->setOperator(EOpSequence);
+ TIntermNode* synthFunctionDef = synthParams;
+ handleFunctionBody(loc, synthEntryPoint, synthBody, synthFunctionDef);
+
+ entryPointFunctionBody = synthBody;
+
+ return synthFunctionDef;
}
void HlslParseContext::handleFunctionBody(const TSourceLoc& loc, TFunction& function, TIntermNode* functionBody, TIntermNode*& node)
@@ -1687,6 +1835,8 @@ void HlslParseContext::handleFunctionBody(const TSourceLoc& loc, TFunction& func
node->getAsAggregate()->setName(function.getMangledName().c_str());
popScope();
+ if (function.hasImplicitThis())
+ popImplicitThis();
if (function.getType().getBasicType() != EbtVoid && ! functionReturnsValue)
error(loc, "function does not return a value:", "", function.getName().c_str());
@@ -1695,45 +1845,48 @@ void HlslParseContext::handleFunctionBody(const TSourceLoc& loc, TFunction& func
// AST I/O is done through shader globals declared in the 'in' or 'out'
// storage class. An HLSL entry point has a return value, input parameters
// and output parameters. These need to get remapped to the AST I/O.
-void HlslParseContext::remapEntryPointIO(TFunction& function)
+void HlslParseContext::remapEntryPointIO(TFunction& function, TVariable*& returnValue,
+ TVector<TVariable*>& inputs, TVector<TVariable*>& outputs)
{
- // Will auto-assign locations here to the inputs/outputs defined by the entry point
-
- const auto remapType = [&](TType& type) {
- const auto remapBuiltInType = [&](TType& type) {
- switch (type.getQualifier().builtIn) {
- case EbvFragDepthGreater:
- intermediate.setDepth(EldGreater);
- type.getQualifier().builtIn = EbvFragDepth;
- break;
- case EbvFragDepthLesser:
- intermediate.setDepth(EldLess);
- type.getQualifier().builtIn = EbvFragDepth;
- break;
- default:
- break;
+ // Do the actual work to make a type be a shader input or output variable,
+ // and clear the original to be non-IO (for use as a normal function parameter/return).
+ const auto makeIoVariable = [this](const char* name, TType& type, TStorageQualifier storage) -> TVariable* {
+ TVariable* ioVariable = makeInternalVariable(name, type);
+ clearUniformInputOutput(type.getQualifier());
+ if (type.getStruct() != nullptr) {
+ auto newLists = ioTypeMap.find(ioVariable->getType().getStruct());
+ if (newLists != ioTypeMap.end()) {
+ if (storage == EvqVaryingIn && newLists->second.input)
+ ioVariable->getWritableType().setStruct(newLists->second.input);
+ else if (storage == EvqVaryingOut && newLists->second.output)
+ ioVariable->getWritableType().setStruct(newLists->second.output);
}
- };
- remapBuiltInType(type);
- if (type.isStruct()) {
- auto members = *type.getStruct();
- for (auto member = members.begin(); member != members.end(); ++member)
- remapBuiltInType(*member->type);
}
+ if (storage == EvqVaryingIn)
+ correctInput(ioVariable->getWritableType().getQualifier());
+ else
+ correctOutput(ioVariable->getWritableType().getQualifier());
+ ioVariable->getWritableType().getQualifier().storage = storage;
+ return ioVariable;
};
// return value is actually a shader-scoped output (out)
- if (function.getType().getBasicType() != EbtVoid) {
- entryPointOutput = makeInternalVariable("@entryPointOutput", function.getType());
- entryPointOutput->getWritableType().getQualifier().storage = EvqVaryingOut;
- remapType(function.getWritableType());
- }
+ if (function.getType().getBasicType() == EbtVoid)
+ returnValue = nullptr;
+ else
+ returnValue = makeIoVariable("@entryPointOutput", function.getWritableType(), EvqVaryingOut);
// parameters are actually shader-scoped inputs and outputs (in or out)
for (int i = 0; i < function.getParamCount(); i++) {
TType& paramType = *function[i].type;
- paramType.getQualifier().storage = paramType.getQualifier().isParamInput() ? EvqVaryingIn : EvqVaryingOut;
- remapType(paramType);
+ if (paramType.getQualifier().isParamInput()) {
+ TVariable* argAsGlobal = makeIoVariable(function[i].name->c_str(), paramType, EvqVaryingIn);
+ inputs.push_back(argAsGlobal);
+ }
+ if (paramType.getQualifier().isParamOutput()) {
+ TVariable* argAsGlobal = makeIoVariable(function[i].name->c_str(), paramType, EvqVaryingOut);
+ outputs.push_back(argAsGlobal);
+ }
}
}
@@ -1741,15 +1894,15 @@ void HlslParseContext::remapEntryPointIO(TFunction& function)
// declares entry point IO built-ins, but these have to be undone.
void HlslParseContext::remapNonEntryPointIO(TFunction& function)
{
- const auto remapBuiltInType = [&](TType& type) { type.getQualifier().builtIn = EbvNone; };
-
// return value
if (function.getType().getBasicType() != EbtVoid)
- remapBuiltInType(function.getWritableType());
+ clearUniformInputOutput(function.getWritableType().getQualifier());
- // parameters
+ // parameters.
+ // References to structuredbuffer types are left unmodified
for (int i = 0; i < function.getParamCount(); i++)
- remapBuiltInType(*function[i].type);
+ if (!isReference(*function[i].type))
+ clearUniformInputOutput(function[i].type->getQualifier());
}
// Handle function returns, including type conversions to the function return type
@@ -1771,23 +1924,7 @@ TIntermNode* HlslParseContext::handleReturnValue(const TSourceLoc& loc, TIntermT
}
}
- // The entry point needs to send any return value to the entry-point output instead.
- // So, a subtree is built up, as a two-part sequence, with the first part being an
- // assignment subtree, and the second part being a return with no value.
- //
- // Otherwise, for a non entry point, just return a return statement.
- if (inEntryPoint) {
- assert(entryPointOutput != nullptr); // should have been error tested at the beginning
- TIntermSymbol* left = new TIntermSymbol(entryPointOutput->getUniqueId(), entryPointOutput->getName(),
- entryPointOutput->getType());
- TIntermNode* returnSequence = handleAssign(loc, EOpAssign, left, value);
- returnSequence = intermediate.makeAggregate(returnSequence);
- returnSequence = intermediate.growAggregate(returnSequence, intermediate.addBranch(EOpReturn, loc), loc);
- returnSequence->getAsAggregate()->setOperator(EOpSequence);
-
- return returnSequence;
- } else
- return intermediate.addBranch(EOpReturn, value, loc);
+ return intermediate.addBranch(EOpReturn, value, loc);
}
void HlslParseContext::handleFunctionArgument(TFunction* function,
@@ -1878,6 +2015,11 @@ TIntermTyped* HlslParseContext::handleAssign(const TSourceLoc& loc, TOperator op
int memberIdx = 0;
+ // When dealing with split arrayed structures of builtins, the arrayness is moved to the extracted builtin
+ // variables, which is awkward when copying between split and unsplit structures. This variable tracks
+ // array indirections so they can be percolated from outer structs to inner variables.
+ std::vector <int> arrayElement;
+
// We track the outer-most aggregate, so that we can use its storage class later.
const TIntermTyped* outerLeft = left;
const TIntermTyped* outerRight = right;
@@ -1894,7 +2036,14 @@ TIntermTyped* HlslParseContext::handleAssign(const TSourceLoc& loc, TOperator op
if (split && derefType.isBuiltInInterstageIO(language)) {
// copy from interstage IO builtin if needed
- subTree = intermediate.addSymbol(*interstageBuiltInIo[tInterstageIoData(derefType, outer->getType())]);
+ subTree = intermediate.addSymbol(*interstageBuiltInIo.find(HlslParseContext::tInterstageIoData(derefType, outer->getType()))->second);
+
+ // Arrayness of builtIn symbols isn't handled by the normal recursion: it's been extracted and moved to the builtin.
+ if (subTree->getType().isArray() && !arrayElement.empty()) {
+ const TType splitDerefType(subTree->getType(), arrayElement.back());
+ subTree = intermediate.addIndex(EOpIndexDirect, subTree, intermediate.addConstantUnion(arrayElement.back(), loc), loc);
+ subTree->setType(splitDerefType);
+ }
} else if (flattened && isFinalFlattening(derefType)) {
subTree = intermediate.addSymbol(*flatVariables[memberIdx++]);
} else {
@@ -1925,7 +2074,9 @@ TIntermTyped* HlslParseContext::handleAssign(const TSourceLoc& loc, TOperator op
// array case
for (int element=0; element < left->getType().getOuterArraySize(); ++element) {
- // Add a new AST symbol node if we have a temp variable holding a complex RHS.
+ arrayElement.push_back(element);
+
+ // Add a new AST symbol node if we have a temp variable holding a complex RHS.
TIntermTyped* subLeft = getMember(true, left, element, left, element);
TIntermTyped* subRight = getMember(false, right, element, right, element);
@@ -1936,6 +2087,8 @@ TIntermTyped* HlslParseContext::handleAssign(const TSourceLoc& loc, TOperator op
assignList = intermediate.growAggregate(assignList, intermediate.addAssign(op, subLeft, subRight, loc), loc);
else
traverse(subLeft, subRight, subSplitLeft, subSplitRight);
+
+ arrayElement.pop_back();
}
} else if (left->getType().isStruct()) {
// struct case
@@ -1947,6 +2100,10 @@ TIntermTyped* HlslParseContext::handleAssign(const TSourceLoc& loc, TOperator op
int memberL = 0;
int memberR = 0;
+ // Handle empty structure assignment
+ if (membersL.empty() && membersR.empty())
+ assignList = intermediate.growAggregate(assignList, intermediate.addAssign(op, left, right, loc), loc);
+
for (int member = 0; member < int(membersL.size()); ++member) {
const TType& typeL = *membersL[member].type;
const TType& typeR = *membersR[member].type;
@@ -2019,7 +2176,7 @@ TIntermTyped* HlslParseContext::handleAssignToMatrixSwizzle(const TSourceLoc& lo
TIntermTyped* vectorAssign = nullptr;
if (vector == nullptr) {
// create a new intermediate vector variable to assign to
- TType vectorType(matrix->getBasicType(), EvqTemporary, matrix->getQualifier().precision, swizzle.size()/2);
+ TType vectorType(matrix->getBasicType(), EvqTemporary, matrix->getQualifier().precision, (int)swizzle.size()/2);
vector = intermediate.addSymbol(*makeInternalVariable("intermVec", vectorType), loc);
// assign the right to the new vector
@@ -2103,6 +2260,240 @@ TIntermAggregate* HlslParseContext::handleSamplerTextureCombine(const TSourceLoc
}
//
+// Decompose structure buffer methods into AST
+//
+void HlslParseContext::decomposeStructBufferMethods(const TSourceLoc& loc, TIntermTyped*& node, TIntermNode* arguments)
+{
+ if (!node || !node->getAsOperator())
+ return;
+
+ const TOperator op = node->getAsOperator()->getOp();
+ TIntermAggregate* argAggregate = arguments ? arguments->getAsAggregate() : nullptr;
+ if (argAggregate == nullptr)
+ return;
+
+ // Buffer is the object upon which method is called, so always arg 0
+ TIntermTyped* bufferObj = argAggregate->getSequence()[0]->getAsTyped();
+
+ // Index to obtain the runtime sized array out of the buffer.
+ TIntermTyped* argArray = indexStructBufferContent(loc, bufferObj);
+ if (argArray == nullptr)
+ return; // It might not be a struct buffer method.
+
+ switch (op) {
+ case EOpMethodLoad:
+ {
+ TIntermTyped* argIndex = argAggregate->getSequence()[1]->getAsTyped(); // index
+
+ // Byte address buffers index in bytes (only multiples of 4 permitted... not so much a byte address
+ // buffer then, but that's what it calls itself).
+ const bool isByteAddressBuffer = (argArray->getBasicType() == EbtUint);
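+ // (Byte address buffers are represented as runtime arrays of uint, so the element type distinguishes them.)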
+ if (isByteAddressBuffer)
+ argIndex = intermediate.addBinaryNode(EOpRightShift, argIndex, intermediate.addConstantUnion(2, loc, true),
+ loc, TType(EbtInt));
+
+ // Index into the array to find the item being loaded.
+ const TOperator idxOp = (argIndex->getQualifier().storage == EvqConst) ? EOpIndexDirect : EOpIndexIndirect;
+
+ node = intermediate.addIndex(idxOp, argArray, argIndex, loc);
+
+ const TType derefType(argArray->getType(), 0);
+ node->setType(derefType);
+ }
+
+ break;
+
+ case EOpMethodLoad2:
+ case EOpMethodLoad3:
+ case EOpMethodLoad4:
+ {
+ TIntermTyped* argIndex = argAggregate->getSequence()[1]->getAsTyped(); // index
+
+ TOperator constructOp = EOpNull;
+ int size = 0;
+
+ switch (op) {
+ case EOpMethodLoad2: size = 2; constructOp = EOpConstructVec2; break;
+ case EOpMethodLoad3: size = 3; constructOp = EOpConstructVec3; break;
+ case EOpMethodLoad4: size = 4; constructOp = EOpConstructVec4; break;
+ default: assert(0);
+ }
+
+ TIntermTyped* body = nullptr;
+
+ // First, we'll store the address in a variable to avoid multiple shifts
+ // (we must convert the byte address to an item address)
+ TIntermTyped* byteAddrIdx = intermediate.addBinaryNode(EOpRightShift, argIndex,
+ intermediate.addConstantUnion(2, loc, true), loc, TType(EbtInt));
+
+ TVariable* byteAddrSym = makeInternalVariable("byteAddrTemp", TType(EbtInt, EvqTemporary));
+ TIntermTyped* byteAddrIdxVar = intermediate.addSymbol(*byteAddrSym, loc);
+
+ body = intermediate.growAggregate(body, intermediate.addAssign(EOpAssign, byteAddrIdxVar, byteAddrIdx, loc));
+
+ TIntermTyped* vec = nullptr;
+
+ // These are only valid on (rw)byteaddressbuffers, so we can always perform the >>2
+ // address conversion.
+ for (int idx=0; idx<size; ++idx) {
+ TIntermTyped* offsetIdx = byteAddrIdxVar;
+
+ // add index offset
+ if (idx != 0)
+ offsetIdx = intermediate.addBinaryNode(EOpAdd, offsetIdx, intermediate.addConstantUnion(idx, loc, true),
+ loc, TType(EbtInt));
+
+ const TOperator idxOp = (offsetIdx->getQualifier().storage == EvqConst) ? EOpIndexDirect : EOpIndexIndirect;
+
+ vec = intermediate.growAggregate(vec, intermediate.addIndex(idxOp, argArray, offsetIdx, loc));
+ }
+
+ vec->setType(TType(argArray->getBasicType(), EvqTemporary, size));
+ vec->getAsAggregate()->setOperator(constructOp);
+
+ body = intermediate.growAggregate(body, vec);
+ body->setType(vec->getType());
+ body->getAsAggregate()->setOperator(EOpSequence);
+
+ node = body;
+ }
+
+ break;
+
+ case EOpMethodStore:
+ case EOpMethodStore2:
+ case EOpMethodStore3:
+ case EOpMethodStore4:
+ {
+ TIntermTyped* argIndex = argAggregate->getSequence()[1]->getAsTyped(); // address
+ TIntermTyped* argValue = argAggregate->getSequence()[2]->getAsTyped(); // value
+
+ // Index into the array to find the item being loaded.
+ // Byte address buffers index in bytes (only multiples of 4 permitted... not so much a byte address
+ // buffer then, but that's what it calls itself).
+
+ int size = 0;
+
+ switch (op) {
+ case EOpMethodStore: size = 1; break;
+ case EOpMethodStore2: size = 2; break;
+ case EOpMethodStore3: size = 3; break;
+ case EOpMethodStore4: size = 4; break;
+ default: assert(0);
+ }
+
+ TIntermAggregate* body = nullptr;
+
+ // First, we'll store the address in a variable to avoid multiple shifts
+ // (we must convert the byte address to an item address)
+ TIntermTyped* byteAddrIdx = intermediate.addBinaryNode(EOpRightShift, argIndex,
+ intermediate.addConstantUnion(2, loc, true), loc, TType(EbtInt));
+
+ TVariable* byteAddrSym = makeInternalVariable("byteAddrTemp", TType(EbtInt, EvqTemporary));
+ TIntermTyped* byteAddrIdxVar = intermediate.addSymbol(*byteAddrSym, loc);
+
+ body = intermediate.growAggregate(body, intermediate.addAssign(EOpAssign, byteAddrIdxVar, byteAddrIdx, loc));
+
+ for (int idx=0; idx<size; ++idx) {
+ TIntermTyped* offsetIdx = byteAddrIdxVar;
+ TIntermTyped* idxConst = intermediate.addConstantUnion(idx, loc, true);
+
+ // add index offset
+ if (idx != 0)
+ offsetIdx = intermediate.addBinaryNode(EOpAdd, offsetIdx, idxConst, loc, TType(EbtInt));
+
+ const TOperator idxOp = (offsetIdx->getQualifier().storage == EvqConst) ? EOpIndexDirect : EOpIndexIndirect;
+
+ TIntermTyped* lValue = intermediate.addIndex(idxOp, argArray, offsetIdx, loc);
+ TIntermTyped* rValue = (size == 1) ? argValue :
+ intermediate.addIndex(EOpIndexDirect, argValue, idxConst, loc);
+
+ TIntermTyped* assign = intermediate.addAssign(EOpAssign, lValue, rValue, loc);
+
+ body = intermediate.growAggregate(body, assign);
+ }
+
+ body->setOperator(EOpSequence);
+ node = body;
+ }
+
+ break;
+
+ case EOpMethodGetDimensions:
+ {
+ const int numArgs = (int)argAggregate->getSequence().size();
+ TIntermTyped* argNumItems = argAggregate->getSequence()[1]->getAsTyped(); // out num items
+ TIntermTyped* argStride = numArgs > 2 ? argAggregate->getSequence()[2]->getAsTyped() : nullptr; // out stride
+
+ TIntermAggregate* body = nullptr;
+
+ // Length output:
+ if (argArray->getType().isRuntimeSizedArray()) {
+ TIntermTyped* lengthCall = intermediate.addBuiltInFunctionCall(loc, EOpArrayLength, true, argArray,
+ argNumItems->getType());
+ TIntermTyped* assign = intermediate.addAssign(EOpAssign, argNumItems, lengthCall, loc);
+ body = intermediate.growAggregate(body, assign, loc);
+ } else {
+ const int length = argArray->getType().getOuterArraySize();
+ TIntermTyped* assign = intermediate.addAssign(EOpAssign, argNumItems, intermediate.addConstantUnion(length, loc, true), loc);
+ body = intermediate.growAggregate(body, assign, loc);
+ }
+
+ // Stride output:
+ if (argStride != nullptr) {
+ int size;
+ int stride;
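+ // getBaseAlignment (std140 = false) applies buffer-block packing rules to compute the element stride.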
+ intermediate.getBaseAlignment(argArray->getType(), size, stride, false,
+ argArray->getType().getQualifier().layoutMatrix == ElmRowMajor);
+
+ TIntermTyped* assign = intermediate.addAssign(EOpAssign, argStride, intermediate.addConstantUnion(stride, loc, true), loc);
+
+ body = intermediate.growAggregate(body, assign);
+ }
+
+ body->setOperator(EOpSequence);
+ node = body;
+ }
+
+ break;
+
+ case EOpInterlockedAdd:
+ case EOpInterlockedAnd:
+ case EOpInterlockedExchange:
+ case EOpInterlockedMax:
+ case EOpInterlockedMin:
+ case EOpInterlockedOr:
+ case EOpInterlockedXor:
+ case EOpInterlockedCompareExchange:
+ case EOpInterlockedCompareStore:
+ {
+ // We'll replace the first argument with the block dereference, and let
+ // downstream decomposition handle the rest.
+
+ TIntermSequence& sequence = argAggregate->getSequence();
+
+ TIntermTyped* argIndex = sequence[1]->getAsTyped(); // index
+ argIndex = intermediate.addBinaryNode(EOpRightShift, argIndex, intermediate.addConstantUnion(2, loc, true),
+ loc, TType(EbtInt));
+
+ const TOperator idxOp = (argIndex->getQualifier().storage == EvqConst) ? EOpIndexDirect : EOpIndexIndirect;
+ TIntermTyped* element = intermediate.addIndex(idxOp, argArray, argIndex, loc);
+
+ const TType derefType(argArray->getType(), 0);
+ element->setType(derefType);
+
+ // Replace the numeric byte offset parameter with array reference.
+ sequence[1] = element;
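+ // Drop the buffer object (arg 0); the element reference just stored becomes the first argument.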
+ sequence.erase(sequence.begin(), sequence.begin()+1);
+ }
+ break;
+
+ default:
+ break; // most pass through unchanged
+ }
+}
+
+//
// Decompose DX9 and DX10 sample intrinsics & object methods into AST
//
void HlslParseContext::decomposeSampleMethods(const TSourceLoc& loc, TIntermTyped*& node, TIntermNode* arguments)
@@ -2130,6 +2521,15 @@ void HlslParseContext::decomposeSampleMethods(const TSourceLoc& loc, TIntermType
const TOperator op = node->getAsOperator()->getOp();
const TIntermAggregate* argAggregate = arguments ? arguments->getAsAggregate() : nullptr;
+ // Bail out if not a sampler method
+ if (arguments != nullptr) {
+ if ((argAggregate != nullptr && argAggregate->getSequence()[0]->getAsTyped()->getBasicType() != EbtSampler))
+ return;
+
+ if (argAggregate == nullptr && arguments->getAsTyped()->getBasicType() != EbtSampler)
+ return;
+ }
+
switch (op) {
// **** DX9 intrinsics: ****
case EOpTexture:
@@ -2270,6 +2670,7 @@ void HlslParseContext::decomposeSampleMethods(const TSourceLoc& loc, TIntermType
const TSampler& sampler = texType.getSampler();
const TSamplerDim dim = sampler.dim;
const bool isImage = sampler.isImage();
+ const bool isMs = sampler.isMultiSample();
const int numArgs = (int)argAggregate->getSequence().size();
int numDims = 0;
@@ -2280,6 +2681,7 @@ void HlslParseContext::decomposeSampleMethods(const TSourceLoc& loc, TIntermType
case Esd3D: numDims = 3; break; // W, H, D
case EsdCube: numDims = 2; break; // W, H (cube)
case EsdBuffer: numDims = 1; break; // W (buffers)
+ case EsdRect: numDims = 2; break; // W, H (rect)
default:
assert(0 && "unhandled texture dimension");
}
@@ -2288,17 +2690,31 @@ void HlslParseContext::decomposeSampleMethods(const TSourceLoc& loc, TIntermType
if (sampler.isArrayed())
++numDims;
- // Establish whether we're querying mip levels
- const bool mipQuery = (numArgs > (numDims + 1)) && (!sampler.isMultiSample());
+ // Establish whether the method itself is querying mip levels. This can be false even
+ // if the underlying query requires a MIP level, due to the available HLSL method overloads.
+ const bool mipQuery = (numArgs > (numDims + 1 + (isMs ? 1 : 0)));
+
+ // Establish whether we must use the LOD form of query (even if the method did not supply a mip level to query).
+ // True if:
+ // 1. 1D/2D/3D/Cube AND multisample==0 AND NOT image (those can be sent to the non-LOD query)
+ // or,
+ // 2. There is a LOD (because the non-LOD query cannot be used in that case, per spec)
+ const bool mipRequired =
+ ((dim == Esd1D || dim == Esd2D || dim == Esd3D || dim == EsdCube) && !isMs && !isImage) || // 1...
+ mipQuery; // 2...
// AST assumes integer return. Will be converted to float if required.
TIntermAggregate* sizeQuery = new TIntermAggregate(isImage ? EOpImageQuerySize : EOpTextureQuerySize);
sizeQuery->getSequence().push_back(argTex);
- // If we're querying an explicit LOD, add the LOD, which is always arg #1
- if (mipQuery) {
- TIntermTyped* queryLod = argAggregate->getSequence()[1]->getAsTyped();
+
+ // If we're building an LOD query, add the LOD.
+ if (mipRequired) {
+ // If the base HLSL query had no MIP level given, use level 0.
+ TIntermTyped* queryLod = mipQuery ? argAggregate->getSequence()[1]->getAsTyped() :
+ intermediate.addConstantUnion(0, loc, true);
sizeQuery->getSequence().push_back(queryLod);
}
+
sizeQuery->setType(TType(EbtUint, EvqTemporary, numDims));
sizeQuery->setLoc(loc);
@@ -2589,8 +3005,10 @@ void HlslParseContext::decomposeSampleMethods(const TSourceLoc& loc, TIntermType
// For now, we have nothing to map the component-wise comparison forms
// to, because neither GLSL nor SPIR-V has such an opcode. Issue an
// unimplemented error instead. Most of the machinery is here if that
- // should ever become available.
- if (cmpValues) {
+ // should ever become available. However, red can be passed through
+ // to OpImageDrefGather. G/B/A cannot, because that opcode does not
+ // accept a component.
+ if (cmpValues != 0 && op != EOpMethodGatherCmpRed) {
error(loc, "unimplemented: component-level gather compare", "", "");
return;
}
@@ -2671,14 +3089,16 @@ void HlslParseContext::decomposeSampleMethods(const TSourceLoc& loc, TIntermType
}
// Add comparison value if we have one
- if (argTex->getType().getSampler().isShadow())
+ if (argCmp != nullptr)
txgather->getSequence().push_back(argCmp);
// Add offset (either 1, or an array of 4) if we have one
if (argOffset != nullptr)
txgather->getSequence().push_back(argOffset);
- txgather->getSequence().push_back(argChannel);
+ // Add channel value if the sampler is not shadow
+ if (! argSamp->getType().getSampler().isShadow())
+ txgather->getSequence().push_back(argChannel);
txgather->setType(node->getType());
txgather->setLoc(loc);
@@ -3153,10 +3573,107 @@ void HlslParseContext::decomposeIntrinsic(const TSourceLoc& loc, TIntermTyped*&
}
case EOpF16tof32:
+ {
+ // input uvecN with low 16 bits of each component holding a float16. convert to float32.
+ TIntermTyped* argValue = node->getAsUnaryNode()->getOperand();
+ TIntermTyped* zero = intermediate.addConstantUnion(0, loc, true);
+ const int vecSize = argValue->getType().getVectorSize();
+
+ TOperator constructOp = EOpNull;
+ switch (vecSize) {
+ case 1: constructOp = EOpNull; break; // direct use, no construct needed
+ case 2: constructOp = EOpConstructVec2; break;
+ case 3: constructOp = EOpConstructVec3; break;
+ case 4: constructOp = EOpConstructVec4; break;
+ default: assert(0); break;
+ }
+
+ // For scalar case, we don't need to construct another type.
+ TIntermAggregate* result = (vecSize > 1) ? new TIntermAggregate(constructOp) : nullptr;
+
+ if (result) {
+ result->setType(TType(EbtFloat, EvqTemporary, vecSize));
+ result->setLoc(loc);
+ }
+
+ for (int idx = 0; idx < vecSize; ++idx) {
+ TIntermTyped* idxConst = intermediate.addConstantUnion(idx, loc, true);
+ TIntermTyped* component = argValue->getType().isVector() ?
+ intermediate.addIndex(EOpIndexDirect, argValue, idxConst, loc) : argValue;
+
+ if (component != argValue)
+ component->setType(TType(argValue->getBasicType(), EvqTemporary));
+
+ TIntermTyped* unpackOp = new TIntermUnary(EOpUnpackHalf2x16);
+ unpackOp->setType(TType(EbtFloat, EvqTemporary, 2));
+ unpackOp->getAsUnaryNode()->setOperand(component);
+ unpackOp->setLoc(loc);
+
+ TIntermTyped* lowOrder = intermediate.addIndex(EOpIndexDirect, unpackOp, zero, loc);
+
+ if (result != nullptr) {
+ result->getSequence().push_back(lowOrder);
+ node = result;
+ } else {
+ node = lowOrder;
+ }
+ }
+
+ break;
+ }
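A worked example (a sketch, not from the patch) of the per-component decomposition above:

    // f16tof32(0x3C00u) == 1.0f, since 0x3C00 is 1.0 in IEEE half precision.
    // Per component, the HLSL
    //     float f = f16tof32(u);
    // becomes roughly the GLSL
    //     float f = unpackHalf2x16(u).x;   // keep only the low 16 bits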
+
case EOpF32tof16:
{
- // Temporary until decomposition is available.
- error(loc, "unimplemented intrinsic: handle natively", "f32tof16", "");
+ // input floatN converted to 16 bit float in low order bits of each component of uintN
+ TIntermTyped* argValue = node->getAsUnaryNode()->getOperand();
+
+ TIntermTyped* zero = intermediate.addConstantUnion(0.0, EbtFloat, loc, true);
+ const int vecSize = argValue->getType().getVectorSize();
+
+ TOperator constructOp = EOpNull;
+ switch (vecSize) {
+ case 1: constructOp = EOpNull; break; // direct use, no construct needed
+ case 2: constructOp = EOpConstructUVec2; break;
+ case 3: constructOp = EOpConstructUVec3; break;
+ case 4: constructOp = EOpConstructUVec4; break;
+ default: assert(0); break;
+ }
+
+ // For scalar case, we don't need to construct another type.
+ TIntermAggregate* result = (vecSize > 1) ? new TIntermAggregate(constructOp) : nullptr;
+
+ if (result) {
+ result->setType(TType(EbtUint, EvqTemporary, vecSize));
+ result->setLoc(loc);
+ }
+
+ for (int idx = 0; idx < vecSize; ++idx) {
+ TIntermTyped* idxConst = intermediate.addConstantUnion(idx, loc, true);
+ TIntermTyped* component = argValue->getType().isVector() ?
+ intermediate.addIndex(EOpIndexDirect, argValue, idxConst, loc) : argValue;
+
+ if (component != argValue)
+ component->setType(TType(argValue->getBasicType(), EvqTemporary));
+
+ TIntermAggregate* vec2ComponentAndZero = new TIntermAggregate(EOpConstructVec2);
+ vec2ComponentAndZero->getSequence().push_back(component);
+ vec2ComponentAndZero->getSequence().push_back(zero);
+ vec2ComponentAndZero->setType(TType(EbtFloat, EvqTemporary, 2));
+ vec2ComponentAndZero->setLoc(loc);
+
+ TIntermTyped* packOp = new TIntermUnary(EOpPackHalf2x16);
+ packOp->getAsUnaryNode()->setOperand(vec2ComponentAndZero);
+ packOp->setLoc(loc);
+ packOp->setType(TType(EbtUint, EvqTemporary));
+
+ if (result != nullptr) {
+ result->getSequence().push_back(packOp);
+ node = result;
+ } else {
+ node = packOp;
+ }
+ }
+
break;
}
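A worked example (a sketch, not from the patch) of the reverse direction:

    // f32tof16(1.0f) == 0x00003C00u. Pairing each component with 0.0 in the
    // high half keeps the converted value in the low 16 bits:
    //     uint u = f32tof16(x);                  // HLSL
    //     uint u = packHalf2x16(vec2(x, 0.0));   // rough GLSL equivalent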
@@ -3204,9 +3721,7 @@ TIntermTyped* HlslParseContext::handleFunctionCall(const TSourceLoc& loc, TFunct
TIntermTyped* result = nullptr;
TOperator op = function->getBuiltInOp();
- if (op == EOpArrayLength)
- result = handleLengthMethod(loc, function, arguments);
- else if (op != EOpNull) {
+ if (op != EOpNull) {
//
// Then this should be a constructor.
// Don't go through the symbol table for constructors.
@@ -3225,9 +3740,30 @@ TIntermTyped* HlslParseContext::handleFunctionCall(const TSourceLoc& loc, TFunct
//
// Find it in the symbol table.
//
- const TFunction* fnCandidate;
- bool builtIn;
- fnCandidate = findFunction(loc, *function, builtIn, arguments);
+ const TFunction* fnCandidate = nullptr;
+ bool builtIn = false;
+
+ // TODO: this needs improvement: there's no way at present to look up a signature in
+ // the symbol table for an arbitrary type. This is a temporary hack until that ability exists.
+ // It will have false positives, since it doesn't check arg counts or types.
+ if (arguments && arguments->getAsAggregate()) {
+ if (isStructBufferType(arguments->getAsAggregate()->getSequence()[0]->getAsTyped()->getType())) {
+ static const int methodPrefixSize = sizeof(BUILTIN_PREFIX)-1;
+
+ if (function->getName().length() > methodPrefixSize &&
+ isStructBufferMethod(function->getName().substr(methodPrefixSize))) {
+ const TString mangle = function->getName() + "(";
+ TSymbol* symbol = symbolTable.find(mangle, &builtIn);
+
+ if (symbol)
+ fnCandidate = symbol->getAsFunction();
+ }
+ }
+ }
+
+ if (fnCandidate == nullptr)
+ fnCandidate = findFunction(loc, *function, builtIn, arguments);
+
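As a sketch of the false positives the TODO above mentions (hypothetical example):

    // AppendStructuredBuffer<float4> sb;
    // sb.Append(a, b);   // wrong argument count, but the name-only mangled
    //                    // lookup above still returns the built-in candidate;
    //                    // the mismatch only surfaces in later argument handling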
if (fnCandidate) {
// This is a declared function that might map to
// - a built-in operator,
@@ -3273,9 +3809,10 @@ TIntermTyped* HlslParseContext::handleFunctionCall(const TSourceLoc& loc, TFunct
// output conversions.
const TIntermTyped* fnNode = result;
- decomposeIntrinsic(loc, result, arguments); // HLSL->AST intrinsic decompositions
- decomposeSampleMethods(loc, result, arguments); // HLSL->AST sample method decompositions
- decomposeGeometryMethods(loc, result, arguments); // HLSL->AST geometry method decompositions
+ decomposeStructBufferMethods(loc, result, arguments); // HLSL->AST struct buffer method decompositions
+ decomposeIntrinsic(loc, result, arguments); // HLSL->AST intrinsic decompositions
+ decomposeSampleMethods(loc, result, arguments); // HLSL->AST sample method decompositions
+ decomposeGeometryMethods(loc, result, arguments); // HLSL->AST geometry method decompositions
// Convert 'out' arguments. If it was a constant folded built-in, it won't be an aggregate anymore.
// Built-ins with a single argument aren't called with an aggregate, but they also don't have an output.
@@ -3301,41 +3838,6 @@ TIntermTyped* HlslParseContext::handleFunctionCall(const TSourceLoc& loc, TFunct
return result;
}
-// Finish processing object.length(). This started earlier in handleDotDereference(), where
-// the ".length" part was recognized and semantically checked, and finished here where the
-// function syntax "()" is recognized.
-//
-// Return resulting tree node.
-TIntermTyped* HlslParseContext::handleLengthMethod(const TSourceLoc& loc, TFunction* function, TIntermNode* intermNode)
-{
- int length = 0;
-
- if (function->getParamCount() > 0)
- error(loc, "method does not accept any arguments", function->getName().c_str(), "");
- else {
- const TType& type = intermNode->getAsTyped()->getType();
- if (type.isArray()) {
- if (type.isRuntimeSizedArray()) {
- // Create a unary op and let the back end handle it
- return intermediate.addBuiltInFunctionCall(loc, EOpArrayLength, true, intermNode, TType(EbtInt));
- } else
- length = type.getOuterArraySize();
- } else if (type.isMatrix())
- length = type.getMatrixCols();
- else if (type.isVector())
- length = type.getVectorSize();
- else {
- // we should not get here, because earlier semantic checking should have prevented this path
- error(loc, ".length()", "unexpected use of .length()", "");
- }
- }
-
- if (length == 0)
- length = 1;
-
- return intermediate.addConstantUnion(length, loc);
-}
-
//
// Add any needed implicit conversions for function-call arguments to input parameters.
//
@@ -3659,118 +4161,24 @@ TFunction* HlslParseContext::handleConstructorCall(const TSourceLoc& loc, const
// Handle seeing a "COLON semantic" at the end of a type declaration,
// by updating the type according to the semantic.
//
-void HlslParseContext::handleSemantic(TSourceLoc loc, TQualifier& qualifier, const TString& semantic)
-{
- // TODO: need to know if it's an input or an output
- // The following sketches what needs to be done, but can't be right
- // without taking into account stage and input/output.
-
- TString semanticUpperCase = semantic;
- std::transform(semanticUpperCase.begin(), semanticUpperCase.end(), semanticUpperCase.begin(), ::toupper);
- // in DX9, all outputs had to have a semantic associated with them, that was either consumed
- // by the system or was a specific register assignment
- // in DX10+, only semantics with the SV_ prefix have any meaning beyond decoration
- // Fxc will only accept DX9 style semantics in compat mode
- // Also, in DX10 if a SV value is present as the input of a stage, but isn't appropriate for that
- // stage, it would just be ignored as it is likely there as part of an output struct from one stage
- // to the next
-
- bool bParseDX9 = false;
- if (bParseDX9) {
- if (semanticUpperCase == "PSIZE")
- qualifier.builtIn = EbvPointSize;
- else if (semantic == "FOG")
- qualifier.builtIn = EbvFogFragCoord;
- else if (semanticUpperCase == "DEPTH")
- qualifier.builtIn = EbvFragDepth;
- else if (semanticUpperCase == "VFACE")
- qualifier.builtIn = EbvFace;
- else if (semanticUpperCase == "VPOS")
- qualifier.builtIn = EbvFragCoord;
- }
-
- // SV Position has a different meaning in vertex vs fragment
- if (semanticUpperCase == "SV_POSITION" && language != EShLangFragment)
- qualifier.builtIn = EbvPosition;
- else if (semanticUpperCase == "SV_POSITION" && language == EShLangFragment)
- qualifier.builtIn = EbvFragCoord;
- else if (semanticUpperCase == "SV_CLIPDISTANCE")
- qualifier.builtIn = EbvClipDistance;
- else if (semanticUpperCase == "SV_CULLDISTANCE")
- qualifier.builtIn = EbvCullDistance;
- else if (semanticUpperCase == "SV_VERTEXID")
- qualifier.builtIn = EbvVertexIndex;
- else if (semanticUpperCase == "SV_VIEWPORTARRAYINDEX")
- qualifier.builtIn = EbvViewportIndex;
- else if (semanticUpperCase == "SV_TESSFACTOR")
- qualifier.builtIn = EbvTessLevelOuter;
-
- // Targets are defined 0-7
- else if (semanticUpperCase == "SV_TARGET") {
- qualifier.builtIn = EbvNone;
- // qualifier.layoutLocation = 0;
- } else if (semanticUpperCase == "SV_TARGET0") {
- qualifier.builtIn = EbvNone;
- // qualifier.layoutLocation = 0;
- } else if (semanticUpperCase == "SV_TARGET1") {
- qualifier.builtIn = EbvNone;
- // qualifier.layoutLocation = 1;
- } else if (semanticUpperCase == "SV_TARGET2") {
- qualifier.builtIn = EbvNone;
- // qualifier.layoutLocation = 2;
- } else if (semanticUpperCase == "SV_TARGET3") {
- qualifier.builtIn = EbvNone;
- // qualifier.layoutLocation = 3;
- } else if (semanticUpperCase == "SV_TARGET4") {
- qualifier.builtIn = EbvNone;
- // qualifier.layoutLocation = 4;
- } else if (semanticUpperCase == "SV_TARGET5") {
- qualifier.builtIn = EbvNone;
- // qualifier.layoutLocation = 5;
- } else if (semanticUpperCase == "SV_TARGET6") {
- qualifier.builtIn = EbvNone;
- // qualifier.layoutLocation = 6;
- } else if (semanticUpperCase == "SV_TARGET7") {
- qualifier.builtIn = EbvNone;
- // qualifier.layoutLocation = 7;
- } else if (semanticUpperCase == "SV_SAMPLEINDEX")
- qualifier.builtIn = EbvSampleId;
- else if (semanticUpperCase == "SV_RENDERTARGETARRAYINDEX")
- qualifier.builtIn = EbvLayer;
- else if (semanticUpperCase == "SV_PRIMITIVEID")
- qualifier.builtIn = EbvPrimitiveId;
- else if (semanticUpperCase == "SV_OUTPUTCONTROLPOINTID")
- qualifier.builtIn = EbvInvocationId;
- else if (semanticUpperCase == "SV_ISFRONTFACE")
- qualifier.builtIn = EbvFace;
- else if (semanticUpperCase == "SV_INSTANCEID")
- qualifier.builtIn = EbvInstanceIndex;
- else if (semanticUpperCase == "SV_INSIDETESSFACTOR")
- qualifier.builtIn = EbvTessLevelInner;
- else if (semanticUpperCase == "SV_GSINSTANCEID")
- qualifier.builtIn = EbvInvocationId;
- else if (semanticUpperCase == "SV_DISPATCHTHREADID")
- qualifier.builtIn = EbvGlobalInvocationId;
- else if (semanticUpperCase == "SV_GROUPTHREADID")
- qualifier.builtIn = EbvLocalInvocationId;
- else if (semanticUpperCase == "SV_GROUPID")
- qualifier.builtIn = EbvWorkGroupId;
- else if (semanticUpperCase == "SV_DOMAINLOCATION")
- qualifier.builtIn = EbvTessCoord;
- else if (semanticUpperCase == "SV_DEPTH")
- qualifier.builtIn = EbvFragDepth;
- else if( semanticUpperCase == "SV_COVERAGE")
- qualifier.builtIn = EbvSampleMask;
-
- // TODO, these need to get refined to be more specific
- else if( semanticUpperCase == "SV_DEPTHGREATEREQUAL")
- qualifier.builtIn = EbvFragDepthGreater;
- else if( semanticUpperCase == "SV_DEPTHLESSEQUAL")
- qualifier.builtIn = EbvFragDepthLesser;
- else if( semanticUpperCase == "SV_STENCILREF")
+void HlslParseContext::handleSemantic(TSourceLoc loc, TQualifier& qualifier, TBuiltInVariable builtIn, const TString& upperCase)
+{
+ // adjust for stage in/out
+
+ switch(builtIn) {
+ case EbvPosition:
+ if (language == EShLangFragment)
+ builtIn = EbvFragCoord;
+ break;
+ case EbvStencilRef:
error(loc, "unimplemented; need ARB_shader_stencil_export", "SV_STENCILREF", "");
- else if( semanticUpperCase == "SV_GROUPINDEX")
- error(loc, "unimplemented", "SV_GROUPINDEX", "");
+ break;
+ default:
+ break;
+ }
+
+ qualifier.builtIn = builtIn;
+ qualifier.semanticName = intermediate.addSemanticName(upperCase);
}
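For example (a sketch, not from the patch), a declaration "float4 p : SV_Position" reaches here as EbvPosition with upperCase == "SV_POSITION"; in a fragment shader the switch above rewrites it:

    // handleSemantic(loc, qualifier, EbvPosition, "SV_POSITION");
    // qualifier.builtIn      == EbvFragCoord   (fragment stage only)
    // qualifier.semanticName -> "SV_POSITION"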
//
@@ -4369,7 +4777,7 @@ void HlslParseContext::declareArray(const TSourceLoc& loc, TString& identifier,
symbol = new TVariable(&identifier, type);
symbolTable.insert(*symbol);
if (track && symbolTable.atGlobalLevel())
- trackLinkageDeferred(*symbol);
+ trackLinkage(*symbol);
return;
}
@@ -4600,7 +5008,96 @@ void HlslParseContext::redeclareBuiltinBlock(const TSourceLoc& loc, TTypeList& n
symbolTable.insert(*block);
// Save it in the AST for linker use.
- trackLinkageDeferred(*block);
+ trackLinkage(*block);
+}
+
+//
+// Generate index to the array element in a structure buffer (SSBO)
+//
+TIntermTyped* HlslParseContext::indexStructBufferContent(const TSourceLoc& loc, TIntermTyped* buffer) const
+{
+ // Bail out if not a struct buffer
+ if (buffer == nullptr || ! isStructBufferType(buffer->getType()))
+ return nullptr;
+
+ // Runtime sized array is always the last element.
+ const TTypeList* bufferStruct = buffer->getType().getStruct();
+ TIntermTyped* arrayPosition = intermediate.addConstantUnion(unsigned(bufferStruct->size()-1), loc);
+
+ TIntermTyped* argArray = intermediate.addIndex(EOpIndexDirectStruct, buffer, arrayPosition, loc);
+ argArray->setType(*(*bufferStruct)[bufferStruct->size()-1].type);
+
+ return argArray;
+}
+
+//
+// IFF type is a structuredbuffer/byteaddressbuffer type, return the content
+// (template) type. E.g., StructuredBuffer<MyType> -> MyType. Else return nullptr.
+//
+TType* HlslParseContext::getStructBufferContentType(const TType& type) const
+{
+ if (type.getBasicType() != EbtBlock)
+ return nullptr;
+
+ const int memberCount = (int)type.getStruct()->size();
+ assert(memberCount > 0);
+
+ TType* contentType = (*type.getStruct())[memberCount-1].type;
+
+ return contentType->isRuntimeSizedArray() ? contentType : nullptr;
+}
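For example (a sketch): a declaration such as

    StructuredBuffer<MyType> sb;

is represented as a block whose final member is a runtime-sized array of MyType; this function returns that member's type, and indexStructBufferContent() above builds the EOpIndexDirectStruct node that selects it.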
+
+//
+// If an existing struct buffer has a sharable type, then share it.
+//
+void HlslParseContext::shareStructBufferType(TType& type)
+{
+ // PackOffset must be equivalent to share types on a per-member basis.
+ // Note: cannot use auto type due to recursion. Thus, this is a std::function.
+ const std::function<bool(TType& lhs, TType& rhs)>
+ compareQualifiers = [&](TType& lhs, TType& rhs) -> bool {
+ if (lhs.getQualifier().layoutOffset != rhs.getQualifier().layoutOffset)
+ return false;
+
+ if (lhs.isStruct() != rhs.isStruct())
+ return false;
+
+ if (lhs.isStruct() && rhs.isStruct()) {
+ if (lhs.getStruct()->size() != rhs.getStruct()->size())
+ return false;
+
+ for (int i = 0; i < int(lhs.getStruct()->size()); ++i)
+ if (!compareQualifiers(*(*lhs.getStruct())[i].type, *(*rhs.getStruct())[i].type))
+ return false;
+ }
+
+ return true;
+ };
+
+ // We need to compare certain qualifiers in addition to the type.
+ const auto typeEqual = [compareQualifiers](TType& lhs, TType& rhs) -> bool {
+ if (lhs.getQualifier().readonly != rhs.getQualifier().readonly)
+ return false;
+
+ // If both are structures, recursively look for packOffset equality
+ // as well as type equality.
+ return compareQualifiers(lhs, rhs) && lhs == rhs;
+ };
+
+ // This is an exhaustive O(N) search, but real world shaders have
+ // only a small number of these.
+ for (int idx = 0; idx < int(structBufferTypes.size()); ++idx) {
+ // If the deep structure matches, modulo qualifiers, use it
+ if (typeEqual(*structBufferTypes[idx], type)) {
+ type.shallowCopy(*structBufferTypes[idx]);
+ return;
+ }
+ }
+
+ // Otherwise, remember it:
+ TType* typeCopy = new TType;
+ typeCopy->shallowCopy(type);
+ structBufferTypes.push_back(typeCopy);
}
void HlslParseContext::paramFix(TType& type)
@@ -4613,6 +5110,18 @@ void HlslParseContext::paramFix(TType& type)
case EvqTemporary:
type.getQualifier().storage = EvqIn;
break;
+ case EvqBuffer:
+ {
+ // SSBO parameter. These do not go through the declareBlock path since they are fn parameters.
+ correctUniform(type.getQualifier());
+ TQualifier bufferQualifier = globalBufferDefaults;
+ mergeObjectLayoutQualifiers(bufferQualifier, type.getQualifier(), true);
+ bufferQualifier.storage = type.getQualifier().storage;
+ bufferQualifier.readonly = type.getQualifier().readonly;
+ bufferQualifier.coherent = type.getQualifier().coherent;
+ type.getQualifier() = bufferQualifier;
+ break;
+ }
default:
break;
}
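For example (a sketch): in

    void accumulate(RWStructuredBuffer<uint> counters) { /* ... */ }

the parameter never passes through declareBlock(), so the EvqBuffer case above merges the global buffer layout defaults into it while preserving the declared storage, readonly, and coherent bits.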
@@ -5337,51 +5846,124 @@ const TFunction* HlslParseContext::findFunction(const TSourceLoc& loc, TFunction
// 'parseType' is the type part of the declaration (to the left)
// 'arraySizes' is the arrayness tagged on the identifier (to the right)
//
-void HlslParseContext::declareTypedef(const TSourceLoc& loc, TString& identifier, const TType& parseType, TArraySizes* /*arraySizes*/)
+void HlslParseContext::declareTypedef(const TSourceLoc& loc, TString& identifier, const TType& parseType)
{
- TType type;
- type.deepCopy(parseType);
-
- TVariable* typeSymbol = new TVariable(&identifier, type, true);
+ TVariable* typeSymbol = new TVariable(&identifier, parseType, true);
if (! symbolTable.insert(*typeSymbol))
error(loc, "name already defined", "typedef", identifier.c_str());
}
-// Type sanitization: return existing sanitized (temporary) type if there is one, else make new one.
-TType* HlslParseContext::sanitizeType(TType* type)
+// Do everything necessary to handle a struct declaration, including
+// making IO aliases because HLSL allows mixed IO in a struct that specializes
+// based on the usage (input, output, uniform, none).
+void HlslParseContext::declareStruct(const TSourceLoc& loc, TString& structName, TType& type)
{
-    // We only do this for structs.
-    if (!type->isStruct())
-        return type;
-    // Type sanitization: if this is declaring a variable of a type that contains
-    // interstage IO, we want to make it a temporary.
-    const auto sanitizedTypeIter = sanitizedTypeMap.find(type->getStruct());
-    if (sanitizedTypeIter != sanitizedTypeMap.end()) {
-        // We've sanitized this before. Use that one.
-        TType* sanitizedType = new TType();
-        sanitizedType->shallowCopy(*sanitizedTypeIter->second);
-        // Arrayness is not part of the sanitized type. Use the input type's arrayness.
-        if (type->isArray())
-            sanitizedType->newArraySizes(type->getArraySizes());
-        else
-            sanitizedType->clearArraySizes();
-        return sanitizedType;
-    } else {
-        if (type->containsBuiltInInterstageIO(language)) {
-            // This means the type contains interstage IO, but we've never encountered it before.
-            // Copy it, sanitize it, and remember it in the sanitizedTypeMap
-            TType* sanitizedType = type->clone();
-            sanitizedType->makeTemporary();
-            sanitizedTypeMap[type->getStruct()] = sanitizedType;
-            return sanitizedType;
-        } else {
-            // This means the type has no interstage IO, so we can use it as is.
-            return type;
+    // If it was named, which means the type can be reused later, add
+    // it to the symbol table. (Unless it's a block, in which
+    // case the name is not a type.)
+    if (type.getBasicType() == EbtBlock || structName.size() == 0)
+        return;
+
+    TVariable* userTypeDef = new TVariable(&structName, type, true);
+    if (! symbolTable.insert(*userTypeDef)) {
+        error(loc, "redefinition", structName.c_str(), "struct");
+        return;
+    }
+
+    // See if we need IO aliases for the structure typeList
+    const auto condAlloc = [](bool pred, TTypeList*& list) {
+        if (pred && list == nullptr)
+            list = new TTypeList;
+    };
+ tIoKinds newLists = { nullptr, nullptr, nullptr }; // allocate for each kind found
+ for (auto member = type.getStruct()->begin(); member != type.getStruct()->end(); ++member) {
+ condAlloc(hasUniform(member->type->getQualifier()), newLists.uniform);
+ condAlloc( hasInput(member->type->getQualifier()), newLists.input);
+ condAlloc( hasOutput(member->type->getQualifier()), newLists.output);
+
+ if (member->type->isStruct()) {
+ auto it = ioTypeMap.find(member->type->getStruct());
+ if (it != ioTypeMap.end()) {
+ condAlloc(it->second.uniform != nullptr, newLists.uniform);
+ condAlloc(it->second.input != nullptr, newLists.input);
+ condAlloc(it->second.output != nullptr, newLists.output);
+ }
+ }
+ }
+ if (newLists.uniform == nullptr &&
+ newLists.input == nullptr &&
+ newLists.output == nullptr) {
+ // Won't do any IO caching, clear up the type and get out now.
+ for (auto member = type.getStruct()->begin(); member != type.getStruct()->end(); ++member)
+ clearUniformInputOutput(member->type->getQualifier());
+ return;
+ }
+
+ // We have IO involved.
+
+ // Make a pure typeList for the symbol table, and cache side copies of IO versions.
+ for (auto member = type.getStruct()->begin(); member != type.getStruct()->end(); ++member) {
+ const auto inheritStruct = [&](TTypeList* s, TTypeLoc& ioMember) {
+ if (s != nullptr) {
+ ioMember.type = new TType;
+ ioMember.type->shallowCopy(*member->type);
+ ioMember.type->setStruct(s);
+ }
+ };
+ const auto newMember = [&](TTypeLoc& m) {
+ if (m.type == nullptr) {
+ m.type = new TType;
+ m.type->shallowCopy(*member->type);
+ }
+ };
+
+ TTypeLoc newUniformMember = { nullptr, member->loc };
+ TTypeLoc newInputMember = { nullptr, member->loc };
+ TTypeLoc newOutputMember = { nullptr, member->loc };
+ if (member->type->isStruct()) {
+ // swap in an IO child if there is one
+ auto it = ioTypeMap.find(member->type->getStruct());
+ if (it != ioTypeMap.end()) {
+ inheritStruct(it->second.uniform, newUniformMember);
+ inheritStruct(it->second.input, newInputMember);
+ inheritStruct(it->second.output, newOutputMember);
+ }
+ }
+ if (newLists.uniform) {
+ newMember(newUniformMember);
+ correctUniform(newUniformMember.type->getQualifier());
+ newLists.uniform->push_back(newUniformMember);
+ }
+ if (newLists.input) {
+ newMember(newInputMember);
+ correctInput(newInputMember.type->getQualifier());
+ newLists.input->push_back(newInputMember);
}
+ if (newLists.output) {
+ newMember(newOutputMember);
+ correctOutput(newOutputMember.type->getQualifier());
+ newLists.output->push_back(newOutputMember);
+ }
+
+ // make original pure
+ clearUniformInputOutput(member->type->getQualifier());
}
+ ioTypeMap[type.getStruct()] = newLists;
+}
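For example (a sketch): given

    struct V {
        float4 pos : SV_Position;
        float3 nrm;
    };

the symbol table receives a "pure" V with no IO decorations, while ioTypeMap keeps side copies of the member list corrected for uniform, input, and output use. A local "V v;" uses the pure type; the same V used as a stage output later picks up the output copy.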
+
+// Lookup a user-type by name.
+// If found, fill in the type and return the defining symbol.
+// If not found, return nullptr.
+TSymbol* HlslParseContext::lookupUserType(const TString& typeName, TType& type)
+{
+ TSymbol* symbol = symbolTable.find(typeName);
+ if (symbol && symbol->getAsVariable() && symbol->getAsVariable()->isUserType()) {
+ type.shallowCopy(symbol->getType());
+ return symbol;
+ } else
+ return nullptr;
}
//
@@ -5408,21 +5990,36 @@ TIntermNode* HlslParseContext::declareVariable(const TSourceLoc& loc, TString& i
inheritGlobalDefaults(type.getQualifier());
-    const bool flattenVar = shouldFlatten(type);
-    const bool splitVar = shouldSplit(type);
-    // Type sanitization: if this is declaring a variable of a type that contains
-    // interstage IO, we want to make it a temporary.
-    TType* sanitizedType = sanitizeType(&type);
+    const bool flattenVar = shouldFlattenUniform(type);
+    // correct IO in the type
+ switch (type.getQualifier().storage) {
+ case EvqGlobal:
+ case EvqTemporary:
+ clearUniformInputOutput(type.getQualifier());
+ break;
+ case EvqUniform:
+ case EvqBuffer:
+ correctUniform(type.getQualifier());
+ if (type.isStruct()) {
+ auto it = ioTypeMap.find(type.getStruct());
+ if (it != ioTypeMap.end())
+ type.setStruct(it->second.uniform);
+ }
+
+ break;
+ default:
+ break;
+ }
// Declare the variable
if (type.isArray()) {
// array case
- declareArray(loc, identifier, *sanitizedType, symbol, !flattenVar);
+ declareArray(loc, identifier, type, symbol, !flattenVar);
} else {
// non-array case
if (! symbol)
- symbol = declareNonArray(loc, identifier, *sanitizedType, !flattenVar);
+ symbol = declareNonArray(loc, identifier, type, !flattenVar);
else if (type != symbol->getType())
error(loc, "cannot change the type of", "redeclaration", symbol->getName().c_str());
}
@@ -5430,9 +6027,6 @@ TIntermNode* HlslParseContext::declareVariable(const TSourceLoc& loc, TString& i
if (flattenVar)
flatten(loc, *symbol->getAsVariable());
- if (splitVar)
- split(*symbol->getAsVariable());
-
if (! symbol)
return nullptr;
@@ -5493,7 +6087,7 @@ TVariable* HlslParseContext::declareNonArray(const TSourceLoc& loc, TString& ide
// add variable to symbol table
if (symbolTable.insert(*variable)) {
if (track && symbolTable.atGlobalLevel())
- trackLinkageDeferred(*variable);
+ trackLinkage(*variable);
return variable;
}
@@ -5953,6 +6547,22 @@ void HlslParseContext::declareBlock(const TSourceLoc& loc, TType& type, const TS
{
assert(type.getWritableStruct() != nullptr);
+ // Clean up top-level decorations that don't belong.
+ switch (type.getQualifier().storage) {
+ case EvqUniform:
+ case EvqBuffer:
+ correctUniform(type.getQualifier());
+ break;
+ case EvqVaryingIn:
+ correctInput(type.getQualifier());
+ break;
+ case EvqVaryingOut:
+ correctOutput(type.getQualifier());
+ break;
+ default:
+ break;
+ }
+
TTypeList& typeList = *type.getWritableStruct();
// fix and check for member storage qualifiers and types that don't belong within a block
for (unsigned int member = 0; member < typeList.size(); ++member) {
@@ -5961,6 +6571,31 @@ void HlslParseContext::declareBlock(const TSourceLoc& loc, TType& type, const TS
const TSourceLoc& memberLoc = typeList[member].loc;
globalQualifierFix(memberLoc, memberQualifier);
memberQualifier.storage = type.getQualifier().storage;
+
+ if (memberType.isStruct()) {
+ // clean up and pick up the right set of decorations
+ auto it = ioTypeMap.find(memberType.getStruct());
+ switch (type.getQualifier().storage) {
+ case EvqUniform:
+ case EvqBuffer:
+ correctUniform(type.getQualifier());
+ if (it != ioTypeMap.end() && it->second.uniform)
+                    memberType.setStruct(it->second.uniform);
+ break;
+ case EvqVaryingIn:
+ correctInput(type.getQualifier());
+ if (it != ioTypeMap.end() && it->second.input)
+                    memberType.setStruct(it->second.input);
+ break;
+ case EvqVaryingOut:
+ correctOutput(type.getQualifier());
+ if (it != ioTypeMap.end() && it->second.output)
+                    memberType.setStruct(it->second.output);
+ break;
+ default:
+ break;
+ }
+ }
}
// This might be a redeclaration of a built-in block. If so, redeclareBuiltinBlock() will
@@ -6009,8 +6644,6 @@ void HlslParseContext::declareBlock(const TSourceLoc& loc, TType& type, const TS
error(memberLoc, "member cannot contradict block (or what block inherited from global)", "xfb_buffer", "");
}
- if (memberQualifier.hasPacking())
- error(memberLoc, "member of block cannot have a packing layout qualifier", typeList[member].type->getFieldName().c_str(), "");
if (memberQualifier.hasLocation()) {
switch (type.getQualifier().storage) {
case EvqVaryingIn:
@@ -6022,10 +6655,6 @@ void HlslParseContext::declareBlock(const TSourceLoc& loc, TType& type, const TS
}
} else
memberWithoutLocation = true;
- if (memberQualifier.hasAlign()) {
- if (defaultQualification.layoutPacking != ElpStd140 && defaultQualification.layoutPacking != ElpStd430)
- error(memberLoc, "can only be used with std140 or std430 layout packing", "align", "");
- }
TQualifier newMemberQualification = defaultQualification;
mergeQualifiers(newMemberQualification, memberQualifier);
@@ -6068,14 +6697,7 @@ void HlslParseContext::declareBlock(const TSourceLoc& loc, TType& type, const TS
}
// Save it in the AST for linker use.
- trackLinkageDeferred(variable);
-}
-
-void HlslParseContext::finalizeGlobalUniformBlockLayout(TVariable& block)
-{
- block.getWritableType().getQualifier().layoutPacking = ElpStd140;
- block.getWritableType().getQualifier().layoutMatrix = ElmRowMajor;
- fixBlockUniformOffsets(block.getType().getQualifier(), *block.getWritableType().getWritableStruct());
+ trackLinkage(variable);
}
//
@@ -6473,6 +7095,60 @@ TIntermNode* HlslParseContext::addSwitch(const TSourceLoc& loc, TIntermTyped* ex
return switchNode;
}
+// Make a new symbol-table level that is made out of the members of a structure.
+// This should be done as an anonymous struct (name is "") so that the symbol table
+// finds the members with no explicit reference to a 'this' variable.
+void HlslParseContext::pushThisScope(const TType& thisStruct)
+{
+ TVariable& thisVariable = *new TVariable(NewPoolTString(""), thisStruct);
+ symbolTable.pushThis(thisVariable);
+}
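For example (a sketch): in a member function such as

    struct S {
        float4 pos;
        float4 get() { return pos; }
    };

the members of S are pushed as an anonymous struct level, so the plain "pos" resolves without an explicit "this." prefix.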
+
+// Track levels of class/struct/namespace nesting with a prefix string using
+// the type names separated by the scoping operator. E.g., two levels
+// would look like:
+//
+// outer::inner
+//
+// The string is empty when at normal global level.
+//
+void HlslParseContext::pushNamespace(const TString& typeName)
+{
+ // make new type prefix
+ TString newPrefix;
+ if (currentTypePrefix.size() > 0) {
+ newPrefix = currentTypePrefix.back();
+ newPrefix.append(scopeMangler);
+ }
+ newPrefix.append(typeName);
+ currentTypePrefix.push_back(newPrefix);
+}
+
+// Opposite of pushNamespace(), see above
+void HlslParseContext::popNamespace()
+{
+ currentTypePrefix.pop_back();
+}
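For example (a sketch):

    pushNamespace("outer");
    pushNamespace("inner");
    // currentTypePrefix.back() is now "outer" + scopeMangler + "inner";
    // getFullNamespaceName("f") appends scopeMangler and "f" to that prefix.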
+
+// Use the class/struct nesting string to create a global name for
+// a member of a class/struct.
+TString* HlslParseContext::getFullNamespaceName(const TString& localName) const
+{
+ TString* name = NewPoolTString("");
+ if (currentTypePrefix.size() > 0)
+ name->append(currentTypePrefix.back());
+ name->append(scopeMangler);
+ name->append(localName);
+
+ return name;
+}
+
+// Helper function to add the namespace scope mangling syntax to a string.
+void HlslParseContext::addScopeMangler(TString& name)
+{
+ name.append(scopeMangler);
+}
+
// Potentially rename shader entry point function
void HlslParseContext::renameShaderFunction(TString*& name) const
{
@@ -6482,9 +7158,477 @@ void HlslParseContext::renameShaderFunction(TString*& name) const
name = NewPoolTString(intermediate.getEntryPointName().c_str());
}
+// Return true if this has uniform-interface-like decorations.
+bool HlslParseContext::hasUniform(const TQualifier& qualifier) const
+{
+ return qualifier.hasUniformLayout() ||
+ qualifier.layoutPushConstant;
+}
+
+// Potentially not the opposite of hasUniform(), as if some characteristic is
+// ever used for more than one thing (e.g., uniform or input), hasUniform() should
+// say it exists, but clearUniform() should leave it in place.
+void HlslParseContext::clearUniform(TQualifier& qualifier)
+{
+ qualifier.clearUniformLayout();
+ qualifier.layoutPushConstant = false;
+}
+
+// Return true if the given builtIn, by itself, forces this qualifier to be an input qualifier for the current stage.
+bool HlslParseContext::isInputBuiltIn(const TQualifier& qualifier) const
+{
+ switch (qualifier.builtIn) {
+ case EbvPosition:
+ case EbvPointSize:
+ return language != EShLangVertex && language != EShLangCompute && language != EShLangFragment;
+ case EbvClipDistance:
+ case EbvCullDistance:
+ return language != EShLangVertex && language != EShLangCompute;
+ case EbvFragCoord:
+ case EbvFace:
+ case EbvHelperInvocation:
+ case EbvLayer:
+ case EbvPointCoord:
+ case EbvSampleId:
+ case EbvSampleMask:
+ case EbvSamplePosition:
+ case EbvViewportIndex:
+ return language == EShLangFragment;
+ case EbvGlobalInvocationId:
+ case EbvLocalInvocationIndex:
+ case EbvLocalInvocationId:
+ case EbvNumWorkGroups:
+ case EbvWorkGroupId:
+ case EbvWorkGroupSize:
+ return language == EShLangCompute;
+ case EbvInvocationId:
+ return language == EShLangTessControl || language == EShLangTessEvaluation || language == EShLangGeometry;
+ case EbvPatchVertices:
+ return language == EShLangTessControl || language == EShLangTessEvaluation;
+ case EbvInstanceId:
+ case EbvInstanceIndex:
+ case EbvVertexId:
+ case EbvVertexIndex:
+ return language == EShLangVertex;
+ case EbvPrimitiveId:
+ return language == EShLangGeometry || language == EShLangFragment;
+ case EbvTessLevelInner:
+ case EbvTessLevelOuter:
+ return language == EShLangTessEvaluation;
+ default:
+ return false;
+ }
+}
+
+// Return true if there are decorations to preserve for input-like storage.
+bool HlslParseContext::hasInput(const TQualifier& qualifier) const
+{
+ if (qualifier.hasAnyLocation())
+ return true;
+
+ if (language == EShLangFragment && (qualifier.isInterpolation() || qualifier.centroid || qualifier.sample))
+ return true;
+
+ if (language == EShLangTessEvaluation && qualifier.patch)
+ return true;
+
+ if (isInputBuiltIn(qualifier))
+ return true;
+
+ return false;
+}
+
+// Return true if the given builtIn, by itself, forces this qualifier to be an output qualifier for the current stage.
+bool HlslParseContext::isOutputBuiltIn(const TQualifier& qualifier) const
+{
+ switch (qualifier.builtIn) {
+ case EbvPosition:
+ case EbvPointSize:
+ case EbvClipVertex:
+ case EbvClipDistance:
+ case EbvCullDistance:
+ return language != EShLangFragment && language != EShLangCompute;
+ case EbvFragDepth:
+ case EbvFragDepthGreater:
+ case EbvFragDepthLesser:
+ case EbvSampleMask:
+ return language == EShLangFragment;
+ case EbvLayer:
+ case EbvViewportIndex:
+ return language == EShLangGeometry;
+ case EbvPrimitiveId:
+ return language == EShLangGeometry || language == EShLangTessControl || language == EShLangTessEvaluation;
+ case EbvTessLevelInner:
+ case EbvTessLevelOuter:
+ return language == EShLangTessControl;
+ default:
+ return false;
+ }
+}
+
+// Return true if there are decorations to preserve for output-like storage.
+bool HlslParseContext::hasOutput(const TQualifier& qualifier) const
+{
+ if (qualifier.hasAnyLocation())
+ return true;
+
+ if (language != EShLangFragment && language != EShLangCompute && qualifier.hasXfb())
+ return true;
+
+ if (language == EShLangTessControl && qualifier.patch)
+ return true;
+
+ if (language == EShLangGeometry && qualifier.hasStream())
+ return true;
+
+ if (isOutputBuiltIn(qualifier))
+ return true;
+
+ return false;
+}
+
+// Make the IO decorations etc. be appropriate only for an input interface.
+void HlslParseContext::correctInput(TQualifier& qualifier)
+{
+ clearUniform(qualifier);
+ if (language == EShLangVertex)
+ qualifier.clearInterstage();
+ if (language != EShLangTessEvaluation)
+ qualifier.patch = false;
+ if (language != EShLangFragment) {
+ qualifier.clearInterpolation();
+ qualifier.sample = false;
+ }
+
+ qualifier.clearStreamLayout();
+ qualifier.clearXfbLayout();
+
+ if (! isInputBuiltIn(qualifier))
+ qualifier.builtIn = EbvNone;
+}
+
+// Make the IO decorations etc. be appropriate only for an output interface.
+void HlslParseContext::correctOutput(TQualifier& qualifier)
+{
+ clearUniform(qualifier);
+ if (language == EShLangFragment)
+ qualifier.clearInterstage();
+ if (language != EShLangGeometry)
+ qualifier.clearStreamLayout();
+ if (language == EShLangFragment)
+ qualifier.clearXfbLayout();
+ if (language != EShLangTessControl)
+ qualifier.patch = false;
+
+ switch (qualifier.builtIn) {
+ case EbvFragDepthGreater:
+ intermediate.setDepth(EldGreater);
+ qualifier.builtIn = EbvFragDepth;
+ break;
+ case EbvFragDepthLesser:
+ intermediate.setDepth(EldLess);
+ qualifier.builtIn = EbvFragDepth;
+ break;
+ default:
+ break;
+ }
+
+ if (! isOutputBuiltIn(qualifier))
+ qualifier.builtIn = EbvNone;
+}
+
+// Make the IO decorations etc. be appropriate only for uniform type interfaces.
+void HlslParseContext::correctUniform(TQualifier& qualifier)
+{
+ qualifier.builtIn = EbvNone;
+ qualifier.clearInterstage();
+ qualifier.clearInterstageLayout();
+}
+
+// Clear out all IO/Uniform stuff, so this has nothing to do with being an IO interface.
+void HlslParseContext::clearUniformInputOutput(TQualifier& qualifier)
+{
+ clearUniform(qualifier);
+ correctUniform(qualifier);
+}
+
+// Add patch constant function invocation
+void HlslParseContext::addPatchConstantInvocation()
+{
+ TSourceLoc loc;
+ loc.init();
+
+ // If there's no patch constant function, or we're not a HS, do nothing.
+ if (patchConstantFunctionName.empty() || language != EShLangTessControl)
+ return;
+
+ if (symbolTable.isFunctionNameVariable(patchConstantFunctionName)) {
+ error(loc, "can't use variable in patch constant function", patchConstantFunctionName.c_str(), "");
+ return;
+ }
+
+ const TString mangledName = patchConstantFunctionName + "(";
+
+ // create list of PCF candidates
+ TVector<const TFunction*> candidateList;
+ bool builtIn;
+ symbolTable.findFunctionNameList(mangledName, candidateList, builtIn);
+
+ // We have to have one and only one, or we don't know which to pick: the patchconstantfunc does not
+ // allow any disambiguation of overloads.
+ if (candidateList.empty()) {
+ error(loc, "patch constant function not found", patchConstantFunctionName.c_str(), "");
+ return;
+ }
+
+ // Based on directed experiments, it appears that if there are overloaded patchconstantfunctions,
+ // HLSL picks the last one in shader source order. Since that isn't yet implemented here, error
+ // out if there is more than one candidate.
+ if (candidateList.size() > 1) {
+ error(loc, "ambiguous patch constant function", patchConstantFunctionName.c_str(), "");
+ return;
+ }
+
+ // Look for builtin variables in a function's parameter list.
+ const auto findBuiltIns = [&](const TFunction& function, std::set<tInterstageIoData>& builtIns) {
+ for (int p=0; p<function.getParamCount(); ++p) {
+ const TStorageQualifier storage = function[p].type->getQualifier().storage;
+
+ if (function[p].declaredBuiltIn != EbvNone)
+ builtIns.insert(HlslParseContext::tInterstageIoData(function[p].declaredBuiltIn, storage));
+ else
+ builtIns.insert(HlslParseContext::tInterstageIoData(function[p].type->getQualifier().builtIn, storage));
+ }
+ };
+
+ // If we synthesize a builtin interface variable, we must add it to the linkage.
+ const auto addToLinkage = [&](const TType& type, const TString* name, TIntermSymbol** symbolNode) {
+ if (name == nullptr) {
+ error(loc, "unable to locate patch function parameter name", "", "");
+ return;
+ } else {
+ TVariable& variable = *new TVariable(name, type);
+ if (! symbolTable.insert(variable)) {
+ error(loc, "unable to declare patch constant function interface variable", name->c_str(), "");
+ return;
+ }
+
+ globalQualifierFix(loc, variable.getWritableType().getQualifier());
+
+ if (symbolNode != nullptr)
+ *symbolNode = intermediate.addSymbol(variable);
+
+ trackLinkage(variable);
+ }
+ };
+
+ // Return a symbol for the linkage variable of the given TBuiltInVariable type
+ const auto findLinkageSymbol = [this](TBuiltInVariable biType) -> TIntermSymbol* {
+ const auto it = builtInLinkageSymbols.find(biType);
+ if (it == builtInLinkageSymbols.end()) // if it wasn't declared by the user, return nullptr
+ return nullptr;
+
+ return intermediate.addSymbol(*it->second->getAsVariable());
+ };
+
+ // We will perform these steps. Each is in a scoped block for separation: they could
+ // become separate functions to make addPatchConstantInvocation shorter.
+ //
+ // 1. Union the interfaces, and create builtins for anything present in the PCF and
+ // declared as a builtin variable that isn't present in the entry point's signature.
+ //
+    // 2. Synthesize a call to the patchconstfunction using builtin variables from either main,
+ // or the ones we created. Matching is based on builtin type. We may use synthesized
+ // variables from (1) above.
+ //
+ // 3. Create a return sequence: copy the return value (if any) from the PCF to a
+    //    output variable. In case this may involve multiple copies, such as for
+ // an arrayed variable, a temporary copy of the PCF output is created to avoid multiple
+ // indirections into a complex R-value coming from the call to the PCF.
+ //
+ // 4. Add a barrier to the end of the entry point body
+ //
+ // 5. Call the PCF inside an if test for (invocation id == 0).
+
+ TFunction& patchConstantFunction = const_cast<TFunction&>(*candidateList[0]);
+ const int pcfParamCount = patchConstantFunction.getParamCount();
+ TIntermSymbol* invocationIdSym = findLinkageSymbol(EbvInvocationId);
+ TIntermSequence& epBodySeq = entryPointFunctionBody->getAsAggregate()->getSequence();
+
+ // ================ Step 1A: Union Interfaces ================
+ // Our patch constant function.
+ {
+ std::set<tInterstageIoData> pcfBuiltIns; // patch constant function builtins
+ std::set<tInterstageIoData> epfBuiltIns; // entry point function builtins
+
+ assert(entryPointFunction);
+ assert(entryPointFunctionBody);
+
+ findBuiltIns(patchConstantFunction, pcfBuiltIns);
+ findBuiltIns(*entryPointFunction, epfBuiltIns);
+
+ // Patchconstantfunction can contain only builtin qualified variables. (Technically, only HS inputs,
+ // but this test is less assertive than that).
+
+ for (auto bi = pcfBuiltIns.begin(); bi != pcfBuiltIns.end(); ++bi) {
+ if (bi->builtIn == EbvNone) {
+ error(loc, "patch constant function invalid parameter", "", "");
+ return;
+ }
+ }
+
+ // Find the set of builtins in the PCF that are not present in the entry point.
+ std::set<tInterstageIoData> notInEntryPoint;
+
+ notInEntryPoint = pcfBuiltIns;
+
+ // std::set_difference not usable on unordered containers
+ for (auto bi = epfBuiltIns.begin(); bi != epfBuiltIns.end(); ++bi)
+ notInEntryPoint.erase(*bi);
+
+ // Now we'll add those to the entry and to the linkage.
+ for (int p=0; p<pcfParamCount; ++p) {
+ TType* paramType = patchConstantFunction[p].type->clone();
+ const TBuiltInVariable biType = patchConstantFunction[p].declaredBuiltIn;
+ const TStorageQualifier storage = patchConstantFunction[p].type->getQualifier().storage;
+
+ // Use the original declaration type for the linkage
+ paramType->getQualifier().builtIn = biType;
+
+ if (notInEntryPoint.count(tInterstageIoData(biType, storage)) == 1)
+ addToLinkage(*paramType, patchConstantFunction[p].name, nullptr);
+ }
+
+ // If we didn't find it because the shader made one, add our own.
+ if (invocationIdSym == nullptr) {
+ TType invocationIdType(EbtUint, EvqIn, 1);
+ TString* invocationIdName = NewPoolTString("InvocationId");
+ invocationIdType.getQualifier().builtIn = EbvInvocationId;
+ addToLinkage(invocationIdType, invocationIdName, &invocationIdSym);
+ }
+
+ assert(invocationIdSym);
+ }
+
+ TIntermTyped* pcfArguments = nullptr;
+
+ // ================ Step 1B: Argument synthesis ================
+ // Create pcfArguments for synthesis of patchconstantfunction invocation
+ // TODO: handle struct or array inputs
+ {
+ for (int p=0; p<pcfParamCount; ++p) {
+ if (patchConstantFunction[p].type->isArray() ||
+ patchConstantFunction[p].type->isStruct()) {
+                error(loc, "unimplemented array or struct in patch constant function signature", "", "");
+ return;
+ }
+
+ // find which builtin it is
+ const TBuiltInVariable biType = patchConstantFunction[p].declaredBuiltIn;
+
+ TIntermSymbol* builtIn = findLinkageSymbol(biType);
+
+ if (builtIn == nullptr) {
+ error(loc, "unable to find patch constant function builtin variable", "", "");
+ return;
+ }
+
+ if (pcfParamCount == 1)
+ pcfArguments = builtIn;
+ else
+ pcfArguments = intermediate.growAggregate(pcfArguments, builtIn);
+ }
+ }
+
+ // ================ Step 2: Synthesize call to PCF ================
+ TIntermTyped* pcfCall = nullptr;
+
+ {
+ // Create a function call to the patchconstantfunction
+ if (pcfArguments)
+ addInputArgumentConversions(patchConstantFunction, pcfArguments);
+
+ // Synthetic call.
+ pcfCall = intermediate.setAggregateOperator(pcfArguments, EOpFunctionCall, patchConstantFunction.getType(), loc);
+ pcfCall->getAsAggregate()->setUserDefined();
+ pcfCall->getAsAggregate()->setName(patchConstantFunction.getMangledName());
+ intermediate.addToCallGraph(infoSink, entryPointFunction->getMangledName(), patchConstantFunction.getMangledName());
+
+ if (pcfCall->getAsAggregate()) {
+ TQualifierList& qualifierList = pcfCall->getAsAggregate()->getQualifierList();
+ for (int i = 0; i < patchConstantFunction.getParamCount(); ++i) {
+ TStorageQualifier qual = patchConstantFunction[i].type->getQualifier().storage;
+ qualifierList.push_back(qual);
+ }
+ pcfCall = addOutputArgumentConversions(patchConstantFunction, *pcfCall->getAsOperator());
+ }
+ }
+
+ // ================ Step 3: Create return Sequence ================
+ // Return sequence: copy PCF result to a temporary, then to shader output variable.
+ if (pcfCall->getBasicType() != EbtVoid) {
+ const TType* retType = &patchConstantFunction.getType(); // return type from the PCF
+ TType outType; // output type that goes with the return type.
+ outType.shallowCopy(*retType);
+
+ // substitute the output type
+ const auto newLists = ioTypeMap.find(retType->getStruct());
+ if (newLists != ioTypeMap.end())
+ outType.setStruct(newLists->second.output);
+
+ // Substitute the top level type's builtin type
+ if (patchConstantFunction.getDeclaredBuiltInType() != EbvNone)
+ outType.getQualifier().builtIn = patchConstantFunction.getDeclaredBuiltInType();
+
+ TVariable* pcfOutput = makeInternalVariable("@patchConstantOutput", outType);
+ pcfOutput->getWritableType().getQualifier().storage = EvqVaryingOut;
+
+ if (pcfOutput->getType().containsBuiltInInterstageIO(language))
+ split(*pcfOutput);
+
+ TIntermSymbol* pcfOutputSym = intermediate.addSymbol(*pcfOutput, loc);
+
+ // The call to the PCF is a complex R-value: we want to store it in a temp to avoid
+ // repeated calls to the PCF:
+ TVariable* pcfCallResult = makeInternalVariable("@patchConstantResult", *retType);
+ pcfCallResult->getWritableType().getQualifier().makeTemporary();
+ TIntermSymbol* pcfResultVar = intermediate.addSymbol(*pcfCallResult, loc);
+ TIntermNode* pcfResultAssign = intermediate.addAssign(EOpAssign, pcfResultVar, pcfCall, loc);
+
+ TIntermNode* pcfResultToOut = handleAssign(loc, EOpAssign, pcfOutputSym, intermediate.addSymbol(*pcfCallResult, loc));
+
+ TIntermTyped* pcfAggregate = nullptr;
+ pcfAggregate = intermediate.growAggregate(pcfAggregate, pcfResultAssign);
+ pcfAggregate = intermediate.growAggregate(pcfAggregate, pcfResultToOut);
+ pcfAggregate = intermediate.setAggregateOperator(pcfAggregate, EOpSequence, *retType, loc);
+
+ pcfCall = pcfAggregate;
+ }
+
+ // ================ Step 4: Barrier ================
+ TIntermTyped* barrier = new TIntermAggregate(EOpBarrier);
+ barrier->setLoc(loc);
+ barrier->setType(TType(EbtVoid));
+ epBodySeq.insert(epBodySeq.end(), barrier);
+
+ // ================ Step 5: Test on invocation ID ================
+ TIntermTyped* zero = intermediate.addConstantUnion(0, loc, true);
+ TIntermTyped* cmp = intermediate.addBinaryNode(EOpEqual, invocationIdSym, zero, loc, TType(EbtBool));
+
+ // Create if statement
+ TIntermTyped* invocationIdTest = new TIntermSelection(cmp, pcfCall, nullptr);
+ invocationIdTest->setLoc(loc);
+
+ // add our test sequence before the return.
+ epBodySeq.insert(epBodySeq.end(), invocationIdTest);
+}
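For illustration, a sketch (not from the patch) of the synthesized epilogue for a hull shader declared with [patchconstantfunc("pcf")]:

    // appended to the end of the entry point body, in order:
    //     barrier();
    //     if (InvocationId == 0) {
    //         @patchConstantResult = pcf(<builtin args>);   // Step 3 temporary
    //         @patchConstantOutput = @patchConstantResult;  // copy to output
    //     }
    // The temporary avoids re-evaluating the complex R-value returned by the
    // PCF while it is copied (possibly member-by-member) to the output.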
+
// post-processing
void HlslParseContext::finish()
{
+ addPatchConstantInvocation();
addInterstageIoToLinkage();
TParseContextBase::finish();