Fix issues with explicit conversions.

Certain int8/int16/float16 conversions are conditionally allowed: they are
permitted only when an extension defining explicit arithmetic types is enabled.
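
As a rough sketch of the rule being enforced (an illustrative shader, not part
of this commit, assuming GL_EXT_shader_16bit_storage supplies the float16_t
declaration without arithmetic support; the exact diagnostics are up to the
front end):

    #version 450
    #extension GL_EXT_shader_16bit_storage : require
    // #extension GL_EXT_shader_explicit_arithmetic_types_float16 : require

    layout(set = 0, binding = 0) buffer Data { float16_t h; };

    void main()
    {
        float f = float(h); // allowed: float16 converts to/from other float types
        uint  u = uint(h);  // expected to be rejected unless an explicit
                            // arithmetic types extension (commented out
                            // above) is also enabled
    }
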
Rex Xu 2019-04-25 10:34:28 +08:00
parent 86c72c9486
commit d932cc4295

@@ -498,6 +498,58 @@ TIntermTyped* TIntermediate::createConversion(TBasicType convertTo, TIntermTyped
    TOperator newOp = EOpNull;

    // Certain explicit conversions are allowed conditionally
    bool arithemeticInt8Enabled = extensionRequested(E_GL_EXT_shader_explicit_arithmetic_types) ||
                                  extensionRequested(E_GL_EXT_shader_explicit_arithmetic_types_int8);
#ifdef AMD_EXTENSIONS
    bool arithemeticInt16Enabled = extensionRequested(E_GL_EXT_shader_explicit_arithmetic_types) ||
                                   extensionRequested(E_GL_EXT_shader_explicit_arithmetic_types_int16) ||
                                   extensionRequested(E_GL_AMD_gpu_shader_int16);
    bool arithemeticFloat16Enabled = extensionRequested(E_GL_EXT_shader_explicit_arithmetic_types) ||
                                     extensionRequested(E_GL_EXT_shader_explicit_arithmetic_types_float16) ||
                                     extensionRequested(E_GL_AMD_gpu_shader_half_float);
#else
    bool arithemeticInt16Enabled = extensionRequested(E_GL_EXT_shader_explicit_arithmetic_types) ||
                                   extensionRequested(E_GL_EXT_shader_explicit_arithmetic_types_int16);
    bool arithemeticFloat16Enabled = extensionRequested(E_GL_EXT_shader_explicit_arithmetic_types) ||
                                     extensionRequested(E_GL_EXT_shader_explicit_arithmetic_types_float16);
#endif

    bool convertToIntTypes = (convertTo == EbtInt8 || convertTo == EbtUint8 ||
                              convertTo == EbtInt16 || convertTo == EbtUint16 ||
                              convertTo == EbtInt || convertTo == EbtUint ||
                              convertTo == EbtInt64 || convertTo == EbtUint64);

    bool convertFromIntTypes = (node->getBasicType() == EbtInt8 || node->getBasicType() == EbtUint8 ||
                                node->getBasicType() == EbtInt16 || node->getBasicType() == EbtUint16 ||
                                node->getBasicType() == EbtInt || node->getBasicType() == EbtUint ||
                                node->getBasicType() == EbtInt64 || node->getBasicType() == EbtUint64);

    bool convertToFloatTypes = (convertTo == EbtFloat16 || convertTo == EbtFloat || convertTo == EbtDouble);

    bool convertFromFloatTypes = (node->getBasicType() == EbtFloat16 ||
                                  node->getBasicType() == EbtFloat ||
                                  node->getBasicType() == EbtDouble);

    // Without the corresponding arithmetic-types extension, (u)int8 and (u)int16 values may only
    // be converted to/from other integer types, and float16 only to/from other float types.
    if (! arithemeticInt8Enabled) {
        if (((convertTo == EbtInt8 || convertTo == EbtUint8) && ! convertFromIntTypes) ||
            ((node->getBasicType() == EbtInt8 || node->getBasicType() == EbtUint8) && ! convertToIntTypes))
            return nullptr;
    }

    if (! arithemeticInt16Enabled) {
        if (((convertTo == EbtInt16 || convertTo == EbtUint16) && ! convertFromIntTypes) ||
            ((node->getBasicType() == EbtInt16 || node->getBasicType() == EbtUint16) && ! convertToIntTypes))
            return nullptr;
    }

    if (! arithemeticFloat16Enabled) {
        if ((convertTo == EbtFloat16 && ! convertFromFloatTypes) ||
            (node->getBasicType() == EbtFloat16 && ! convertToFloatTypes))
            return nullptr;
    }

    switch (convertTo) {
    case EbtDouble:
        switch (node->getBasicType()) {