From 573ca4a4a79e19dc4e5160664843677010fe2c86 Mon Sep 17 00:00:00 2001 From: Jason Ekstrand Date: Mon, 27 Apr 2015 21:13:44 -0700 Subject: [PATCH 01/44] nir: Import the revision 30 SPIR-V header from Khronos --- src/glsl/nir/spirv.h | 1304 ++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 1304 insertions(+) create mode 100644 src/glsl/nir/spirv.h diff --git a/src/glsl/nir/spirv.h b/src/glsl/nir/spirv.h new file mode 100644 index 00000000000..93135c09596 --- /dev/null +++ b/src/glsl/nir/spirv.h @@ -0,0 +1,1304 @@ +/* +** Copyright (c) 2015 The Khronos Group Inc. +** +** Permission is hereby granted, free of charge, to any person obtaining a copy +** of this software and/or associated documentation files (the "Materials"), +** to deal in the Materials without restriction, including without limitation +** the rights to use, copy, modify, merge, publish, distribute, sublicense, +** and/or sell copies of the Materials, and to permit persons to whom the +** Materials are furnished to do so, subject to the following conditions: +** +** The above copyright notice and this permission notice shall be included in +** all copies or substantial portions of the Materials. +** +** MODIFICATIONS TO THIS FILE MAY MEAN IT NO LONGER ACCURATELY REFLECTS KHRONOS +** STANDARDS. THE UNMODIFIED, NORMATIVE VERSIONS OF KHRONOS SPECIFICATIONS AND +** HEADER INFORMATION ARE LOCATED AT https://www.khronos.org/registry/ +** +** THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +** OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +** FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL +** THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +** LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +** FROM,OUT OF OR IN CONNECTION WITH THE MATERIALS OR THE USE OR OTHER DEALINGS +** IN THE MATERIALS. +*/ + +/* +** This header is automatically generated by the same tool that creates +** the Binary Section of the SPIR-V specification. +*/ + +/* +** Specification revision 30. +** Enumeration tokens for SPIR-V, in three styles: C, C++, generic. +** - C++ will have the tokens in the "spv" name space, with no prefix. +** - C will have tokens with as "Spv" prefix. +** +** Some tokens act like mask values, which can be OR'd together, +** while others are mutually exclusive. The mask-like ones have +** "Mask" in their name, and a parallel enum that has the shift +** amount (1 << x) for each corresponding enumerant. 
+*/ + +#ifndef spirv_H +#define spirv_H + +#ifdef __cplusplus + +namespace spv { + +const int MagicNumber = 0x07230203; +const int Version = 99; + +typedef unsigned int Id; + +const unsigned int OpCodeMask = 0xFFFF; +const unsigned int WordCountShift = 16; + +enum SourceLanguage { + SourceLanguageUnknown = 0, + SourceLanguageESSL = 1, + SourceLanguageGLSL = 2, + SourceLanguageOpenCL = 3, +}; + +enum ExecutionModel { + ExecutionModelVertex = 0, + ExecutionModelTessellationControl = 1, + ExecutionModelTessellationEvaluation = 2, + ExecutionModelGeometry = 3, + ExecutionModelFragment = 4, + ExecutionModelGLCompute = 5, + ExecutionModelKernel = 6, +}; + +enum AddressingModel { + AddressingModelLogical = 0, + AddressingModelPhysical32 = 1, + AddressingModelPhysical64 = 2, +}; + +enum MemoryModel { + MemoryModelSimple = 0, + MemoryModelGLSL450 = 1, + MemoryModelOpenCL12 = 2, + MemoryModelOpenCL20 = 3, + MemoryModelOpenCL21 = 4, +}; + +enum ExecutionMode { + ExecutionModeInvocations = 0, + ExecutionModeSpacingEqual = 1, + ExecutionModeSpacingFractionalEven = 2, + ExecutionModeSpacingFractionalOdd = 3, + ExecutionModeVertexOrderCw = 4, + ExecutionModeVertexOrderCcw = 5, + ExecutionModePixelCenterInteger = 6, + ExecutionModeOriginUpperLeft = 7, + ExecutionModeEarlyFragmentTests = 8, + ExecutionModePointMode = 9, + ExecutionModeXfb = 10, + ExecutionModeDepthReplacing = 11, + ExecutionModeDepthAny = 12, + ExecutionModeDepthGreater = 13, + ExecutionModeDepthLess = 14, + ExecutionModeDepthUnchanged = 15, + ExecutionModeLocalSize = 16, + ExecutionModeLocalSizeHint = 17, + ExecutionModeInputPoints = 18, + ExecutionModeInputLines = 19, + ExecutionModeInputLinesAdjacency = 20, + ExecutionModeInputTriangles = 21, + ExecutionModeInputTrianglesAdjacency = 22, + ExecutionModeInputQuads = 23, + ExecutionModeInputIsolines = 24, + ExecutionModeOutputVertices = 25, + ExecutionModeOutputPoints = 26, + ExecutionModeOutputLineStrip = 27, + ExecutionModeOutputTriangleStrip = 28, + ExecutionModeVecTypeHint = 29, + ExecutionModeContractionOff = 30, +}; + +enum StorageClass { + StorageClassUniformConstant = 0, + StorageClassInput = 1, + StorageClassUniform = 2, + StorageClassOutput = 3, + StorageClassWorkgroupLocal = 4, + StorageClassWorkgroupGlobal = 5, + StorageClassPrivateGlobal = 6, + StorageClassFunction = 7, + StorageClassGeneric = 8, + StorageClassPrivate = 9, + StorageClassAtomicCounter = 10, +}; + +enum Dim { + Dim1D = 0, + Dim2D = 1, + Dim3D = 2, + DimCube = 3, + DimRect = 4, + DimBuffer = 5, +}; + +enum SamplerAddressingMode { + SamplerAddressingModeNone = 0, + SamplerAddressingModeClampToEdge = 1, + SamplerAddressingModeClamp = 2, + SamplerAddressingModeRepeat = 3, + SamplerAddressingModeRepeatMirrored = 4, +}; + +enum SamplerFilterMode { + SamplerFilterModeNearest = 0, + SamplerFilterModeLinear = 1, +}; + +enum FPFastMathModeShift { + FPFastMathModeNotNaNShift = 0, + FPFastMathModeNotInfShift = 1, + FPFastMathModeNSZShift = 2, + FPFastMathModeAllowRecipShift = 3, + FPFastMathModeFastShift = 4, +}; + +enum FPFastMathModeMask { + FPFastMathModeMaskNone = 0, + FPFastMathModeNotNaNMask = 0x00000001, + FPFastMathModeNotInfMask = 0x00000002, + FPFastMathModeNSZMask = 0x00000004, + FPFastMathModeAllowRecipMask = 0x00000008, + FPFastMathModeFastMask = 0x00000010, +}; + +enum FPRoundingMode { + FPRoundingModeRTE = 0, + FPRoundingModeRTZ = 1, + FPRoundingModeRTP = 2, + FPRoundingModeRTN = 3, +}; + +enum LinkageType { + LinkageTypeExport = 0, + LinkageTypeImport = 1, +}; + +enum AccessQualifier { + 
AccessQualifierReadOnly = 0, + AccessQualifierWriteOnly = 1, + AccessQualifierReadWrite = 2, +}; + +enum FunctionParameterAttribute { + FunctionParameterAttributeZext = 0, + FunctionParameterAttributeSext = 1, + FunctionParameterAttributeByVal = 2, + FunctionParameterAttributeSret = 3, + FunctionParameterAttributeNoAlias = 4, + FunctionParameterAttributeNoCapture = 5, + FunctionParameterAttributeSVM = 6, + FunctionParameterAttributeNoWrite = 7, + FunctionParameterAttributeNoReadWrite = 8, +}; + +enum Decoration { + DecorationPrecisionLow = 0, + DecorationPrecisionMedium = 1, + DecorationPrecisionHigh = 2, + DecorationBlock = 3, + DecorationBufferBlock = 4, + DecorationRowMajor = 5, + DecorationColMajor = 6, + DecorationGLSLShared = 7, + DecorationGLSLStd140 = 8, + DecorationGLSLStd430 = 9, + DecorationGLSLPacked = 10, + DecorationSmooth = 11, + DecorationNoperspective = 12, + DecorationFlat = 13, + DecorationPatch = 14, + DecorationCentroid = 15, + DecorationSample = 16, + DecorationInvariant = 17, + DecorationRestrict = 18, + DecorationAliased = 19, + DecorationVolatile = 20, + DecorationConstant = 21, + DecorationCoherent = 22, + DecorationNonwritable = 23, + DecorationNonreadable = 24, + DecorationUniform = 25, + DecorationNoStaticUse = 26, + DecorationCPacked = 27, + DecorationSaturatedConversion = 28, + DecorationStream = 29, + DecorationLocation = 30, + DecorationComponent = 31, + DecorationIndex = 32, + DecorationBinding = 33, + DecorationDescriptorSet = 34, + DecorationOffset = 35, + DecorationAlignment = 36, + DecorationXfbBuffer = 37, + DecorationStride = 38, + DecorationBuiltIn = 39, + DecorationFuncParamAttr = 40, + DecorationFPRoundingMode = 41, + DecorationFPFastMathMode = 42, + DecorationLinkageAttributes = 43, + DecorationSpecId = 44, +}; + +enum BuiltIn { + BuiltInPosition = 0, + BuiltInPointSize = 1, + BuiltInClipVertex = 2, + BuiltInClipDistance = 3, + BuiltInCullDistance = 4, + BuiltInVertexId = 5, + BuiltInInstanceId = 6, + BuiltInPrimitiveId = 7, + BuiltInInvocationId = 8, + BuiltInLayer = 9, + BuiltInViewportIndex = 10, + BuiltInTessLevelOuter = 11, + BuiltInTessLevelInner = 12, + BuiltInTessCoord = 13, + BuiltInPatchVertices = 14, + BuiltInFragCoord = 15, + BuiltInPointCoord = 16, + BuiltInFrontFacing = 17, + BuiltInSampleId = 18, + BuiltInSamplePosition = 19, + BuiltInSampleMask = 20, + BuiltInFragColor = 21, + BuiltInFragDepth = 22, + BuiltInHelperInvocation = 23, + BuiltInNumWorkgroups = 24, + BuiltInWorkgroupSize = 25, + BuiltInWorkgroupId = 26, + BuiltInLocalInvocationId = 27, + BuiltInGlobalInvocationId = 28, + BuiltInLocalInvocationIndex = 29, + BuiltInWorkDim = 30, + BuiltInGlobalSize = 31, + BuiltInEnqueuedWorkgroupSize = 32, + BuiltInGlobalOffset = 33, + BuiltInGlobalLinearId = 34, + BuiltInWorkgroupLinearId = 35, + BuiltInSubgroupSize = 36, + BuiltInSubgroupMaxSize = 37, + BuiltInNumSubgroups = 38, + BuiltInNumEnqueuedSubgroups = 39, + BuiltInSubgroupId = 40, + BuiltInSubgroupLocalInvocationId = 41, +}; + +enum SelectionControlShift { + SelectionControlFlattenShift = 0, + SelectionControlDontFlattenShift = 1, +}; + +enum SelectionControlMask { + SelectionControlMaskNone = 0, + SelectionControlFlattenMask = 0x00000001, + SelectionControlDontFlattenMask = 0x00000002, +}; + +enum LoopControlShift { + LoopControlUnrollShift = 0, + LoopControlDontUnrollShift = 1, +}; + +enum LoopControlMask { + LoopControlMaskNone = 0, + LoopControlUnrollMask = 0x00000001, + LoopControlDontUnrollMask = 0x00000002, +}; + +enum FunctionControlShift { + FunctionControlInlineShift 
= 0, + FunctionControlDontInlineShift = 1, + FunctionControlPureShift = 2, + FunctionControlConstShift = 3, +}; + +enum FunctionControlMask { + FunctionControlMaskNone = 0, + FunctionControlInlineMask = 0x00000001, + FunctionControlDontInlineMask = 0x00000002, + FunctionControlPureMask = 0x00000004, + FunctionControlConstMask = 0x00000008, +}; + +enum MemorySemanticsShift { + MemorySemanticsRelaxedShift = 0, + MemorySemanticsSequentiallyConsistentShift = 1, + MemorySemanticsAcquireShift = 2, + MemorySemanticsReleaseShift = 3, + MemorySemanticsUniformMemoryShift = 4, + MemorySemanticsSubgroupMemoryShift = 5, + MemorySemanticsWorkgroupLocalMemoryShift = 6, + MemorySemanticsWorkgroupGlobalMemoryShift = 7, + MemorySemanticsAtomicCounterMemoryShift = 8, + MemorySemanticsImageMemoryShift = 9, +}; + +enum MemorySemanticsMask { + MemorySemanticsMaskNone = 0, + MemorySemanticsRelaxedMask = 0x00000001, + MemorySemanticsSequentiallyConsistentMask = 0x00000002, + MemorySemanticsAcquireMask = 0x00000004, + MemorySemanticsReleaseMask = 0x00000008, + MemorySemanticsUniformMemoryMask = 0x00000010, + MemorySemanticsSubgroupMemoryMask = 0x00000020, + MemorySemanticsWorkgroupLocalMemoryMask = 0x00000040, + MemorySemanticsWorkgroupGlobalMemoryMask = 0x00000080, + MemorySemanticsAtomicCounterMemoryMask = 0x00000100, + MemorySemanticsImageMemoryMask = 0x00000200, +}; + +enum MemoryAccessShift { + MemoryAccessVolatileShift = 0, + MemoryAccessAlignedShift = 1, +}; + +enum MemoryAccessMask { + MemoryAccessMaskNone = 0, + MemoryAccessVolatileMask = 0x00000001, + MemoryAccessAlignedMask = 0x00000002, +}; + +enum ExecutionScope { + ExecutionScopeCrossDevice = 0, + ExecutionScopeDevice = 1, + ExecutionScopeWorkgroup = 2, + ExecutionScopeSubgroup = 3, +}; + +enum GroupOperation { + GroupOperationReduce = 0, + GroupOperationInclusiveScan = 1, + GroupOperationExclusiveScan = 2, +}; + +enum KernelEnqueueFlags { + KernelEnqueueFlagsNoWait = 0, + KernelEnqueueFlagsWaitKernel = 1, + KernelEnqueueFlagsWaitWorkGroup = 2, +}; + +enum KernelProfilingInfoShift { + KernelProfilingInfoCmdExecTimeShift = 0, +}; + +enum KernelProfilingInfoMask { + KernelProfilingInfoMaskNone = 0, + KernelProfilingInfoCmdExecTimeMask = 0x00000001, +}; + +enum Op { + OpNop = 0, + OpSource = 1, + OpSourceExtension = 2, + OpExtension = 3, + OpExtInstImport = 4, + OpMemoryModel = 5, + OpEntryPoint = 6, + OpExecutionMode = 7, + OpTypeVoid = 8, + OpTypeBool = 9, + OpTypeInt = 10, + OpTypeFloat = 11, + OpTypeVector = 12, + OpTypeMatrix = 13, + OpTypeSampler = 14, + OpTypeFilter = 15, + OpTypeArray = 16, + OpTypeRuntimeArray = 17, + OpTypeStruct = 18, + OpTypeOpaque = 19, + OpTypePointer = 20, + OpTypeFunction = 21, + OpTypeEvent = 22, + OpTypeDeviceEvent = 23, + OpTypeReserveId = 24, + OpTypeQueue = 25, + OpTypePipe = 26, + OpConstantTrue = 27, + OpConstantFalse = 28, + OpConstant = 29, + OpConstantComposite = 30, + OpConstantSampler = 31, + OpConstantNullPointer = 32, + OpConstantNullObject = 33, + OpSpecConstantTrue = 34, + OpSpecConstantFalse = 35, + OpSpecConstant = 36, + OpSpecConstantComposite = 37, + OpVariable = 38, + OpVariableArray = 39, + OpFunction = 40, + OpFunctionParameter = 41, + OpFunctionEnd = 42, + OpFunctionCall = 43, + OpExtInst = 44, + OpUndef = 45, + OpLoad = 46, + OpStore = 47, + OpPhi = 48, + OpDecorationGroup = 49, + OpDecorate = 50, + OpMemberDecorate = 51, + OpGroupDecorate = 52, + OpGroupMemberDecorate = 53, + OpName = 54, + OpMemberName = 55, + OpString = 56, + OpLine = 57, + OpVectorExtractDynamic = 58, + OpVectorInsertDynamic 
= 59, + OpVectorShuffle = 60, + OpCompositeConstruct = 61, + OpCompositeExtract = 62, + OpCompositeInsert = 63, + OpCopyObject = 64, + OpCopyMemory = 65, + OpCopyMemorySized = 66, + OpSampler = 67, + OpTextureSample = 68, + OpTextureSampleDref = 69, + OpTextureSampleLod = 70, + OpTextureSampleProj = 71, + OpTextureSampleGrad = 72, + OpTextureSampleOffset = 73, + OpTextureSampleProjLod = 74, + OpTextureSampleProjGrad = 75, + OpTextureSampleLodOffset = 76, + OpTextureSampleProjOffset = 77, + OpTextureSampleGradOffset = 78, + OpTextureSampleProjLodOffset = 79, + OpTextureSampleProjGradOffset = 80, + OpTextureFetchTexelLod = 81, + OpTextureFetchTexelOffset = 82, + OpTextureFetchSample = 83, + OpTextureFetchTexel = 84, + OpTextureGather = 85, + OpTextureGatherOffset = 86, + OpTextureGatherOffsets = 87, + OpTextureQuerySizeLod = 88, + OpTextureQuerySize = 89, + OpTextureQueryLod = 90, + OpTextureQueryLevels = 91, + OpTextureQuerySamples = 92, + OpAccessChain = 93, + OpInBoundsAccessChain = 94, + OpSNegate = 95, + OpFNegate = 96, + OpNot = 97, + OpAny = 98, + OpAll = 99, + OpConvertFToU = 100, + OpConvertFToS = 101, + OpConvertSToF = 102, + OpConvertUToF = 103, + OpUConvert = 104, + OpSConvert = 105, + OpFConvert = 106, + OpConvertPtrToU = 107, + OpConvertUToPtr = 108, + OpPtrCastToGeneric = 109, + OpGenericCastToPtr = 110, + OpBitcast = 111, + OpTranspose = 112, + OpIsNan = 113, + OpIsInf = 114, + OpIsFinite = 115, + OpIsNormal = 116, + OpSignBitSet = 117, + OpLessOrGreater = 118, + OpOrdered = 119, + OpUnordered = 120, + OpArrayLength = 121, + OpIAdd = 122, + OpFAdd = 123, + OpISub = 124, + OpFSub = 125, + OpIMul = 126, + OpFMul = 127, + OpUDiv = 128, + OpSDiv = 129, + OpFDiv = 130, + OpUMod = 131, + OpSRem = 132, + OpSMod = 133, + OpFRem = 134, + OpFMod = 135, + OpVectorTimesScalar = 136, + OpMatrixTimesScalar = 137, + OpVectorTimesMatrix = 138, + OpMatrixTimesVector = 139, + OpMatrixTimesMatrix = 140, + OpOuterProduct = 141, + OpDot = 142, + OpShiftRightLogical = 143, + OpShiftRightArithmetic = 144, + OpShiftLeftLogical = 145, + OpLogicalOr = 146, + OpLogicalXor = 147, + OpLogicalAnd = 148, + OpBitwiseOr = 149, + OpBitwiseXor = 150, + OpBitwiseAnd = 151, + OpSelect = 152, + OpIEqual = 153, + OpFOrdEqual = 154, + OpFUnordEqual = 155, + OpINotEqual = 156, + OpFOrdNotEqual = 157, + OpFUnordNotEqual = 158, + OpULessThan = 159, + OpSLessThan = 160, + OpFOrdLessThan = 161, + OpFUnordLessThan = 162, + OpUGreaterThan = 163, + OpSGreaterThan = 164, + OpFOrdGreaterThan = 165, + OpFUnordGreaterThan = 166, + OpULessThanEqual = 167, + OpSLessThanEqual = 168, + OpFOrdLessThanEqual = 169, + OpFUnordLessThanEqual = 170, + OpUGreaterThanEqual = 171, + OpSGreaterThanEqual = 172, + OpFOrdGreaterThanEqual = 173, + OpFUnordGreaterThanEqual = 174, + OpDPdx = 175, + OpDPdy = 176, + OpFwidth = 177, + OpDPdxFine = 178, + OpDPdyFine = 179, + OpFwidthFine = 180, + OpDPdxCoarse = 181, + OpDPdyCoarse = 182, + OpFwidthCoarse = 183, + OpEmitVertex = 184, + OpEndPrimitive = 185, + OpEmitStreamVertex = 186, + OpEndStreamPrimitive = 187, + OpControlBarrier = 188, + OpMemoryBarrier = 189, + OpImagePointer = 190, + OpAtomicInit = 191, + OpAtomicLoad = 192, + OpAtomicStore = 193, + OpAtomicExchange = 194, + OpAtomicCompareExchange = 195, + OpAtomicCompareExchangeWeak = 196, + OpAtomicIIncrement = 197, + OpAtomicIDecrement = 198, + OpAtomicIAdd = 199, + OpAtomicISub = 200, + OpAtomicUMin = 201, + OpAtomicUMax = 202, + OpAtomicAnd = 203, + OpAtomicOr = 204, + OpAtomicXor = 205, + OpLoopMerge = 206, + OpSelectionMerge = 207, + 
OpLabel = 208, + OpBranch = 209, + OpBranchConditional = 210, + OpSwitch = 211, + OpKill = 212, + OpReturn = 213, + OpReturnValue = 214, + OpUnreachable = 215, + OpLifetimeStart = 216, + OpLifetimeStop = 217, + OpCompileFlag = 218, + OpAsyncGroupCopy = 219, + OpWaitGroupEvents = 220, + OpGroupAll = 221, + OpGroupAny = 222, + OpGroupBroadcast = 223, + OpGroupIAdd = 224, + OpGroupFAdd = 225, + OpGroupFMin = 226, + OpGroupUMin = 227, + OpGroupSMin = 228, + OpGroupFMax = 229, + OpGroupUMax = 230, + OpGroupSMax = 231, + OpGenericCastToPtrExplicit = 232, + OpGenericPtrMemSemantics = 233, + OpReadPipe = 234, + OpWritePipe = 235, + OpReservedReadPipe = 236, + OpReservedWritePipe = 237, + OpReserveReadPipePackets = 238, + OpReserveWritePipePackets = 239, + OpCommitReadPipe = 240, + OpCommitWritePipe = 241, + OpIsValidReserveId = 242, + OpGetNumPipePackets = 243, + OpGetMaxPipePackets = 244, + OpGroupReserveReadPipePackets = 245, + OpGroupReserveWritePipePackets = 246, + OpGroupCommitReadPipe = 247, + OpGroupCommitWritePipe = 248, + OpEnqueueMarker = 249, + OpEnqueueKernel = 250, + OpGetKernelNDrangeSubGroupCount = 251, + OpGetKernelNDrangeMaxSubGroupSize = 252, + OpGetKernelWorkGroupSize = 253, + OpGetKernelPreferredWorkGroupSizeMultiple = 254, + OpRetainEvent = 255, + OpReleaseEvent = 256, + OpCreateUserEvent = 257, + OpIsValidEvent = 258, + OpSetUserEventStatus = 259, + OpCaptureEventProfilingInfo = 260, + OpGetDefaultQueue = 261, + OpBuildNDRange = 262, + OpSatConvertSToU = 263, + OpSatConvertUToS = 264, + OpAtomicIMin = 265, + OpAtomicIMax = 266, +}; + +}; // end namespace spv + +#endif // #ifdef __cplusplus + + +#ifndef __cplusplus + +const int SpvMagicNumber = 0x07230203; +const int SpvVersion = 99; + +typedef unsigned int SpvId; + +const unsigned int SpvOpCodeMask = 0xFFFF; +const unsigned int SpvWordCountShift = 16; + +typedef enum SpvSourceLanguage_ { + SpvSourceLanguageUnknown = 0, + SpvSourceLanguageESSL = 1, + SpvSourceLanguageGLSL = 2, + SpvSourceLanguageOpenCL = 3, +} SpvSourceLanguage; + +typedef enum SpvExecutionModel_ { + SpvExecutionModelVertex = 0, + SpvExecutionModelTessellationControl = 1, + SpvExecutionModelTessellationEvaluation = 2, + SpvExecutionModelGeometry = 3, + SpvExecutionModelFragment = 4, + SpvExecutionModelGLCompute = 5, + SpvExecutionModelKernel = 6, +} SpvExecutionModel; + +typedef enum SpvAddressingModel_ { + SpvAddressingModelLogical = 0, + SpvAddressingModelPhysical32 = 1, + SpvAddressingModelPhysical64 = 2, +} SpvAddressingModel; + +typedef enum SpvMemoryModel_ { + SpvMemoryModelSimple = 0, + SpvMemoryModelGLSL450 = 1, + SpvMemoryModelOpenCL12 = 2, + SpvMemoryModelOpenCL20 = 3, + SpvMemoryModelOpenCL21 = 4, +} SpvMemoryModel; + +typedef enum SpvExecutionMode_ { + SpvExecutionModeInvocations = 0, + SpvExecutionModeSpacingEqual = 1, + SpvExecutionModeSpacingFractionalEven = 2, + SpvExecutionModeSpacingFractionalOdd = 3, + SpvExecutionModeVertexOrderCw = 4, + SpvExecutionModeVertexOrderCcw = 5, + SpvExecutionModePixelCenterInteger = 6, + SpvExecutionModeOriginUpperLeft = 7, + SpvExecutionModeEarlyFragmentTests = 8, + SpvExecutionModePointMode = 9, + SpvExecutionModeXfb = 10, + SpvExecutionModeDepthReplacing = 11, + SpvExecutionModeDepthAny = 12, + SpvExecutionModeDepthGreater = 13, + SpvExecutionModeDepthLess = 14, + SpvExecutionModeDepthUnchanged = 15, + SpvExecutionModeLocalSize = 16, + SpvExecutionModeLocalSizeHint = 17, + SpvExecutionModeInputPoints = 18, + SpvExecutionModeInputLines = 19, + SpvExecutionModeInputLinesAdjacency = 20, + 
SpvExecutionModeInputTriangles = 21, + SpvExecutionModeInputTrianglesAdjacency = 22, + SpvExecutionModeInputQuads = 23, + SpvExecutionModeInputIsolines = 24, + SpvExecutionModeOutputVertices = 25, + SpvExecutionModeOutputPoints = 26, + SpvExecutionModeOutputLineStrip = 27, + SpvExecutionModeOutputTriangleStrip = 28, + SpvExecutionModeVecTypeHint = 29, + SpvExecutionModeContractionOff = 30, +} SpvExecutionMode; + +typedef enum SpvStorageClass_ { + SpvStorageClassUniformConstant = 0, + SpvStorageClassInput = 1, + SpvStorageClassUniform = 2, + SpvStorageClassOutput = 3, + SpvStorageClassWorkgroupLocal = 4, + SpvStorageClassWorkgroupGlobal = 5, + SpvStorageClassPrivateGlobal = 6, + SpvStorageClassFunction = 7, + SpvStorageClassGeneric = 8, + SpvStorageClassPrivate = 9, + SpvStorageClassAtomicCounter = 10, +} SpvStorageClass; + +typedef enum SpvDim_ { + SpvDim1D = 0, + SpvDim2D = 1, + SpvDim3D = 2, + SpvDimCube = 3, + SpvDimRect = 4, + SpvDimBuffer = 5, +} SpvDim; + +typedef enum SpvSamplerAddressingMode_ { + SpvSamplerAddressingModeNone = 0, + SpvSamplerAddressingModeClampToEdge = 1, + SpvSamplerAddressingModeClamp = 2, + SpvSamplerAddressingModeRepeat = 3, + SpvSamplerAddressingModeRepeatMirrored = 4, +} SpvSamplerAddressingMode; + +typedef enum SpvSamplerFilterMode_ { + SpvSamplerFilterModeNearest = 0, + SpvSamplerFilterModeLinear = 1, +} SpvSamplerFilterMode; + +typedef enum SpvFPFastMathModeShift_ { + SpvFPFastMathModeNotNaNShift = 0, + SpvFPFastMathModeNotInfShift = 1, + SpvFPFastMathModeNSZShift = 2, + SpvFPFastMathModeAllowRecipShift = 3, + SpvFPFastMathModeFastShift = 4, +} SpvFPFastMathModeShift; + +typedef enum SpvFPFastMathModeMask_ { + SpvFPFastMathModeMaskNone = 0, + SpvFPFastMathModeNotNaNMask = 0x00000001, + SpvFPFastMathModeNotInfMask = 0x00000002, + SpvFPFastMathModeNSZMask = 0x00000004, + SpvFPFastMathModeAllowRecipMask = 0x00000008, + SpvFPFastMathModeFastMask = 0x00000010, +} SpvFPFastMathModeMask; + +typedef enum SpvFPRoundingMode_ { + SpvFPRoundingModeRTE = 0, + SpvFPRoundingModeRTZ = 1, + SpvFPRoundingModeRTP = 2, + SpvFPRoundingModeRTN = 3, +} SpvFPRoundingMode; + +typedef enum SpvLinkageType_ { + SpvLinkageTypeExport = 0, + SpvLinkageTypeImport = 1, +} SpvLinkageType; + +typedef enum SpvAccessQualifier_ { + SpvAccessQualifierReadOnly = 0, + SpvAccessQualifierWriteOnly = 1, + SpvAccessQualifierReadWrite = 2, +} SpvAccessQualifier; + +typedef enum SpvFunctionParameterAttribute_ { + SpvFunctionParameterAttributeZext = 0, + SpvFunctionParameterAttributeSext = 1, + SpvFunctionParameterAttributeByVal = 2, + SpvFunctionParameterAttributeSret = 3, + SpvFunctionParameterAttributeNoAlias = 4, + SpvFunctionParameterAttributeNoCapture = 5, + SpvFunctionParameterAttributeSVM = 6, + SpvFunctionParameterAttributeNoWrite = 7, + SpvFunctionParameterAttributeNoReadWrite = 8, +} SpvFunctionParameterAttribute; + +typedef enum SpvDecoration_ { + SpvDecorationPrecisionLow = 0, + SpvDecorationPrecisionMedium = 1, + SpvDecorationPrecisionHigh = 2, + SpvDecorationBlock = 3, + SpvDecorationBufferBlock = 4, + SpvDecorationRowMajor = 5, + SpvDecorationColMajor = 6, + SpvDecorationGLSLShared = 7, + SpvDecorationGLSLStd140 = 8, + SpvDecorationGLSLStd430 = 9, + SpvDecorationGLSLPacked = 10, + SpvDecorationSmooth = 11, + SpvDecorationNoperspective = 12, + SpvDecorationFlat = 13, + SpvDecorationPatch = 14, + SpvDecorationCentroid = 15, + SpvDecorationSample = 16, + SpvDecorationInvariant = 17, + SpvDecorationRestrict = 18, + SpvDecorationAliased = 19, + SpvDecorationVolatile = 20, + 
SpvDecorationConstant = 21, + SpvDecorationCoherent = 22, + SpvDecorationNonwritable = 23, + SpvDecorationNonreadable = 24, + SpvDecorationUniform = 25, + SpvDecorationNoStaticUse = 26, + SpvDecorationCPacked = 27, + SpvDecorationSaturatedConversion = 28, + SpvDecorationStream = 29, + SpvDecorationLocation = 30, + SpvDecorationComponent = 31, + SpvDecorationIndex = 32, + SpvDecorationBinding = 33, + SpvDecorationDescriptorSet = 34, + SpvDecorationOffset = 35, + SpvDecorationAlignment = 36, + SpvDecorationXfbBuffer = 37, + SpvDecorationStride = 38, + SpvDecorationBuiltIn = 39, + SpvDecorationFuncParamAttr = 40, + SpvDecorationFPRoundingMode = 41, + SpvDecorationFPFastMathMode = 42, + SpvDecorationLinkageAttributes = 43, + SpvDecorationSpecId = 44, +} SpvDecoration; + +typedef enum SpvBuiltIn_ { + SpvBuiltInPosition = 0, + SpvBuiltInPointSize = 1, + SpvBuiltInClipVertex = 2, + SpvBuiltInClipDistance = 3, + SpvBuiltInCullDistance = 4, + SpvBuiltInVertexId = 5, + SpvBuiltInInstanceId = 6, + SpvBuiltInPrimitiveId = 7, + SpvBuiltInInvocationId = 8, + SpvBuiltInLayer = 9, + SpvBuiltInViewportIndex = 10, + SpvBuiltInTessLevelOuter = 11, + SpvBuiltInTessLevelInner = 12, + SpvBuiltInTessCoord = 13, + SpvBuiltInPatchVertices = 14, + SpvBuiltInFragCoord = 15, + SpvBuiltInPointCoord = 16, + SpvBuiltInFrontFacing = 17, + SpvBuiltInSampleId = 18, + SpvBuiltInSamplePosition = 19, + SpvBuiltInSampleMask = 20, + SpvBuiltInFragColor = 21, + SpvBuiltInFragDepth = 22, + SpvBuiltInHelperInvocation = 23, + SpvBuiltInNumWorkgroups = 24, + SpvBuiltInWorkgroupSize = 25, + SpvBuiltInWorkgroupId = 26, + SpvBuiltInLocalInvocationId = 27, + SpvBuiltInGlobalInvocationId = 28, + SpvBuiltInLocalInvocationIndex = 29, + SpvBuiltInWorkDim = 30, + SpvBuiltInGlobalSize = 31, + SpvBuiltInEnqueuedWorkgroupSize = 32, + SpvBuiltInGlobalOffset = 33, + SpvBuiltInGlobalLinearId = 34, + SpvBuiltInWorkgroupLinearId = 35, + SpvBuiltInSubgroupSize = 36, + SpvBuiltInSubgroupMaxSize = 37, + SpvBuiltInNumSubgroups = 38, + SpvBuiltInNumEnqueuedSubgroups = 39, + SpvBuiltInSubgroupId = 40, + SpvBuiltInSubgroupLocalInvocationId = 41, +} SpvBuiltIn; + +typedef enum SpvSelectionControlShift_ { + SpvSelectionControlFlattenShift = 0, + SpvSelectionControlDontFlattenShift = 1, +} SpvSelectionControlShift; + +typedef enum SpvSelectionControlMask_ { + SpvSelectionControlMaskNone = 0, + SpvSelectionControlFlattenMask = 0x00000001, + SpvSelectionControlDontFlattenMask = 0x00000002, +} SpvSelectionControlMask; + +typedef enum SpvLoopControlShift_ { + SpvLoopControlUnrollShift = 0, + SpvLoopControlDontUnrollShift = 1, +} SpvLoopControlShift; + +typedef enum SpvLoopControlMask_ { + SpvLoopControlMaskNone = 0, + SpvLoopControlUnrollMask = 0x00000001, + SpvLoopControlDontUnrollMask = 0x00000002, +} SpvLoopControlMask; + +typedef enum SpvFunctionControlShift_ { + SpvFunctionControlInlineShift = 0, + SpvFunctionControlDontInlineShift = 1, + SpvFunctionControlPureShift = 2, + SpvFunctionControlConstShift = 3, +} SpvFunctionControlShift; + +typedef enum SpvFunctionControlMask_ { + SpvFunctionControlMaskNone = 0, + SpvFunctionControlInlineMask = 0x00000001, + SpvFunctionControlDontInlineMask = 0x00000002, + SpvFunctionControlPureMask = 0x00000004, + SpvFunctionControlConstMask = 0x00000008, +} SpvFunctionControlMask; + +typedef enum SpvMemorySemanticsShift_ { + SpvMemorySemanticsRelaxedShift = 0, + SpvMemorySemanticsSequentiallyConsistentShift = 1, + SpvMemorySemanticsAcquireShift = 2, + SpvMemorySemanticsReleaseShift = 3, + SpvMemorySemanticsUniformMemoryShift = 
4, + SpvMemorySemanticsSubgroupMemoryShift = 5, + SpvMemorySemanticsWorkgroupLocalMemoryShift = 6, + SpvMemorySemanticsWorkgroupGlobalMemoryShift = 7, + SpvMemorySemanticsAtomicCounterMemoryShift = 8, + SpvMemorySemanticsImageMemoryShift = 9, +} SpvMemorySemanticsShift; + +typedef enum SpvMemorySemanticsMask_ { + SpvMemorySemanticsMaskNone = 0, + SpvMemorySemanticsRelaxedMask = 0x00000001, + SpvMemorySemanticsSequentiallyConsistentMask = 0x00000002, + SpvMemorySemanticsAcquireMask = 0x00000004, + SpvMemorySemanticsReleaseMask = 0x00000008, + SpvMemorySemanticsUniformMemoryMask = 0x00000010, + SpvMemorySemanticsSubgroupMemoryMask = 0x00000020, + SpvMemorySemanticsWorkgroupLocalMemoryMask = 0x00000040, + SpvMemorySemanticsWorkgroupGlobalMemoryMask = 0x00000080, + SpvMemorySemanticsAtomicCounterMemoryMask = 0x00000100, + SpvMemorySemanticsImageMemoryMask = 0x00000200, +} SpvMemorySemanticsMask; + +typedef enum SpvMemoryAccessShift_ { + SpvMemoryAccessVolatileShift = 0, + SpvMemoryAccessAlignedShift = 1, +} SpvMemoryAccessShift; + +typedef enum SpvMemoryAccessMask_ { + SpvMemoryAccessMaskNone = 0, + SpvMemoryAccessVolatileMask = 0x00000001, + SpvMemoryAccessAlignedMask = 0x00000002, +} SpvMemoryAccessMask; + +typedef enum SpvExecutionScope_ { + SpvExecutionScopeCrossDevice = 0, + SpvExecutionScopeDevice = 1, + SpvExecutionScopeWorkgroup = 2, + SpvExecutionScopeSubgroup = 3, +} SpvExecutionScope; + +typedef enum SpvGroupOperation_ { + SpvGroupOperationReduce = 0, + SpvGroupOperationInclusiveScan = 1, + SpvGroupOperationExclusiveScan = 2, +} SpvGroupOperation; + +typedef enum SpvKernelEnqueueFlags_ { + SpvKernelEnqueueFlagsNoWait = 0, + SpvKernelEnqueueFlagsWaitKernel = 1, + SpvKernelEnqueueFlagsWaitWorkGroup = 2, +} SpvKernelEnqueueFlags; + +typedef enum SpvKernelProfilingInfoShift_ { + SpvKernelProfilingInfoCmdExecTimeShift = 0, +} SpvKernelProfilingInfoShift; + +typedef enum SpvKernelProfilingInfoMask_ { + SpvKernelProfilingInfoMaskNone = 0, + SpvKernelProfilingInfoCmdExecTimeMask = 0x00000001, +} SpvKernelProfilingInfoMask; + +typedef enum SpvOp_ { + SpvOpNop = 0, + SpvOpSource = 1, + SpvOpSourceExtension = 2, + SpvOpExtension = 3, + SpvOpExtInstImport = 4, + SpvOpMemoryModel = 5, + SpvOpEntryPoint = 6, + SpvOpExecutionMode = 7, + SpvOpTypeVoid = 8, + SpvOpTypeBool = 9, + SpvOpTypeInt = 10, + SpvOpTypeFloat = 11, + SpvOpTypeVector = 12, + SpvOpTypeMatrix = 13, + SpvOpTypeSampler = 14, + SpvOpTypeFilter = 15, + SpvOpTypeArray = 16, + SpvOpTypeRuntimeArray = 17, + SpvOpTypeStruct = 18, + SpvOpTypeOpaque = 19, + SpvOpTypePointer = 20, + SpvOpTypeFunction = 21, + SpvOpTypeEvent = 22, + SpvOpTypeDeviceEvent = 23, + SpvOpTypeReserveId = 24, + SpvOpTypeQueue = 25, + SpvOpTypePipe = 26, + SpvOpConstantTrue = 27, + SpvOpConstantFalse = 28, + SpvOpConstant = 29, + SpvOpConstantComposite = 30, + SpvOpConstantSampler = 31, + SpvOpConstantNullPointer = 32, + SpvOpConstantNullObject = 33, + SpvOpSpecConstantTrue = 34, + SpvOpSpecConstantFalse = 35, + SpvOpSpecConstant = 36, + SpvOpSpecConstantComposite = 37, + SpvOpVariable = 38, + SpvOpVariableArray = 39, + SpvOpFunction = 40, + SpvOpFunctionParameter = 41, + SpvOpFunctionEnd = 42, + SpvOpFunctionCall = 43, + SpvOpExtInst = 44, + SpvOpUndef = 45, + SpvOpLoad = 46, + SpvOpStore = 47, + SpvOpPhi = 48, + SpvOpDecorationGroup = 49, + SpvOpDecorate = 50, + SpvOpMemberDecorate = 51, + SpvOpGroupDecorate = 52, + SpvOpGroupMemberDecorate = 53, + SpvOpName = 54, + SpvOpMemberName = 55, + SpvOpString = 56, + SpvOpLine = 57, + SpvOpVectorExtractDynamic = 58, + 
SpvOpVectorInsertDynamic = 59, + SpvOpVectorShuffle = 60, + SpvOpCompositeConstruct = 61, + SpvOpCompositeExtract = 62, + SpvOpCompositeInsert = 63, + SpvOpCopyObject = 64, + SpvOpCopyMemory = 65, + SpvOpCopyMemorySized = 66, + SpvOpSampler = 67, + SpvOpTextureSample = 68, + SpvOpTextureSampleDref = 69, + SpvOpTextureSampleLod = 70, + SpvOpTextureSampleProj = 71, + SpvOpTextureSampleGrad = 72, + SpvOpTextureSampleOffset = 73, + SpvOpTextureSampleProjLod = 74, + SpvOpTextureSampleProjGrad = 75, + SpvOpTextureSampleLodOffset = 76, + SpvOpTextureSampleProjOffset = 77, + SpvOpTextureSampleGradOffset = 78, + SpvOpTextureSampleProjLodOffset = 79, + SpvOpTextureSampleProjGradOffset = 80, + SpvOpTextureFetchTexelLod = 81, + SpvOpTextureFetchTexelOffset = 82, + SpvOpTextureFetchSample = 83, + SpvOpTextureFetchTexel = 84, + SpvOpTextureGather = 85, + SpvOpTextureGatherOffset = 86, + SpvOpTextureGatherOffsets = 87, + SpvOpTextureQuerySizeLod = 88, + SpvOpTextureQuerySize = 89, + SpvOpTextureQueryLod = 90, + SpvOpTextureQueryLevels = 91, + SpvOpTextureQuerySamples = 92, + SpvOpAccessChain = 93, + SpvOpInBoundsAccessChain = 94, + SpvOpSNegate = 95, + SpvOpFNegate = 96, + SpvOpNot = 97, + SpvOpAny = 98, + SpvOpAll = 99, + SpvOpConvertFToU = 100, + SpvOpConvertFToS = 101, + SpvOpConvertSToF = 102, + SpvOpConvertUToF = 103, + SpvOpUConvert = 104, + SpvOpSConvert = 105, + SpvOpFConvert = 106, + SpvOpConvertPtrToU = 107, + SpvOpConvertUToPtr = 108, + SpvOpPtrCastToGeneric = 109, + SpvOpGenericCastToPtr = 110, + SpvOpBitcast = 111, + SpvOpTranspose = 112, + SpvOpIsNan = 113, + SpvOpIsInf = 114, + SpvOpIsFinite = 115, + SpvOpIsNormal = 116, + SpvOpSignBitSet = 117, + SpvOpLessOrGreater = 118, + SpvOpOrdered = 119, + SpvOpUnordered = 120, + SpvOpArrayLength = 121, + SpvOpIAdd = 122, + SpvOpFAdd = 123, + SpvOpISub = 124, + SpvOpFSub = 125, + SpvOpIMul = 126, + SpvOpFMul = 127, + SpvOpUDiv = 128, + SpvOpSDiv = 129, + SpvOpFDiv = 130, + SpvOpUMod = 131, + SpvOpSRem = 132, + SpvOpSMod = 133, + SpvOpFRem = 134, + SpvOpFMod = 135, + SpvOpVectorTimesScalar = 136, + SpvOpMatrixTimesScalar = 137, + SpvOpVectorTimesMatrix = 138, + SpvOpMatrixTimesVector = 139, + SpvOpMatrixTimesMatrix = 140, + SpvOpOuterProduct = 141, + SpvOpDot = 142, + SpvOpShiftRightLogical = 143, + SpvOpShiftRightArithmetic = 144, + SpvOpShiftLeftLogical = 145, + SpvOpLogicalOr = 146, + SpvOpLogicalXor = 147, + SpvOpLogicalAnd = 148, + SpvOpBitwiseOr = 149, + SpvOpBitwiseXor = 150, + SpvOpBitwiseAnd = 151, + SpvOpSelect = 152, + SpvOpIEqual = 153, + SpvOpFOrdEqual = 154, + SpvOpFUnordEqual = 155, + SpvOpINotEqual = 156, + SpvOpFOrdNotEqual = 157, + SpvOpFUnordNotEqual = 158, + SpvOpULessThan = 159, + SpvOpSLessThan = 160, + SpvOpFOrdLessThan = 161, + SpvOpFUnordLessThan = 162, + SpvOpUGreaterThan = 163, + SpvOpSGreaterThan = 164, + SpvOpFOrdGreaterThan = 165, + SpvOpFUnordGreaterThan = 166, + SpvOpULessThanEqual = 167, + SpvOpSLessThanEqual = 168, + SpvOpFOrdLessThanEqual = 169, + SpvOpFUnordLessThanEqual = 170, + SpvOpUGreaterThanEqual = 171, + SpvOpSGreaterThanEqual = 172, + SpvOpFOrdGreaterThanEqual = 173, + SpvOpFUnordGreaterThanEqual = 174, + SpvOpDPdx = 175, + SpvOpDPdy = 176, + SpvOpFwidth = 177, + SpvOpDPdxFine = 178, + SpvOpDPdyFine = 179, + SpvOpFwidthFine = 180, + SpvOpDPdxCoarse = 181, + SpvOpDPdyCoarse = 182, + SpvOpFwidthCoarse = 183, + SpvOpEmitVertex = 184, + SpvOpEndPrimitive = 185, + SpvOpEmitStreamVertex = 186, + SpvOpEndStreamPrimitive = 187, + SpvOpControlBarrier = 188, + SpvOpMemoryBarrier = 189, + SpvOpImagePointer = 190, + 
SpvOpAtomicInit = 191, + SpvOpAtomicLoad = 192, + SpvOpAtomicStore = 193, + SpvOpAtomicExchange = 194, + SpvOpAtomicCompareExchange = 195, + SpvOpAtomicCompareExchangeWeak = 196, + SpvOpAtomicIIncrement = 197, + SpvOpAtomicIDecrement = 198, + SpvOpAtomicIAdd = 199, + SpvOpAtomicISub = 200, + SpvOpAtomicUMin = 201, + SpvOpAtomicUMax = 202, + SpvOpAtomicAnd = 203, + SpvOpAtomicOr = 204, + SpvOpAtomicXor = 205, + SpvOpLoopMerge = 206, + SpvOpSelectionMerge = 207, + SpvOpLabel = 208, + SpvOpBranch = 209, + SpvOpBranchConditional = 210, + SpvOpSwitch = 211, + SpvOpKill = 212, + SpvOpReturn = 213, + SpvOpReturnValue = 214, + SpvOpUnreachable = 215, + SpvOpLifetimeStart = 216, + SpvOpLifetimeStop = 217, + SpvOpCompileFlag = 218, + SpvOpAsyncGroupCopy = 219, + SpvOpWaitGroupEvents = 220, + SpvOpGroupAll = 221, + SpvOpGroupAny = 222, + SpvOpGroupBroadcast = 223, + SpvOpGroupIAdd = 224, + SpvOpGroupFAdd = 225, + SpvOpGroupFMin = 226, + SpvOpGroupUMin = 227, + SpvOpGroupSMin = 228, + SpvOpGroupFMax = 229, + SpvOpGroupUMax = 230, + SpvOpGroupSMax = 231, + SpvOpGenericCastToPtrExplicit = 232, + SpvOpGenericPtrMemSemantics = 233, + SpvOpReadPipe = 234, + SpvOpWritePipe = 235, + SpvOpReservedReadPipe = 236, + SpvOpReservedWritePipe = 237, + SpvOpReserveReadPipePackets = 238, + SpvOpReserveWritePipePackets = 239, + SpvOpCommitReadPipe = 240, + SpvOpCommitWritePipe = 241, + SpvOpIsValidReserveId = 242, + SpvOpGetNumPipePackets = 243, + SpvOpGetMaxPipePackets = 244, + SpvOpGroupReserveReadPipePackets = 245, + SpvOpGroupReserveWritePipePackets = 246, + SpvOpGroupCommitReadPipe = 247, + SpvOpGroupCommitWritePipe = 248, + SpvOpEnqueueMarker = 249, + SpvOpEnqueueKernel = 250, + SpvOpGetKernelNDrangeSubGroupCount = 251, + SpvOpGetKernelNDrangeMaxSubGroupSize = 252, + SpvOpGetKernelWorkGroupSize = 253, + SpvOpGetKernelPreferredWorkGroupSizeMultiple = 254, + SpvOpRetainEvent = 255, + SpvOpReleaseEvent = 256, + SpvOpCreateUserEvent = 257, + SpvOpIsValidEvent = 258, + SpvOpSetUserEventStatus = 259, + SpvOpCaptureEventProfilingInfo = 260, + SpvOpGetDefaultQueue = 261, + SpvOpBuildNDRange = 262, + SpvOpSatConvertSToU = 263, + SpvOpSatConvertUToS = 264, + SpvOpAtomicIMin = 265, + SpvOpAtomicIMax = 266, +} SpvOp; + +#endif // #ifndef __cplusplus + +#endif // #ifndef spirv_H From 98452cd8ae22d2c7448b87c9090b3f1be09d9bc5 Mon Sep 17 00:00:00 2001 From: Jason Ekstrand Date: Tue, 28 Apr 2015 17:43:16 -0700 Subject: [PATCH 02/44] nir: Add the start of a SPIR-V to NIR translator At the moment, it can handle the very basics of strings and can ignore debug instructions. It also has basic support for decorations. 
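
As a quick sketch of the binary layout the new parser assumes (using the
constants from the imported spirv.h header), a module is a flat array of
32-bit words: a five-word header followed by instructions whose first word
packs the total word count together with the opcode:

    /* Module header, validated at the top of spirv_to_nir() */
    words[0] == SpvMagicNumber   /* 0x07230203 */
    words[1] == SpvVersion       /* 99, the Version constant in spirv.h */
    words[2]                     /* generator magic; ignored */
    words[3]                     /* bound: every result <id> is < this */
    words[4] == 0                /* reserved schema word */

    /* Per-instruction decoding in the main loop */
    SpvOp    opcode = w[0] & SpvOpCodeMask;       /* low 16 bits  */
    unsigned count  = w[0] >> SpvWordCountShift;  /* high 16 bits, includes w[0] */
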
--- src/glsl/Makefile.sources | 2 + src/glsl/nir/nir_spirv.h | 38 +++ src/glsl/nir/spirv_to_nir.c | 453 ++++++++++++++++++++++++++++++++++++ 3 files changed, 493 insertions(+) create mode 100644 src/glsl/nir/nir_spirv.h create mode 100644 src/glsl/nir/spirv_to_nir.c diff --git a/src/glsl/Makefile.sources b/src/glsl/Makefile.sources index d784a810723..be6e4ecf839 100644 --- a/src/glsl/Makefile.sources +++ b/src/glsl/Makefile.sources @@ -59,6 +59,7 @@ NIR_FILES = \ nir/nir_remove_dead_variables.c \ nir/nir_search.c \ nir/nir_search.h \ + nir/nir_spirv.h \ nir/nir_split_var_copies.c \ nir/nir_sweep.c \ nir/nir_to_ssa.c \ @@ -68,6 +69,7 @@ NIR_FILES = \ nir/nir_worklist.c \ nir/nir_worklist.h \ nir/nir_types.cpp \ + nir/spirv_to_nir.c \ $(NIR_GENERATED_FILES) # libglsl diff --git a/src/glsl/nir/nir_spirv.h b/src/glsl/nir/nir_spirv.h new file mode 100644 index 00000000000..789d30cd672 --- /dev/null +++ b/src/glsl/nir/nir_spirv.h @@ -0,0 +1,38 @@ +/* + * Copyright © 2015 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + * + * Authors: + * Jason Ekstrand (jason@jlekstrand.net) + * + */ + +#pragma once + +#ifndef _NIR_SPIRV_H_ +#define _NIR_SPIRV_H_ + +#include "nir.h" + +nir_shader *spirv_to_nir(const uint32_t *words, size_t word_count, + const nir_shader_compiler_options *options); + +#endif /* _NIR_SPIRV_H_ */ diff --git a/src/glsl/nir/spirv_to_nir.c b/src/glsl/nir/spirv_to_nir.c new file mode 100644 index 00000000000..02b99db17ae --- /dev/null +++ b/src/glsl/nir/spirv_to_nir.c @@ -0,0 +1,453 @@ +/* + * Copyright © 2015 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + * + * Authors: + * Jason Ekstrand (jason@jlekstrand.net) + * + */ + +#include "nir_spirv.h" +#include "spirv.h" + +struct vtn_decoration; + +enum vtn_value_type { + vtn_value_type_invalid = 0, + vtn_value_type_undef, + vtn_value_type_string, + vtn_value_type_decoration_group, + vtn_value_type_ssa, + vtn_value_type_deref, +}; + +struct vtn_value { + enum vtn_value_type value_type; + const char *name; + struct vtn_decoration *decoration; + union { + void *ptr; + char *str; + nir_ssa_def *ssa; + nir_deref_var *deref; + }; +}; + +struct vtn_decoration { + struct vtn_decoration *next; + const uint32_t *literals; + struct vtn_value *group; + SpvDecoration decoration; +}; + +struct vtn_builder { + nir_shader *shader; + nir_function_impl *impl; + + unsigned value_id_bound; + struct vtn_value *values; +}; + +static void +vtn_push_value(struct vtn_builder *b, uint32_t value_id, + enum vtn_value_type value_type, void *ptr) +{ + assert(value_id < b->value_id_bound); + assert(b->values[value_id].value_type == vtn_value_type_invalid); + + b->values[value_id].value_type = value_type; + b->values[value_id].ptr = ptr; +} + +static void +vtn_push_token(struct vtn_builder *b, uint32_t value_id, + enum vtn_value_type value_type) +{ + vtn_push_value(b, value_id, value_type, NULL); +} + +static char * +vtn_string_literal(struct vtn_builder *b, const uint32_t *words, + unsigned word_count) +{ + return ralloc_strndup(b, (char *)words, (word_count - 2) * sizeof(*words)); +} + +typedef void (*decoration_foreach_cb)(struct vtn_builder *, + struct vtn_value *, + const struct vtn_decoration *, + void *); + +static void +_foreach_decoration_helper(struct vtn_builder *b, + struct vtn_value *base_value, + struct vtn_value *value, + decoration_foreach_cb cb, void *data) +{ + for (struct vtn_decoration *dec = value->decoration; dec; dec = dec->next) { + if (dec->group) { + assert(dec->group->value_type == vtn_value_type_decoration_group); + _foreach_decoration_helper(b, base_value, dec->group, cb, data); + } else { + cb(b, base_value, dec, data); + } + } +} + +/** Iterates (recursively if needed) over all of the decorations on a value + * + * This function iterates over all of the decorations applied to a given + * value. If it encounters a decoration group, it recurses into the group + * and iterates over all of those decorations as well. 
+ */ +static void +vtn_foreach_decoration(struct vtn_builder *b, struct vtn_value *value, + decoration_foreach_cb cb, void *data) +{ + _foreach_decoration_helper(b, value, value, cb, data); +} + +static void +vtn_handle_decoration(struct vtn_builder *b, SpvOp opcode, + const uint32_t *w, unsigned count) +{ + switch (opcode) { + case SpvOpDecorationGroup: + vtn_push_token(b, w[1], vtn_value_type_undef); + break; + + case SpvOpDecorate: { + struct vtn_value *val = &b->values[w[1]]; + + struct vtn_decoration *dec = rzalloc(b, struct vtn_decoration); + dec->decoration = w[2]; + dec->literals = &w[3]; + + /* Link into the list */ + dec->next = val->decoration; + val->decoration = dec; + break; + } + + case SpvOpGroupDecorate: { + struct vtn_value *group = &b->values[w[1]]; + assert(group->value_type == vtn_value_type_decoration_group); + + for (unsigned i = 2; i < count; i++) { + struct vtn_value *val = &b->values[w[i]]; + struct vtn_decoration *dec = rzalloc(b, struct vtn_decoration); + dec->group = group; + + /* Link into the list */ + dec->next = val->decoration; + val->decoration = dec; + } + break; + } + + case SpvOpGroupMemberDecorate: + assert(!"Bad instruction. Khronos Bug #13513"); + break; + + default: + unreachable("Unhandled opcode"); + } +} + +static void +vtn_handle_type(struct vtn_builder *b, SpvOp opcode, + const uint32_t *w, unsigned count) +{ + unreachable("Unhandled opcode"); +} + +static void +vtn_handle_constant(struct vtn_builder *b, SpvOp opcode, + const uint32_t *w, unsigned count) +{ + unreachable("Unhandled opcode"); +} + +static void +vtn_handle_variables(struct vtn_builder *b, SpvOp opcode, + const uint32_t *w, unsigned count) +{ + unreachable("Unhandled opcode"); +} + +static void +vtn_handle_texture(struct vtn_builder *b, SpvOp opcode, + const uint32_t *w, unsigned count) +{ + unreachable("Unhandled opcode"); +} + +static void +vtn_handle_alu(struct vtn_builder *b, SpvOp opcode, + const uint32_t *w, unsigned count) +{ + unreachable("Unhandled opcode"); +} + +static void +vtn_handle_instruction(struct vtn_builder *b, SpvOp opcode, + const uint32_t *w, unsigned count) +{ + switch (opcode) { + case SpvOpSource: + case SpvOpSourceExtension: + case SpvOpMemberName: + case SpvOpLine: + /* Unhandled, but these are for debug so that's ok. 
*/ + break; + + case SpvOpName: + b->values[w[1]].name = vtn_string_literal(b, &w[2], count - 2); + break; + + case SpvOpString: + vtn_push_value(b, w[1], vtn_value_type_string, + vtn_string_literal(b, &w[2], count - 2)); + break; + + case SpvOpUndef: + vtn_push_token(b, w[2], vtn_value_type_undef); + break; + + case SpvOpTypeVoid: + case SpvOpTypeBool: + case SpvOpTypeInt: + case SpvOpTypeFloat: + case SpvOpTypeVector: + case SpvOpTypeMatrix: + case SpvOpTypeSampler: + case SpvOpTypeArray: + case SpvOpTypeRuntimeArray: + case SpvOpTypeStruct: + case SpvOpTypeOpaque: + case SpvOpTypePointer: + case SpvOpTypeFunction: + case SpvOpTypeEvent: + case SpvOpTypeDeviceEvent: + case SpvOpTypeReserveId: + case SpvOpTypeQueue: + case SpvOpTypePipe: + vtn_handle_type(b, opcode, w, count); + break; + + case SpvOpConstantTrue: + case SpvOpConstantFalse: + case SpvOpConstant: + case SpvOpConstantComposite: + case SpvOpConstantSampler: + case SpvOpConstantNullPointer: + case SpvOpConstantNullObject: + case SpvOpSpecConstantTrue: + case SpvOpSpecConstantFalse: + case SpvOpSpecConstant: + case SpvOpSpecConstantComposite: + vtn_handle_constant(b, opcode, w, count); + break; + + case SpvOpVariable: + case SpvOpVariableArray: + case SpvOpLoad: + case SpvOpStore: + case SpvOpCopyMemory: + case SpvOpCopyMemorySized: + case SpvOpAccessChain: + case SpvOpInBoundsAccessChain: + case SpvOpArrayLength: + case SpvOpImagePointer: + vtn_handle_variables(b, opcode, w, count); + break; + + case SpvOpDecorationGroup: + case SpvOpDecorate: + case SpvOpMemberDecorate: + case SpvOpGroupDecorate: + case SpvOpGroupMemberDecorate: + vtn_handle_decoration(b, opcode, w, count); + break; + + case SpvOpTextureSample: + case SpvOpTextureSampleDref: + case SpvOpTextureSampleLod: + case SpvOpTextureSampleProj: + case SpvOpTextureSampleGrad: + case SpvOpTextureSampleOffset: + case SpvOpTextureSampleProjLod: + case SpvOpTextureSampleProjGrad: + case SpvOpTextureSampleLodOffset: + case SpvOpTextureSampleProjOffset: + case SpvOpTextureSampleGradOffset: + case SpvOpTextureSampleProjLodOffset: + case SpvOpTextureSampleProjGradOffset: + case SpvOpTextureFetchTexelLod: + case SpvOpTextureFetchTexelOffset: + case SpvOpTextureFetchSample: + case SpvOpTextureFetchTexel: + case SpvOpTextureGather: + case SpvOpTextureGatherOffset: + case SpvOpTextureGatherOffsets: + case SpvOpTextureQuerySizeLod: + case SpvOpTextureQuerySize: + case SpvOpTextureQueryLod: + case SpvOpTextureQueryLevels: + case SpvOpTextureQuerySamples: + vtn_handle_texture(b, opcode, w, count); + break; + + case SpvOpSNegate: + case SpvOpFNegate: + case SpvOpNot: + case SpvOpAny: + case SpvOpAll: + case SpvOpConvertFToU: + case SpvOpConvertFToS: + case SpvOpConvertSToF: + case SpvOpConvertUToF: + case SpvOpUConvert: + case SpvOpSConvert: + case SpvOpFConvert: + case SpvOpConvertPtrToU: + case SpvOpConvertUToPtr: + case SpvOpPtrCastToGeneric: + case SpvOpGenericCastToPtr: + case SpvOpBitcast: + case SpvOpTranspose: + case SpvOpIsNan: + case SpvOpIsInf: + case SpvOpIsFinite: + case SpvOpIsNormal: + case SpvOpSignBitSet: + case SpvOpLessOrGreater: + case SpvOpOrdered: + case SpvOpUnordered: + case SpvOpIAdd: + case SpvOpFAdd: + case SpvOpISub: + case SpvOpFSub: + case SpvOpIMul: + case SpvOpFMul: + case SpvOpUDiv: + case SpvOpSDiv: + case SpvOpFDiv: + case SpvOpUMod: + case SpvOpSRem: + case SpvOpSMod: + case SpvOpFRem: + case SpvOpFMod: + case SpvOpVectorTimesScalar: + case SpvOpMatrixTimesScalar: + case SpvOpVectorTimesMatrix: + case SpvOpMatrixTimesVector: + case 
SpvOpMatrixTimesMatrix: + case SpvOpOuterProduct: + case SpvOpDot: + case SpvOpShiftRightLogical: + case SpvOpShiftRightArithmetic: + case SpvOpShiftLeftLogical: + case SpvOpLogicalOr: + case SpvOpLogicalXor: + case SpvOpLogicalAnd: + case SpvOpBitwiseOr: + case SpvOpBitwiseXor: + case SpvOpBitwiseAnd: + case SpvOpSelect: + case SpvOpIEqual: + case SpvOpFOrdEqual: + case SpvOpFUnordEqual: + case SpvOpINotEqual: + case SpvOpFOrdNotEqual: + case SpvOpFUnordNotEqual: + case SpvOpULessThan: + case SpvOpSLessThan: + case SpvOpFOrdLessThan: + case SpvOpFUnordLessThan: + case SpvOpUGreaterThan: + case SpvOpSGreaterThan: + case SpvOpFOrdGreaterThan: + case SpvOpFUnordGreaterThan: + case SpvOpULessThanEqual: + case SpvOpSLessThanEqual: + case SpvOpFOrdLessThanEqual: + case SpvOpFUnordLessThanEqual: + case SpvOpUGreaterThanEqual: + case SpvOpSGreaterThanEqual: + case SpvOpFOrdGreaterThanEqual: + case SpvOpFUnordGreaterThanEqual: + case SpvOpDPdx: + case SpvOpDPdy: + case SpvOpFwidth: + case SpvOpDPdxFine: + case SpvOpDPdyFine: + case SpvOpFwidthFine: + case SpvOpDPdxCoarse: + case SpvOpDPdyCoarse: + case SpvOpFwidthCoarse: + vtn_handle_alu(b, opcode, w, count); + break; + + default: + unreachable("Unhandled opcode"); + } +} + +nir_shader * +spirv_to_nir(const uint32_t *words, size_t word_count, + const nir_shader_compiler_options *options) +{ + /* Handle the SPIR-V header (first 4 dwords) */ + assert(word_count > 5); + + assert(words[0] == SpvMagicNumber); + assert(words[1] == 99); + /* words[2] == generator magic */ + unsigned value_id_bound = words[3]; + assert(words[4] == 0); + + words+= 5; + + nir_shader *shader = nir_shader_create(NULL, options); + + /* Initialize the stn_builder object */ + struct vtn_builder *b = rzalloc(NULL, struct vtn_builder); + b->shader = shader; + b->value_id_bound = value_id_bound; + b->values = ralloc_array(b, struct vtn_value, value_id_bound); + + /* Start handling instructions */ + const uint32_t *word_end = words + word_count; + while (words < word_end) { + SpvOp opcode = words[0] & SpvOpCodeMask; + unsigned count = words[0] >> SpvWordCountShift; + assert(words + count <= word_end); + + vtn_handle_instruction(b, opcode, words, count); + + words += count; + } + + ralloc_free(b); + + return shader; +} From cae8db6b7e149e111fb9d3de69a45c0b3e036b76 Mon Sep 17 00:00:00 2001 From: Jason Ekstrand Date: Wed, 29 Apr 2015 14:30:22 -0700 Subject: [PATCH 03/44] glsl/compiler: Move the error_no_memory stub to standalone_scaffolding.cpp --- src/glsl/main.cpp | 6 ------ src/glsl/standalone_scaffolding.cpp | 6 ++++++ 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/src/glsl/main.cpp b/src/glsl/main.cpp index ccac8399646..fc54ddd7eb1 100644 --- a/src/glsl/main.cpp +++ b/src/glsl/main.cpp @@ -41,12 +41,6 @@ static int glsl_version = 330; -extern "C" void -_mesa_error_no_memory(const char *caller) -{ - fprintf(stderr, "Mesa error: out of memory in %s", caller); -} - static void initialize_context(struct gl_context *ctx, gl_api api) { diff --git a/src/glsl/standalone_scaffolding.cpp b/src/glsl/standalone_scaffolding.cpp index a109c4e92d2..6e1ecec3235 100644 --- a/src/glsl/standalone_scaffolding.cpp +++ b/src/glsl/standalone_scaffolding.cpp @@ -34,6 +34,12 @@ #include #include "util/ralloc.h" +extern "C" void +_mesa_error_no_memory(const char *caller) +{ + fprintf(stderr, "Mesa error: out of memory in %s", caller); +} + void _mesa_warning(struct gl_context *ctx, const char *fmt, ...) 
{ From 4763a13b075105a6ba33bbbb6ae1fbb1c3956cb1 Mon Sep 17 00:00:00 2001 From: Jason Ekstrand Date: Wed, 29 Apr 2015 14:29:38 -0700 Subject: [PATCH 04/44] REVERT: Add a simple helper program for testing SPIR-V -> NIR translation --- src/glsl/Makefile.am | 12 ++++++++- src/glsl/nir/spirv2nir.c | 54 ++++++++++++++++++++++++++++++++++++++++ 2 files changed, 65 insertions(+), 1 deletion(-) create mode 100644 src/glsl/nir/spirv2nir.c diff --git a/src/glsl/Makefile.am b/src/glsl/Makefile.am index 23c6fe8bb6c..7af9a709d5a 100644 --- a/src/glsl/Makefile.am +++ b/src/glsl/Makefile.am @@ -77,7 +77,7 @@ check_PROGRAMS = \ tests/sampler-types-test \ tests/uniform-initializer-test -noinst_PROGRAMS = glsl_compiler +noinst_PROGRAMS = glsl_compiler spirv2nir tests_blob_test_SOURCES = \ tests/blob_test.c @@ -162,6 +162,16 @@ glsl_compiler_LDADD = \ $(top_builddir)/src/libglsl_util.la \ $(PTHREAD_LIBS) +spirv2nir_SOURCES = \ + standalone_scaffolding.cpp \ + standalone_scaffolding.h \ + nir/spirv2nir.c + +spirv2nir_LDADD = \ + libglsl.la \ + $(top_builddir)/src/libglsl_util.la \ + $(PTHREAD_LIBS) + glsl_test_SOURCES = \ standalone_scaffolding.cpp \ tests/common.c \ diff --git a/src/glsl/nir/spirv2nir.c b/src/glsl/nir/spirv2nir.c new file mode 100644 index 00000000000..0eed23fbc3f --- /dev/null +++ b/src/glsl/nir/spirv2nir.c @@ -0,0 +1,54 @@ +/* + * Copyright © 2015 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + * + * Authors: + * Jason Ekstrand (jason@jlekstrand.net) + * + */ + +/* + * A simple executable that opens a SPIR-V shader, converts it to NIR, and + * dumps out the result. This should be useful for testing the + * spirv_to_nir code. 
+ */ + +#include "nir_spirv.h" + +#include +#include +#include +#include + +int main(int argc, char **argv) +{ + int fd = open(argv[1], O_RDONLY); + off_t len = lseek(fd, 0, SEEK_END); + + assert(len % 4 == 0); + size_t word_count = len / 4; + + const void *map = mmap(NULL, len, PROT_READ, MAP_PRIVATE, fd, 0); + assert(map != NULL); + + nir_shader *shader = spirv_to_nir(map, word_count, NULL); + nir_print_shader(shader, stderr); +} From f9a31ba044d3abf07359e66a833eaf1292668c3d Mon Sep 17 00:00:00 2001 From: Jason Ekstrand Date: Wed, 29 Apr 2015 14:32:55 -0700 Subject: [PATCH 05/44] nir/spirv: Add stub support for extension instructions --- src/glsl/nir/spirv_to_nir.c | 35 +++++++++++++++++++++++++++++++++++ 1 file changed, 35 insertions(+) diff --git a/src/glsl/nir/spirv_to_nir.c b/src/glsl/nir/spirv_to_nir.c index 02b99db17ae..76ddce62c1c 100644 --- a/src/glsl/nir/spirv_to_nir.c +++ b/src/glsl/nir/spirv_to_nir.c @@ -64,6 +64,9 @@ struct vtn_builder { unsigned value_id_bound; struct vtn_value *values; + + SpvExecutionModel execution_model; + struct vtn_value *entry_point; }; static void @@ -91,6 +94,21 @@ vtn_string_literal(struct vtn_builder *b, const uint32_t *words, return ralloc_strndup(b, (char *)words, (word_count - 2) * sizeof(*words)); } +static void +vtn_handle_extension(struct vtn_builder *b, SpvOp opcode, + const uint32_t *w, unsigned count) +{ + switch (opcode) { + case SpvOpExtInstImport: + /* Do nothing for the moment */ + break; + + case SpvOpExtInst: + default: + unreachable("Unhandled opcode"); + } +} + typedef void (*decoration_foreach_cb)(struct vtn_builder *, struct vtn_value *, const struct vtn_decoration *, @@ -216,6 +234,7 @@ vtn_handle_instruction(struct vtn_builder *b, SpvOp opcode, case SpvOpSourceExtension: case SpvOpMemberName: case SpvOpLine: + case SpvOpExtension: /* Unhandled, but these are for debug so that's ok. */ break; @@ -232,6 +251,22 @@ vtn_handle_instruction(struct vtn_builder *b, SpvOp opcode, vtn_push_token(b, w[2], vtn_value_type_undef); break; + case SpvOpMemoryModel: + assert(w[1] == SpvAddressingModelLogical); + assert(w[2] == SpvMemoryModelGLSL450); + break; + + case SpvOpEntryPoint: + assert(b->entry_point == NULL); + b->entry_point = &b->values[w[2]]; + b->execution_model = w[1]; + break; + + case SpvOpExtInstImport: + case SpvOpExtInst: + vtn_handle_extension(b, opcode, w, count); + break; + case SpvOpTypeVoid: case SpvOpTypeBool: case SpvOpTypeInt: From 2b570a49a92458f4771252c7faf1bac5a3c9dca5 Mon Sep 17 00:00:00 2001 From: Jason Ekstrand Date: Wed, 29 Apr 2015 14:34:06 -0700 Subject: [PATCH 06/44] nir/spirv: Rework the way values are added Instead of having functions to add values and set various things, we just have a function that does a few asserts and then returns the value. The caller is then responsible for setting the various fields. 
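
For example, the OpString case goes from passing the payload into the helper
to assigning the field on the value the helper returns:

    /* before */
    vtn_push_value(b, w[1], vtn_value_type_string,
                   vtn_string_literal(b, &w[2], count - 2));

    /* after */
    vtn_push_value(b, w[1], vtn_value_type_string)->str =
       vtn_string_literal(b, &w[2], count - 2);
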
--- src/glsl/nir/spirv_to_nir.c | 20 +++++++------------- 1 file changed, 7 insertions(+), 13 deletions(-) diff --git a/src/glsl/nir/spirv_to_nir.c b/src/glsl/nir/spirv_to_nir.c index 76ddce62c1c..e4bddbb6bdc 100644 --- a/src/glsl/nir/spirv_to_nir.c +++ b/src/glsl/nir/spirv_to_nir.c @@ -69,22 +69,16 @@ struct vtn_builder { struct vtn_value *entry_point; }; -static void +static struct vtn_value * vtn_push_value(struct vtn_builder *b, uint32_t value_id, - enum vtn_value_type value_type, void *ptr) + enum vtn_value_type value_type) { assert(value_id < b->value_id_bound); assert(b->values[value_id].value_type == vtn_value_type_invalid); b->values[value_id].value_type = value_type; - b->values[value_id].ptr = ptr; -} -static void -vtn_push_token(struct vtn_builder *b, uint32_t value_id, - enum vtn_value_type value_type) -{ - vtn_push_value(b, value_id, value_type, NULL); + return &b->values[value_id]; } static char * @@ -149,7 +143,7 @@ vtn_handle_decoration(struct vtn_builder *b, SpvOp opcode, { switch (opcode) { case SpvOpDecorationGroup: - vtn_push_token(b, w[1], vtn_value_type_undef); + vtn_push_value(b, w[1], vtn_value_type_undef); break; case SpvOpDecorate: { @@ -243,12 +237,12 @@ vtn_handle_instruction(struct vtn_builder *b, SpvOp opcode, break; case SpvOpString: - vtn_push_value(b, w[1], vtn_value_type_string, - vtn_string_literal(b, &w[2], count - 2)); + vtn_push_value(b, w[1], vtn_value_type_string)->str = + vtn_string_literal(b, &w[2], count - 2); break; case SpvOpUndef: - vtn_push_token(b, w[2], vtn_value_type_undef); + vtn_push_value(b, w[2], vtn_value_type_undef); break; case SpvOpMemoryModel: From 7b63b3de93b747dc5bf64891d2559d0db52d0f4e Mon Sep 17 00:00:00 2001 From: Jason Ekstrand Date: Tue, 24 Feb 2015 16:27:32 -0800 Subject: [PATCH 07/44] glsl: Add GLSL_TYPE_FUNCTION to the base types enums --- src/glsl/ast_to_hir.cpp | 1 + src/glsl/glsl_types.cpp | 2 ++ src/glsl/glsl_types.h | 1 + src/glsl/ir_clone.cpp | 1 + src/glsl/link_uniform_initializers.cpp | 1 + src/glsl/nir/nir_lower_io.c | 1 + src/mesa/drivers/dri/i965/brw_fs.cpp | 1 + src/mesa/drivers/dri/i965/brw_fs_visitor.cpp | 1 + src/mesa/drivers/dri/i965/brw_shader.cpp | 1 + src/mesa/drivers/dri/i965/brw_vec4_visitor.cpp | 1 + src/mesa/program/ir_to_mesa.cpp | 2 ++ 11 files changed, 13 insertions(+) diff --git a/src/glsl/ast_to_hir.cpp b/src/glsl/ast_to_hir.cpp index 14e63090557..bf68ec39229 100644 --- a/src/glsl/ast_to_hir.cpp +++ b/src/glsl/ast_to_hir.cpp @@ -970,6 +970,7 @@ do_comparison(void *mem_ctx, int operation, ir_rvalue *op0, ir_rvalue *op1) case GLSL_TYPE_SAMPLER: case GLSL_TYPE_IMAGE: case GLSL_TYPE_INTERFACE: + case GLSL_TYPE_FUNCTION: case GLSL_TYPE_ATOMIC_UINT: /* I assume a comparison of a struct containing a sampler just * ignores the sampler present in the type. 
diff --git a/src/glsl/glsl_types.cpp b/src/glsl/glsl_types.cpp index 9c9b7efcbc7..3ee5c00b22d 100644 --- a/src/glsl/glsl_types.cpp +++ b/src/glsl/glsl_types.cpp @@ -955,6 +955,7 @@ glsl_type::component_slots() const case GLSL_TYPE_IMAGE: return 1; + case GLSL_TYPE_FUNCTION: case GLSL_TYPE_SAMPLER: case GLSL_TYPE_ATOMIC_UINT: case GLSL_TYPE_VOID: @@ -1326,6 +1327,7 @@ glsl_type::count_attribute_slots() const case GLSL_TYPE_ARRAY: return this->length * this->fields.array->count_attribute_slots(); + case GLSL_TYPE_FUNCTION: case GLSL_TYPE_SAMPLER: case GLSL_TYPE_IMAGE: case GLSL_TYPE_ATOMIC_UINT: diff --git a/src/glsl/glsl_types.h b/src/glsl/glsl_types.h index 5645dcd5011..c77e337bf63 100644 --- a/src/glsl/glsl_types.h +++ b/src/glsl/glsl_types.h @@ -56,6 +56,7 @@ enum glsl_base_type { GLSL_TYPE_IMAGE, GLSL_TYPE_ATOMIC_UINT, GLSL_TYPE_STRUCT, + GLSL_TYPE_FUNCTION, GLSL_TYPE_INTERFACE, GLSL_TYPE_ARRAY, GLSL_TYPE_VOID, diff --git a/src/glsl/ir_clone.cpp b/src/glsl/ir_clone.cpp index 914e0e4d540..636c143ddc2 100644 --- a/src/glsl/ir_clone.cpp +++ b/src/glsl/ir_clone.cpp @@ -357,6 +357,7 @@ ir_constant::clone(void *mem_ctx, struct hash_table *ht) const return c; } + case GLSL_TYPE_FUNCTION: case GLSL_TYPE_SAMPLER: case GLSL_TYPE_IMAGE: case GLSL_TYPE_ATOMIC_UINT: diff --git a/src/glsl/link_uniform_initializers.cpp b/src/glsl/link_uniform_initializers.cpp index 69073841ea4..60bfc9c15c9 100644 --- a/src/glsl/link_uniform_initializers.cpp +++ b/src/glsl/link_uniform_initializers.cpp @@ -88,6 +88,7 @@ copy_constant_to_storage(union gl_constant_value *storage, case GLSL_TYPE_IMAGE: case GLSL_TYPE_ATOMIC_UINT: case GLSL_TYPE_INTERFACE: + case GLSL_TYPE_FUNCTION: case GLSL_TYPE_VOID: case GLSL_TYPE_ERROR: /* All other types should have already been filtered by other diff --git a/src/glsl/nir/nir_lower_io.c b/src/glsl/nir/nir_lower_io.c index 03eed04e1e9..561bebd3a9c 100644 --- a/src/glsl/nir/nir_lower_io.c +++ b/src/glsl/nir/nir_lower_io.c @@ -67,6 +67,7 @@ type_size(const struct glsl_type *type) return 0; case GLSL_TYPE_IMAGE: return 0; + case GLSL_TYPE_FUNCTION: case GLSL_TYPE_VOID: case GLSL_TYPE_ERROR: case GLSL_TYPE_DOUBLE: diff --git a/src/mesa/drivers/dri/i965/brw_fs.cpp b/src/mesa/drivers/dri/i965/brw_fs.cpp index b2701b89689..2fa5a664d30 100644 --- a/src/mesa/drivers/dri/i965/brw_fs.cpp +++ b/src/mesa/drivers/dri/i965/brw_fs.cpp @@ -671,6 +671,7 @@ fs_visitor::type_size(const struct glsl_type *type) case GLSL_TYPE_ERROR: case GLSL_TYPE_INTERFACE: case GLSL_TYPE_DOUBLE: + case GLSL_TYPE_FUNCTION: unreachable("not reached"); } diff --git a/src/mesa/drivers/dri/i965/brw_fs_visitor.cpp b/src/mesa/drivers/dri/i965/brw_fs_visitor.cpp index 80ca1b750f8..c911a551038 100644 --- a/src/mesa/drivers/dri/i965/brw_fs_visitor.cpp +++ b/src/mesa/drivers/dri/i965/brw_fs_visitor.cpp @@ -1348,6 +1348,7 @@ fs_visitor::emit_assignment_writes(fs_reg &l, fs_reg &r, case GLSL_TYPE_VOID: case GLSL_TYPE_ERROR: case GLSL_TYPE_INTERFACE: + case GLSL_TYPE_FUNCTION: unreachable("not reached"); } } diff --git a/src/mesa/drivers/dri/i965/brw_shader.cpp b/src/mesa/drivers/dri/i965/brw_shader.cpp index c1fd859fef5..ebfb49acf8d 100644 --- a/src/mesa/drivers/dri/i965/brw_shader.cpp +++ b/src/mesa/drivers/dri/i965/brw_shader.cpp @@ -351,6 +351,7 @@ brw_type_for_base_type(const struct glsl_type *type) case GLSL_TYPE_ERROR: case GLSL_TYPE_INTERFACE: case GLSL_TYPE_DOUBLE: + case GLSL_TYPE_FUNCTION: unreachable("not reached"); } diff --git a/src/mesa/drivers/dri/i965/brw_vec4_visitor.cpp 
b/src/mesa/drivers/dri/i965/brw_vec4_visitor.cpp index 5a60fe43bf8..e51c140c0f2 100644 --- a/src/mesa/drivers/dri/i965/brw_vec4_visitor.cpp +++ b/src/mesa/drivers/dri/i965/brw_vec4_visitor.cpp @@ -615,6 +615,7 @@ type_size(const struct glsl_type *type) case GLSL_TYPE_DOUBLE: case GLSL_TYPE_ERROR: case GLSL_TYPE_INTERFACE: + case GLSL_TYPE_FUNCTION: unreachable("not reached"); } diff --git a/src/mesa/program/ir_to_mesa.cpp b/src/mesa/program/ir_to_mesa.cpp index 3dcb53702a5..fceed712bdb 100644 --- a/src/mesa/program/ir_to_mesa.cpp +++ b/src/mesa/program/ir_to_mesa.cpp @@ -541,6 +541,7 @@ type_size(const struct glsl_type *type) case GLSL_TYPE_VOID: case GLSL_TYPE_ERROR: case GLSL_TYPE_INTERFACE: + case GLSL_TYPE_FUNCTION: assert(!"Invalid type in type_size"); break; } @@ -2448,6 +2449,7 @@ _mesa_associate_uniform_storage(struct gl_context *ctx, case GLSL_TYPE_STRUCT: case GLSL_TYPE_ERROR: case GLSL_TYPE_INTERFACE: + case GLSL_TYPE_FUNCTION: assert(!"Should not get here."); break; } From 053778c49362d49db93335d46cdafaa760038ac4 Mon Sep 17 00:00:00 2001 From: Jason Ekstrand Date: Tue, 24 Feb 2015 16:29:33 -0800 Subject: [PATCH 08/44] glsl/types: Add support for function types --- src/glsl/glsl_types.cpp | 100 ++++++++++++++++++++++++++++++++++++++++ src/glsl/glsl_types.h | 23 ++++++++- 2 files changed, 122 insertions(+), 1 deletion(-) diff --git a/src/glsl/glsl_types.cpp b/src/glsl/glsl_types.cpp index 3ee5c00b22d..0d83ee68e42 100644 --- a/src/glsl/glsl_types.cpp +++ b/src/glsl/glsl_types.cpp @@ -32,6 +32,7 @@ mtx_t glsl_type::mutex = _MTX_INITIALIZER_NP; hash_table *glsl_type::array_types = NULL; hash_table *glsl_type::record_types = NULL; hash_table *glsl_type::interface_types = NULL; +hash_table *glsl_type::function_types = NULL; void *glsl_type::mem_ctx = NULL; void @@ -159,6 +160,39 @@ glsl_type::glsl_type(const glsl_struct_field *fields, unsigned num_fields, mtx_unlock(&glsl_type::mutex); } +glsl_type::glsl_type(const glsl_type *return_type, + const glsl_function_param *params, unsigned num_params) : + gl_type(0), + base_type(GLSL_TYPE_FUNCTION), + sampler_dimensionality(0), sampler_shadow(0), sampler_array(0), + sampler_type(0), interface_packing(0), + vector_elements(0), matrix_columns(0), + length(num_params) +{ + unsigned int i; + + mtx_lock(&glsl_type::mutex); + + init_ralloc_type_ctx(); + + this->fields.parameters = rzalloc_array(this->mem_ctx, + glsl_function_param, num_params + 1); + + /* We store the return type as the first parameter */ + this->fields.parameters[0].type = return_type; + this->fields.parameters[0].in = false; + this->fields.parameters[0].out = true; + + /* We store the i'th parameter in slot i+1 */ + for (i = 0; i < length; i++) { + this->fields.parameters[i + 1].type = params[i].type; + this->fields.parameters[i + 1].in = params[i].in; + this->fields.parameters[i + 1].out = params[i].out; + } + + mtx_unlock(&glsl_type::mutex); +} + bool glsl_type::contains_sampler() const @@ -827,6 +861,72 @@ glsl_type::get_interface_instance(const glsl_struct_field *fields, } +static int +function_key_compare(const void *a, const void *b) +{ + const glsl_type *const key1 = (glsl_type *) a; + const glsl_type *const key2 = (glsl_type *) b; + + if (key1->length != key2->length) + return 1; + + return memcmp(key1->fields.parameters, key2->fields.parameters, + (key1->length + 1) * sizeof(*key1->fields.parameters)); +} + + +static unsigned +function_key_hash(const void *a) +{ + const glsl_type *const key = (glsl_type *) a; + char hash_key[128]; + unsigned size = 0; + + size = 
snprintf(hash_key, sizeof(hash_key), "%08x", key->length); + + for (unsigned i = 0; i < key->length; i++) { + if (size >= sizeof(hash_key)) + break; + + size += snprintf(& hash_key[size], sizeof(hash_key) - size, + "%p", (void *) key->fields.structure[i].type); + } + + return hash_table_string_hash(& hash_key); +} + +const glsl_type * +glsl_type::get_function_instance(const glsl_type *return_type, + const glsl_function_param *params, + unsigned num_params) +{ + const glsl_type key(return_type, params, num_params); + + mtx_lock(&glsl_type::mutex); + + if (function_types == NULL) { + function_types = hash_table_ctor(64, function_key_hash, + function_key_compare); + } + + const glsl_type *t = (glsl_type *) hash_table_find(function_types, &key); + if (t == NULL) { + mtx_unlock(&glsl_type::mutex); + t = new glsl_type(return_type, params, num_params); + mtx_lock(&glsl_type::mutex); + + hash_table_insert(function_types, (void *) t, t); + } + + assert(t->base_type == GLSL_TYPE_FUNCTION); + assert(t->length == num_params); + + mtx_unlock(&glsl_type::mutex); + + return t; +} + + const glsl_type * glsl_type::get_mul_type(const glsl_type *type_a, const glsl_type *type_b) { diff --git a/src/glsl/glsl_types.h b/src/glsl/glsl_types.h index c77e337bf63..4d726c6bcf9 100644 --- a/src/glsl/glsl_types.h +++ b/src/glsl/glsl_types.h @@ -179,7 +179,7 @@ struct glsl_type { */ union { const struct glsl_type *array; /**< Type of array elements. */ - const struct glsl_type *parameters; /**< Parameters to function. */ + struct glsl_function_param *parameters; /**< Parameters to function. */ struct glsl_struct_field *structure; /**< List of struct fields. */ } fields; @@ -276,6 +276,13 @@ struct glsl_type { enum glsl_interface_packing packing, const char *block_name); + /** + * Get the instance of a function type + */ + static const glsl_type *get_function_instance(const struct glsl_type *return_type, + const glsl_function_param *parameters, + unsigned num_params); + /** * Get the type resulting from a multiplication of \p type_a * \p type_b */ @@ -689,6 +696,10 @@ private: glsl_type(const glsl_struct_field *fields, unsigned num_fields, enum glsl_interface_packing packing, const char *name); + /** Constructor for interface types */ + glsl_type(const glsl_type *return_type, + const glsl_function_param *params, unsigned num_params); + /** Constructor for array types */ glsl_type(const glsl_type *array, unsigned length); @@ -701,6 +712,9 @@ private: /** Hash table containing the known interface types. */ static struct hash_table *interface_types; + /** Hash table containing the known function types. */ + static struct hash_table *function_types; + static int record_key_compare(const void *a, const void *b); static unsigned record_key_hash(const void *key); @@ -771,6 +785,13 @@ struct glsl_struct_field { int stream; }; +struct glsl_function_param { + const struct glsl_type *type; + + bool in; + bool out; +}; + static inline unsigned int glsl_align(unsigned int a, unsigned int align) { From fe550f0738cf2052d5f0bc7d23de46a79f8ae04b Mon Sep 17 00:00:00 2001 From: Jason Ekstrand Date: Wed, 29 Apr 2015 14:48:12 -0700 Subject: [PATCH 09/44] glsl/types: Expose the function_param and struct_field structs to C Previously, they were hidden behind a #ifdef __cplusplus so C wouldn't find them. This commit simpliy moves the ifdef. 
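As a rough sketch (not part of the patch, and leaning on the nir_types.h
wrappers added later in this series), C code can now fill the newly visible
struct directly when building a function type:

   /* Hypothetical C caller: build a "void f(in float)" function type. */
   struct glsl_function_param param;
   param.type = glsl_float_type();
   param.in = true;
   param.out = false;

   const struct glsl_type *ftype =
      glsl_function_type(glsl_void_type(), &param, 1);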
--- src/glsl/glsl_types.h | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/glsl/glsl_types.h b/src/glsl/glsl_types.h index 4d726c6bcf9..2d4718572af 100644 --- a/src/glsl/glsl_types.h +++ b/src/glsl/glsl_types.h @@ -742,6 +742,10 @@ private: /*@}*/ }; +#undef DECL_TYPE +#undef STRUCT_TYPE +#endif /* __cplusplus */ + struct glsl_struct_field { const struct glsl_type *type; const char *name; @@ -798,8 +802,4 @@ glsl_align(unsigned int a, unsigned int align) return (a + align - 1) / align * align; } -#undef DECL_TYPE -#undef STRUCT_TYPE -#endif /* __cplusplus */ - #endif /* GLSL_TYPES_H */ From e9d3b1e6942fc7c3d8dcfb9797a78ee16f0b20bc Mon Sep 17 00:00:00 2001 From: Jason Ekstrand Date: Wed, 29 Apr 2015 14:28:37 -0700 Subject: [PATCH 10/44] nir/types: Add more helpers for creating types --- src/glsl/nir/nir_types.cpp | 46 ++++++++++++++++++++++++++++++++++++++ src/glsl/nir/nir_types.h | 13 +++++++++++ 2 files changed, 59 insertions(+) diff --git a/src/glsl/nir/nir_types.cpp b/src/glsl/nir/nir_types.cpp index 62176f508a1..937a842d98e 100644 --- a/src/glsl/nir/nir_types.cpp +++ b/src/glsl/nir/nir_types.cpp @@ -148,14 +148,60 @@ glsl_float_type(void) return glsl_type::float_type; } +const glsl_type * +glsl_int_type(void) +{ + return glsl_type::int_type; +} + +const glsl_type * +glsl_uint_type(void) +{ + return glsl_type::uint_type; +} + +const glsl_type * +glsl_bool_type(void) +{ + return glsl_type::bool_type; +} + const glsl_type * glsl_vec4_type(void) { return glsl_type::vec4_type; } +const glsl_type * +glsl_vector_type(enum glsl_base_type base_type, unsigned components) +{ + assert(components > 1 && components <= 4); + return glsl_type::get_instance(base_type, components, 1); +} + +const glsl_type * +glsl_matrix_type(enum glsl_base_type base_type, unsigned rows, unsigned columns) +{ + assert(rows > 1 && rows <= 4 && columns > 1 && columns <= 4); + return glsl_type::get_instance(base_type, rows, columns); +} + const glsl_type * glsl_array_type(const glsl_type *base, unsigned elements) { return glsl_type::get_array_instance(base, elements); } + +const glsl_type * +glsl_struct_type(const glsl_struct_field *fields, + unsigned num_fields, const char *name) +{ + return glsl_type::get_record_instance(fields, num_fields, name); +} + +const glsl_type * +glsl_function_type(const glsl_type *return_type, + const glsl_function_param *params, unsigned num_params) +{ + return glsl_type::get_function_instance(return_type, params, num_params); +} diff --git a/src/glsl/nir/nir_types.h b/src/glsl/nir/nir_types.h index 276d4ad6234..aad43f7a8c0 100644 --- a/src/glsl/nir/nir_types.h +++ b/src/glsl/nir/nir_types.h @@ -70,9 +70,22 @@ bool glsl_type_is_matrix(const struct glsl_type *type); const struct glsl_type *glsl_void_type(void); const struct glsl_type *glsl_float_type(void); +const struct glsl_type *glsl_int_type(void); +const struct glsl_type *glsl_uint_type(void); +const struct glsl_type *glsl_bool_type(void); + const struct glsl_type *glsl_vec4_type(void); +const struct glsl_type *glsl_vector_type(enum glsl_base_type base_type, + unsigned components); +const struct glsl_type *glsl_matrix_type(enum glsl_base_type base_type, + unsigned rows, unsigned columns); const struct glsl_type *glsl_array_type(const struct glsl_type *base, unsigned elements); +const struct glsl_type *glsl_struct_type(const struct glsl_struct_field *fields, + unsigned num_fields, const char *name); +const struct glsl_type * glsl_function_type(const struct glsl_type *return_type, + const struct 
glsl_function_param *params, + unsigned num_params); #ifdef __cplusplus } From 3f83579664b5ff9ec292fb94da6b3bb8b949868b Mon Sep 17 00:00:00 2001 From: Jason Ekstrand Date: Wed, 29 Apr 2015 14:36:01 -0700 Subject: [PATCH 11/44] nir/spirv: Add basic support for types --- src/glsl/nir/spirv_to_nir.c | 89 +++++++++++++++++++++++++++++++++++-- 1 file changed, 85 insertions(+), 4 deletions(-) diff --git a/src/glsl/nir/spirv_to_nir.c b/src/glsl/nir/spirv_to_nir.c index e4bddbb6bdc..da85abebe15 100644 --- a/src/glsl/nir/spirv_to_nir.c +++ b/src/glsl/nir/spirv_to_nir.c @@ -26,6 +26,7 @@ */ #include "nir_spirv.h" +#include "nir_vla.h" #include "spirv.h" struct vtn_decoration; @@ -35,6 +36,7 @@ enum vtn_value_type { vtn_value_type_undef, vtn_value_type_string, vtn_value_type_decoration_group, + vtn_value_type_type, vtn_value_type_ssa, vtn_value_type_deref, }; @@ -46,6 +48,7 @@ struct vtn_value { union { void *ptr; char *str; + const struct glsl_type *type; nir_ssa_def *ssa; nir_deref_var *deref; }; @@ -184,11 +187,88 @@ vtn_handle_decoration(struct vtn_builder *b, SpvOp opcode, } } -static void +static const struct glsl_type * vtn_handle_type(struct vtn_builder *b, SpvOp opcode, - const uint32_t *w, unsigned count) + const uint32_t *args, unsigned count) { - unreachable("Unhandled opcode"); + switch (opcode) { + case SpvOpTypeVoid: + return glsl_void_type(); + case SpvOpTypeBool: + return glsl_bool_type(); + case SpvOpTypeInt: + return glsl_int_type(); + case SpvOpTypeFloat: + return glsl_float_type(); + + case SpvOpTypeVector: { + const struct glsl_type *base = b->values[args[0]].type; + unsigned elems = args[1]; + + assert(glsl_type_is_scalar(base)); + return glsl_vector_type(glsl_get_base_type(base), elems); + } + + case SpvOpTypeMatrix: { + const struct glsl_type *base = b->values[args[0]].type; + unsigned columns = args[1]; + + assert(glsl_type_is_vector(base)); + return glsl_matrix_type(glsl_get_base_type(base), + glsl_get_vector_elements(base), + columns); + } + + case SpvOpTypeArray: + return glsl_array_type(b->values[args[0]].type, args[1]); + + case SpvOpTypeStruct: { + NIR_VLA(struct glsl_struct_field, fields, count); + for (unsigned i = 0; i < count; i++) { + /* TODO: Handle decorators */ + fields[i].type = b->values[args[i]].type; + fields[i].name = ralloc_asprintf(b, "field%d", i); + fields[i].location = -1; + fields[i].interpolation = 0; + fields[i].centroid = 0; + fields[i].sample = 0; + fields[i].matrix_layout = 2; + fields[i].stream = -1; + } + return glsl_struct_type(fields, count, "struct"); + } + + case SpvOpTypeFunction: { + const struct glsl_type *return_type = b->values[args[0]].type; + NIR_VLA(struct glsl_function_param, params, count - 1); + for (unsigned i = 1; i < count; i++) { + params[i - 1].type = b->values[args[i]].type; + + /* FIXME: */ + params[i - 1].in = true; + params[i - 1].out = true; + } + return glsl_function_type(return_type, params, count - 1); + } + + case SpvOpTypePointer: + /* FIXME: For now, we'll just do the really lame thing and return + * the same type. 
The validator should ensure that the proper number + * of dereferences happen + */ + return b->values[args[0]].type; + + case SpvOpTypeSampler: + case SpvOpTypeRuntimeArray: + case SpvOpTypeOpaque: + case SpvOpTypeEvent: + case SpvOpTypeDeviceEvent: + case SpvOpTypeReserveId: + case SpvOpTypeQueue: + case SpvOpTypePipe: + default: + unreachable("Unhandled opcode"); + } } static void @@ -279,7 +359,8 @@ vtn_handle_instruction(struct vtn_builder *b, SpvOp opcode, case SpvOpTypeReserveId: case SpvOpTypeQueue: case SpvOpTypePipe: - vtn_handle_type(b, opcode, w, count); + vtn_push_value(b, w[1], vtn_value_type_type)->type = + vtn_handle_type(b, opcode, &w[2], count - 2); break; case SpvOpConstantTrue: From b2db85d8e4593f7b13e4550159f1d940d9d87a80 Mon Sep 17 00:00:00 2001 From: Jason Ekstrand Date: Wed, 29 Apr 2015 18:14:11 -0700 Subject: [PATCH 12/44] nir/spirv: Add support for constants --- src/glsl/nir/spirv_to_nir.c | 68 ++++++++++++++++++++++++++++++++++++- 1 file changed, 67 insertions(+), 1 deletion(-) diff --git a/src/glsl/nir/spirv_to_nir.c b/src/glsl/nir/spirv_to_nir.c index da85abebe15..1e4c4439883 100644 --- a/src/glsl/nir/spirv_to_nir.c +++ b/src/glsl/nir/spirv_to_nir.c @@ -37,6 +37,7 @@ enum vtn_value_type { vtn_value_type_string, vtn_value_type_decoration_group, vtn_value_type_type, + vtn_value_type_constant, vtn_value_type_ssa, vtn_value_type_deref, }; @@ -49,6 +50,7 @@ struct vtn_value { void *ptr; char *str; const struct glsl_type *type; + nir_constant *constant; nir_ssa_def *ssa; nir_deref_var *deref; }; @@ -84,6 +86,15 @@ vtn_push_value(struct vtn_builder *b, uint32_t value_id, return &b->values[value_id]; } +static struct vtn_value * +vtn_value(struct vtn_builder *b, uint32_t value_id, + enum vtn_value_type value_type) +{ + assert(value_id < b->value_id_bound); + assert(b->values[value_id].value_type == value_type); + return &b->values[value_id]; +} + static char * vtn_string_literal(struct vtn_builder *b, const uint32_t *words, unsigned word_count) @@ -275,7 +286,62 @@ static void vtn_handle_constant(struct vtn_builder *b, SpvOp opcode, const uint32_t *w, unsigned count) { - unreachable("Unhandled opcode"); + const struct glsl_type *type = vtn_value(b, w[1], vtn_value_type_type)->type; + nir_constant *constant = ralloc(b, nir_constant); + switch (opcode) { + case SpvOpConstantTrue: + assert(type == glsl_bool_type()); + constant->value.u[0] = NIR_TRUE; + break; + case SpvOpConstantFalse: + assert(type == glsl_bool_type()); + constant->value.u[0] = NIR_FALSE; + break; + case SpvOpConstant: + assert(glsl_type_is_scalar(type)); + constant->value.u[0] = w[3]; + break; + case SpvOpConstantComposite: { + unsigned elem_count = count - 3; + nir_constant **elems = ralloc_array(b, nir_constant *, elem_count); + for (unsigned i = 0; i < elem_count; i++) + elems[i] = vtn_value(b, w[i + 3], vtn_value_type_constant)->constant; + + switch (glsl_get_base_type(type)) { + case GLSL_TYPE_UINT: + case GLSL_TYPE_INT: + case GLSL_TYPE_FLOAT: + case GLSL_TYPE_BOOL: + if (glsl_type_is_matrix(type)) { + unsigned rows = glsl_get_vector_elements(type); + assert(glsl_get_matrix_columns(type) == elem_count); + for (unsigned i = 0; i < elem_count; i++) + for (unsigned j = 0; j < rows; j++) + constant->value.u[rows * i + j] = elems[i]->value.u[j]; + } else { + assert(glsl_type_is_vector(type)); + assert(glsl_get_vector_elements(type) == elem_count); + for (unsigned i = 0; i < elem_count; i++) + constant->value.u[i] = elems[i]->value.u[0]; + } + ralloc_free(elems); + break; + + case GLSL_TYPE_STRUCT: + 
case GLSL_TYPE_ARRAY: + constant->elements = elems; + break; + + default: + unreachable("Unsupported type for constants"); + } + break; + } + + default: + unreachable("Unhandled opcode"); + } + vtn_push_value(b, w[2], vtn_value_type_constant)->constant = constant; } static void From 707b706d183cd0733df0eaf0f79c7ef42e58a8e4 Mon Sep 17 00:00:00 2001 From: Jason Ekstrand Date: Wed, 29 Apr 2015 19:37:41 -0700 Subject: [PATCH 13/44] nir/spirv: Add support for declaring variables Deref chains and variable load/store operations are still missing. --- src/glsl/nir/spirv_to_nir.c | 152 +++++++++++++++++++++++++++++++++++- 1 file changed, 151 insertions(+), 1 deletion(-) diff --git a/src/glsl/nir/spirv_to_nir.c b/src/glsl/nir/spirv_to_nir.c index 1e4c4439883..131eecc1d4b 100644 --- a/src/glsl/nir/spirv_to_nir.c +++ b/src/glsl/nir/spirv_to_nir.c @@ -38,6 +38,7 @@ enum vtn_value_type { vtn_value_type_decoration_group, vtn_value_type_type, vtn_value_type_constant, + vtn_value_type_variable, vtn_value_type_ssa, vtn_value_type_deref, }; @@ -51,6 +52,7 @@ struct vtn_value { char *str; const struct glsl_type *type; nir_constant *constant; + nir_variable *var; nir_ssa_def *ssa; nir_deref_var *deref; }; @@ -344,11 +346,159 @@ vtn_handle_constant(struct vtn_builder *b, SpvOp opcode, vtn_push_value(b, w[2], vtn_value_type_constant)->constant = constant; } +static void +var_decoration_cb(struct vtn_builder *b, struct vtn_value *val, + const struct vtn_decoration *dec, void *unused) +{ + assert(val->value_type == vtn_value_type_variable); + nir_variable *var = val->var; + switch (dec->decoration) { + case SpvDecorationPrecisionLow: + case SpvDecorationPrecisionMedium: + case SpvDecorationPrecisionHigh: + break; /* FIXME: Do nothing with these for now. */ + case SpvDecorationSmooth: + var->data.interpolation = INTERP_QUALIFIER_SMOOTH; + break; + case SpvDecorationNoperspective: + var->data.interpolation = INTERP_QUALIFIER_NOPERSPECTIVE; + break; + case SpvDecorationFlat: + var->data.interpolation = INTERP_QUALIFIER_FLAT; + break; + case SpvDecorationCentroid: + var->data.centroid = true; + break; + case SpvDecorationSample: + var->data.sample = true; + break; + case SpvDecorationInvariant: + var->data.invariant = true; + break; + case SpvDecorationConstant: + assert(var->constant_initializer != NULL); + var->data.read_only = true; + break; + case SpvDecorationNonwritable: + var->data.read_only = true; + break; + case SpvDecorationLocation: + var->data.explicit_location = true; + var->data.location = dec->literals[0]; + break; + case SpvDecorationComponent: + var->data.location_frac = dec->literals[0]; + break; + case SpvDecorationIndex: + var->data.explicit_index = true; + var->data.index = dec->literals[0]; + break; + case SpvDecorationBinding: + var->data.explicit_binding = true; + var->data.binding = dec->literals[0]; + break; + case SpvDecorationBlock: + case SpvDecorationBufferBlock: + case SpvDecorationRowMajor: + case SpvDecorationColMajor: + case SpvDecorationGLSLShared: + case SpvDecorationGLSLStd140: + case SpvDecorationGLSLStd430: + case SpvDecorationGLSLPacked: + case SpvDecorationPatch: + case SpvDecorationRestrict: + case SpvDecorationAliased: + case SpvDecorationVolatile: + case SpvDecorationCoherent: + case SpvDecorationNonreadable: + case SpvDecorationUniform: + /* This is really nice but we have no use for it right now. 
*/ + case SpvDecorationNoStaticUse: + case SpvDecorationCPacked: + case SpvDecorationSaturatedConversion: + case SpvDecorationStream: + case SpvDecorationDescriptorSet: + case SpvDecorationOffset: + case SpvDecorationAlignment: + case SpvDecorationXfbBuffer: + case SpvDecorationStride: + case SpvDecorationBuiltIn: + case SpvDecorationFuncParamAttr: + case SpvDecorationFPRoundingMode: + case SpvDecorationFPFastMathMode: + case SpvDecorationLinkageAttributes: + case SpvDecorationSpecId: + default: + unreachable("Unhandled variable decoration"); + } +} + static void vtn_handle_variables(struct vtn_builder *b, SpvOp opcode, const uint32_t *w, unsigned count) { - unreachable("Unhandled opcode"); + switch (opcode) { + case SpvOpVariable: { + const struct glsl_type *type = + vtn_value(b, w[1], vtn_value_type_type)->type; + struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_variable); + + nir_variable *var = ralloc(b->shader, nir_variable); + val->var = var; + + var->type = type; + var->name = ralloc_strdup(var, val->name); + + switch ((SpvStorageClass)w[3]) { + case SpvStorageClassUniformConstant: + var->data.mode = nir_var_uniform; + var->data.read_only = true; + break; + case SpvStorageClassInput: + var->data.mode = nir_var_shader_in; + var->data.read_only = true; + break; + case SpvStorageClassOutput: + var->data.mode = nir_var_shader_out; + break; + case SpvStorageClassPrivateGlobal: + var->data.mode = nir_var_global; + break; + case SpvStorageClassFunction: + var->data.mode = nir_var_local; + break; + case SpvStorageClassUniform: + case SpvStorageClassWorkgroupLocal: + case SpvStorageClassWorkgroupGlobal: + case SpvStorageClassGeneric: + case SpvStorageClassPrivate: + case SpvStorageClassAtomicCounter: + default: + unreachable("Unhandled variable storage class"); + } + + if (count > 4) { + assert(count == 5); + var->constant_initializer = + vtn_value(b, w[4], vtn_value_type_constant)->constant; + } + + vtn_foreach_decoration(b, val, var_decoration_cb, NULL); + break; + } + + case SpvOpVariableArray: + case SpvOpLoad: + case SpvOpStore: + case SpvOpCopyMemory: + case SpvOpCopyMemorySized: + case SpvOpAccessChain: + case SpvOpInBoundsAccessChain: + case SpvOpArrayLength: + case SpvOpImagePointer: + default: + unreachable("Unhandled opcode"); + } } static void From 8ee23dab041fe76f20d6297a1aa34f7ff1be6dc8 Mon Sep 17 00:00:00 2001 From: Jason Ekstrand Date: Wed, 29 Apr 2015 20:09:36 -0700 Subject: [PATCH 14/44] nir/types: Add accessors for function parameter/return types --- src/glsl/nir/nir_types.cpp | 12 ++++++++++++ src/glsl/nir/nir_types.h | 6 ++++++ 2 files changed, 18 insertions(+) diff --git a/src/glsl/nir/nir_types.cpp b/src/glsl/nir/nir_types.cpp index 937a842d98e..f2894d40c78 100644 --- a/src/glsl/nir/nir_types.cpp +++ b/src/glsl/nir/nir_types.cpp @@ -70,6 +70,18 @@ glsl_get_struct_field(const glsl_type *type, unsigned index) return type->fields.structure[index].type; } +const glsl_type * +glsl_get_function_return_type(const glsl_type *type) +{ + return type->fields.parameters[0].type; +} + +const glsl_function_param * +glsl_get_function_param(const glsl_type *type, unsigned index) +{ + return &type->fields.parameters[index + 1]; +} + const struct glsl_type * glsl_get_column_type(const struct glsl_type *type) { diff --git a/src/glsl/nir/nir_types.h b/src/glsl/nir/nir_types.h index aad43f7a8c0..dd535770c9f 100644 --- a/src/glsl/nir/nir_types.h +++ b/src/glsl/nir/nir_types.h @@ -49,6 +49,12 @@ const struct glsl_type *glsl_get_array_element(const struct glsl_type *type); const 
struct glsl_type *glsl_get_column_type(const struct glsl_type *type); +const struct glsl_type * +glsl_get_function_return_type(const struct glsl_type *type); + +const struct glsl_function_param * +glsl_get_function_param(const struct glsl_type *type, unsigned index); + enum glsl_base_type glsl_get_base_type(const struct glsl_type *type); unsigned glsl_get_vector_elements(const struct glsl_type *type); From a6cb9d92222079d0afcd651941cc6c3d091944a5 Mon Sep 17 00:00:00 2001 From: Jason Ekstrand Date: Wed, 29 Apr 2015 20:10:20 -0700 Subject: [PATCH 15/44] nir/spirv: Add support for declaring functions --- src/glsl/nir/spirv_to_nir.c | 67 +++++++++++++++++++++++++++++++++++++ 1 file changed, 67 insertions(+) diff --git a/src/glsl/nir/spirv_to_nir.c b/src/glsl/nir/spirv_to_nir.c index 131eecc1d4b..821927b9d84 100644 --- a/src/glsl/nir/spirv_to_nir.c +++ b/src/glsl/nir/spirv_to_nir.c @@ -39,6 +39,7 @@ enum vtn_value_type { vtn_value_type_type, vtn_value_type_constant, vtn_value_type_variable, + vtn_value_type_function, vtn_value_type_ssa, vtn_value_type_deref, }; @@ -53,6 +54,7 @@ struct vtn_value { const struct glsl_type *type; nir_constant *constant; nir_variable *var; + nir_function_impl *impl; nir_ssa_def *ssa; nir_deref_var *deref; }; @@ -68,6 +70,7 @@ struct vtn_decoration { struct vtn_builder { nir_shader *shader; nir_function_impl *impl; + struct exec_list *cf_list; unsigned value_id_bound; struct vtn_value *values; @@ -501,6 +504,63 @@ vtn_handle_variables(struct vtn_builder *b, SpvOp opcode, } } +static void +vtn_handle_functions(struct vtn_builder *b, SpvOp opcode, + const uint32_t *w, unsigned count) +{ + switch (opcode) { + case SpvOpFunction: { + assert(b->impl == NULL); + + const struct glsl_type *result_type = + vtn_value(b, w[1], vtn_value_type_type)->type; + struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_function); + const struct glsl_type *func_type = + vtn_value(b, w[4], vtn_value_type_type)->type; + + assert(glsl_get_function_return_type(func_type) == result_type); + + nir_function *func = + nir_function_create(b->shader, ralloc_strdup(b->shader, val->name)); + + nir_function_overload *overload = nir_function_overload_create(func); + overload->num_params = glsl_get_length(func_type); + overload->params = ralloc_array(overload, nir_parameter, + overload->num_params); + for (unsigned i = 0; i < overload->num_params; i++) { + const struct glsl_function_param *param = + glsl_get_function_param(func_type, i); + overload->params[i].type = param->type; + if (param->in) { + if (param->out) { + overload->params[i].param_type = nir_parameter_inout; + } else { + overload->params[i].param_type = nir_parameter_in; + } + } else { + if (param->out) { + overload->params[i].param_type = nir_parameter_out; + } else { + assert(!"Parameter is neither in nor out"); + } + } + } + + val->impl = b->impl = nir_function_impl_create(overload); + b->cf_list = &b->impl->body; + + break; + } + case SpvOpFunctionEnd: + b->impl = NULL; + break; + case SpvOpFunctionParameter: + case SpvOpFunctionCall: + default: + unreachable("Unhandled opcode"); + } +} + static void vtn_handle_texture(struct vtn_builder *b, SpvOp opcode, const uint32_t *w, unsigned count) @@ -614,6 +674,13 @@ vtn_handle_instruction(struct vtn_builder *b, SpvOp opcode, vtn_handle_decoration(b, opcode, w, count); break; + case SpvOpFunction: + case SpvOpFunctionEnd: + case SpvOpFunctionParameter: + case SpvOpFunctionCall: + vtn_handle_functions(b, opcode, w, count); + break; + case SpvOpTextureSample: case 
SpvOpTextureSampleDref: case SpvOpTextureSampleLod: From eccd798cc29737f7375150488abefadf551d0f81 Mon Sep 17 00:00:00 2001 From: Jason Ekstrand Date: Wed, 29 Apr 2015 20:19:34 -0700 Subject: [PATCH 16/44] nir/spirv: Add support for OpLabel --- src/glsl/nir/spirv_to_nir.c | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/src/glsl/nir/spirv_to_nir.c b/src/glsl/nir/spirv_to_nir.c index 821927b9d84..e8acb33f481 100644 --- a/src/glsl/nir/spirv_to_nir.c +++ b/src/glsl/nir/spirv_to_nir.c @@ -40,6 +40,7 @@ enum vtn_value_type { vtn_value_type_constant, vtn_value_type_variable, vtn_value_type_function, + vtn_value_type_block, vtn_value_type_ssa, vtn_value_type_deref, }; @@ -55,6 +56,7 @@ struct vtn_value { nir_constant *constant; nir_variable *var; nir_function_impl *impl; + nir_block *block; nir_ssa_def *ssa; nir_deref_var *deref; }; @@ -612,6 +614,17 @@ vtn_handle_instruction(struct vtn_builder *b, SpvOp opcode, b->execution_model = w[1]; break; + case SpvOpLabel: { + struct exec_node *list_tail = exec_list_get_tail(b->cf_list); + nir_cf_node *tail_node = exec_node_data(nir_cf_node, list_tail, node); + assert(tail_node->type == nir_cf_node_block); + nir_block *block = nir_cf_node_as_block(tail_node); + + assert(exec_list_is_empty(&block->instr_list)); + vtn_push_value(b, w[1], vtn_value_type_block)->block = block; + break; + } + case SpvOpExtInstImport: case SpvOpExtInst: vtn_handle_extension(b, opcode, w, count); From 7182597e50253f07b66cb0edb806b11a56242b5c Mon Sep 17 00:00:00 2001 From: Jason Ekstrand Date: Wed, 29 Apr 2015 20:56:17 -0700 Subject: [PATCH 17/44] nir/types: Add a scalar type constructor --- src/glsl/nir/nir_types.cpp | 6 ++++++ src/glsl/nir/nir_types.h | 1 + 2 files changed, 7 insertions(+) diff --git a/src/glsl/nir/nir_types.cpp b/src/glsl/nir/nir_types.cpp index f2894d40c78..f93a52b5fa5 100644 --- a/src/glsl/nir/nir_types.cpp +++ b/src/glsl/nir/nir_types.cpp @@ -184,6 +184,12 @@ glsl_vec4_type(void) return glsl_type::vec4_type; } +const glsl_type * +glsl_scalar_type(enum glsl_base_type base_type) +{ + return glsl_type::get_instance(base_type, 1, 1); +} + const glsl_type * glsl_vector_type(enum glsl_base_type base_type, unsigned components) { diff --git a/src/glsl/nir/nir_types.h b/src/glsl/nir/nir_types.h index dd535770c9f..40a80ec7130 100644 --- a/src/glsl/nir/nir_types.h +++ b/src/glsl/nir/nir_types.h @@ -81,6 +81,7 @@ const struct glsl_type *glsl_uint_type(void); const struct glsl_type *glsl_bool_type(void); const struct glsl_type *glsl_vec4_type(void); +const struct glsl_type *glsl_scalar_type(enum glsl_base_type base_type); const struct glsl_type *glsl_vector_type(enum glsl_base_type base_type, unsigned components); const struct glsl_type *glsl_matrix_type(enum glsl_base_type base_type, From 5acd472271d72866a36a5de374f3e1a846b61dc8 Mon Sep 17 00:00:00 2001 From: Jason Ekstrand Date: Wed, 29 Apr 2015 20:56:36 -0700 Subject: [PATCH 18/44] nir/spirv: Add support for deref chains --- src/glsl/nir/spirv_to_nir.c | 86 +++++++++++++++++++++++++++++++------ 1 file changed, 74 insertions(+), 12 deletions(-) diff --git a/src/glsl/nir/spirv_to_nir.c b/src/glsl/nir/spirv_to_nir.c index e8acb33f481..ba536e8c81d 100644 --- a/src/glsl/nir/spirv_to_nir.c +++ b/src/glsl/nir/spirv_to_nir.c @@ -38,11 +38,10 @@ enum vtn_value_type { vtn_value_type_decoration_group, vtn_value_type_type, vtn_value_type_constant, - vtn_value_type_variable, + vtn_value_type_deref, vtn_value_type_function, vtn_value_type_block, vtn_value_type_ssa, - vtn_value_type_deref, }; struct vtn_value { @@ 
-54,11 +53,10 @@ struct vtn_value { char *str; const struct glsl_type *type; nir_constant *constant; - nir_variable *var; + nir_deref_var *deref; nir_function_impl *impl; nir_block *block; nir_ssa_def *ssa; - nir_deref_var *deref; }; }; @@ -353,10 +351,13 @@ vtn_handle_constant(struct vtn_builder *b, SpvOp opcode, static void var_decoration_cb(struct vtn_builder *b, struct vtn_value *val, - const struct vtn_decoration *dec, void *unused) + const struct vtn_decoration *dec, void *void_var) { - assert(val->value_type == vtn_value_type_variable); - nir_variable *var = val->var; + assert(val->value_type == vtn_value_type_deref); + assert(val->deref->deref.child == NULL); + assert(val->deref->var == void_var); + + nir_variable *var = void_var; switch (dec->decoration) { case SpvDecorationPrecisionLow: case SpvDecorationPrecisionMedium: @@ -446,10 +447,9 @@ vtn_handle_variables(struct vtn_builder *b, SpvOp opcode, case SpvOpVariable: { const struct glsl_type *type = vtn_value(b, w[1], vtn_value_type_type)->type; - struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_variable); + struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_deref); nir_variable *var = ralloc(b->shader, nir_variable); - val->var = var; var->type = type; var->name = ralloc_strdup(var, val->name); @@ -488,7 +488,71 @@ vtn_handle_variables(struct vtn_builder *b, SpvOp opcode, vtn_value(b, w[4], vtn_value_type_constant)->constant; } - vtn_foreach_decoration(b, val, var_decoration_cb, NULL); + val->deref = nir_deref_var_create(b->shader, var); + + vtn_foreach_decoration(b, val, var_decoration_cb, var); + break; + } + + case SpvOpAccessChain: + case SpvOpInBoundsAccessChain: { + struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_deref); + nir_deref_var *base = vtn_value(b, w[3], vtn_value_type_deref)->deref; + val->deref = nir_deref_as_var(nir_copy_deref(b, &base->deref)); + + nir_deref *tail = &val->deref->deref; + while (tail->child) + tail = tail->child; + + for (unsigned i = 0; i < count - 3; i++) { + assert(w[i + 3] < b->value_id_bound); + struct vtn_value *idx_val = &b->values[w[i + 3]]; + + enum glsl_base_type base_type = glsl_get_base_type(tail->type); + switch (base_type) { + case GLSL_TYPE_UINT: + case GLSL_TYPE_INT: + case GLSL_TYPE_FLOAT: + case GLSL_TYPE_DOUBLE: + case GLSL_TYPE_BOOL: + case GLSL_TYPE_ARRAY: { + nir_deref_array *deref_arr = nir_deref_array_create(b); + if (base_type == GLSL_TYPE_ARRAY) { + deref_arr->deref.type = glsl_get_array_element(tail->type); + } else if (glsl_type_is_matrix(tail->type)) { + deref_arr->deref.type = glsl_get_column_type(tail->type); + } else { + assert(glsl_type_is_vector(tail->type)); + deref_arr->deref.type = glsl_scalar_type(base_type); + } + + if (idx_val->value_type == vtn_value_type_constant) { + unsigned idx = idx_val->constant->value.u[0]; + deref_arr->deref_array_type = nir_deref_array_type_direct; + deref_arr->base_offset = idx; + } else { + assert(idx_val->value_type == vtn_value_type_ssa); + deref_arr->deref_array_type = nir_deref_array_type_indirect; + /* TODO */ + unreachable("Indirect array accesses not implemented"); + } + tail->child = &deref_arr->deref; + break; + } + + case GLSL_TYPE_STRUCT: { + assert(idx_val->value_type == vtn_value_type_constant); + unsigned idx = idx_val->constant->value.u[0]; + nir_deref_struct *deref_struct = nir_deref_struct_create(b, idx); + deref_struct->deref.type = glsl_get_struct_field(tail->type, idx); + tail->child = &deref_struct->deref; + break; + } + default: + unreachable("Invalid type for 
deref"); + } + tail = tail->child; + } break; } @@ -497,8 +561,6 @@ vtn_handle_variables(struct vtn_builder *b, SpvOp opcode, case SpvOpStore: case SpvOpCopyMemory: case SpvOpCopyMemorySized: - case SpvOpAccessChain: - case SpvOpInBoundsAccessChain: case SpvOpArrayLength: case SpvOpImagePointer: default: From 6ff0830d64840d88631287db32ae464c7a436b17 Mon Sep 17 00:00:00 2001 From: Jason Ekstrand Date: Fri, 1 May 2015 11:26:40 -0700 Subject: [PATCH 19/44] nir/types: Add an is_vector_or_scalar helper --- src/glsl/nir/nir_types.cpp | 6 ++++++ src/glsl/nir/nir_types.h | 1 + 2 files changed, 7 insertions(+) diff --git a/src/glsl/nir/nir_types.cpp b/src/glsl/nir/nir_types.cpp index f93a52b5fa5..a6d35fe6179 100644 --- a/src/glsl/nir/nir_types.cpp +++ b/src/glsl/nir/nir_types.cpp @@ -142,6 +142,12 @@ glsl_type_is_scalar(const struct glsl_type *type) return type->is_scalar(); } +bool +glsl_type_is_vector_or_scalar(const struct glsl_type *type) +{ + return type->is_vector() || type->is_scalar(); +} + bool glsl_type_is_matrix(const struct glsl_type *type) { diff --git a/src/glsl/nir/nir_types.h b/src/glsl/nir/nir_types.h index 40a80ec7130..f19f0e5db5d 100644 --- a/src/glsl/nir/nir_types.h +++ b/src/glsl/nir/nir_types.h @@ -72,6 +72,7 @@ const char *glsl_get_struct_elem_name(const struct glsl_type *type, bool glsl_type_is_void(const struct glsl_type *type); bool glsl_type_is_vector(const struct glsl_type *type); bool glsl_type_is_scalar(const struct glsl_type *type); +bool glsl_type_is_vector_or_scalar(const struct glsl_type *type); bool glsl_type_is_matrix(const struct glsl_type *type); const struct glsl_type *glsl_void_type(void); From 01f3aa9c5191707e6b527f6858391c44db8c40db Mon Sep 17 00:00:00 2001 From: Jason Ekstrand Date: Fri, 1 May 2015 11:27:21 -0700 Subject: [PATCH 20/44] nir/spirv: Use vtn_value in the types code and fix a off-by-one error --- src/glsl/nir/spirv_to_nir.c | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/src/glsl/nir/spirv_to_nir.c b/src/glsl/nir/spirv_to_nir.c index ba536e8c81d..8e043100c37 100644 --- a/src/glsl/nir/spirv_to_nir.c +++ b/src/glsl/nir/spirv_to_nir.c @@ -218,7 +218,8 @@ vtn_handle_type(struct vtn_builder *b, SpvOp opcode, return glsl_float_type(); case SpvOpTypeVector: { - const struct glsl_type *base = b->values[args[0]].type; + const struct glsl_type *base = + vtn_value(b, args[0], vtn_value_type_type)->type; unsigned elems = args[1]; assert(glsl_type_is_scalar(base)); @@ -226,7 +227,8 @@ vtn_handle_type(struct vtn_builder *b, SpvOp opcode, } case SpvOpTypeMatrix: { - const struct glsl_type *base = b->values[args[0]].type; + const struct glsl_type *base = + vtn_value(b, args[0], vtn_value_type_type)->type; unsigned columns = args[1]; assert(glsl_type_is_vector(base)); @@ -242,7 +244,7 @@ vtn_handle_type(struct vtn_builder *b, SpvOp opcode, NIR_VLA(struct glsl_struct_field, fields, count); for (unsigned i = 0; i < count; i++) { /* TODO: Handle decorators */ - fields[i].type = b->values[args[i]].type; + fields[i].type = vtn_value(b, args[i], vtn_value_type_type)->type; fields[i].name = ralloc_asprintf(b, "field%d", i); fields[i].location = -1; fields[i].interpolation = 0; @@ -258,7 +260,7 @@ vtn_handle_type(struct vtn_builder *b, SpvOp opcode, const struct glsl_type *return_type = b->values[args[0]].type; NIR_VLA(struct glsl_function_param, params, count - 1); for (unsigned i = 1; i < count; i++) { - params[i - 1].type = b->values[args[i]].type; + params[i - 1].type = vtn_value(b, args[i], vtn_value_type_type)->type; /* FIXME: */ 
params[i - 1].in = true; @@ -272,7 +274,7 @@ vtn_handle_type(struct vtn_builder *b, SpvOp opcode, * the same type. The validator should ensure that the proper number * of dereferences happen */ - return b->values[args[0]].type; + return vtn_value(b, args[1], vtn_value_type_type)->type; case SpvOpTypeSampler: case SpvOpTypeRuntimeArray: From 5045efa4aa2bd335b2b0110b6af792fbc4fcba7e Mon Sep 17 00:00:00 2001 From: Jason Ekstrand Date: Fri, 1 May 2015 11:27:44 -0700 Subject: [PATCH 21/44] nir/spirv: Add a vtn_untyped_value helper --- src/glsl/nir/spirv_to_nir.c | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) diff --git a/src/glsl/nir/spirv_to_nir.c b/src/glsl/nir/spirv_to_nir.c index 8e043100c37..7f74c9708d9 100644 --- a/src/glsl/nir/spirv_to_nir.c +++ b/src/glsl/nir/spirv_to_nir.c @@ -91,13 +91,20 @@ vtn_push_value(struct vtn_builder *b, uint32_t value_id, return &b->values[value_id]; } +static struct vtn_value * +vtn_untyped_value(struct vtn_builder *b, uint32_t value_id) +{ + assert(value_id < b->value_id_bound); + return &b->values[value_id]; +} + static struct vtn_value * vtn_value(struct vtn_builder *b, uint32_t value_id, enum vtn_value_type value_type) { - assert(value_id < b->value_id_bound); - assert(b->values[value_id].value_type == value_type); - return &b->values[value_id]; + struct vtn_value *val = vtn_untyped_value(b, value_id); + assert(val->value_type == value_type); + return val; } static char * From 06acd174f3a6bb9097ca31a3d3bc5f10797b2f4d Mon Sep 17 00:00:00 2001 From: Jason Ekstrand Date: Fri, 1 May 2015 11:28:01 -0700 Subject: [PATCH 22/44] nir/spirv: Actaully add variables to the funciton or shader --- src/glsl/nir/spirv_to_nir.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/src/glsl/nir/spirv_to_nir.c b/src/glsl/nir/spirv_to_nir.c index 7f74c9708d9..de3ad50e25c 100644 --- a/src/glsl/nir/spirv_to_nir.c +++ b/src/glsl/nir/spirv_to_nir.c @@ -497,6 +497,12 @@ vtn_handle_variables(struct vtn_builder *b, SpvOp opcode, vtn_value(b, w[4], vtn_value_type_constant)->constant; } + if (var->data.mode == nir_var_local) { + exec_list_push_tail(&b->impl->locals, &var->node); + } else { + exec_list_push_tail(&b->shader->globals, &var->node); + } + val->deref = nir_deref_var_create(b->shader, var); vtn_foreach_decoration(b, val, var_decoration_cb, var); From 88f6fbc897b9eba82764465c5d1b3ef94dbfc990 Mon Sep 17 00:00:00 2001 From: Jason Ekstrand Date: Mon, 4 May 2015 10:19:24 -0700 Subject: [PATCH 23/44] nir: Add a helper for getting the tail of a deref chain --- src/glsl/nir/nir.h | 9 +++++++++ src/glsl/nir/nir_lower_var_copies.c | 15 ++------------- src/glsl/nir/nir_split_var_copies.c | 12 ++---------- 3 files changed, 13 insertions(+), 23 deletions(-) diff --git a/src/glsl/nir/nir.h b/src/glsl/nir/nir.h index 697d37e95ac..61306e9b7e0 100644 --- a/src/glsl/nir/nir.h +++ b/src/glsl/nir/nir.h @@ -782,6 +782,15 @@ NIR_DEFINE_CAST(nir_deref_as_var, nir_deref, nir_deref_var, deref) NIR_DEFINE_CAST(nir_deref_as_array, nir_deref, nir_deref_array, deref) NIR_DEFINE_CAST(nir_deref_as_struct, nir_deref, nir_deref_struct, deref) +/** Returns the tail of a deref chain */ +static inline nir_deref * +nir_deref_tail(nir_deref *deref) +{ + while (deref->child) + deref = deref->child; + return deref; +} + typedef struct { nir_instr instr; diff --git a/src/glsl/nir/nir_lower_var_copies.c b/src/glsl/nir/nir_lower_var_copies.c index 21672901f04..98c107aa50e 100644 --- a/src/glsl/nir/nir_lower_var_copies.c +++ b/src/glsl/nir/nir_lower_var_copies.c @@ -53,17 +53,6 @@ 
deref_next_wildcard_parent(nir_deref *deref) return NULL; } -/* Returns the last deref in the chain. - */ -static nir_deref * -get_deref_tail(nir_deref *deref) -{ - while (deref->child) - deref = deref->child; - - return deref; -} - /* This function recursively walks the given deref chain and replaces the * given copy instruction with an equivalent sequence load/store * operations. @@ -121,8 +110,8 @@ emit_copy_load_store(nir_intrinsic_instr *copy_instr, } else { /* In this case, we have no wildcards anymore, so all we have to do * is just emit the load and store operations. */ - src_tail = get_deref_tail(src_tail); - dest_tail = get_deref_tail(dest_tail); + src_tail = nir_deref_tail(src_tail); + dest_tail = nir_deref_tail(dest_tail); assert(src_tail->type == dest_tail->type); diff --git a/src/glsl/nir/nir_split_var_copies.c b/src/glsl/nir/nir_split_var_copies.c index fc72c078c77..5c163b59819 100644 --- a/src/glsl/nir/nir_split_var_copies.c +++ b/src/glsl/nir/nir_split_var_copies.c @@ -66,14 +66,6 @@ struct split_var_copies_state { void *dead_ctx; }; -static nir_deref * -get_deref_tail(nir_deref *deref) -{ - while (deref->child != NULL) - deref = deref->child; - return deref; -} - /* Recursively constructs deref chains to split a copy instruction into * multiple (if needed) copy instructions with full-length deref chains. * External callers of this function should pass the tail and head of the @@ -225,8 +217,8 @@ split_var_copies_block(nir_block *block, void *void_state) nir_deref *dest_head = &intrinsic->variables[0]->deref; nir_deref *src_head = &intrinsic->variables[1]->deref; - nir_deref *dest_tail = get_deref_tail(dest_head); - nir_deref *src_tail = get_deref_tail(src_head); + nir_deref *dest_tail = nir_deref_tail(dest_head); + nir_deref *src_tail = nir_deref_tail(src_head); switch (glsl_get_base_type(src_tail->type)) { case GLSL_TYPE_ARRAY: From ae6d32c635707a5391c10ce12af37b79b190b8e8 Mon Sep 17 00:00:00 2001 From: Jason Ekstrand Date: Fri, 1 May 2015 11:28:18 -0700 Subject: [PATCH 24/44] nir/spirv: Implement load/store instructiosn --- src/glsl/nir/spirv_to_nir.c | 72 +++++++++++++++++++++++++++++++++---- 1 file changed, 66 insertions(+), 6 deletions(-) diff --git a/src/glsl/nir/spirv_to_nir.c b/src/glsl/nir/spirv_to_nir.c index de3ad50e25c..85f07e06647 100644 --- a/src/glsl/nir/spirv_to_nir.c +++ b/src/glsl/nir/spirv_to_nir.c @@ -519,9 +519,9 @@ vtn_handle_variables(struct vtn_builder *b, SpvOp opcode, while (tail->child) tail = tail->child; - for (unsigned i = 0; i < count - 3; i++) { - assert(w[i + 3] < b->value_id_bound); - struct vtn_value *idx_val = &b->values[w[i + 3]]; + for (unsigned i = 0; i < count - 4; i++) { + assert(w[i + 4] < b->value_id_bound); + struct vtn_value *idx_val = &b->values[w[i + 4]]; enum glsl_base_type base_type = glsl_get_base_type(tail->type); switch (base_type) { @@ -571,10 +571,70 @@ vtn_handle_variables(struct vtn_builder *b, SpvOp opcode, break; } + case SpvOpCopyMemory: { + nir_deref_var *dest = vtn_value(b, w[1], vtn_value_type_deref)->deref; + nir_deref_var *src = vtn_value(b, w[2], vtn_value_type_deref)->deref; + + nir_intrinsic_instr *copy = + nir_intrinsic_instr_create(b->shader, nir_intrinsic_copy_var); + copy->variables[0] = nir_deref_as_var(nir_copy_deref(copy, &dest->deref)); + copy->variables[1] = nir_deref_as_var(nir_copy_deref(copy, &src->deref)); + + nir_instr_insert_after_cf_list(b->cf_list, ©->instr); + break; + } + + case SpvOpLoad: { + struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_ssa); + nir_deref_var *src = 
vtn_value(b, w[3], vtn_value_type_deref)->deref; + const struct glsl_type *src_type = nir_deref_tail(&src->deref)->type; + assert(glsl_type_is_vector_or_scalar(src_type)); + + nir_intrinsic_instr *load = + nir_intrinsic_instr_create(b->shader, nir_intrinsic_load_var); + load->variables[0] = nir_deref_as_var(nir_copy_deref(load, &src->deref)); + load->num_components = glsl_get_vector_elements(src_type); + nir_ssa_dest_init(&load->instr, &load->dest, load->num_components, + val->name); + + nir_instr_insert_after_cf_list(b->cf_list, &load->instr); + val->ssa = &load->dest.ssa; + break; + } + + case SpvOpStore: { + nir_deref_var *dest = vtn_value(b, w[1], vtn_value_type_deref)->deref; + const struct glsl_type *dest_type = nir_deref_tail(&dest->deref)->type; + struct vtn_value *src_val = vtn_untyped_value(b, w[2]); + if (src_val->value_type == vtn_value_type_ssa) { + assert(glsl_type_is_vector_or_scalar(dest_type)); + nir_intrinsic_instr *store = + nir_intrinsic_instr_create(b->shader, nir_intrinsic_store_var); + store->src[0] = nir_src_for_ssa(src_val->ssa); + store->variables[0] = nir_deref_as_var(nir_copy_deref(store, &dest->deref)); + store->num_components = glsl_get_vector_elements(dest_type); + + nir_instr_insert_after_cf_list(b->cf_list, &store->instr); + } else { + assert(src_val->value_type == vtn_value_type_constant); + + nir_variable *const_tmp = rzalloc(b->shader, nir_variable); + const_tmp->type = dest_type; + const_tmp->data.mode = nir_var_local; + const_tmp->data.read_only = true; + exec_list_push_tail(&b->impl->locals, &const_tmp->node); + + nir_intrinsic_instr *copy = + nir_intrinsic_instr_create(b->shader, nir_intrinsic_copy_var); + copy->variables[0] = nir_deref_as_var(nir_copy_deref(copy, &dest->deref)); + copy->variables[1] = nir_deref_var_create(copy, const_tmp); + + nir_instr_insert_after_cf_list(b->cf_list, ©->instr); + } + break; + } + case SpvOpVariableArray: - case SpvOpLoad: - case SpvOpStore: - case SpvOpCopyMemory: case SpvOpCopyMemorySized: case SpvOpArrayLength: case SpvOpImagePointer: From f23afc549baa8b2b6608c009dbad8606d7a9a07f Mon Sep 17 00:00:00 2001 From: Jason Ekstrand Date: Fri, 1 May 2015 14:00:57 -0700 Subject: [PATCH 25/44] nir/spirv: Split instruction handling into preamble and body sections --- src/glsl/nir/spirv_to_nir.c | 140 ++++++++++++++++++++++++------------ 1 file changed, 94 insertions(+), 46 deletions(-) diff --git a/src/glsl/nir/spirv_to_nir.c b/src/glsl/nir/spirv_to_nir.c index 85f07e06647..898bb6a8e64 100644 --- a/src/glsl/nir/spirv_to_nir.c +++ b/src/glsl/nir/spirv_to_nir.c @@ -114,6 +114,28 @@ vtn_string_literal(struct vtn_builder *b, const uint32_t *words, return ralloc_strndup(b, (char *)words, (word_count - 2) * sizeof(*words)); } +typedef bool (*vtn_instruction_handler)(struct vtn_builder *, SpvOp, + const uint32_t *, unsigned); + +static const uint32_t * +vtn_foreach_instruction(struct vtn_builder *b, const uint32_t *start, + const uint32_t *end, vtn_instruction_handler handler) +{ + const uint32_t *w = start; + while (w < end) { + SpvOp opcode = w[0] & SpvOpCodeMask; + unsigned count = w[0] >> SpvWordCountShift; + assert(count >= 1 && w + count <= end); + + if (!handler(b, opcode, w, count)) + return w; + + w += count; + } + assert(w == end); + return w; +} + static void vtn_handle_extension(struct vtn_builder *b, SpvOp opcode, const uint32_t *w, unsigned count) @@ -714,32 +736,19 @@ vtn_handle_alu(struct vtn_builder *b, SpvOp opcode, unreachable("Unhandled opcode"); } -static void -vtn_handle_instruction(struct vtn_builder *b, 
SpvOp opcode, - const uint32_t *w, unsigned count) +static bool +vtn_handle_preamble_instruction(struct vtn_builder *b, SpvOp opcode, + const uint32_t *w, unsigned count) { switch (opcode) { case SpvOpSource: case SpvOpSourceExtension: - case SpvOpMemberName: - case SpvOpLine: + case SpvOpCompileFlag: case SpvOpExtension: + case SpvOpExtInstImport: /* Unhandled, but these are for debug so that's ok. */ break; - case SpvOpName: - b->values[w[1]].name = vtn_string_literal(b, &w[2], count - 2); - break; - - case SpvOpString: - vtn_push_value(b, w[1], vtn_value_type_string)->str = - vtn_string_literal(b, &w[2], count - 2); - break; - - case SpvOpUndef: - vtn_push_value(b, w[2], vtn_value_type_undef); - break; - case SpvOpMemoryModel: assert(w[1] == SpvAddressingModelLogical); assert(w[2] == SpvMemoryModelGLSL450); @@ -751,20 +760,32 @@ vtn_handle_instruction(struct vtn_builder *b, SpvOp opcode, b->execution_model = w[1]; break; - case SpvOpLabel: { - struct exec_node *list_tail = exec_list_get_tail(b->cf_list); - nir_cf_node *tail_node = exec_node_data(nir_cf_node, list_tail, node); - assert(tail_node->type == nir_cf_node_block); - nir_block *block = nir_cf_node_as_block(tail_node); - - assert(exec_list_is_empty(&block->instr_list)); - vtn_push_value(b, w[1], vtn_value_type_block)->block = block; + case SpvOpExecutionMode: + unreachable("Execution modes not yet implemented"); break; - } - case SpvOpExtInstImport: - case SpvOpExtInst: - vtn_handle_extension(b, opcode, w, count); + case SpvOpString: + vtn_push_value(b, w[1], vtn_value_type_string)->str = + vtn_string_literal(b, &w[2], count - 2); + break; + + case SpvOpName: + b->values[w[1]].name = vtn_string_literal(b, &w[2], count - 2); + break; + + case SpvOpMemberName: + /* TODO */ + break; + + case SpvOpLine: + break; /* Ignored for now */ + + case SpvOpDecorationGroup: + case SpvOpDecorate: + case SpvOpMemberDecorate: + case SpvOpGroupDecorate: + case SpvOpGroupMemberDecorate: + vtn_handle_decoration(b, opcode, w, count); break; case SpvOpTypeVoid: @@ -803,6 +824,41 @@ vtn_handle_instruction(struct vtn_builder *b, SpvOp opcode, vtn_handle_constant(b, opcode, w, count); break; + case SpvOpVariable: + vtn_handle_variables(b, opcode, w, count); + break; + + default: + return false; /* End of preamble */ + } + + return true; +} + +static bool +vtn_handle_body_instruction(struct vtn_builder *b, SpvOp opcode, + const uint32_t *w, unsigned count) +{ + switch (opcode) { + case SpvOpLabel: { + struct exec_node *list_tail = exec_list_get_tail(b->cf_list); + nir_cf_node *tail_node = exec_node_data(nir_cf_node, list_tail, node); + assert(tail_node->type == nir_cf_node_block); + nir_block *block = nir_cf_node_as_block(tail_node); + + assert(exec_list_is_empty(&block->instr_list)); + vtn_push_value(b, w[1], vtn_value_type_block)->block = block; + break; + } + + case SpvOpUndef: + vtn_push_value(b, w[2], vtn_value_type_undef); + break; + + case SpvOpExtInst: + vtn_handle_extension(b, opcode, w, count); + break; + case SpvOpVariable: case SpvOpVariableArray: case SpvOpLoad: @@ -816,14 +872,6 @@ vtn_handle_instruction(struct vtn_builder *b, SpvOp opcode, vtn_handle_variables(b, opcode, w, count); break; - case SpvOpDecorationGroup: - case SpvOpDecorate: - case SpvOpMemberDecorate: - case SpvOpGroupDecorate: - case SpvOpGroupMemberDecorate: - vtn_handle_decoration(b, opcode, w, count); - break; - case SpvOpFunction: case SpvOpFunctionEnd: case SpvOpFunctionParameter: @@ -953,6 +1001,8 @@ vtn_handle_instruction(struct vtn_builder *b, SpvOp opcode, default: 
unreachable("Unhandled opcode"); } + + return true; } nir_shader * @@ -978,17 +1028,15 @@ spirv_to_nir(const uint32_t *words, size_t word_count, b->value_id_bound = value_id_bound; b->values = ralloc_array(b, struct vtn_value, value_id_bound); - /* Start handling instructions */ const uint32_t *word_end = words + word_count; - while (words < word_end) { - SpvOp opcode = words[0] & SpvOpCodeMask; - unsigned count = words[0] >> SpvWordCountShift; - assert(words + count <= word_end); - vtn_handle_instruction(b, opcode, words, count); + /* Handle all the preamble instructions */ + words = vtn_foreach_instruction(b, words, word_end, + vtn_handle_preamble_instruction); - words += count; - } + words = vtn_foreach_instruction(b, words, word_end, + vtn_handle_body_instruction); + assert(words == word_end); ralloc_free(b); From ebc152e4c98eafcbe52aabcf4463664876fb9112 Mon Sep 17 00:00:00 2001 From: Jason Ekstrand Date: Mon, 4 May 2015 10:22:52 -0700 Subject: [PATCH 26/44] nir/spirv: Add a helper for getting a value as an SSA value --- src/glsl/nir/spirv_to_nir.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/src/glsl/nir/spirv_to_nir.c b/src/glsl/nir/spirv_to_nir.c index 898bb6a8e64..eac21c499eb 100644 --- a/src/glsl/nir/spirv_to_nir.c +++ b/src/glsl/nir/spirv_to_nir.c @@ -107,6 +107,12 @@ vtn_value(struct vtn_builder *b, uint32_t value_id, return val; } +static nir_ssa_def * +vtn_ssa_value(struct vtn_builder *b, uint32_t value_id) +{ + return vtn_value(b, value_id, vtn_value_type_ssa)->ssa; +} + static char * vtn_string_literal(struct vtn_builder *b, const uint32_t *words, unsigned word_count) From c5650148a9e4156847f852cd466c882fa668fac0 Mon Sep 17 00:00:00 2001 From: Jason Ekstrand Date: Mon, 4 May 2015 10:23:09 -0700 Subject: [PATCH 27/44] nir/spirv: Handle OpBranchConditional We do control-flow handling as a two-step process. The first step is to walk the instructions list and record various information about blocks and functions. This is where the acutal nir_function_overload objects get created. We also record the start/stop instruction for each block. Then a second pass walks over each of the functions and over the blocks in each function in a way that's NIR-friendly and actually parses the instructions. 
--- src/glsl/nir/spirv_to_nir.c | 257 ++++++++++++++++++++++++++---------- 1 file changed, 189 insertions(+), 68 deletions(-) diff --git a/src/glsl/nir/spirv_to_nir.c b/src/glsl/nir/spirv_to_nir.c index eac21c499eb..c0d77d5453d 100644 --- a/src/glsl/nir/spirv_to_nir.c +++ b/src/glsl/nir/spirv_to_nir.c @@ -44,6 +44,19 @@ enum vtn_value_type { vtn_value_type_ssa, }; +struct vtn_block { + const uint32_t *label; + const uint32_t *branch; + nir_block *block; +}; + +struct vtn_function { + struct exec_node node; + + nir_function_overload *overload; + struct vtn_block *start_block; +}; + struct vtn_value { enum vtn_value_type value_type; const char *name; @@ -54,8 +67,8 @@ struct vtn_value { const struct glsl_type *type; nir_constant *constant; nir_deref_var *deref; - nir_function_impl *impl; - nir_block *block; + struct vtn_function *func; + struct vtn_block *block; nir_ssa_def *ssa; }; }; @@ -71,12 +84,17 @@ struct vtn_builder { nir_shader *shader; nir_function_impl *impl; struct exec_list *cf_list; + struct vtn_block *block; + struct vtn_block *merge_block; unsigned value_id_bound; struct vtn_value *values; SpvExecutionModel execution_model; struct vtn_value *entry_point; + + struct vtn_function *func; + struct exec_list functions; }; static struct vtn_value * @@ -672,60 +690,10 @@ vtn_handle_variables(struct vtn_builder *b, SpvOp opcode, } static void -vtn_handle_functions(struct vtn_builder *b, SpvOp opcode, - const uint32_t *w, unsigned count) +vtn_handle_function_call(struct vtn_builder *b, SpvOp opcode, + const uint32_t *w, unsigned count) { - switch (opcode) { - case SpvOpFunction: { - assert(b->impl == NULL); - - const struct glsl_type *result_type = - vtn_value(b, w[1], vtn_value_type_type)->type; - struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_function); - const struct glsl_type *func_type = - vtn_value(b, w[4], vtn_value_type_type)->type; - - assert(glsl_get_function_return_type(func_type) == result_type); - - nir_function *func = - nir_function_create(b->shader, ralloc_strdup(b->shader, val->name)); - - nir_function_overload *overload = nir_function_overload_create(func); - overload->num_params = glsl_get_length(func_type); - overload->params = ralloc_array(overload, nir_parameter, - overload->num_params); - for (unsigned i = 0; i < overload->num_params; i++) { - const struct glsl_function_param *param = - glsl_get_function_param(func_type, i); - overload->params[i].type = param->type; - if (param->in) { - if (param->out) { - overload->params[i].param_type = nir_parameter_inout; - } else { - overload->params[i].param_type = nir_parameter_in; - } - } else { - if (param->out) { - overload->params[i].param_type = nir_parameter_out; - } else { - assert(!"Parameter is neither in nor out"); - } - } - } - - val->impl = b->impl = nir_function_impl_create(overload); - b->cf_list = &b->impl->body; - - break; - } - case SpvOpFunctionEnd: - b->impl = NULL; - break; - case SpvOpFunctionParameter: - case SpvOpFunctionCall: - default: - unreachable("Unhandled opcode"); - } + unreachable("Unhandled opcode"); } static void @@ -841,22 +809,118 @@ vtn_handle_preamble_instruction(struct vtn_builder *b, SpvOp opcode, return true; } +static bool +vtn_handle_first_cfg_pass_instruction(struct vtn_builder *b, SpvOp opcode, + const uint32_t *w, unsigned count) +{ + switch (opcode) { + case SpvOpFunction: { + assert(b->func == NULL); + b->func = rzalloc(b, struct vtn_function); + + const struct glsl_type *result_type = + vtn_value(b, w[1], vtn_value_type_type)->type; + struct vtn_value *val = 
vtn_push_value(b, w[2], vtn_value_type_function); + const struct glsl_type *func_type = + vtn_value(b, w[4], vtn_value_type_type)->type; + + assert(glsl_get_function_return_type(func_type) == result_type); + + nir_function *func = + nir_function_create(b->shader, ralloc_strdup(b->shader, val->name)); + + nir_function_overload *overload = nir_function_overload_create(func); + overload->num_params = glsl_get_length(func_type); + overload->params = ralloc_array(overload, nir_parameter, + overload->num_params); + for (unsigned i = 0; i < overload->num_params; i++) { + const struct glsl_function_param *param = + glsl_get_function_param(func_type, i); + overload->params[i].type = param->type; + if (param->in) { + if (param->out) { + overload->params[i].param_type = nir_parameter_inout; + } else { + overload->params[i].param_type = nir_parameter_in; + } + } else { + if (param->out) { + overload->params[i].param_type = nir_parameter_out; + } else { + assert(!"Parameter is neither in nor out"); + } + } + } + b->func->overload = overload; + break; + } + + case SpvOpFunctionEnd: + b->func = NULL; + break; + + case SpvOpFunctionParameter: + break; /* Does nothing */ + + case SpvOpLabel: { + assert(b->block == NULL); + b->block = rzalloc(b, struct vtn_block); + b->block->label = w; + vtn_push_value(b, w[1], vtn_value_type_block)->block = b->block; + + if (b->func->start_block == NULL) { + /* This is the first block encountered for this function. In this + * case, we set the start block and add it to the list of + * implemented functions that we'll walk later. + */ + b->func->start_block = b->block; + exec_list_push_tail(&b->functions, &b->func->node); + } + break; + } + + case SpvOpBranch: + case SpvOpBranchConditional: + case SpvOpSwitch: + case SpvOpKill: + case SpvOpReturn: + case SpvOpReturnValue: + case SpvOpUnreachable: + assert(b->block); + b->block->branch = w; + b->block = NULL; + break; + + default: + /* Continue on as per normal */ + return true; + } + + return true; +} + static bool vtn_handle_body_instruction(struct vtn_builder *b, SpvOp opcode, const uint32_t *w, unsigned count) { switch (opcode) { case SpvOpLabel: { + struct vtn_block *block = vtn_value(b, w[1], vtn_value_type_block)->block; struct exec_node *list_tail = exec_list_get_tail(b->cf_list); nir_cf_node *tail_node = exec_node_data(nir_cf_node, list_tail, node); assert(tail_node->type == nir_cf_node_block); - nir_block *block = nir_cf_node_as_block(tail_node); - - assert(exec_list_is_empty(&block->instr_list)); - vtn_push_value(b, w[1], vtn_value_type_block)->block = block; + block->block = nir_cf_node_as_block(tail_node); + assert(exec_list_is_empty(&block->block->instr_list)); break; } + case SpvOpLoopMerge: + case SpvOpSelectionMerge: + assert(b->merge_block == NULL); + /* TODO: Selection Control */ + b->merge_block = vtn_value(b, w[1], vtn_value_type_block)->block; + break; + case SpvOpUndef: vtn_push_value(b, w[2], vtn_value_type_undef); break; @@ -878,11 +942,8 @@ vtn_handle_body_instruction(struct vtn_builder *b, SpvOp opcode, vtn_handle_variables(b, opcode, w, count); break; - case SpvOpFunction: - case SpvOpFunctionEnd: - case SpvOpFunctionParameter: case SpvOpFunctionCall: - vtn_handle_functions(b, opcode, w, count); + vtn_handle_function_call(b, opcode, w, count); break; case SpvOpTextureSample: @@ -1011,10 +1072,65 @@ vtn_handle_body_instruction(struct vtn_builder *b, SpvOp opcode, return true; } +static void +vtn_walk_blocks(struct vtn_builder *b, struct vtn_block *start, + struct vtn_block *end) +{ + struct 
vtn_block *block = start; + while (block != end) { + vtn_foreach_instruction(b, block->label, block->branch, + vtn_handle_body_instruction); + + const uint32_t *w = block->branch; + SpvOp branch_op = w[0] & SpvOpCodeMask; + switch (branch_op) { + case SpvOpBranch: { + assert(vtn_value(b, w[1], vtn_value_type_block)->block == end); + return; + } + + case SpvOpBranchConditional: { + /* Gather up the branch blocks */ + struct vtn_block *then_block = + vtn_value(b, w[2], vtn_value_type_block)->block; + struct vtn_block *else_block = + vtn_value(b, w[3], vtn_value_type_block)->block; + struct vtn_block *merge_block = b->merge_block; + + nir_if *if_stmt = nir_if_create(b->shader); + if_stmt->condition = nir_src_for_ssa(vtn_ssa_value(b, w[1])); + nir_cf_node_insert_end(b->cf_list, &if_stmt->cf_node); + + struct exec_list *old_list = b->cf_list; + + b->cf_list = &if_stmt->then_list; + vtn_walk_blocks(b, then_block, merge_block); + + b->cf_list = &if_stmt->else_list; + vtn_walk_blocks(b, else_block, merge_block); + + b->cf_list = old_list; + block = merge_block; + continue; + } + + case SpvOpSwitch: + case SpvOpKill: + case SpvOpReturn: + case SpvOpReturnValue: + case SpvOpUnreachable: + default: + unreachable("Unhandled opcode"); + } + } +} + nir_shader * spirv_to_nir(const uint32_t *words, size_t word_count, const nir_shader_compiler_options *options) { + const uint32_t *word_end = words + word_count; + /* Handle the SPIR-V header (first 4 dwords) */ assert(word_count > 5); @@ -1033,16 +1149,21 @@ spirv_to_nir(const uint32_t *words, size_t word_count, b->shader = shader; b->value_id_bound = value_id_bound; b->values = ralloc_array(b, struct vtn_value, value_id_bound); - - const uint32_t *word_end = words + word_count; + exec_list_make_empty(&b->functions); /* Handle all the preamble instructions */ words = vtn_foreach_instruction(b, words, word_end, vtn_handle_preamble_instruction); - words = vtn_foreach_instruction(b, words, word_end, - vtn_handle_body_instruction); - assert(words == word_end); + /* Do a very quick CFG analysis pass */ + vtn_foreach_instruction(b, words, word_end, + vtn_handle_first_cfg_pass_instruction); + + foreach_list_typed(struct vtn_function, func, node, &b->functions) { + b->impl = nir_function_impl_create(func->overload); + b->cf_list = &b->impl->body; + vtn_walk_blocks(b, func->start_block, NULL); + } ralloc_free(b); From 683c99908aa3560722614bee6b61969f08cf0616 Mon Sep 17 00:00:00 2001 From: Jason Ekstrand Date: Mon, 4 May 2015 12:02:24 -0700 Subject: [PATCH 28/44] nir/spirv: Explicitly type constants and SSA values --- src/glsl/nir/spirv_to_nir.c | 40 +++++++++++++++++++------------------ 1 file changed, 21 insertions(+), 19 deletions(-) diff --git a/src/glsl/nir/spirv_to_nir.c b/src/glsl/nir/spirv_to_nir.c index c0d77d5453d..4d425c9a846 100644 --- a/src/glsl/nir/spirv_to_nir.c +++ b/src/glsl/nir/spirv_to_nir.c @@ -61,10 +61,10 @@ struct vtn_value { enum vtn_value_type value_type; const char *name; struct vtn_decoration *decoration; + const struct glsl_type *type; union { void *ptr; char *str; - const struct glsl_type *type; nir_constant *constant; nir_deref_var *deref; struct vtn_function *func; @@ -346,20 +346,21 @@ static void vtn_handle_constant(struct vtn_builder *b, SpvOp opcode, const uint32_t *w, unsigned count) { - const struct glsl_type *type = vtn_value(b, w[1], vtn_value_type_type)->type; - nir_constant *constant = ralloc(b, nir_constant); + struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_constant); + val->type = vtn_value(b, w[1], 
vtn_value_type_type)->type; + val->constant = ralloc(b, nir_constant); switch (opcode) { case SpvOpConstantTrue: - assert(type == glsl_bool_type()); - constant->value.u[0] = NIR_TRUE; + assert(val->type == glsl_bool_type()); + val->constant->value.u[0] = NIR_TRUE; break; case SpvOpConstantFalse: - assert(type == glsl_bool_type()); - constant->value.u[0] = NIR_FALSE; + assert(val->type == glsl_bool_type()); + val->constant->value.u[0] = NIR_FALSE; break; case SpvOpConstant: - assert(glsl_type_is_scalar(type)); - constant->value.u[0] = w[3]; + assert(glsl_type_is_scalar(val->type)); + val->constant->value.u[0] = w[3]; break; case SpvOpConstantComposite: { unsigned elem_count = count - 3; @@ -367,29 +368,30 @@ vtn_handle_constant(struct vtn_builder *b, SpvOp opcode, for (unsigned i = 0; i < elem_count; i++) elems[i] = vtn_value(b, w[i + 3], vtn_value_type_constant)->constant; - switch (glsl_get_base_type(type)) { + switch (glsl_get_base_type(val->type)) { case GLSL_TYPE_UINT: case GLSL_TYPE_INT: case GLSL_TYPE_FLOAT: case GLSL_TYPE_BOOL: - if (glsl_type_is_matrix(type)) { - unsigned rows = glsl_get_vector_elements(type); - assert(glsl_get_matrix_columns(type) == elem_count); + if (glsl_type_is_matrix(val->type)) { + unsigned rows = glsl_get_vector_elements(val->type); + assert(glsl_get_matrix_columns(val->type) == elem_count); for (unsigned i = 0; i < elem_count; i++) for (unsigned j = 0; j < rows; j++) - constant->value.u[rows * i + j] = elems[i]->value.u[j]; + val->constant->value.u[rows * i + j] = elems[i]->value.u[j]; } else { - assert(glsl_type_is_vector(type)); - assert(glsl_get_vector_elements(type) == elem_count); + assert(glsl_type_is_vector(val->type)); + assert(glsl_get_vector_elements(val->type) == elem_count); for (unsigned i = 0; i < elem_count; i++) - constant->value.u[i] = elems[i]->value.u[0]; + val->constant->value.u[i] = elems[i]->value.u[0]; } ralloc_free(elems); break; case GLSL_TYPE_STRUCT: case GLSL_TYPE_ARRAY: - constant->elements = elems; + ralloc_steal(val->constant, elems); + val->constant->elements = elems; break; default: @@ -401,7 +403,6 @@ vtn_handle_constant(struct vtn_builder *b, SpvOp opcode, default: unreachable("Unhandled opcode"); } - vtn_push_value(b, w[2], vtn_value_type_constant)->constant = constant; } static void @@ -644,6 +645,7 @@ vtn_handle_variables(struct vtn_builder *b, SpvOp opcode, val->name); nir_instr_insert_after_cf_list(b->cf_list, &load->instr); + val->type = src_type; val->ssa = &load->dest.ssa; break; } From d2a7972557209cfe47fd1d7325ccbca8b3b844a5 Mon Sep 17 00:00:00 2001 From: Jason Ekstrand Date: Mon, 4 May 2015 12:02:57 -0700 Subject: [PATCH 29/44] nir/spirv: Add support for indirect array accesses --- src/glsl/nir/spirv_to_nir.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/glsl/nir/spirv_to_nir.c b/src/glsl/nir/spirv_to_nir.c index 4d425c9a846..3f8ce2af10e 100644 --- a/src/glsl/nir/spirv_to_nir.c +++ b/src/glsl/nir/spirv_to_nir.c @@ -595,8 +595,8 @@ vtn_handle_variables(struct vtn_builder *b, SpvOp opcode, } else { assert(idx_val->value_type == vtn_value_type_ssa); deref_arr->deref_array_type = nir_deref_array_type_indirect; - /* TODO */ - unreachable("Indirect array accesses not implemented"); + deref_arr->base_offset = 0; + deref_arr->indirect = nir_src_for_ssa(vtn_ssa_value(b, w[1])); } tail->child = &deref_arr->deref; break; From ff828749eab5d100ba61988f1b6c17712e751559 Mon Sep 17 00:00:00 2001 From: Jason Ekstrand Date: Mon, 4 May 2015 12:04:02 -0700 Subject: [PATCH 30/44] nir/spirv: Add support 
for a bunch of ALU operations --- src/glsl/nir/spirv_to_nir.c | 202 ++++++++++++++++++++++++++++++++++-- 1 file changed, 195 insertions(+), 7 deletions(-) diff --git a/src/glsl/nir/spirv_to_nir.c b/src/glsl/nir/spirv_to_nir.c index 3f8ce2af10e..734ffeeed54 100644 --- a/src/glsl/nir/spirv_to_nir.c +++ b/src/glsl/nir/spirv_to_nir.c @@ -27,6 +27,7 @@ #include "nir_spirv.h" #include "nir_vla.h" +#include "nir_builder.h" #include "spirv.h" struct vtn_decoration; @@ -81,6 +82,8 @@ struct vtn_decoration { }; struct vtn_builder { + nir_builder nb; + nir_shader *shader; nir_function_impl *impl; struct exec_list *cf_list; @@ -705,11 +708,192 @@ vtn_handle_texture(struct vtn_builder *b, SpvOp opcode, unreachable("Unhandled opcode"); } +static void +vtn_handle_matrix_alu(struct vtn_builder *b, SpvOp opcode, + const uint32_t *w, unsigned count) +{ + unreachable("Matrix math not handled"); +} + static void vtn_handle_alu(struct vtn_builder *b, SpvOp opcode, const uint32_t *w, unsigned count) { - unreachable("Unhandled opcode"); + struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_ssa); + val->type = vtn_value(b, w[1], vtn_value_type_type)->type; + + /* Collect the various SSA sources */ + unsigned num_inputs = count - 3; + nir_ssa_def *src[4]; + for (unsigned i = 0; i < num_inputs; i++) + src[i] = vtn_ssa_value(b, w[i + 3]); + + /* We use the builder for some of the instructions. Go ahead and + * initialize it with the current cf_list. + */ + nir_builder_insert_after_cf_list(&b->nb, b->cf_list); + + /* Indicates that the first two arguments should be swapped. This is + * used for implementing greater-than and less-than-or-equal. + */ + bool swap = false; + + nir_op op; + switch (opcode) { + /* Basic ALU operations */ + case SpvOpSNegate: op = nir_op_ineg; break; + case SpvOpFNegate: op = nir_op_fneg; break; + case SpvOpNot: op = nir_op_inot; break; + + case SpvOpAny: + switch (src[0]->num_components) { + case 1: op = nir_op_imov; break; + case 2: op = nir_op_bany2; break; + case 3: op = nir_op_bany3; break; + case 4: op = nir_op_bany4; break; + } + break; + + case SpvOpAll: + switch (src[0]->num_components) { + case 1: op = nir_op_imov; break; + case 2: op = nir_op_ball2; break; + case 3: op = nir_op_ball3; break; + case 4: op = nir_op_ball4; break; + } + break; + + case SpvOpIAdd: op = nir_op_iadd; break; + case SpvOpFAdd: op = nir_op_fadd; break; + case SpvOpISub: op = nir_op_isub; break; + case SpvOpFSub: op = nir_op_fsub; break; + case SpvOpIMul: op = nir_op_imul; break; + case SpvOpFMul: op = nir_op_fmul; break; + case SpvOpUDiv: op = nir_op_udiv; break; + case SpvOpSDiv: op = nir_op_idiv; break; + case SpvOpFDiv: op = nir_op_fdiv; break; + case SpvOpUMod: op = nir_op_umod; break; + case SpvOpSMod: op = nir_op_umod; break; /* FIXME? 
*/ + case SpvOpFMod: op = nir_op_fmod; break; + + case SpvOpDot: + assert(src[0]->num_components == src[1]->num_components); + switch (src[0]->num_components) { + case 1: op = nir_op_fmul; break; + case 2: op = nir_op_fdot2; break; + case 3: op = nir_op_fdot3; break; + case 4: op = nir_op_fdot4; break; + } + break; + + case SpvOpShiftRightLogical: op = nir_op_ushr; break; + case SpvOpShiftRightArithmetic: op = nir_op_ishr; break; + case SpvOpShiftLeftLogical: op = nir_op_ishl; break; + case SpvOpLogicalOr: op = nir_op_ior; break; + case SpvOpLogicalXor: op = nir_op_ixor; break; + case SpvOpLogicalAnd: op = nir_op_iand; break; + case SpvOpBitwiseOr: op = nir_op_ior; break; + case SpvOpBitwiseXor: op = nir_op_ixor; break; + case SpvOpBitwiseAnd: op = nir_op_iand; break; + case SpvOpSelect: op = nir_op_bcsel; break; + case SpvOpIEqual: op = nir_op_ieq; break; + + /* Comparisons: (TODO: How do we want to handled ordered/unordered?) */ + case SpvOpFOrdEqual: op = nir_op_feq; break; + case SpvOpFUnordEqual: op = nir_op_feq; break; + case SpvOpINotEqual: op = nir_op_ine; break; + case SpvOpFOrdNotEqual: op = nir_op_fne; break; + case SpvOpFUnordNotEqual: op = nir_op_fne; break; + case SpvOpULessThan: op = nir_op_ult; break; + case SpvOpSLessThan: op = nir_op_ilt; break; + case SpvOpFOrdLessThan: op = nir_op_flt; break; + case SpvOpFUnordLessThan: op = nir_op_flt; break; + case SpvOpUGreaterThan: op = nir_op_ult; swap = true; break; + case SpvOpSGreaterThan: op = nir_op_ilt; swap = true; break; + case SpvOpFOrdGreaterThan: op = nir_op_flt; swap = true; break; + case SpvOpFUnordGreaterThan: op = nir_op_flt; swap = true; break; + case SpvOpULessThanEqual: op = nir_op_uge; swap = true; break; + case SpvOpSLessThanEqual: op = nir_op_ige; swap = true; break; + case SpvOpFOrdLessThanEqual: op = nir_op_fge; swap = true; break; + case SpvOpFUnordLessThanEqual: op = nir_op_fge; swap = true; break; + case SpvOpUGreaterThanEqual: op = nir_op_uge; break; + case SpvOpSGreaterThanEqual: op = nir_op_ige; break; + case SpvOpFOrdGreaterThanEqual: op = nir_op_fge; break; + case SpvOpFUnordGreaterThanEqual:op = nir_op_fge; break; + + /* Conversions: */ + case SpvOpConvertFToU: op = nir_op_f2u; break; + case SpvOpConvertFToS: op = nir_op_f2i; break; + case SpvOpConvertSToF: op = nir_op_i2f; break; + case SpvOpConvertUToF: op = nir_op_u2f; break; + case SpvOpBitcast: op = nir_op_imov; break; + case SpvOpUConvert: + case SpvOpSConvert: + op = nir_op_imov; /* TODO: NIR is 32-bit only; these are no-ops. */ + break; + case SpvOpFConvert: + op = nir_op_fmov; + break; + + /* Derivatives: */ + case SpvOpDPdx: op = nir_op_fddx; break; + case SpvOpDPdy: op = nir_op_fddy; break; + case SpvOpDPdxFine: op = nir_op_fddx_fine; break; + case SpvOpDPdyFine: op = nir_op_fddy_fine; break; + case SpvOpDPdxCoarse: op = nir_op_fddx_coarse; break; + case SpvOpDPdyCoarse: op = nir_op_fddy_coarse; break; + case SpvOpFwidth: + val->ssa = nir_fadd(&b->nb, + nir_fabs(&b->nb, nir_fddx(&b->nb, src[0])), + nir_fabs(&b->nb, nir_fddx(&b->nb, src[1]))); + return; + case SpvOpFwidthFine: + val->ssa = nir_fadd(&b->nb, + nir_fabs(&b->nb, nir_fddx_fine(&b->nb, src[0])), + nir_fabs(&b->nb, nir_fddx_fine(&b->nb, src[1]))); + return; + case SpvOpFwidthCoarse: + val->ssa = nir_fadd(&b->nb, + nir_fabs(&b->nb, nir_fddx_coarse(&b->nb, src[0])), + nir_fabs(&b->nb, nir_fddx_coarse(&b->nb, src[1]))); + return; + + case SpvOpVectorTimesScalar: + /* The builder will take care of splatting for us. 
*/ + val->ssa = nir_fmul(&b->nb, src[0], src[1]); + return; + + case SpvOpSRem: + case SpvOpFRem: + unreachable("No NIR equivalent"); + + case SpvOpIsNan: + case SpvOpIsInf: + case SpvOpIsFinite: + case SpvOpIsNormal: + case SpvOpSignBitSet: + case SpvOpLessOrGreater: + case SpvOpOrdered: + case SpvOpUnordered: + default: + unreachable("Unhandled opcode"); + } + + if (swap) { + nir_ssa_def *tmp = src[0]; + src[0] = src[1]; + src[1] = tmp; + } + + nir_alu_instr *instr = nir_alu_instr_create(b->shader, op); + nir_ssa_dest_init(&instr->instr, &instr->dest.dest, + glsl_get_vector_elements(val->type), val->name); + val->ssa = &instr->dest.dest.ssa; + + for (unsigned i = 0; i < nir_op_infos[op].num_inputs; i++) + instr->src[i].src = nir_src_for_ssa(src[i]); + + nir_instr_insert_after_cf_list(b->cf_list, &instr->instr); } static bool @@ -993,7 +1177,6 @@ vtn_handle_body_instruction(struct vtn_builder *b, SpvOp opcode, case SpvOpPtrCastToGeneric: case SpvOpGenericCastToPtr: case SpvOpBitcast: - case SpvOpTranspose: case SpvOpIsNan: case SpvOpIsInf: case SpvOpIsFinite: @@ -1017,11 +1200,6 @@ vtn_handle_body_instruction(struct vtn_builder *b, SpvOp opcode, case SpvOpFRem: case SpvOpFMod: case SpvOpVectorTimesScalar: - case SpvOpMatrixTimesScalar: - case SpvOpVectorTimesMatrix: - case SpvOpMatrixTimesVector: - case SpvOpMatrixTimesMatrix: - case SpvOpOuterProduct: case SpvOpDot: case SpvOpShiftRightLogical: case SpvOpShiftRightArithmetic: @@ -1067,6 +1245,15 @@ vtn_handle_body_instruction(struct vtn_builder *b, SpvOp opcode, vtn_handle_alu(b, opcode, w, count); break; + case SpvOpTranspose: + case SpvOpOuterProduct: + case SpvOpMatrixTimesScalar: + case SpvOpVectorTimesMatrix: + case SpvOpMatrixTimesVector: + case SpvOpMatrixTimesMatrix: + vtn_handle_matrix_alu(b, opcode, w, count); + break; + default: unreachable("Unhandled opcode"); } @@ -1163,6 +1350,7 @@ spirv_to_nir(const uint32_t *words, size_t word_count, foreach_list_typed(struct vtn_function, func, node, &b->functions) { b->impl = nir_function_impl_create(func->overload); + nir_builder_init(&b->nb, b->impl); b->cf_list = &b->impl->body; vtn_walk_blocks(b, func->start_block, NULL); } From 98d78856f6f8965448f8ae5db74ab2f0609cb45e Mon Sep 17 00:00:00 2001 From: Jason Ekstrand Date: Mon, 4 May 2015 12:12:23 -0700 Subject: [PATCH 31/44] nir/spirv: Use the builder for all instructions We don't actually use it to create all the instructions but we do use it for insertion always. This should make things far more consistent for implementing extended instructions. 
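The mechanical change is mostly this pattern, with the builder's insertion
point set once per control-flow list via nir_builder_insert_after_cf_list():

   -   nir_instr_insert_after_cf_list(b->cf_list, &instr->instr);
   +   nir_builder_instr_insert(&b->nb, &instr->instr);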
--- src/glsl/nir/spirv_to_nir.c | 30 ++++++++++++------------------ 1 file changed, 12 insertions(+), 18 deletions(-) diff --git a/src/glsl/nir/spirv_to_nir.c b/src/glsl/nir/spirv_to_nir.c index 734ffeeed54..3af84aecaa4 100644 --- a/src/glsl/nir/spirv_to_nir.c +++ b/src/glsl/nir/spirv_to_nir.c @@ -86,7 +86,6 @@ struct vtn_builder { nir_shader *shader; nir_function_impl *impl; - struct exec_list *cf_list; struct vtn_block *block; struct vtn_block *merge_block; @@ -630,7 +629,7 @@ vtn_handle_variables(struct vtn_builder *b, SpvOp opcode, copy->variables[0] = nir_deref_as_var(nir_copy_deref(copy, &dest->deref)); copy->variables[1] = nir_deref_as_var(nir_copy_deref(copy, &src->deref)); - nir_instr_insert_after_cf_list(b->cf_list, ©->instr); + nir_builder_instr_insert(&b->nb, ©->instr); break; } @@ -647,7 +646,7 @@ vtn_handle_variables(struct vtn_builder *b, SpvOp opcode, nir_ssa_dest_init(&load->instr, &load->dest, load->num_components, val->name); - nir_instr_insert_after_cf_list(b->cf_list, &load->instr); + nir_builder_instr_insert(&b->nb, &load->instr); val->type = src_type; val->ssa = &load->dest.ssa; break; @@ -665,7 +664,7 @@ vtn_handle_variables(struct vtn_builder *b, SpvOp opcode, store->variables[0] = nir_deref_as_var(nir_copy_deref(store, &dest->deref)); store->num_components = glsl_get_vector_elements(dest_type); - nir_instr_insert_after_cf_list(b->cf_list, &store->instr); + nir_builder_instr_insert(&b->nb, &store->instr); } else { assert(src_val->value_type == vtn_value_type_constant); @@ -680,7 +679,7 @@ vtn_handle_variables(struct vtn_builder *b, SpvOp opcode, copy->variables[0] = nir_deref_as_var(nir_copy_deref(copy, &dest->deref)); copy->variables[1] = nir_deref_var_create(copy, const_tmp); - nir_instr_insert_after_cf_list(b->cf_list, ©->instr); + nir_builder_instr_insert(&b->nb, ©->instr); } break; } @@ -728,11 +727,6 @@ vtn_handle_alu(struct vtn_builder *b, SpvOp opcode, for (unsigned i = 0; i < num_inputs; i++) src[i] = vtn_ssa_value(b, w[i + 3]); - /* We use the builder for some of the instructions. Go ahead and - * initialize it with the current cf_list. - */ - nir_builder_insert_after_cf_list(&b->nb, b->cf_list); - /* Indicates that the first two arguments should be swapped. This is * used for implementing greater-than and less-than-or-equal. 
*/ @@ -893,7 +887,7 @@ vtn_handle_alu(struct vtn_builder *b, SpvOp opcode, for (unsigned i = 0; i < nir_op_infos[op].num_inputs; i++) instr->src[i].src = nir_src_for_ssa(src[i]); - nir_instr_insert_after_cf_list(b->cf_list, &instr->instr); + nir_builder_instr_insert(&b->nb, &instr->instr); } static bool @@ -1092,7 +1086,7 @@ vtn_handle_body_instruction(struct vtn_builder *b, SpvOp opcode, switch (opcode) { case SpvOpLabel: { struct vtn_block *block = vtn_value(b, w[1], vtn_value_type_block)->block; - struct exec_node *list_tail = exec_list_get_tail(b->cf_list); + struct exec_node *list_tail = exec_list_get_tail(b->nb.cf_node_list); nir_cf_node *tail_node = exec_node_data(nir_cf_node, list_tail, node); assert(tail_node->type == nir_cf_node_block); block->block = nir_cf_node_as_block(tail_node); @@ -1288,17 +1282,17 @@ vtn_walk_blocks(struct vtn_builder *b, struct vtn_block *start, nir_if *if_stmt = nir_if_create(b->shader); if_stmt->condition = nir_src_for_ssa(vtn_ssa_value(b, w[1])); - nir_cf_node_insert_end(b->cf_list, &if_stmt->cf_node); + nir_cf_node_insert_end(b->nb.cf_node_list, &if_stmt->cf_node); - struct exec_list *old_list = b->cf_list; + struct exec_list *old_list = b->nb.cf_node_list; - b->cf_list = &if_stmt->then_list; + nir_builder_insert_after_cf_list(&b->nb, &if_stmt->then_list); vtn_walk_blocks(b, then_block, merge_block); - b->cf_list = &if_stmt->else_list; + nir_builder_insert_after_cf_list(&b->nb, &if_stmt->else_list); vtn_walk_blocks(b, else_block, merge_block); - b->cf_list = old_list; + nir_builder_insert_after_cf_list(&b->nb, old_list); block = merge_block; continue; } @@ -1351,7 +1345,7 @@ spirv_to_nir(const uint32_t *words, size_t word_count, foreach_list_typed(struct vtn_function, func, node, &b->functions) { b->impl = nir_function_impl_create(func->overload); nir_builder_init(&b->nb, b->impl); - b->cf_list = &b->impl->body; + nir_builder_insert_after_cf_list(&b->nb, &b->impl->body); vtn_walk_blocks(b, func->start_block, NULL); } From 1da9876486b61aa2a6f8c10da75e1b852058ef7d Mon Sep 17 00:00:00 2001 From: Jason Ekstrand Date: Mon, 4 May 2015 12:25:09 -0700 Subject: [PATCH 32/44] nir/spirv: Split the core datastructures into a header file --- src/glsl/nir/spirv_to_nir.c | 117 +---------------------- src/glsl/nir/spirv_to_nir_private.h | 141 ++++++++++++++++++++++++++++ 2 files changed, 146 insertions(+), 112 deletions(-) create mode 100644 src/glsl/nir/spirv_to_nir_private.h diff --git a/src/glsl/nir/spirv_to_nir.c b/src/glsl/nir/spirv_to_nir.c index 3af84aecaa4..20b7f6e1dd1 100644 --- a/src/glsl/nir/spirv_to_nir.c +++ b/src/glsl/nir/spirv_to_nir.c @@ -25,109 +25,10 @@ * */ -#include "nir_spirv.h" +#include "spirv_to_nir_private.h" #include "nir_vla.h" -#include "nir_builder.h" -#include "spirv.h" -struct vtn_decoration; - -enum vtn_value_type { - vtn_value_type_invalid = 0, - vtn_value_type_undef, - vtn_value_type_string, - vtn_value_type_decoration_group, - vtn_value_type_type, - vtn_value_type_constant, - vtn_value_type_deref, - vtn_value_type_function, - vtn_value_type_block, - vtn_value_type_ssa, -}; - -struct vtn_block { - const uint32_t *label; - const uint32_t *branch; - nir_block *block; -}; - -struct vtn_function { - struct exec_node node; - - nir_function_overload *overload; - struct vtn_block *start_block; -}; - -struct vtn_value { - enum vtn_value_type value_type; - const char *name; - struct vtn_decoration *decoration; - const struct glsl_type *type; - union { - void *ptr; - char *str; - nir_constant *constant; - nir_deref_var *deref; - struct 
vtn_function *func; - struct vtn_block *block; - nir_ssa_def *ssa; - }; -}; - -struct vtn_decoration { - struct vtn_decoration *next; - const uint32_t *literals; - struct vtn_value *group; - SpvDecoration decoration; -}; - -struct vtn_builder { - nir_builder nb; - - nir_shader *shader; - nir_function_impl *impl; - struct vtn_block *block; - struct vtn_block *merge_block; - - unsigned value_id_bound; - struct vtn_value *values; - - SpvExecutionModel execution_model; - struct vtn_value *entry_point; - - struct vtn_function *func; - struct exec_list functions; -}; - -static struct vtn_value * -vtn_push_value(struct vtn_builder *b, uint32_t value_id, - enum vtn_value_type value_type) -{ - assert(value_id < b->value_id_bound); - assert(b->values[value_id].value_type == vtn_value_type_invalid); - - b->values[value_id].value_type = value_type; - - return &b->values[value_id]; -} - -static struct vtn_value * -vtn_untyped_value(struct vtn_builder *b, uint32_t value_id) -{ - assert(value_id < b->value_id_bound); - return &b->values[value_id]; -} - -static struct vtn_value * -vtn_value(struct vtn_builder *b, uint32_t value_id, - enum vtn_value_type value_type) -{ - struct vtn_value *val = vtn_untyped_value(b, value_id); - assert(val->value_type == value_type); - return val; -} - -static nir_ssa_def * +nir_ssa_def * vtn_ssa_value(struct vtn_builder *b, uint32_t value_id) { return vtn_value(b, value_id, vtn_value_type_ssa)->ssa; @@ -140,9 +41,6 @@ vtn_string_literal(struct vtn_builder *b, const uint32_t *words, return ralloc_strndup(b, (char *)words, (word_count - 2) * sizeof(*words)); } -typedef bool (*vtn_instruction_handler)(struct vtn_builder *, SpvOp, - const uint32_t *, unsigned); - static const uint32_t * vtn_foreach_instruction(struct vtn_builder *b, const uint32_t *start, const uint32_t *end, vtn_instruction_handler handler) @@ -177,16 +75,11 @@ vtn_handle_extension(struct vtn_builder *b, SpvOp opcode, } } -typedef void (*decoration_foreach_cb)(struct vtn_builder *, - struct vtn_value *, - const struct vtn_decoration *, - void *); - static void _foreach_decoration_helper(struct vtn_builder *b, struct vtn_value *base_value, struct vtn_value *value, - decoration_foreach_cb cb, void *data) + vtn_decoration_foreach_cb cb, void *data) { for (struct vtn_decoration *dec = value->decoration; dec; dec = dec->next) { if (dec->group) { @@ -204,9 +97,9 @@ _foreach_decoration_helper(struct vtn_builder *b, * value. If it encounters a decoration group, it recurses into the group * and iterates over all of those decorations as well. 
*/ -static void +void vtn_foreach_decoration(struct vtn_builder *b, struct vtn_value *value, - decoration_foreach_cb cb, void *data) + vtn_decoration_foreach_cb cb, void *data) { _foreach_decoration_helper(b, value, value, cb, data); } diff --git a/src/glsl/nir/spirv_to_nir_private.h b/src/glsl/nir/spirv_to_nir_private.h new file mode 100644 index 00000000000..0a07b377e72 --- /dev/null +++ b/src/glsl/nir/spirv_to_nir_private.h @@ -0,0 +1,141 @@ +/* + * Copyright © 2015 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + * + * Authors: + * Jason Ekstrand (jason@jlekstrand.net) + * + */ + +#include "nir_spirv.h" +#include "nir_builder.h" +#include "spirv.h" + +struct vtn_builder; +struct vtn_decoration; + +enum vtn_value_type { + vtn_value_type_invalid = 0, + vtn_value_type_undef, + vtn_value_type_string, + vtn_value_type_decoration_group, + vtn_value_type_type, + vtn_value_type_constant, + vtn_value_type_deref, + vtn_value_type_function, + vtn_value_type_block, + vtn_value_type_ssa, +}; + +struct vtn_block { + const uint32_t *label; + const uint32_t *branch; + nir_block *block; +}; + +struct vtn_function { + struct exec_node node; + + nir_function_overload *overload; + struct vtn_block *start_block; +}; + +typedef bool (*vtn_instruction_handler)(struct vtn_builder *, uint32_t, + const uint32_t *, unsigned); + +struct vtn_value { + enum vtn_value_type value_type; + const char *name; + struct vtn_decoration *decoration; + const struct glsl_type *type; + union { + void *ptr; + char *str; + nir_constant *constant; + nir_deref_var *deref; + struct vtn_function *func; + struct vtn_block *block; + nir_ssa_def *ssa; + }; +}; + +struct vtn_decoration { + struct vtn_decoration *next; + const uint32_t *literals; + struct vtn_value *group; + SpvDecoration decoration; +}; + +struct vtn_builder { + nir_builder nb; + + nir_shader *shader; + nir_function_impl *impl; + struct vtn_block *block; + struct vtn_block *merge_block; + + unsigned value_id_bound; + struct vtn_value *values; + + SpvExecutionModel execution_model; + struct vtn_value *entry_point; + + struct vtn_function *func; + struct exec_list functions; +}; + +static inline struct vtn_value * +vtn_push_value(struct vtn_builder *b, uint32_t value_id, + enum vtn_value_type value_type) +{ + assert(value_id < b->value_id_bound); + assert(b->values[value_id].value_type == vtn_value_type_invalid); + + b->values[value_id].value_type = value_type; + + return &b->values[value_id]; 
+} + +static inline struct vtn_value * +vtn_untyped_value(struct vtn_builder *b, uint32_t value_id) +{ + assert(value_id < b->value_id_bound); + return &b->values[value_id]; +} + +static inline struct vtn_value * +vtn_value(struct vtn_builder *b, uint32_t value_id, + enum vtn_value_type value_type) +{ + struct vtn_value *val = vtn_untyped_value(b, value_id); + assert(val->value_type == value_type); + return val; +} + +nir_ssa_def *vtn_ssa_value(struct vtn_builder *b, uint32_t value_id); + +typedef void (*vtn_decoration_foreach_cb)(struct vtn_builder *, + struct vtn_value *, + const struct vtn_decoration *, + void *); + +void vtn_foreach_decoration(struct vtn_builder *b, struct vtn_value *value, + vtn_decoration_foreach_cb cb, void *data); From b0d1854efc3863b4e362496d77820b865d3e8357 Mon Sep 17 00:00:00 2001 From: Jason Ekstrand Date: Mon, 4 May 2015 15:17:56 -0700 Subject: [PATCH 33/44] nir/spirv: Add initial support for GLSL 4.50 builtins --- src/glsl/Makefile.sources | 1 + src/glsl/nir/spirv_glsl450_to_nir.c | 284 ++++++++++++++++++++++++++++ src/glsl/nir/spirv_to_nir.c | 24 ++- src/glsl/nir/spirv_to_nir_private.h | 5 + 4 files changed, 310 insertions(+), 4 deletions(-) create mode 100644 src/glsl/nir/spirv_glsl450_to_nir.c diff --git a/src/glsl/Makefile.sources b/src/glsl/Makefile.sources index be6e4ecf839..a234ac6f8e2 100644 --- a/src/glsl/Makefile.sources +++ b/src/glsl/Makefile.sources @@ -70,6 +70,7 @@ NIR_FILES = \ nir/nir_worklist.h \ nir/nir_types.cpp \ nir/spirv_to_nir.c \ + nir/spirv_glsl450_to_nir.c \ $(NIR_GENERATED_FILES) # libglsl diff --git a/src/glsl/nir/spirv_glsl450_to_nir.c b/src/glsl/nir/spirv_glsl450_to_nir.c new file mode 100644 index 00000000000..240ff012fe1 --- /dev/null +++ b/src/glsl/nir/spirv_glsl450_to_nir.c @@ -0,0 +1,284 @@ +/* + * Copyright © 2015 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. 
+ * + * Authors: + * Jason Ekstrand (jason@jlekstrand.net) + * + */ + +#include "spirv_to_nir_private.h" + +enum GLSL450Entrypoint { + Round = 0, + RoundEven = 1, + Trunc = 2, + Abs = 3, + Sign = 4, + Floor = 5, + Ceil = 6, + Fract = 7, + + Radians = 8, + Degrees = 9, + Sin = 10, + Cos = 11, + Tan = 12, + Asin = 13, + Acos = 14, + Atan = 15, + Sinh = 16, + Cosh = 17, + Tanh = 18, + Asinh = 19, + Acosh = 20, + Atanh = 21, + Atan2 = 22, + + Pow = 23, + Exp = 24, + Log = 25, + Exp2 = 26, + Log2 = 27, + Sqrt = 28, + InverseSqrt = 29, + + Determinant = 30, + MatrixInverse = 31, + + Modf = 32, // second argument needs the OpVariable = , not an OpLoad + Min = 33, + Max = 34, + Clamp = 35, + Mix = 36, + Step = 37, + SmoothStep = 38, + + FloatBitsToInt = 39, + FloatBitsToUint = 40, + IntBitsToFloat = 41, + UintBitsToFloat = 42, + + Fma = 43, + Frexp = 44, + Ldexp = 45, + + PackSnorm4x8 = 46, + PackUnorm4x8 = 47, + PackSnorm2x16 = 48, + PackUnorm2x16 = 49, + PackHalf2x16 = 50, + PackDouble2x32 = 51, + UnpackSnorm2x16 = 52, + UnpackUnorm2x16 = 53, + UnpackHalf2x16 = 54, + UnpackSnorm4x8 = 55, + UnpackUnorm4x8 = 56, + UnpackDouble2x32 = 57, + + Length = 58, + Distance = 59, + Cross = 60, + Normalize = 61, + Ftransform = 62, + FaceForward = 63, + Reflect = 64, + Refract = 65, + + UaddCarry = 66, + UsubBorrow = 67, + UmulExtended = 68, + ImulExtended = 69, + BitfieldExtract = 70, + BitfieldInsert = 71, + BitfieldReverse = 72, + BitCount = 73, + FindLSB = 74, + FindMSB = 75, + + InterpolateAtCentroid = 76, + InterpolateAtSample = 77, + InterpolateAtOffset = 78, + + Count +}; + +static nir_ssa_def* +build_length(nir_builder *b, nir_ssa_def *vec) +{ + switch (vec->num_components) { + case 1: return nir_fsqrt(b, nir_fmul(b, vec, vec)); + case 2: return nir_fsqrt(b, nir_fdot2(b, vec, vec)); + case 3: return nir_fsqrt(b, nir_fdot3(b, vec, vec)); + case 4: return nir_fsqrt(b, nir_fdot4(b, vec, vec)); + default: + unreachable("Invalid number of components"); + } +} + +static void +handle_glsl450_alu(struct vtn_builder *b, enum GLSL450Entrypoint entrypoint, + const uint32_t *w, unsigned count) +{ + struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_ssa); + val->type = vtn_value(b, w[1], vtn_value_type_type)->type; + + /* Collect the various SSA sources */ + unsigned num_inputs = count - 5; + nir_ssa_def *src[3]; + for (unsigned i = 0; i < num_inputs; i++) + src[i] = vtn_ssa_value(b, w[i + 5]); + + nir_op op; + switch (entrypoint) { + case Round: op = nir_op_fround_even; break; /* TODO */ + case RoundEven: op = nir_op_fround_even; break; + case Trunc: op = nir_op_ftrunc; break; + case Abs: op = nir_op_fabs; break; + case Sign: op = nir_op_fsign; break; + case Floor: op = nir_op_ffloor; break; + case Ceil: op = nir_op_fceil; break; + case Fract: op = nir_op_ffract; break; + case Radians: + val->ssa = nir_fmul(&b->nb, src[0], nir_imm_float(&b->nb, 0.01745329251)); + return; + case Degrees: + val->ssa = nir_fmul(&b->nb, src[0], nir_imm_float(&b->nb, 57.2957795131)); + return; + case Sin: op = nir_op_fsin; break; + case Cos: op = nir_op_fcos; break; + case Tan: + val->ssa = nir_fdiv(&b->nb, nir_fsin(&b->nb, src[0]), + nir_fcos(&b->nb, src[0])); + return; + case Pow: op = nir_op_fpow; break; + case Exp: op = nir_op_fexp; break; + case Log: op = nir_op_flog; break; + case Exp2: op = nir_op_fexp2; break; + case Log2: op = nir_op_flog2; break; + case Sqrt: op = nir_op_fsqrt; break; + case InverseSqrt: op = nir_op_frsq; break; + + case Modf: op = nir_op_fmod; break; + case Min: op = nir_op_fmin; break; + case 
Max: op = nir_op_fmax; break; + case Mix: op = nir_op_flrp; break; + case Step: + val->ssa = nir_sge(&b->nb, src[1], src[0]); + return; + + case FloatBitsToInt: + case FloatBitsToUint: + case IntBitsToFloat: + case UintBitsToFloat: + /* Probably going to be removed from the final version of the spec. */ + val->ssa = src[0]; + return; + + case Fma: op = nir_op_ffma; break; + case Ldexp: op = nir_op_ldexp; break; + + /* Packing/Unpacking functions */ + case PackSnorm4x8: op = nir_op_pack_snorm_4x8; break; + case PackUnorm4x8: op = nir_op_pack_unorm_4x8; break; + case PackSnorm2x16: op = nir_op_pack_snorm_2x16; break; + case PackUnorm2x16: op = nir_op_pack_unorm_2x16; break; + case PackHalf2x16: op = nir_op_pack_half_2x16; break; + case UnpackSnorm4x8: op = nir_op_unpack_snorm_4x8; break; + case UnpackUnorm4x8: op = nir_op_unpack_unorm_4x8; break; + case UnpackSnorm2x16: op = nir_op_unpack_snorm_2x16; break; + case UnpackUnorm2x16: op = nir_op_unpack_unorm_2x16; break; + case UnpackHalf2x16: op = nir_op_unpack_half_2x16; break; + + case Length: + val->ssa = build_length(&b->nb, src[0]); + return; + case Distance: + val->ssa = build_length(&b->nb, nir_fsub(&b->nb, src[0], src[1])); + return; + case Normalize: + val->ssa = nir_fdiv(&b->nb, src[0], build_length(&b->nb, src[0])); + return; + + case UaddCarry: op = nir_op_uadd_carry; break; + case UsubBorrow: op = nir_op_usub_borrow; break; + case BitfieldExtract: op = nir_op_ubitfield_extract; break; /* TODO */ + case BitfieldInsert: op = nir_op_bitfield_insert; break; + case BitfieldReverse: op = nir_op_bitfield_reverse; break; + case BitCount: op = nir_op_bit_count; break; + case FindLSB: op = nir_op_find_lsb; break; + case FindMSB: op = nir_op_ufind_msb; break; /* TODO */ + + case Clamp: + case Asin: + case Acos: + case Atan: + case Atan2: + case Sinh: + case Cosh: + case Tanh: + case Asinh: + case Acosh: + case Atanh: + case SmoothStep: + case Frexp: + case PackDouble2x32: + case UnpackDouble2x32: + case Cross: + case Ftransform: + case FaceForward: + case Reflect: + case Refract: + case UmulExtended: + case ImulExtended: + default: + unreachable("Unhandled opcode"); + } + + nir_alu_instr *instr = nir_alu_instr_create(b->shader, op); + nir_ssa_dest_init(&instr->instr, &instr->dest.dest, + glsl_get_vector_elements(val->type), val->name); + val->ssa = &instr->dest.dest.ssa; + + for (unsigned i = 0; i < nir_op_infos[op].num_inputs; i++) + instr->src[i].src = nir_src_for_ssa(src[i]); + + nir_builder_instr_insert(&b->nb, &instr->instr); +} + +bool +vtn_handle_glsl450_instruction(struct vtn_builder *b, uint32_t ext_opcode, + const uint32_t *words, unsigned count) +{ + switch ((enum GLSL450Entrypoint)ext_opcode) { + case Determinant: + case MatrixInverse: + case InterpolateAtCentroid: + case InterpolateAtSample: + case InterpolateAtOffset: + unreachable("Unhandled opcode"); + + default: + handle_glsl450_alu(b, (enum GLSL450Entrypoint)ext_opcode, words, count); + } + + return true; +} diff --git a/src/glsl/nir/spirv_to_nir.c b/src/glsl/nir/spirv_to_nir.c index 20b7f6e1dd1..f1c63ebff13 100644 --- a/src/glsl/nir/spirv_to_nir.c +++ b/src/glsl/nir/spirv_to_nir.c @@ -65,11 +65,24 @@ vtn_handle_extension(struct vtn_builder *b, SpvOp opcode, const uint32_t *w, unsigned count) { switch (opcode) { - case SpvOpExtInstImport: - /* Do nothing for the moment */ + case SpvOpExtInstImport: { + struct vtn_value *val = vtn_push_value(b, w[1], vtn_value_type_extension); + if (strcmp((const char *)&w[2], "GLSL.std.450") == 0) { + val->ext_handler = 
vtn_handle_glsl450_instruction; + } else { + assert(!"Unsupported extension"); + } break; + } + + case SpvOpExtInst: { + struct vtn_value *val = vtn_value(b, w[3], vtn_value_type_extension); + bool handled = val->ext_handler(b, w[4], w, count); + (void)handled; + assert(handled); + break; + } - case SpvOpExtInst: default: unreachable("Unhandled opcode"); } @@ -792,10 +805,13 @@ vtn_handle_preamble_instruction(struct vtn_builder *b, SpvOp opcode, case SpvOpSourceExtension: case SpvOpCompileFlag: case SpvOpExtension: - case SpvOpExtInstImport: /* Unhandled, but these are for debug so that's ok. */ break; + case SpvOpExtInstImport: + vtn_handle_extension(b, opcode, w, count); + break; + case SpvOpMemoryModel: assert(w[1] == SpvAddressingModelLogical); assert(w[2] == SpvMemoryModelGLSL450); diff --git a/src/glsl/nir/spirv_to_nir_private.h b/src/glsl/nir/spirv_to_nir_private.h index 0a07b377e72..fd80dd4e161 100644 --- a/src/glsl/nir/spirv_to_nir_private.h +++ b/src/glsl/nir/spirv_to_nir_private.h @@ -43,6 +43,7 @@ enum vtn_value_type { vtn_value_type_function, vtn_value_type_block, vtn_value_type_ssa, + vtn_value_type_extension, }; struct vtn_block { @@ -74,6 +75,7 @@ struct vtn_value { struct vtn_function *func; struct vtn_block *block; nir_ssa_def *ssa; + vtn_instruction_handler ext_handler; }; }; @@ -139,3 +141,6 @@ typedef void (*vtn_decoration_foreach_cb)(struct vtn_builder *, void vtn_foreach_decoration(struct vtn_builder *b, struct vtn_value *value, vtn_decoration_foreach_cb cb, void *data); + +bool vtn_handle_glsl450_instruction(struct vtn_builder *b, uint32_t ext_opcode, + const uint32_t *words, unsigned count); From 7b9c29e440d2ef3d2bd476ebd9ff06586f396da5 Mon Sep 17 00:00:00 2001 From: Jason Ekstrand Date: Wed, 6 May 2015 12:35:30 -0700 Subject: [PATCH 34/44] nir/spirv: Make vtn_ssa_value handle constants as well as ssa values --- src/glsl/nir/spirv_to_nir.c | 21 ++++++++++++++++++++- 1 file changed, 20 insertions(+), 1 deletion(-) diff --git a/src/glsl/nir/spirv_to_nir.c b/src/glsl/nir/spirv_to_nir.c index f1c63ebff13..62d377ed243 100644 --- a/src/glsl/nir/spirv_to_nir.c +++ b/src/glsl/nir/spirv_to_nir.c @@ -31,7 +31,26 @@ nir_ssa_def * vtn_ssa_value(struct vtn_builder *b, uint32_t value_id) { - return vtn_value(b, value_id, vtn_value_type_ssa)->ssa; + struct vtn_value *val = vtn_untyped_value(b, value_id); + switch (val->value_type) { + case vtn_value_type_constant: { + assert(glsl_type_is_vector_or_scalar(val->type)); + unsigned num_components = glsl_get_vector_elements(val->type); + nir_load_const_instr *load = + nir_load_const_instr_create(b->shader, num_components); + + for (unsigned i = 0; i < num_components; i++) + load->value.u[0] = val->constant->value.u[0]; + + nir_builder_instr_insert(&b->nb, &load->instr); + return &load->def; + } + + case vtn_value_type_ssa: + return val->ssa; + default: + unreachable("Invalid type for an SSA value"); + } } static char * From a28f8ad9f1cce99e38c93fa5bf2892056861414c Mon Sep 17 00:00:00 2001 From: Jason Ekstrand Date: Wed, 6 May 2015 12:36:09 -0700 Subject: [PATCH 35/44] nir/spirv: Use the correct length for copying string literals --- src/glsl/nir/spirv_to_nir.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/glsl/nir/spirv_to_nir.c b/src/glsl/nir/spirv_to_nir.c index 62d377ed243..d4bad887dc1 100644 --- a/src/glsl/nir/spirv_to_nir.c +++ b/src/glsl/nir/spirv_to_nir.c @@ -57,7 +57,7 @@ static char * vtn_string_literal(struct vtn_builder *b, const uint32_t *words, unsigned word_count) { - return ralloc_strndup(b, 
(char *)words, (word_count - 2) * sizeof(*words)); + return ralloc_strndup(b, (char *)words, word_count * sizeof(*words)); } static const uint32_t * From 3a2db9207d5135f9cc2610e78aecaa9204d58641 Mon Sep 17 00:00:00 2001 From: Jason Ekstrand Date: Wed, 6 May 2015 12:36:31 -0700 Subject: [PATCH 36/44] nir/spirv: Set a name on temporary variables --- src/glsl/nir/spirv_to_nir.c | 1 + 1 file changed, 1 insertion(+) diff --git a/src/glsl/nir/spirv_to_nir.c b/src/glsl/nir/spirv_to_nir.c index d4bad887dc1..3bbf91453fd 100644 --- a/src/glsl/nir/spirv_to_nir.c +++ b/src/glsl/nir/spirv_to_nir.c @@ -595,6 +595,7 @@ vtn_handle_variables(struct vtn_builder *b, SpvOp opcode, nir_variable *const_tmp = rzalloc(b->shader, nir_variable); const_tmp->type = dest_type; + const_tmp->name = "const_temp"; const_tmp->data.mode = nir_var_local; const_tmp->data.read_only = true; exec_list_push_tail(&b->impl->locals, &const_tmp->node); From 64bc58a88ee3c0131a7d540b2ff61a0c707563e4 Mon Sep 17 00:00:00 2001 From: Jason Ekstrand Date: Wed, 6 May 2015 12:37:10 -0700 Subject: [PATCH 37/44] nir/spirv: Handle control-flow with loops --- src/glsl/nir/spirv_to_nir.c | 168 ++++++++++++++++++++++++---- src/glsl/nir/spirv_to_nir_private.h | 4 +- 2 files changed, 151 insertions(+), 21 deletions(-) diff --git a/src/glsl/nir/spirv_to_nir.c b/src/glsl/nir/spirv_to_nir.c index 3bbf91453fd..a4f13603dac 100644 --- a/src/glsl/nir/spirv_to_nir.c +++ b/src/glsl/nir/spirv_to_nir.c @@ -1000,6 +1000,13 @@ vtn_handle_first_cfg_pass_instruction(struct vtn_builder *b, SpvOp opcode, b->block = NULL; break; + case SpvOpSelectionMerge: + case SpvOpLoopMerge: + assert(b->block && b->block->merge_op == SpvOpNop); + b->block->merge_op = opcode; + b->block->merge_block_id = w[1]; + break; + default: /* Continue on as per normal */ return true; @@ -1015,19 +1022,20 @@ vtn_handle_body_instruction(struct vtn_builder *b, SpvOp opcode, switch (opcode) { case SpvOpLabel: { struct vtn_block *block = vtn_value(b, w[1], vtn_value_type_block)->block; + assert(block->block == NULL); + struct exec_node *list_tail = exec_list_get_tail(b->nb.cf_node_list); nir_cf_node *tail_node = exec_node_data(nir_cf_node, list_tail, node); assert(tail_node->type == nir_cf_node_block); block->block = nir_cf_node_as_block(tail_node); + assert(exec_list_is_empty(&block->block->instr_list)); break; } case SpvOpLoopMerge: case SpvOpSelectionMerge: - assert(b->merge_block == NULL); - /* TODO: Selection Control */ - b->merge_block = vtn_value(b, w[1], vtn_value_type_block)->block; + /* This is handled by cfg pre-pass and walk_blocks */ break; case SpvOpUndef: @@ -1186,19 +1194,68 @@ vtn_handle_body_instruction(struct vtn_builder *b, SpvOp opcode, static void vtn_walk_blocks(struct vtn_builder *b, struct vtn_block *start, - struct vtn_block *end) + struct vtn_block *break_block, struct vtn_block *cont_block, + struct vtn_block *end_block) { struct vtn_block *block = start; - while (block != end) { + while (block != end_block) { + const uint32_t *w = block->branch; + SpvOp branch_op = w[0] & SpvOpCodeMask; + + if (block->block != NULL) { + /* We've already visited this block once before so this is a + * back-edge. Back-edges are only allowed to point to a loop + * merge. 
+ */ + assert(block == cont_block); + return; + } + + b->block = block; vtn_foreach_instruction(b, block->label, block->branch, vtn_handle_body_instruction); - const uint32_t *w = block->branch; - SpvOp branch_op = w[0] & SpvOpCodeMask; switch (branch_op) { case SpvOpBranch: { - assert(vtn_value(b, w[1], vtn_value_type_block)->block == end); - return; + struct vtn_block *branch_block = + vtn_value(b, w[1], vtn_value_type_block)->block; + + if (branch_block == break_block) { + nir_jump_instr *jump = nir_jump_instr_create(b->shader, + nir_jump_break); + nir_builder_instr_insert(&b->nb, &jump->instr); + + return; + } else if (branch_block == cont_block) { + nir_jump_instr *jump = nir_jump_instr_create(b->shader, + nir_jump_continue); + nir_builder_instr_insert(&b->nb, &jump->instr); + + return; + } else if (branch_block == end_block) { + return; + } else if (branch_block->merge_op == SpvOpLoopMerge) { + /* This is the jump into a loop. */ + cont_block = branch_block; + break_block = vtn_value(b, branch_block->merge_block_id, + vtn_value_type_block)->block; + + nir_loop *loop = nir_loop_create(b->shader); + nir_cf_node_insert_end(b->nb.cf_node_list, &loop->cf_node); + + struct exec_list *old_list = b->nb.cf_node_list; + + nir_builder_insert_after_cf_list(&b->nb, &loop->body); + vtn_walk_blocks(b, branch_block, break_block, cont_block, NULL); + + nir_builder_insert_after_cf_list(&b->nb, old_list); + block = break_block; + continue; + } else { + /* TODO: Can this ever happen? */ + block = branch_block; + continue; + } } case SpvOpBranchConditional: { @@ -1207,28 +1264,99 @@ vtn_walk_blocks(struct vtn_builder *b, struct vtn_block *start, vtn_value(b, w[2], vtn_value_type_block)->block; struct vtn_block *else_block = vtn_value(b, w[3], vtn_value_type_block)->block; - struct vtn_block *merge_block = b->merge_block; nir_if *if_stmt = nir_if_create(b->shader); if_stmt->condition = nir_src_for_ssa(vtn_ssa_value(b, w[1])); nir_cf_node_insert_end(b->nb.cf_node_list, &if_stmt->cf_node); - struct exec_list *old_list = b->nb.cf_node_list; + if (then_block == break_block) { + nir_jump_instr *jump = nir_jump_instr_create(b->shader, + nir_jump_break); + nir_instr_insert_after_cf_list(&if_stmt->then_list, + &jump->instr); + block = else_block; + } else if (else_block == break_block) { + nir_jump_instr *jump = nir_jump_instr_create(b->shader, + nir_jump_break); + nir_instr_insert_after_cf_list(&if_stmt->else_list, + &jump->instr); + block = then_block; + } else if (then_block == cont_block) { + nir_jump_instr *jump = nir_jump_instr_create(b->shader, + nir_jump_continue); + nir_instr_insert_after_cf_list(&if_stmt->then_list, + &jump->instr); + block = else_block; + } else if (else_block == cont_block) { + nir_jump_instr *jump = nir_jump_instr_create(b->shader, + nir_jump_continue); + nir_instr_insert_after_cf_list(&if_stmt->else_list, + &jump->instr); + block = then_block; + } else { + /* Conventional if statement */ + assert(block->merge_op == SpvOpSelectionMerge); + struct vtn_block *merge_block = + vtn_value(b, block->merge_block_id, vtn_value_type_block)->block; - nir_builder_insert_after_cf_list(&b->nb, &if_stmt->then_list); - vtn_walk_blocks(b, then_block, merge_block); + struct exec_list *old_list = b->nb.cf_node_list; - nir_builder_insert_after_cf_list(&b->nb, &if_stmt->else_list); - vtn_walk_blocks(b, else_block, merge_block); + nir_builder_insert_after_cf_list(&b->nb, &if_stmt->then_list); + vtn_walk_blocks(b, then_block, break_block, cont_block, merge_block); - nir_builder_insert_after_cf_list(&b->nb, 
old_list); - block = merge_block; + nir_builder_insert_after_cf_list(&b->nb, &if_stmt->else_list); + vtn_walk_blocks(b, else_block, break_block, cont_block, merge_block); + + nir_builder_insert_after_cf_list(&b->nb, old_list); + block = merge_block; + continue; + } + + /* If we got here then we inserted a predicated break or continue + * above and we need to handle the other case. We already set + * `block` above to indicate what block to visit after the + * predicated break. + */ + + /* It's possible that the other branch is also a break/continue. + * If it is, we handle that here. + */ + if (block == break_block) { + nir_jump_instr *jump = nir_jump_instr_create(b->shader, + nir_jump_break); + nir_builder_instr_insert(&b->nb, &jump->instr); + + return; + } else if (block == cont_block) { + nir_jump_instr *jump = nir_jump_instr_create(b->shader, + nir_jump_continue); + nir_builder_instr_insert(&b->nb, &jump->instr); + + return; + } + + /* If we got here then there was a predicated break/continue but + * the other half of the if has stuff in it. `block` was already + * set above so there is nothing left for us to do. + */ continue; } + case SpvOpReturn: { + nir_jump_instr *jump = nir_jump_instr_create(b->shader, + nir_jump_return); + nir_builder_instr_insert(&b->nb, &jump->instr); + return; + } + + case SpvOpKill: { + nir_intrinsic_instr *discard = + nir_intrinsic_instr_create(b->shader, nir_intrinsic_discard); + nir_builder_instr_insert(&b->nb, &discard->instr); + return; + } + case SpvOpSwitch: - case SpvOpKill: - case SpvOpReturn: case SpvOpReturnValue: case SpvOpUnreachable: default: @@ -1275,7 +1403,7 @@ spirv_to_nir(const uint32_t *words, size_t word_count, b->impl = nir_function_impl_create(func->overload); nir_builder_init(&b->nb, b->impl); nir_builder_insert_after_cf_list(&b->nb, &b->impl->body); - vtn_walk_blocks(b, func->start_block, NULL); + vtn_walk_blocks(b, func->start_block, NULL, NULL, NULL); } ralloc_free(b); diff --git a/src/glsl/nir/spirv_to_nir_private.h b/src/glsl/nir/spirv_to_nir_private.h index fd80dd4e161..d2b364bdfeb 100644 --- a/src/glsl/nir/spirv_to_nir_private.h +++ b/src/glsl/nir/spirv_to_nir_private.h @@ -47,6 +47,9 @@ enum vtn_value_type { }; struct vtn_block { + /* Merge opcode if this block contains a merge; SpvOpNop otherwise. */ + SpvOp merge_op; + uint32_t merge_block_id; const uint32_t *label; const uint32_t *branch; nir_block *block; @@ -92,7 +95,6 @@ struct vtn_builder { nir_shader *shader; nir_function_impl *impl; struct vtn_block *block; - struct vtn_block *merge_block; unsigned value_id_bound; struct vtn_value *values; From 56f533b3a05141df907a608c504bb1cc3dcf8641 Mon Sep 17 00:00:00 2001 From: Jason Ekstrand Date: Wed, 6 May 2015 12:54:02 -0700 Subject: [PATCH 38/44] nir/spirv: Handle boolean uniforms correctly --- src/glsl/nir/spirv_to_nir.c | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/src/glsl/nir/spirv_to_nir.c b/src/glsl/nir/spirv_to_nir.c index a4f13603dac..099bbaf42e1 100644 --- a/src/glsl/nir/spirv_to_nir.c +++ b/src/glsl/nir/spirv_to_nir.c @@ -573,7 +573,16 @@ vtn_handle_variables(struct vtn_builder *b, SpvOp opcode, nir_builder_instr_insert(&b->nb, &load->instr); val->type = src_type; - val->ssa = &load->dest.ssa; + + if (src->var->data.mode == nir_var_uniform && + glsl_get_base_type(src_type) == GLSL_TYPE_BOOL) { + /* Uniform boolean loads need to be fixed up since they're defined + * to be zero/nonzero rather than NIR_FALSE/NIR_TRUE. 
+          */
+         val->ssa = nir_ine(&b->nb, &load->dest.ssa, nir_imm_int(&b->nb, 0));
+      } else {
+         val->ssa = &load->dest.ssa;
+      }
       break;
    }

From 036a4b185560a562f2b2a7ae6deb0ab23878090b Mon Sep 17 00:00:00 2001
From: Jason Ekstrand
Date: Wed, 6 May 2015 15:33:21 -0700
Subject: [PATCH 39/44] nir/spirv: Handle jump-to-loop in a more general way

---
 src/glsl/nir/spirv_to_nir.c | 46 ++++++++++++++++++++-----------------
 1 file changed, 25 insertions(+), 21 deletions(-)

diff --git a/src/glsl/nir/spirv_to_nir.c b/src/glsl/nir/spirv_to_nir.c
index 099bbaf42e1..88b0e1bc980 100644
--- a/src/glsl/nir/spirv_to_nir.c
+++ b/src/glsl/nir/spirv_to_nir.c
@@ -1208,9 +1208,6 @@ vtn_walk_blocks(struct vtn_builder *b, struct vtn_block *start,
 {
    struct vtn_block *block = start;
    while (block != end_block) {
-      const uint32_t *w = block->branch;
-      SpvOp branch_op = w[0] & SpvOpCodeMask;
-
       if (block->block != NULL) {
         /* We've already visited this block once before so this is a
          * back-edge. Back-edges are only allowed to point to a loop
@@ -1220,6 +1217,31 @@ vtn_walk_blocks(struct vtn_builder *b, struct vtn_block *start,
         return;
      }

+      if (block->merge_op == SpvOpLoopMerge) {
+         /* This is the jump into a loop. */
+         cont_block = block;
+         break_block = vtn_value(b, block->merge_block_id,
+                                 vtn_value_type_block)->block;
+
+         nir_loop *loop = nir_loop_create(b->shader);
+         nir_cf_node_insert_end(b->nb.cf_node_list, &loop->cf_node);
+
+         struct exec_list *old_list = b->nb.cf_node_list;
+
+         /* Reset the merge_op to prevent infinite recursion */
+         block->merge_op = SpvOpNop;
+
+         nir_builder_insert_after_cf_list(&b->nb, &loop->body);
+         vtn_walk_blocks(b, block, break_block, cont_block, NULL);
+
+         nir_builder_insert_after_cf_list(&b->nb, old_list);
+         block = break_block;
+         continue;
+      }
+
+      const uint32_t *w = block->branch;
+      SpvOp branch_op = w[0] & SpvOpCodeMask;
+
       b->block = block;
       vtn_foreach_instruction(b, block->label, block->branch,
                               vtn_handle_body_instruction);
@@ -1243,25 +1265,7 @@ vtn_walk_blocks(struct vtn_builder *b, struct vtn_block *start,
            return;
         } else if (branch_block == end_block) {
            return;
-         } else if (branch_block->merge_op == SpvOpLoopMerge) {
-            /* This is the jump into a loop. */
-            cont_block = branch_block;
-            break_block = vtn_value(b, branch_block->merge_block_id,
-                                    vtn_value_type_block)->block;
-
-            nir_loop *loop = nir_loop_create(b->shader);
-            nir_cf_node_insert_end(b->nb.cf_node_list, &loop->cf_node);
-
-            struct exec_list *old_list = b->nb.cf_node_list;
-
-            nir_builder_insert_after_cf_list(&b->nb, &loop->body);
-            vtn_walk_blocks(b, branch_block, break_block, cont_block, NULL);
-
-            nir_builder_insert_after_cf_list(&b->nb, old_list);
-            block = break_block;
-            continue;
-         } else {
-            /* TODO: Can this ever happen? */
            block = branch_block;
            continue;
         }

From 0fa9211d7ff4f8d3e058d2689b5ba05372012539 Mon Sep 17 00:00:00 2001
From: Jason Ekstrand
Date: Wed, 6 May 2015 15:36:54 -0700
Subject: [PATCH 40/44] nir/spirv: Make the global constants in spirv.h static

I've been promised in a bug that this will be fixed in a future version
of the header.  However, in the interest of keeping my branch building,
I'm adding these changes myself for the moment.
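For anyone unfamiliar with the C linkage rules involved, the sketch below is
a hypothetical two-file example (not part of this patch) of the kind of
problem a non-static file-scope const can cause: in C such a constant has
external linkage, so every .c file that includes the header emits its own
definition and the link can fail with duplicate symbols, while static const
gives each translation unit a private copy.

   /* common.h -- hypothetical header included from more than one .c file */
   #ifndef COMMON_H
   #define COMMON_H
   const int BadMagic = 0x07230203;          /* external linkage in C: each
                                              * includer defines it, so the
                                              * link reports a duplicate
                                              * definition of BadMagic */
   static const int GoodMagic = 0x07230203;  /* internal linkage: one private
                                              * copy per translation unit */
   #endif

   /* a.c */
   #include "common.h"
   int a_magic(void) { return GoodMagic; }

   /* b.c */
   #include "common.h"
   int b_magic(void) { return GoodMagic; }

   /* cc a.c b.c
    * Only BadMagic triggers the duplicate-definition error; making it
    * static, as this patch does for the spirv.h constants, avoids it. */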
--- src/glsl/nir/spirv.h | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/src/glsl/nir/spirv.h b/src/glsl/nir/spirv.h index 93135c09596..da717ecd342 100644 --- a/src/glsl/nir/spirv.h +++ b/src/glsl/nir/spirv.h @@ -48,13 +48,13 @@ namespace spv { -const int MagicNumber = 0x07230203; -const int Version = 99; +static const int MagicNumber = 0x07230203; +static const int Version = 99; typedef unsigned int Id; -const unsigned int OpCodeMask = 0xFFFF; -const unsigned int WordCountShift = 16; +static const unsigned int OpCodeMask = 0xFFFF; +static const unsigned int WordCountShift = 16; enum SourceLanguage { SourceLanguageUnknown = 0, @@ -677,13 +677,13 @@ enum Op { #ifndef __cplusplus -const int SpvMagicNumber = 0x07230203; -const int SpvVersion = 99; +static const int SpvMagicNumber = 0x07230203; +static const int SpvVersion = 99; typedef unsigned int SpvId; -const unsigned int SpvOpCodeMask = 0xFFFF; -const unsigned int SpvWordCountShift = 16; +static const unsigned int SpvOpCodeMask = 0xFFFF; +static const unsigned int SpvWordCountShift = 16; typedef enum SpvSourceLanguage_ { SpvSourceLanguageUnknown = 0, From a53e7955245334b1b8e47bc94f35e6c68859b10c Mon Sep 17 00:00:00 2001 From: Jason Ekstrand Date: Sat, 16 May 2015 12:32:58 -0700 Subject: [PATCH 41/44] nir/types: Add support for sampler types --- src/glsl/nir/nir_types.cpp | 41 ++++++++++++++++++++++++++++++++++++++ src/glsl/nir/nir_types.h | 8 ++++++++ 2 files changed, 49 insertions(+) diff --git a/src/glsl/nir/nir_types.cpp b/src/glsl/nir/nir_types.cpp index a6d35fe6179..35421506545 100644 --- a/src/glsl/nir/nir_types.cpp +++ b/src/glsl/nir/nir_types.cpp @@ -124,6 +124,20 @@ glsl_get_struct_elem_name(const struct glsl_type *type, unsigned index) return type->fields.structure[index].name; } +glsl_sampler_dim +glsl_get_sampler_dim(const struct glsl_type *type) +{ + assert(glsl_type_is_sampler(type)); + return (glsl_sampler_dim)type->sampler_dimensionality; +} + +glsl_base_type +glsl_get_sampler_result_type(const struct glsl_type *type) +{ + assert(glsl_type_is_sampler(type)); + return (glsl_base_type)type->sampler_type; +} + bool glsl_type_is_void(const glsl_type *type) { @@ -154,6 +168,26 @@ glsl_type_is_matrix(const struct glsl_type *type) return type->is_matrix(); } +bool +glsl_type_is_sampler(const struct glsl_type *type) +{ + return type->is_sampler(); +} + +bool +glsl_sampler_type_is_shadow(const struct glsl_type *type) +{ + assert(glsl_type_is_sampler(type)); + return type->sampler_shadow; +} + +bool +glsl_sampler_type_is_array(const struct glsl_type *type) +{ + assert(glsl_type_is_sampler(type)); + return type->sampler_array; +} + const glsl_type * glsl_void_type(void) { @@ -223,6 +257,13 @@ glsl_struct_type(const glsl_struct_field *fields, return glsl_type::get_record_instance(fields, num_fields, name); } +const struct glsl_type * +glsl_sampler_type(enum glsl_sampler_dim dim, bool is_shadow, bool is_array, + enum glsl_base_type base_type) +{ + return glsl_type::get_sampler_instance(dim, is_shadow, is_array, base_type); +} + const glsl_type * glsl_function_type(const glsl_type *return_type, const glsl_function_param *params, unsigned num_params) diff --git a/src/glsl/nir/nir_types.h b/src/glsl/nir/nir_types.h index f19f0e5db5d..ceb131c9f47 100644 --- a/src/glsl/nir/nir_types.h +++ b/src/glsl/nir/nir_types.h @@ -68,12 +68,17 @@ unsigned glsl_get_length(const struct glsl_type *type); const char *glsl_get_struct_elem_name(const struct glsl_type *type, unsigned index); +enum glsl_sampler_dim 
glsl_get_sampler_dim(const struct glsl_type *type); +enum glsl_base_type glsl_get_sampler_result_type(const struct glsl_type *type); bool glsl_type_is_void(const struct glsl_type *type); bool glsl_type_is_vector(const struct glsl_type *type); bool glsl_type_is_scalar(const struct glsl_type *type); bool glsl_type_is_vector_or_scalar(const struct glsl_type *type); bool glsl_type_is_matrix(const struct glsl_type *type); +bool glsl_type_is_sampler(const struct glsl_type *type); +bool glsl_sampler_type_is_shadow(const struct glsl_type *type); +bool glsl_sampler_type_is_array(const struct glsl_type *type); const struct glsl_type *glsl_void_type(void); const struct glsl_type *glsl_float_type(void); @@ -91,6 +96,9 @@ const struct glsl_type *glsl_array_type(const struct glsl_type *base, unsigned elements); const struct glsl_type *glsl_struct_type(const struct glsl_struct_field *fields, unsigned num_fields, const char *name); +const struct glsl_type *glsl_sampler_type(enum glsl_sampler_dim dim, + bool is_shadow, bool is_array, + enum glsl_base_type base_type); const struct glsl_type * glsl_function_type(const struct glsl_type *return_type, const struct glsl_function_param *params, unsigned num_params); From d6f52dfb3e9e40a085ba012e69f8786462a1c3c9 Mon Sep 17 00:00:00 2001 From: Jason Ekstrand Date: Sat, 16 May 2015 12:33:29 -0700 Subject: [PATCH 42/44] nir/spirv: Move Exp and Log to the list of currently unhandled ALU ops NIR doesn't have the native opcodes for them anymore --- src/glsl/nir/spirv_glsl450_to_nir.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/glsl/nir/spirv_glsl450_to_nir.c b/src/glsl/nir/spirv_glsl450_to_nir.c index 240ff012fe1..3b9d0940aad 100644 --- a/src/glsl/nir/spirv_glsl450_to_nir.c +++ b/src/glsl/nir/spirv_glsl450_to_nir.c @@ -170,8 +170,6 @@ handle_glsl450_alu(struct vtn_builder *b, enum GLSL450Entrypoint entrypoint, nir_fcos(&b->nb, src[0])); return; case Pow: op = nir_op_fpow; break; - case Exp: op = nir_op_fexp; break; - case Log: op = nir_op_flog; break; case Exp2: op = nir_op_fexp2; break; case Log2: op = nir_op_flog2; break; case Sqrt: op = nir_op_fsqrt; break; @@ -227,6 +225,8 @@ handle_glsl450_alu(struct vtn_builder *b, enum GLSL450Entrypoint entrypoint, case FindLSB: op = nir_op_find_lsb; break; case FindMSB: op = nir_op_ufind_msb; break; /* TODO */ + case Exp: + case Log: case Clamp: case Asin: case Acos: From 4e44dcc3122d9f5d79cedeff1b506cbfff36b968 Mon Sep 17 00:00:00 2001 From: Jason Ekstrand Date: Sat, 16 May 2015 12:34:15 -0700 Subject: [PATCH 43/44] nir/spirv: Add initial support for samplers --- src/glsl/nir/spirv_to_nir.c | 155 +++++++++++++++++++++++++++++++++++- 1 file changed, 152 insertions(+), 3 deletions(-) diff --git a/src/glsl/nir/spirv_to_nir.c b/src/glsl/nir/spirv_to_nir.c index 88b0e1bc980..1a789ee4786 100644 --- a/src/glsl/nir/spirv_to_nir.c +++ b/src/glsl/nir/spirv_to_nir.c @@ -256,7 +256,36 @@ vtn_handle_type(struct vtn_builder *b, SpvOp opcode, */ return vtn_value(b, args[1], vtn_value_type_type)->type; - case SpvOpTypeSampler: + case SpvOpTypeSampler: { + const struct glsl_type *sampled_type = + vtn_value(b, args[0], vtn_value_type_type)->type; + + assert(glsl_type_is_vector_or_scalar(sampled_type)); + + enum glsl_sampler_dim dim; + switch ((SpvDim)args[1]) { + case SpvDim1D: dim = GLSL_SAMPLER_DIM_1D; break; + case SpvDim2D: dim = GLSL_SAMPLER_DIM_2D; break; + case SpvDim3D: dim = GLSL_SAMPLER_DIM_3D; break; + case SpvDimCube: dim = GLSL_SAMPLER_DIM_CUBE; break; + case SpvDimRect: dim = GLSL_SAMPLER_DIM_RECT; 
break;
+      case SpvDimBuffer: dim = GLSL_SAMPLER_DIM_BUF;  break;
+      default:
+         unreachable("Invalid SPIR-V Sampler dimension");
+      }
+
+      /* TODO: Handle the various texture image/filter options */
+      (void)args[2];
+
+      bool is_array = args[3];
+      bool is_shadow = args[4];
+
+      assert(args[5] == 0 && "FIXME: Handle multi-sampled textures");
+
+      return glsl_sampler_type(dim, is_shadow, is_array,
+                               glsl_get_base_type(sampled_type));
+   }
+
    case SpvOpTypeRuntimeArray:
    case SpvOpTypeOpaque:
    case SpvOpTypeEvent:
@@ -559,10 +588,16 @@ vtn_handle_variables(struct vtn_builder *b, SpvOp opcode,
    }

    case SpvOpLoad: {
-      struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_ssa);
       nir_deref_var *src = vtn_value(b, w[3], vtn_value_type_deref)->deref;
       const struct glsl_type *src_type = nir_deref_tail(&src->deref)->type;
+
+      if (glsl_get_base_type(src_type) == GLSL_TYPE_SAMPLER) {
+         vtn_push_value(b, w[2], vtn_value_type_deref)->deref = src;
+         return;
+      }
+
       assert(glsl_type_is_vector_or_scalar(src_type));
+      struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_ssa);
       nir_intrinsic_instr *load =
          nir_intrinsic_instr_create(b->shader, nir_intrinsic_load_var);
@@ -635,11 +670,125 @@ vtn_handle_function_call(struct vtn_builder *b, SpvOp opcode,
    unreachable("Unhandled opcode");
 }

+static nir_tex_src
+vtn_tex_src(struct vtn_builder *b, unsigned index, nir_tex_src_type type)
+{
+   nir_tex_src src;
+   src.src = nir_src_for_ssa(vtn_value(b, index, vtn_value_type_ssa)->ssa);
+   src.src_type = type;
+   return src;
+}
+
 static void
 vtn_handle_texture(struct vtn_builder *b, SpvOp opcode,
                    const uint32_t *w, unsigned count)
 {
-   unreachable("Unhandled opcode");
+   struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_ssa);
+   nir_deref_var *sampler = vtn_value(b, w[3], vtn_value_type_deref)->deref;
+
+   nir_tex_src srcs[8]; /* 8 should be enough */
+   nir_tex_src *p = srcs;
+
+   unsigned coord_components = 0;
+   switch (opcode) {
+   case SpvOpTextureSample:
+   case SpvOpTextureSampleDref:
+   case SpvOpTextureSampleLod:
+   case SpvOpTextureSampleProj:
+   case SpvOpTextureSampleGrad:
+   case SpvOpTextureSampleOffset:
+   case SpvOpTextureSampleProjLod:
+   case SpvOpTextureSampleProjGrad:
+   case SpvOpTextureSampleLodOffset:
+   case SpvOpTextureSampleProjOffset:
+   case SpvOpTextureSampleGradOffset:
+   case SpvOpTextureSampleProjLodOffset:
+   case SpvOpTextureSampleProjGradOffset:
+   case SpvOpTextureFetchTexelLod:
+   case SpvOpTextureFetchTexelOffset:
+   case SpvOpTextureFetchSample:
+   case SpvOpTextureFetchTexel:
+   case SpvOpTextureGather:
+   case SpvOpTextureGatherOffset:
+   case SpvOpTextureGatherOffsets:
+   case SpvOpTextureQueryLod: {
+      /* All these types have the coordinate as their first real argument */
+      struct vtn_value *coord = vtn_value(b, w[4], vtn_value_type_ssa);
+      coord_components = glsl_get_vector_elements(coord->type);
+      p->src = nir_src_for_ssa(coord->ssa);
+      p->src_type = nir_tex_src_coord;
+      p++;
+      break;
+   }
+   default:
+      break;
+   }
+
+   nir_texop texop;
+   switch (opcode) {
+   case SpvOpTextureSample:
+      texop = nir_texop_tex;
+
+      if (count == 6) {
+         texop = nir_texop_txb;
+         *p++ = vtn_tex_src(b, w[5], nir_tex_src_bias);
+      }
+      break;
+
+   case SpvOpTextureSampleDref:
+   case SpvOpTextureSampleLod:
+   case SpvOpTextureSampleProj:
+   case SpvOpTextureSampleGrad:
+   case SpvOpTextureSampleOffset:
+   case SpvOpTextureSampleProjLod:
+   case SpvOpTextureSampleProjGrad:
+   case SpvOpTextureSampleLodOffset:
+   case SpvOpTextureSampleProjOffset:
+   case SpvOpTextureSampleGradOffset:
+   case SpvOpTextureSampleProjLodOffset:
+   case 
SpvOpTextureSampleProjGradOffset: + case SpvOpTextureFetchTexelLod: + case SpvOpTextureFetchTexelOffset: + case SpvOpTextureFetchSample: + case SpvOpTextureFetchTexel: + case SpvOpTextureGather: + case SpvOpTextureGatherOffset: + case SpvOpTextureGatherOffsets: + case SpvOpTextureQuerySizeLod: + case SpvOpTextureQuerySize: + case SpvOpTextureQueryLod: + case SpvOpTextureQueryLevels: + case SpvOpTextureQuerySamples: + default: + unreachable("Unhandled opcode"); + } + + nir_tex_instr *instr = nir_tex_instr_create(b->shader, p - srcs); + + const struct glsl_type *sampler_type = nir_deref_tail(&sampler->deref)->type; + instr->sampler_dim = glsl_get_sampler_dim(sampler_type); + + switch (glsl_get_sampler_result_type(sampler_type)) { + case GLSL_TYPE_FLOAT: instr->dest_type = nir_type_float; break; + case GLSL_TYPE_INT: instr->dest_type = nir_type_int; break; + case GLSL_TYPE_UINT: instr->dest_type = nir_type_unsigned; break; + case GLSL_TYPE_BOOL: instr->dest_type = nir_type_bool; break; + default: + unreachable("Invalid base type for sampler result"); + } + + instr->op = texop; + memcpy(instr->src, srcs, instr->num_srcs * sizeof(*instr->src)); + instr->coord_components = coord_components; + instr->is_array = glsl_sampler_type_is_array(sampler_type); + instr->is_shadow = glsl_sampler_type_is_shadow(sampler_type); + + instr->sampler = sampler; + + nir_ssa_dest_init(&instr->instr, &instr->dest, 4, NULL); + val->ssa = &instr->dest.ssa; + + nir_builder_instr_insert(&b->nb, &instr->instr); } static void From a63952510d64c09720614b971e461d7f8ed17c7a Mon Sep 17 00:00:00 2001 From: Jason Ekstrand Date: Sat, 16 May 2015 12:34:32 -0700 Subject: [PATCH 44/44] nir/spirv: Don't assert that the current block is empty It's possible that someone will give us SPIR-V code in which someone needlessly branches to new blocks. We should handle that ok now. --- src/glsl/nir/spirv_to_nir.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/src/glsl/nir/spirv_to_nir.c b/src/glsl/nir/spirv_to_nir.c index 1a789ee4786..1fc1b8bc5dc 100644 --- a/src/glsl/nir/spirv_to_nir.c +++ b/src/glsl/nir/spirv_to_nir.c @@ -1186,8 +1186,6 @@ vtn_handle_body_instruction(struct vtn_builder *b, SpvOp opcode, nir_cf_node *tail_node = exec_node_data(nir_cf_node, list_tail, node); assert(tail_node->type == nir_cf_node_block); block->block = nir_cf_node_as_block(tail_node); - - assert(exec_list_is_empty(&block->block->instr_list)); break; }
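As a usage sketch for the sampler-type helpers that patch 41 adds to
nir_types.h (hypothetical test code, not part of the series; it assumes it is
built inside the Mesa tree where nir_types.h and the glsl enums are
available), building the equivalent of a GLSL sampler2DShadow and querying it
back looks like this:

   #include <assert.h>
   #include <stdbool.h>
   #include "nir_types.h"

   /* Construct a 2D, shadow, non-array sampler with a float result and
    * check that the query helpers report it back consistently.
    */
   static void
   check_shadow_sampler_type(void)
   {
      const struct glsl_type *type =
         glsl_sampler_type(GLSL_SAMPLER_DIM_2D, /* is_shadow */ true,
                           /* is_array */ false, GLSL_TYPE_FLOAT);

      assert(glsl_type_is_sampler(type));
      assert(glsl_get_sampler_dim(type) == GLSL_SAMPLER_DIM_2D);
      assert(glsl_sampler_type_is_shadow(type));
      assert(!glsl_sampler_type_is_array(type));
      assert(glsl_get_sampler_result_type(type) == GLSL_TYPE_FLOAT);
   }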