diff --git a/source/core/slang-string-util.cpp b/source/core/slang-string-util.cpp
index 6f1dc2ccb8..3da4084618 100644
--- a/source/core/slang-string-util.cpp
+++ b/source/core/slang-string-util.cpp
@@ -690,16 +690,18 @@ String StringUtil::replaceAll(
     }
 
     int radix = 10;
+    auto isDigit = CharUtil::isDigit;
     auto getDigit = CharUtil::getDecimalDigitValue;
     if (cur + 1 < end && *cur == '0' && (*(cur + 1) == 'x' || *(cur + 1) == 'X'))
     {
         radix = 16;
+        isDigit = CharUtil::isHexDigit;
         getDigit = CharUtil::getHexDigitValue;
         cur += 2;
     }
 
     // We need at least one digit
-    if (cur >= end || !CharUtil::isDigit(*cur))
+    if (cur >= end || !isDigit(*cur))
     {
         return SLANG_FAIL;
     }
diff --git a/source/slang/hlsl.meta.slang.temp.h b/source/slang/hlsl.meta.slang.temp.h
new file mode 100644
index 0000000000..edbbdf6e04
--- /dev/null
+++ b/source/slang/hlsl.meta.slang.temp.h
@@ -0,0 +1,22452 @@
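For context on the slang-string-util.cpp change above, here is a minimal, self-contained C++ sketch of the same radix-detection pattern, assuming only the standard library. The helper names (isDecimalDigit, getHexDigitValue, parseUInt) are hypothetical stand-ins for CharUtil and the surrounding StringUtil parser, not Slang APIs; the point of the patch is simply that, after consuming a 0x/0X prefix, the "at least one digit" check and the digit loop use the hex predicate instead of the decimal one.

#include <cstdint>
#include <cstdio>

// Hypothetical stand-ins for CharUtil::isDigit / getDecimalDigitValue and their hex variants.
static bool isDecimalDigit(char c) { return c >= '0' && c <= '9'; }
static int getDecimalDigitValue(char c) { return c - '0'; }
static bool isHexDigit(char c)
{
    return (c >= '0' && c <= '9') || (c >= 'a' && c <= 'f') || (c >= 'A' && c <= 'F');
}
static int getHexDigitValue(char c)
{
    if (c >= '0' && c <= '9') return c - '0';
    if (c >= 'a' && c <= 'f') return c - 'a' + 10;
    return c - 'A' + 10;
}

// Parses an unsigned integer from [cur, end), accepting an optional 0x/0X prefix.
// Returns false when no digit follows the (optional) prefix, mirroring the
// SLANG_FAIL path in the patched code. Overflow handling is omitted for brevity.
static bool parseUInt(const char* cur, const char* end, uint64_t& outValue)
{
    int radix = 10;
    bool (*isDigit)(char) = isDecimalDigit;
    int (*getDigit)(char) = getDecimalDigitValue;

    if (cur + 1 < end && *cur == '0' && (cur[1] == 'x' || cur[1] == 'X'))
    {
        radix = 16;
        isDigit = isHexDigit;        // the predicate swap the patch adds
        getDigit = getHexDigitValue;
        cur += 2;
    }

    // We need at least one digit, checked with the radix-appropriate predicate.
    if (cur >= end || !isDigit(*cur))
        return false;

    uint64_t value = 0;
    while (cur < end && isDigit(*cur))
    {
        value = value * uint64_t(radix) + uint64_t(getDigit(*cur));
        ++cur;
    }
    outValue = value;
    return true;
}

int main()
{
    const char text[] = "0x1A";
    uint64_t value = 0;
    if (parseUInt(text, text + sizeof(text) - 1, value))
        std::printf("%llu\n", (unsigned long long)value); // prints 26
    return 0;
}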
__sampledType(T) %sampled;\n") +SLANG_RAW(" };\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW("\n") +SLANG_RAW(" [__readNone]\n") +SLANG_RAW(" [ForceInline]\n") +SLANG_RAW(" [require(cpp_cuda_glsl_hlsl_metal_spirv_wgsl, texture_sm_4_0)]\n") +SLANG_RAW(" T SampleLevel(vector location, float level)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl:\n") +SLANG_RAW(" static_assert(T is float || T is vector || T is vector || T is vector\n") +SLANG_RAW(" || T is half || T is vector || T is vector || T is vector\n") +SLANG_RAW(" , \"HLSL supports only float and half type textures\");\n") +SLANG_RAW(" return __getTexture().SampleLevel(__getSampler(), location, level);\n") +SLANG_RAW(" case cpp:\n") +SLANG_RAW(" case metal:\n") +SLANG_RAW(" return __getTexture().SampleLevel(__getSampler(), location, level);\n") +SLANG_RAW(" case glsl:\n") +SLANG_RAW(" __intrinsic_asm \"$ctextureLod($0, $1, $2)$z\";\n") +SLANG_RAW(" case cuda:\n") +SLANG_RAW(" if (isArray != 0)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" switch(Shape.flavor)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case ") +SLANG_SPLICE(SLANG_TEXTURE_1D +) +SLANG_RAW(":\n") +SLANG_RAW(" __intrinsic_asm \"tex1DLayeredLod<$T0>($0, ($1).x, int(($1).y), ($2))\";\n") +SLANG_RAW(" case ") +SLANG_SPLICE(SLANG_TEXTURE_2D +) +SLANG_RAW(":\n") +SLANG_RAW(" __intrinsic_asm \"tex2DLayeredLod<$T0>($0, ($1).x, ($1).y, int(($1).z), ($2))\";\n") +SLANG_RAW(" case ") +SLANG_SPLICE(SLANG_TEXTURE_CUBE +) +SLANG_RAW(":\n") +SLANG_RAW(" __intrinsic_asm \"texCubemapLayeredLod<$T0>($0, ($1).x, ($1).y, ($1).z, int(($1).w), ($2))\";\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" __intrinsic_asm \"\";\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW(" else\n") +SLANG_RAW(" {\n") +SLANG_RAW(" switch(Shape.flavor)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case ") +SLANG_SPLICE(SLANG_TEXTURE_1D +) +SLANG_RAW(":\n") +SLANG_RAW(" __intrinsic_asm \"tex1DLod<$T0>($0, ($1), ($2))\";\n") +SLANG_RAW(" case ") +SLANG_SPLICE(SLANG_TEXTURE_2D +) +SLANG_RAW(":\n") +SLANG_RAW(" __intrinsic_asm \"tex2DLod<$T0>($0, ($1).x, ($1).y, ($2))\";\n") +SLANG_RAW(" case ") +SLANG_SPLICE(SLANG_TEXTURE_3D +) +SLANG_RAW(":\n") +SLANG_RAW(" __intrinsic_asm \"tex3DLod<$T0>($0, ($1).x, ($1).y, ($1).z, ($2))\";\n") +SLANG_RAW(" case ") +SLANG_SPLICE(SLANG_TEXTURE_CUBE +) +SLANG_RAW(":\n") +SLANG_RAW(" __intrinsic_asm \"texCubemapLod<$T0>($0, ($1).x, ($1).y, ($1).z, ($2))\";\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" __intrinsic_asm \"\";\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" return spirv_asm\n") +SLANG_RAW(" {\n") +SLANG_RAW(" %sampled : __sampledType(T) = OpImageSampleExplicitLod $this $location None|Lod $level;\n") +SLANG_RAW(" __truncate $$T result __sampledType(T) %sampled;\n") +SLANG_RAW(" };\n") +SLANG_RAW(" case wgsl:\n") +SLANG_RAW(" return __getTexture().SampleLevel(__getSampler(), location, level);\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW("\n") +SLANG_RAW(" [__readNone]\n") +SLANG_RAW(" [ForceInline]\n") +SLANG_RAW(" [require(cpp_glsl_hlsl_metal_spirv_wgsl, texture_sm_4_0)]\n") +SLANG_RAW(" T SampleLevel(vector location, float level, constexpr vector offset)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl:\n") +SLANG_RAW(" static_assert(T is float || T is vector || T is vector || T is vector\n") +SLANG_RAW(" || T is half || T is vector || T is vector || T is vector\n") +SLANG_RAW(" , \"HLSL supports only float and half type textures\");\n") +SLANG_RAW(" 
return __getTexture().SampleLevel(__getSampler(), location, level, offset);\n") +SLANG_RAW(" case cpp:\n") +SLANG_RAW(" case metal:\n") +SLANG_RAW(" return __getTexture().SampleLevel(__getSampler(), location, level, offset);\n") +SLANG_RAW(" case glsl:\n") +SLANG_RAW(" __intrinsic_asm \"$ctextureLodOffset($0, $1, $2, $3)$z\";\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" return spirv_asm\n") +SLANG_RAW(" {\n") +SLANG_RAW(" %sampled : __sampledType(T) = OpImageSampleExplicitLod $this $location None|Lod|ConstOffset $level $offset;\n") +SLANG_RAW(" __truncate $$T result __sampledType(T) %sampled;\n") +SLANG_RAW(" };\n") +SLANG_RAW(" case wgsl:\n") +SLANG_RAW(" return __getTexture().SampleLevel(__getSampler(), location, level, offset);\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("// Non-combined texture types specific functions\n") +SLANG_RAW("\n") +SLANG_RAW("__generic\n") +SLANG_RAW("extension _Texture\n") +SLANG_RAW("{\n") +SLANG_RAW(" typealias TextureCoord = vector;\n") +SLANG_RAW("\n") +SLANG_RAW(" [__readNone]\n") +SLANG_RAW(" [ForceInline]\n") +SLANG_RAW(" [require(glsl_hlsl_metal_spirv, texture_querylod)]\n") +SLANG_RAW(" float CalculateLevelOfDetail(SamplerState s, TextureCoord location)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" __requireComputeDerivative();\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl:\n") +SLANG_RAW(" __intrinsic_asm \".CalculateLevelOfDetail\";\n") +SLANG_RAW(" case metal:\n") +SLANG_RAW(" __intrinsic_asm \".calculate_clamped_lod\";\n") +SLANG_RAW(" case glsl:\n") +SLANG_RAW(" __intrinsic_asm \"textureQueryLod($p, $2).x\";\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" return (spirv_asm {\n") +SLANG_RAW(" %sampledImage : __sampledImageType(this) = OpSampledImage $this $s;\n") +SLANG_RAW(" result:$$float2 = OpImageQueryLod %sampledImage $location;\n") +SLANG_RAW(" }).x;\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW("\n") +SLANG_RAW(" [__readNone]\n") +SLANG_RAW(" [ForceInline]\n") +SLANG_RAW(" [require(glsl_hlsl_metal_spirv, texture_querylod)]\n") +SLANG_RAW(" float CalculateLevelOfDetailUnclamped(SamplerState s, TextureCoord location)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" __requireComputeDerivative();\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl:\n") +SLANG_RAW(" __intrinsic_asm \".CalculateLevelOfDetailUnclamped\";\n") +SLANG_RAW(" case metal:\n") +SLANG_RAW(" __intrinsic_asm \".calculate_unclamped_lod\";\n") +SLANG_RAW(" case glsl:\n") +SLANG_RAW(" __intrinsic_asm \"textureQueryLod($p, $2).y\";\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" return (spirv_asm {\n") +SLANG_RAW(" %sampledImage : __sampledImageType(this) = OpSampledImage $this $s;\n") +SLANG_RAW(" result:$$float2 = OpImageQueryLod %sampledImage $location;\n") +SLANG_RAW(" }).y;\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("__generic\n") +SLANG_RAW("extension _Texture\n") +SLANG_RAW("{\n") +SLANG_RAW(" [__readNone]\n") +SLANG_RAW(" [ForceInline]\n") +SLANG_RAW(" [require(cpp_cuda_glsl_hlsl_metal_spirv_wgsl, texture_sm_4_0_fragment)]\n") +SLANG_RAW(" T Sample(SamplerState s, vector location)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" __requireComputeDerivative();\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case cpp:\n") +SLANG_RAW(" __intrinsic_asm \".Sample\";\n") +SLANG_RAW(" case hlsl:\n") +SLANG_RAW(" static_assert(T is float || T is vector || T is vector || T is vector\n") +SLANG_RAW(" || T is half || T is vector || T is vector 
|| T is vector\n") +SLANG_RAW(" , \"HLSL supports only float and half type textures\");\n") +SLANG_RAW(" __intrinsic_asm \".Sample\";\n") +SLANG_RAW(" case metal:\n") +SLANG_RAW(" if (isArray == 1)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" switch (Shape.flavor)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case ") +SLANG_SPLICE(SLANG_TEXTURE_1D +) +SLANG_RAW(":\n") +SLANG_RAW(" __intrinsic_asm \"$c$0.sample($1, ($2).x, uint(($2).y))$z\";\n") +SLANG_RAW(" case ") +SLANG_SPLICE(SLANG_TEXTURE_2D +) +SLANG_RAW(":\n") +SLANG_RAW(" __intrinsic_asm \"$c$0.sample($1, ($2).xy, uint(($2).z))$z\";\n") +SLANG_RAW(" case ") +SLANG_SPLICE(SLANG_TEXTURE_CUBE +) +SLANG_RAW(":\n") +SLANG_RAW(" __intrinsic_asm \"$c$0.sample($1, ($2).xyz, uint(($2).w))$z\";\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW(" else\n") +SLANG_RAW(" {\n") +SLANG_RAW(" switch (Shape.flavor)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case ") +SLANG_SPLICE(SLANG_TEXTURE_1D +) +SLANG_RAW(":\n") +SLANG_RAW(" case ") +SLANG_SPLICE(SLANG_TEXTURE_2D +) +SLANG_RAW(":\n") +SLANG_RAW(" case ") +SLANG_SPLICE(SLANG_TEXTURE_3D +) +SLANG_RAW(":\n") +SLANG_RAW(" case ") +SLANG_SPLICE(SLANG_TEXTURE_CUBE +) +SLANG_RAW(":\n") +SLANG_RAW(" __intrinsic_asm \"$c$0.sample($1, $2)$z\";\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW(" // TODO: This needs to be handled by the capability system\n") +SLANG_RAW(" __intrinsic_asm \"\";\n") +SLANG_RAW(" case glsl:\n") +SLANG_RAW(" __intrinsic_asm \"$ctexture($p, $2)$z\";\n") +SLANG_RAW(" case cuda:\n") +SLANG_RAW(" if (isArray != 0)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" switch(Shape.flavor)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case ") +SLANG_SPLICE(SLANG_TEXTURE_1D +) +SLANG_RAW(":\n") +SLANG_RAW(" __intrinsic_asm \"tex1DLayered<$T0>($0, ($2).x, int(($2).y))\";\n") +SLANG_RAW(" case ") +SLANG_SPLICE(SLANG_TEXTURE_2D +) +SLANG_RAW(":\n") +SLANG_RAW(" __intrinsic_asm \"tex2DLayered<$T0>($0, ($2).x, ($2).y, int(($2).z))\";\n") +SLANG_RAW(" case ") +SLANG_SPLICE(SLANG_TEXTURE_CUBE +) +SLANG_RAW(":\n") +SLANG_RAW(" __intrinsic_asm \"texCubemapLayered<$T0>($0, ($2).x, ($2).y, ($2).z, int(($2).w))\";\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" __intrinsic_asm \"\";\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW(" else\n") +SLANG_RAW(" {\n") +SLANG_RAW(" switch(Shape.flavor)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case ") +SLANG_SPLICE(SLANG_TEXTURE_1D +) +SLANG_RAW(":\n") +SLANG_RAW(" __intrinsic_asm \"tex1D<$T0>($0, ($2))\";\n") +SLANG_RAW(" case ") +SLANG_SPLICE(SLANG_TEXTURE_2D +) +SLANG_RAW(":\n") +SLANG_RAW(" __intrinsic_asm \"tex2D<$T0>($0, ($2).x, ($2).y)\";\n") +SLANG_RAW(" case ") +SLANG_SPLICE(SLANG_TEXTURE_3D +) +SLANG_RAW(":\n") +SLANG_RAW(" __intrinsic_asm \"tex3D<$T0>($0, ($2).x, ($2).y, ($2).z)\";\n") +SLANG_RAW(" case ") +SLANG_SPLICE(SLANG_TEXTURE_CUBE +) +SLANG_RAW(":\n") +SLANG_RAW(" __intrinsic_asm \"texCubemap<$T0>($0, ($2).x, ($2).y, ($2).z)\";\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" __intrinsic_asm \"\";\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" return spirv_asm\n") +SLANG_RAW(" {\n") +SLANG_RAW(" %sampledImage : __sampledImageType(this) = OpSampledImage $this $s;\n") +SLANG_RAW(" %sampled : __sampledType(T) = OpImageSampleImplicitLod %sampledImage $location None;\n") +SLANG_RAW(" __truncate $$T result __sampledType(T) %sampled;\n") +SLANG_RAW(" };\n") +SLANG_RAW(" case wgsl:\n") +SLANG_RAW(" static_assert(T is float || T is vector || T is vector || T is vector\n") +SLANG_RAW(" , \"WGSL supports only f32 type textures\");\n") +SLANG_RAW(" if (isArray == 1)\n") 
+SLANG_RAW(" {\n") +SLANG_RAW(" switch (Shape.flavor)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case ") +SLANG_SPLICE(SLANG_TEXTURE_1D +) +SLANG_RAW(":\n") +SLANG_RAW(" __intrinsic_asm \"textureSample($0, $1, ($2).x, i32(($2).y))$z\";\n") +SLANG_RAW(" case ") +SLANG_SPLICE(SLANG_TEXTURE_2D +) +SLANG_RAW(":\n") +SLANG_RAW(" __intrinsic_asm \"textureSample($0, $1, ($2).xy, i32(($2).z))$z\";\n") +SLANG_RAW(" case ") +SLANG_SPLICE(SLANG_TEXTURE_CUBE +) +SLANG_RAW(":\n") +SLANG_RAW(" __intrinsic_asm \"textureSample($0, $1, ($2).xyz, i32(($2).w))$z\";\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW(" __intrinsic_asm \"textureSample($0, $1, $2)$z\";\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW("\n") +SLANG_RAW("\n") +SLANG_RAW(" [__readNone]\n") +SLANG_RAW(" [ForceInline]\n") +SLANG_RAW(" [require(cpp_glsl_hlsl_metal_spirv_wgsl, texture_sm_4_0_fragment)]\n") +SLANG_RAW(" T Sample(SamplerState s, vector location, constexpr vector offset)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" __requireComputeDerivative();\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case cpp:\n") +SLANG_RAW(" __intrinsic_asm \".Sample\";\n") +SLANG_RAW(" case hlsl:\n") +SLANG_RAW(" static_assert(T is float || T is vector || T is vector || T is vector\n") +SLANG_RAW(" || T is half || T is vector || T is vector || T is vector\n") +SLANG_RAW(" , \"HLSL supports only float and half type textures\");\n") +SLANG_RAW(" __intrinsic_asm \".Sample\";\n") +SLANG_RAW(" case metal:\n") +SLANG_RAW(" if (isArray == 1)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" switch (Shape.flavor)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case ") +SLANG_SPLICE(SLANG_TEXTURE_2D +) +SLANG_RAW(":\n") +SLANG_RAW(" __intrinsic_asm \"$c$0.sample($1, ($2).xy, uint(($2).z), $3)$z\";\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW(" else\n") +SLANG_RAW(" {\n") +SLANG_RAW(" switch (Shape.flavor)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case ") +SLANG_SPLICE(SLANG_TEXTURE_2D +) +SLANG_RAW(":\n") +SLANG_RAW(" case ") +SLANG_SPLICE(SLANG_TEXTURE_3D +) +SLANG_RAW(":\n") +SLANG_RAW(" __intrinsic_asm \"$c$0.sample($1, $2, $3)$z\";\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW(" // TODO: This needs to be handled by the capability system\n") +SLANG_RAW(" __intrinsic_asm \"\";\n") +SLANG_RAW(" case glsl:\n") +SLANG_RAW(" __intrinsic_asm \"$ctextureOffset($p, $2, $3)$z\";\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" return spirv_asm\n") +SLANG_RAW(" {\n") +SLANG_RAW(" %sampledImage : __sampledImageType(this) = OpSampledImage $this $s;\n") +SLANG_RAW(" %sampled : __sampledType(T) = OpImageSampleImplicitLod %sampledImage $location None|ConstOffset $offset;\n") +SLANG_RAW(" __truncate $$T result __sampledType(T) %sampled;\n") +SLANG_RAW(" };\n") +SLANG_RAW(" case wgsl:\n") +SLANG_RAW(" static_assert(T is float || T is vector || T is vector || T is vector\n") +SLANG_RAW(" , \"WGSL supports only f32 type textures\");\n") +SLANG_RAW(" if (isArray == 1)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" switch (Shape.flavor)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case ") +SLANG_SPLICE(SLANG_TEXTURE_1D +) +SLANG_RAW(":\n") +SLANG_RAW(" __intrinsic_asm \"textureSample($0, $1, ($2).x, i32(($2).y), $3)$z\";\n") +SLANG_RAW(" case ") +SLANG_SPLICE(SLANG_TEXTURE_2D +) +SLANG_RAW(":\n") +SLANG_RAW(" __intrinsic_asm \"textureSample($0, $1, ($2).xy, i32(($2).z), $3)$z\";\n") +SLANG_RAW(" case ") +SLANG_SPLICE(SLANG_TEXTURE_CUBE +) +SLANG_RAW(":\n") +SLANG_RAW(" __intrinsic_asm \"textureSample($0, $1, ($2).xyz, i32(($2).w), $3)$z\";\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW(" 
__intrinsic_asm \"textureSample($0, $1, $2, $3)$z\";\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW("\n") +SLANG_RAW(" [__readNone]\n") +SLANG_RAW(" [ForceInline]\n") +SLANG_RAW(" __glsl_extension(GL_ARB_sparse_texture_clamp)\n") +SLANG_RAW(" [require(cpp_glsl_hlsl_metal_spirv, texture_sm_4_0_fragment)]\n") +SLANG_RAW(" T Sample(SamplerState s, vector location, constexpr vector offset, float clamp)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" __requireComputeDerivative();\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case cpp:\n") +SLANG_RAW(" __intrinsic_asm \".Sample\";\n") +SLANG_RAW(" case hlsl:\n") +SLANG_RAW(" static_assert(T is float || T is vector || T is vector || T is vector\n") +SLANG_RAW(" || T is half || T is vector || T is vector || T is vector\n") +SLANG_RAW(" , \"HLSL supports only float and half type textures\");\n") +SLANG_RAW(" __intrinsic_asm \".Sample\";\n") +SLANG_RAW(" case metal:\n") +SLANG_RAW(" if (isArray == 1)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" switch (Shape.flavor)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case ") +SLANG_SPLICE(SLANG_TEXTURE_2D +) +SLANG_RAW(":\n") +SLANG_RAW(" __intrinsic_asm \"$c$0.sample($1, ($2).xy, uint(($2).z), min_lod_clamp($4), $3)$z\";\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW(" else\n") +SLANG_RAW(" {\n") +SLANG_RAW(" switch (Shape.flavor)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case ") +SLANG_SPLICE(SLANG_TEXTURE_2D +) +SLANG_RAW(":\n") +SLANG_RAW(" case ") +SLANG_SPLICE(SLANG_TEXTURE_3D +) +SLANG_RAW(":\n") +SLANG_RAW(" __intrinsic_asm \"$c$0.sample($1, $2, min_lod_clamp($4), $3)$z\";\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW(" // TODO: This needs to be handled by the capability system\n") +SLANG_RAW(" __intrinsic_asm \"\";\n") +SLANG_RAW(" case glsl:\n") +SLANG_RAW(" __intrinsic_asm \"$ctextureOffsetClampARB($p, $2, $3, $4)$z\";\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" return spirv_asm\n") +SLANG_RAW(" {\n") +SLANG_RAW(" OpCapability MinLod;\n") +SLANG_RAW(" %sampledImage : __sampledImageType(this) = OpSampledImage $this $s;\n") +SLANG_RAW(" %sampled : __sampledType(T) = OpImageSampleImplicitLod %sampledImage $location None|ConstOffset|MinLod $offset $clamp;\n") +SLANG_RAW(" __truncate $$T result __sampledType(T) %sampled;\n") +SLANG_RAW(" };\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW("\n") +SLANG_RAW(" [__readNone]\n") +SLANG_RAW(" [ForceInline]\n") +SLANG_RAW(" T Sample(SamplerState s, vector location, constexpr vector offset, float clamp, out uint status)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl:\n") +SLANG_RAW(" static_assert(T is float || T is vector || T is vector || T is vector\n") +SLANG_RAW(" || T is half || T is vector || T is vector || T is vector\n") +SLANG_RAW(" , \"HLSL supports only float and half type textures\");\n") +SLANG_RAW(" __intrinsic_asm \".Sample\";\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" status = 0;\n") +SLANG_RAW(" return Sample(s, location, offset, clamp);\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW("\n") +SLANG_RAW(" [__readNone]\n") +SLANG_RAW(" [ForceInline]\n") +SLANG_RAW(" [require(cpp_glsl_hlsl_metal_spirv_wgsl, texture_sm_4_0_fragment)]\n") +SLANG_RAW(" T SampleBias(SamplerState s, vector location, float bias)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" __requireComputeDerivative();\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case cpp:\n") +SLANG_RAW(" __intrinsic_asm \".SampleBias\";\n") +SLANG_RAW(" case hlsl:\n") +SLANG_RAW(" static_assert(T is float || T is 
vector || T is vector || T is vector\n") +SLANG_RAW(" || T is half || T is vector || T is vector || T is vector\n") +SLANG_RAW(" , \"HLSL supports only float and half type textures\");\n") +SLANG_RAW(" __intrinsic_asm \".SampleBias\";\n") +SLANG_RAW(" case metal:\n") +SLANG_RAW(" if (isArray == 1)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" switch (Shape.flavor)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case ") +SLANG_SPLICE(SLANG_TEXTURE_2D +) +SLANG_RAW(":\n") +SLANG_RAW(" __intrinsic_asm \"$c$0.sample($1, ($2).xy, uint(($2).z), bias($3))$z\";\n") +SLANG_RAW(" case ") +SLANG_SPLICE(SLANG_TEXTURE_CUBE +) +SLANG_RAW(":\n") +SLANG_RAW(" __intrinsic_asm \"$c$0.sample($1, ($2).xyz, uint(($2).w), bias($3))$z\";\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW(" else\n") +SLANG_RAW(" {\n") +SLANG_RAW(" switch (Shape.flavor)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case ") +SLANG_SPLICE(SLANG_TEXTURE_2D +) +SLANG_RAW(":\n") +SLANG_RAW(" case ") +SLANG_SPLICE(SLANG_TEXTURE_3D +) +SLANG_RAW(":\n") +SLANG_RAW(" case ") +SLANG_SPLICE(SLANG_TEXTURE_CUBE +) +SLANG_RAW(":\n") +SLANG_RAW(" __intrinsic_asm \"$c$0.sample($1, $2, bias($3))$z\";\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW(" // TODO: This needs to be handled by the capability system\n") +SLANG_RAW(" __intrinsic_asm \"\";\n") +SLANG_RAW(" case glsl:\n") +SLANG_RAW(" __intrinsic_asm \"$ctexture($p, $2, $3)$z\";\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" return spirv_asm\n") +SLANG_RAW(" {\n") +SLANG_RAW(" %sampledImage : __sampledImageType(this) = OpSampledImage $this $s;\n") +SLANG_RAW(" %sampled : __sampledType(T) = OpImageSampleImplicitLod %sampledImage $location None|Bias $bias;\n") +SLANG_RAW(" __truncate $$T result __sampledType(T) %sampled;\n") +SLANG_RAW(" };\n") +SLANG_RAW(" case wgsl:\n") +SLANG_RAW(" static_assert(T is float || T is vector || T is vector || T is vector\n") +SLANG_RAW(" , \"WGSL supports only f32 type textures\");\n") +SLANG_RAW(" if (isArray == 1)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" switch (Shape.flavor)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case ") +SLANG_SPLICE(SLANG_TEXTURE_1D +) +SLANG_RAW(":\n") +SLANG_RAW(" __intrinsic_asm \"textureSampleBias($0, $1, ($2).x, i32(($2).y), $3)$z\";\n") +SLANG_RAW(" case ") +SLANG_SPLICE(SLANG_TEXTURE_2D +) +SLANG_RAW(":\n") +SLANG_RAW(" __intrinsic_asm \"textureSampleBias($0, $1, ($2).xy, i32(($2).z), $3)$z\";\n") +SLANG_RAW(" case ") +SLANG_SPLICE(SLANG_TEXTURE_CUBE +) +SLANG_RAW(":\n") +SLANG_RAW(" __intrinsic_asm \"textureSampleBias($0, $1, ($2).xyz, i32(($2).w), $3)$z\";\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW(" __intrinsic_asm \"textureSampleBias($0, $1, $2, $3)$z\";\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW("\n") +SLANG_RAW(" [__readNone]\n") +SLANG_RAW(" [ForceInline]\n") +SLANG_RAW(" [require(cpp_glsl_hlsl_metal_spirv_wgsl, texture_sm_4_0_fragment)]\n") +SLANG_RAW(" T SampleBias(SamplerState s, vector location, float bias, constexpr vector offset)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" __requireComputeDerivative();\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case cpp:\n") +SLANG_RAW(" __intrinsic_asm \".SampleBias\";\n") +SLANG_RAW(" case hlsl:\n") +SLANG_RAW(" static_assert(T is float || T is vector || T is vector || T is vector\n") +SLANG_RAW(" || T is half || T is vector || T is vector || T is vector\n") +SLANG_RAW(" , \"HLSL supports only float and half type textures\");\n") +SLANG_RAW(" __intrinsic_asm \".SampleBias\";\n") +SLANG_RAW(" case metal:\n") +SLANG_RAW(" if (isArray == 1)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" switch 
(Shape.flavor)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case ") +SLANG_SPLICE(SLANG_TEXTURE_2D +) +SLANG_RAW(":\n") +SLANG_RAW(" __intrinsic_asm \"$c$0.sample($1, ($2).xy, uint(($2).z), bias($3), $4)$z\";\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW(" else\n") +SLANG_RAW(" {\n") +SLANG_RAW(" switch (Shape.flavor)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case ") +SLANG_SPLICE(SLANG_TEXTURE_2D +) +SLANG_RAW(":\n") +SLANG_RAW(" case ") +SLANG_SPLICE(SLANG_TEXTURE_3D +) +SLANG_RAW(":\n") +SLANG_RAW(" __intrinsic_asm \"$c$0.sample($1, $2, bias($3), $4)$z\";\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW(" // TODO: This needs to be handled by the capability system\n") +SLANG_RAW(" __intrinsic_asm \"\";\n") +SLANG_RAW(" case glsl:\n") +SLANG_RAW(" __intrinsic_asm \"$ctextureOffset($p, $2, $4, $3)$z\";\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" return spirv_asm\n") +SLANG_RAW(" {\n") +SLANG_RAW(" %sampledImage : __sampledImageType(this) = OpSampledImage $this $s;\n") +SLANG_RAW(" %sampled : __sampledType(T) = OpImageSampleImplicitLod %sampledImage $location None|Bias|ConstOffset $bias $offset;\n") +SLANG_RAW(" __truncate $$T result __sampledType(T) %sampled;\n") +SLANG_RAW(" };\n") +SLANG_RAW(" case wgsl:\n") +SLANG_RAW(" static_assert(T is float || T is vector || T is vector || T is vector\n") +SLANG_RAW(" , \"WGSL supports only f32 type textures\");\n") +SLANG_RAW(" if (isArray == 1)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" switch (Shape.flavor)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case ") +SLANG_SPLICE(SLANG_TEXTURE_1D +) +SLANG_RAW(":\n") +SLANG_RAW(" __intrinsic_asm \"textureSampleBias($0, $1, ($2).x, i32(($2).y), $3, $4)$z\";\n") +SLANG_RAW(" case ") +SLANG_SPLICE(SLANG_TEXTURE_2D +) +SLANG_RAW(":\n") +SLANG_RAW(" __intrinsic_asm \"textureSampleBias($0, $1, ($2).xy, i32(($2).z), $3, $4)$z\";\n") +SLANG_RAW(" case ") +SLANG_SPLICE(SLANG_TEXTURE_CUBE +) +SLANG_RAW(":\n") +SLANG_RAW(" __intrinsic_asm \"textureSampleBias($0, $1, ($2).xyz, i32(($2).w), $3, $4)$z\";\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW(" __intrinsic_asm \"textureSampleBias($0, $1, $2, $3, $4)$z\";\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW("\n") +SLANG_RAW(" [__readNone]\n") +SLANG_RAW(" [ForceInline]\n") +SLANG_RAW(" [require(glsl_hlsl_metal_spirv_wgsl, texture_shadowlod)]\n") +SLANG_RAW(" float SampleCmp(SamplerComparisonState s, vector location, float compareValue)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" __requireComputeDerivative();\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case glsl:\n") +SLANG_RAW(" if (Shape.dimensions == 1 && isArray == 0)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" return __glsl_texture_1d_shadow(this, s, __makeVector(__makeVector(location, 0.0), compareValue));\n") +SLANG_RAW(" }\n") +SLANG_RAW(" else if (Shape.dimensions == 3 && isArray == 1)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" return __glsl_texture_3d_array_shadow(this, s, location, compareValue);\n") +SLANG_RAW(" }\n") +SLANG_RAW(" else\n") +SLANG_RAW(" {\n") +SLANG_RAW(" return __glsl_texture(this, s, __makeVector(location,compareValue));\n") +SLANG_RAW(" }\n") +SLANG_RAW(" case hlsl:\n") +SLANG_RAW(" static_assert(T is float || T is vector || T is vector || T is vector\n") +SLANG_RAW(" || T is half || T is vector || T is vector || T is vector\n") +SLANG_RAW(" , \"HLSL supports only float and half type textures\");\n") +SLANG_RAW(" __intrinsic_asm \".SampleCmp\";\n") +SLANG_RAW(" case metal:\n") +SLANG_RAW(" if (isArray == 1)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" switch (Shape.flavor)\n") +SLANG_RAW(" {\n") 
+SLANG_RAW(" case ") +SLANG_SPLICE(SLANG_TEXTURE_2D +) +SLANG_RAW(":\n") +SLANG_RAW(" __intrinsic_asm \"$0.sample_compare($1, ($2).xy, uint(($2).z), $3)\";\n") +SLANG_RAW(" case ") +SLANG_SPLICE(SLANG_TEXTURE_CUBE +) +SLANG_RAW(":\n") +SLANG_RAW(" __intrinsic_asm \"$0.sample_compare($1, ($2).xyz, uint(($2).w), $3)\";\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW(" else\n") +SLANG_RAW(" {\n") +SLANG_RAW(" switch (Shape.flavor)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case ") +SLANG_SPLICE(SLANG_TEXTURE_2D +) +SLANG_RAW(":\n") +SLANG_RAW(" case ") +SLANG_SPLICE(SLANG_TEXTURE_CUBE +) +SLANG_RAW(":\n") +SLANG_RAW(" __intrinsic_asm \".sample_compare\";\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW(" __intrinsic_asm \"\";\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" return spirv_asm\n") +SLANG_RAW(" {\n") +SLANG_RAW(" %sampledImage : __sampledImageType(this) = OpSampledImage $this $s;\n") +SLANG_RAW(" result:$$float = OpImageSampleDrefImplicitLod %sampledImage $location $compareValue;\n") +SLANG_RAW(" };\n") +SLANG_RAW(" case wgsl:\n") +SLANG_RAW(" static_assert(T is float || T is vector || T is vector || T is vector\n") +SLANG_RAW(" , \"WGSL supports only f32 type textures\");\n") +SLANG_RAW(" if (isArray == 1)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" switch (Shape.flavor)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case ") +SLANG_SPLICE(SLANG_TEXTURE_1D +) +SLANG_RAW(":\n") +SLANG_RAW(" __intrinsic_asm \"textureSampleCompare($0, $1, ($2).x, i32(($2).y), $3)\";\n") +SLANG_RAW(" case ") +SLANG_SPLICE(SLANG_TEXTURE_2D +) +SLANG_RAW(":\n") +SLANG_RAW(" __intrinsic_asm \"textureSampleCompare($0, $1, ($2).xy, i32(($2).z), $3)\";\n") +SLANG_RAW(" case ") +SLANG_SPLICE(SLANG_TEXTURE_CUBE +) +SLANG_RAW(":\n") +SLANG_RAW(" __intrinsic_asm \"textureSampleCompare($0, $1, ($2).xyz, i32(($2).w), $3)\";\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW(" __intrinsic_asm \"textureSampleCompare($0, $1, $2, $3)\";\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW("\n") +SLANG_RAW(" [__readNone]\n") +SLANG_RAW(" [ForceInline]\n") +SLANG_RAW(" [require(glsl_hlsl_metal_spirv_wgsl, texture_shadowlod)]\n") +SLANG_RAW(" float SampleCmpLevelZero(SamplerComparisonState s, vector location, float compareValue)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case glsl:\n") +SLANG_RAW(" if (Shape.dimensions == 1 && isArray == 0)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" return __glsl_texture_level_zero_1d_shadow(this, s, __makeVector(__makeVector(location, 0.0), compareValue));\n") +SLANG_RAW(" }\n") +SLANG_RAW(" else\n") +SLANG_RAW(" {\n") +SLANG_RAW(" return __glsl_texture_level_zero(this, s, __makeVector(location,compareValue));\n") +SLANG_RAW(" }\n") +SLANG_RAW(" case hlsl:\n") +SLANG_RAW(" static_assert(T is float || T is vector || T is vector || T is vector\n") +SLANG_RAW(" || T is half || T is vector || T is vector || T is vector\n") +SLANG_RAW(" , \"HLSL supports only float and half type textures\");\n") +SLANG_RAW(" __intrinsic_asm \".SampleCmpLevelZero\";\n") +SLANG_RAW(" case metal:\n") +SLANG_RAW(" if (isArray == 1)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" switch (Shape.flavor)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case ") +SLANG_SPLICE(SLANG_TEXTURE_2D +) +SLANG_RAW(":\n") +SLANG_RAW(" __intrinsic_asm \"$0.sample_compare($1, ($2).xy, uint(($2).z), $3, level(0))\";\n") +SLANG_RAW(" case ") +SLANG_SPLICE(SLANG_TEXTURE_CUBE +) +SLANG_RAW(":\n") +SLANG_RAW(" __intrinsic_asm \"$0.sample_compare($1, ($2).xyz, uint(($2).w), $3, level(0))\";\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") 
+SLANG_RAW(" else\n") +SLANG_RAW(" {\n") +SLANG_RAW(" switch (Shape.flavor)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case ") +SLANG_SPLICE(SLANG_TEXTURE_2D +) +SLANG_RAW(":\n") +SLANG_RAW(" case ") +SLANG_SPLICE(SLANG_TEXTURE_CUBE +) +SLANG_RAW(":\n") +SLANG_RAW(" __intrinsic_asm \"$0.sample_compare($1, $2, $3, level(0))\";\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW(" __intrinsic_asm \"\";\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" const float zeroFloat = 0.0f;\n") +SLANG_RAW(" return spirv_asm\n") +SLANG_RAW(" {\n") +SLANG_RAW(" %sampledImage : __sampledImageType(this) = OpSampledImage $this $s;\n") +SLANG_RAW(" result:$$float = OpImageSampleDrefExplicitLod %sampledImage $location $compareValue Lod $zeroFloat;\n") +SLANG_RAW(" };\n") +SLANG_RAW(" case wgsl:\n") +SLANG_RAW(" static_assert(T is float || T is vector || T is vector || T is vector\n") +SLANG_RAW(" , \"WGSL supports only f32 type textures\");\n") +SLANG_RAW(" if (isArray == 1)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" switch (Shape.flavor)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case ") +SLANG_SPLICE(SLANG_TEXTURE_1D +) +SLANG_RAW(":\n") +SLANG_RAW(" __intrinsic_asm \"textureSampleCompareLevel($0, $1, ($2).x, i32(($2).y), $3)\";\n") +SLANG_RAW(" case ") +SLANG_SPLICE(SLANG_TEXTURE_2D +) +SLANG_RAW(":\n") +SLANG_RAW(" __intrinsic_asm \"textureSampleCompareLevel($0, $1, ($2).xy, i32(($2).z), $3)\";\n") +SLANG_RAW(" case ") +SLANG_SPLICE(SLANG_TEXTURE_CUBE +) +SLANG_RAW(":\n") +SLANG_RAW(" __intrinsic_asm \"textureSampleCompareLevel($0, $1, ($2).xyz, i32(($2).w), $3)\";\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW(" __intrinsic_asm \"textureSampleCompareLevel($0, $1, $2, $3)\";\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW("\n") +SLANG_RAW(" [__readNone]\n") +SLANG_RAW(" [ForceInline]\n") +SLANG_RAW(" [require(glsl_hlsl_metal_spirv_wgsl, texture_shadowlod)]\n") +SLANG_RAW(" float SampleCmp(SamplerComparisonState s, vector location, float compareValue, constexpr vector offset)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" __requireComputeDerivative();\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case glsl:\n") +SLANG_RAW(" if (Shape.dimensions == 1 && isArray == 0)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" return __glsl_texture_offset_1d_shadow(this, s, __makeVector(__makeVector(location, 0.0), compareValue), offset);\n") +SLANG_RAW(" }\n") +SLANG_RAW(" else\n") +SLANG_RAW(" {\n") +SLANG_RAW(" return __glsl_texture_offset(this, s, __makeVector(location,compareValue), offset);\n") +SLANG_RAW(" }\n") +SLANG_RAW(" case hlsl:\n") +SLANG_RAW(" static_assert(T is float || T is vector || T is vector || T is vector\n") +SLANG_RAW(" || T is half || T is vector || T is vector || T is vector\n") +SLANG_RAW(" , \"HLSL supports only float and half type textures\");\n") +SLANG_RAW(" __intrinsic_asm \".SampleCmp\";\n") +SLANG_RAW(" case metal:\n") +SLANG_RAW(" if (isArray == 1)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" switch (Shape.flavor)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case ") +SLANG_SPLICE(SLANG_TEXTURE_2D +) +SLANG_RAW(":\n") +SLANG_RAW(" __intrinsic_asm \"$0.sample_compare($1, ($2).xy, uint(($2).z), $3, $4)\";\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW(" else\n") +SLANG_RAW(" {\n") +SLANG_RAW(" switch (Shape.flavor)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case ") +SLANG_SPLICE(SLANG_TEXTURE_2D +) +SLANG_RAW(":\n") +SLANG_RAW(" __intrinsic_asm \".sample_compare\";\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW(" __intrinsic_asm \"\";\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" return spirv_asm\n") 
+SLANG_RAW(" {\n") +SLANG_RAW(" %sampledImage : __sampledImageType(this) = OpSampledImage $this $s;\n") +SLANG_RAW(" result:$$float = OpImageSampleDrefImplicitLod %sampledImage $location $compareValue ConstOffset $offset;\n") +SLANG_RAW(" };\n") +SLANG_RAW(" case wgsl:\n") +SLANG_RAW(" static_assert(T is float || T is vector || T is vector || T is vector\n") +SLANG_RAW(" , \"WGSL supports only f32 type textures\");\n") +SLANG_RAW(" if (isArray == 1)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" switch (Shape.flavor)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case ") +SLANG_SPLICE(SLANG_TEXTURE_1D +) +SLANG_RAW(":\n") +SLANG_RAW(" __intrinsic_asm \"textureSampleCompare($0, $1, ($2).x, i32(($2).y), $3, $4)\";\n") +SLANG_RAW(" case ") +SLANG_SPLICE(SLANG_TEXTURE_2D +) +SLANG_RAW(":\n") +SLANG_RAW(" __intrinsic_asm \"textureSampleCompare($0, $1, ($2).xy, i32(($2).z), $3, $4)\";\n") +SLANG_RAW(" case ") +SLANG_SPLICE(SLANG_TEXTURE_CUBE +) +SLANG_RAW(":\n") +SLANG_RAW(" __intrinsic_asm \"textureSampleCompare($0, $1, ($2).xyz, i32(($2).w), $3, $4)\";\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW(" __intrinsic_asm \"textureSampleCompare($0, $1, $2, $3, $4)\";\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW("\n") +SLANG_RAW(" [__readNone]\n") +SLANG_RAW(" [ForceInline]\n") +SLANG_RAW(" [require(glsl_hlsl_metal_spirv_wgsl, texture_shadowlod)]\n") +SLANG_RAW(" float SampleCmpLevelZero(SamplerComparisonState s, vector location, float compareValue, constexpr vector offset)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case glsl:\n") +SLANG_RAW(" if (Shape.dimensions == 1 && isArray == 0)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" return __glsl_texture_offset_level_zero_1d_shadow(this, s, __makeVector(__makeVector(location,0.0),compareValue), offset);\n") +SLANG_RAW(" }\n") +SLANG_RAW(" else\n") +SLANG_RAW(" {\n") +SLANG_RAW(" return __glsl_texture_offset_level_zero(this, s, __makeVector(location,compareValue), offset);\n") +SLANG_RAW(" }\n") +SLANG_RAW(" case hlsl:\n") +SLANG_RAW(" static_assert(T is float || T is vector || T is vector || T is vector\n") +SLANG_RAW(" || T is half || T is vector || T is vector || T is vector\n") +SLANG_RAW(" , \"HLSL supports only float and half type textures\");\n") +SLANG_RAW(" __intrinsic_asm \".SampleCmpLevelZero\";\n") +SLANG_RAW(" case metal:\n") +SLANG_RAW(" if (isShadow == 1)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" switch (Shape.flavor)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case ") +SLANG_SPLICE(SLANG_TEXTURE_2D +) +SLANG_RAW(":\n") +SLANG_RAW(" if (isArray == 1)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" // T sample_compare(sampler s, float2 coord, uint array, float compare_value, lod_options options, int2 offset = int2(0)) const\n") +SLANG_RAW(" __intrinsic_asm \"$0.sample_compare($1, ($2).xy, uint(($2).z), $3, level(0), $4)\";\n") +SLANG_RAW(" }\n") +SLANG_RAW(" else\n") +SLANG_RAW(" {\n") +SLANG_RAW(" // T sample_compare(sampler s, float2 coord, float compare_value, lod_options options, int2 offset = int2(0)) const\n") +SLANG_RAW(" __intrinsic_asm \"$0.sample_compare($1, $2, $3, level(0), $4)\";\n") +SLANG_RAW(" }\n") +SLANG_RAW(" break;\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW(" __intrinsic_asm \"\";\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" const float zeroFloat = 0.0f;\n") +SLANG_RAW(" return spirv_asm\n") +SLANG_RAW(" {\n") +SLANG_RAW(" %sampledImage : __sampledImageType(this) = OpSampledImage $this $s;\n") +SLANG_RAW(" result:$$float = OpImageSampleDrefExplicitLod %sampledImage $location $compareValue Lod|ConstOffset 
$zeroFloat $offset;\n") +SLANG_RAW(" };\n") +SLANG_RAW(" case wgsl:\n") +SLANG_RAW(" static_assert(T is float || T is vector || T is vector || T is vector\n") +SLANG_RAW(" , \"WGSL supports only f32 type textures\");\n") +SLANG_RAW(" if (isArray == 1)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" switch (Shape.flavor)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case ") +SLANG_SPLICE(SLANG_TEXTURE_1D +) +SLANG_RAW(":\n") +SLANG_RAW(" __intrinsic_asm \"textureSampleCompareLevel($0, $1, ($2).x, i32(($2).y), $3, $4)\";\n") +SLANG_RAW(" case ") +SLANG_SPLICE(SLANG_TEXTURE_2D +) +SLANG_RAW(":\n") +SLANG_RAW(" __intrinsic_asm \"textureSampleCompareLevel($0, $1, ($2).xy, i32(($2).z), $3, $4)\";\n") +SLANG_RAW(" case ") +SLANG_SPLICE(SLANG_TEXTURE_CUBE +) +SLANG_RAW(":\n") +SLANG_RAW(" __intrinsic_asm \"textureSampleCompareLevel($0, $1, ($2).xyz, i32(($2).w), $3, $4)\";\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW(" __intrinsic_asm \"textureSampleCompareLevel($0, $1, $2, $3, $4)\";\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW("\n") +SLANG_RAW(" [__readNone]\n") +SLANG_RAW(" [ForceInline]\n") +SLANG_RAW(" [require(cpp_glsl_hlsl_metal_spirv_wgsl, texture_sm_4_0)]\n") +SLANG_RAW(" T SampleGrad(SamplerState s, vector location, vector gradX, vector gradY)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case cpp:\n") +SLANG_RAW(" __intrinsic_asm \".SampleGrad\";\n") +SLANG_RAW(" case hlsl:\n") +SLANG_RAW(" static_assert(T is float || T is vector || T is vector || T is vector\n") +SLANG_RAW(" || T is half || T is vector || T is vector || T is vector\n") +SLANG_RAW(" , \"HLSL supports only float and half type textures\");\n") +SLANG_RAW(" __intrinsic_asm \".SampleGrad\";\n") +SLANG_RAW(" case metal:\n") +SLANG_RAW(" if (isArray == 1)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" switch (Shape.flavor)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case ") +SLANG_SPLICE(SLANG_TEXTURE_2D +) +SLANG_RAW(":\n") +SLANG_RAW(" __intrinsic_asm \"$c$0.sample($1, ($2).xy, uint(($2).z), gradient2d($3, $4))$z\";\n") +SLANG_RAW(" case ") +SLANG_SPLICE(SLANG_TEXTURE_CUBE +) +SLANG_RAW(":\n") +SLANG_RAW(" __intrinsic_asm \"$c$0.sample($1, ($2).xyz, uint(($2).w), gradientcube($3, $4))$z\";\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW(" else\n") +SLANG_RAW(" {\n") +SLANG_RAW(" switch (Shape.flavor)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case ") +SLANG_SPLICE(SLANG_TEXTURE_2D +) +SLANG_RAW(":\n") +SLANG_RAW(" __intrinsic_asm \"$c$0.sample($1, $2, gradient2d($3, $4))$z\";\n") +SLANG_RAW(" case ") +SLANG_SPLICE(SLANG_TEXTURE_3D +) +SLANG_RAW(":\n") +SLANG_RAW(" __intrinsic_asm \"$c$0.sample($1, $2, gradient3d($3, $4))$z\";\n") +SLANG_RAW(" case ") +SLANG_SPLICE(SLANG_TEXTURE_CUBE +) +SLANG_RAW(":\n") +SLANG_RAW(" __intrinsic_asm \"$c$0.sample($1, $2, gradientcube($3, $4))$z\";\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW(" // TODO: This needs to be handled by the capability system\n") +SLANG_RAW(" __intrinsic_asm \"\";\n") +SLANG_RAW(" case glsl:\n") +SLANG_RAW(" __intrinsic_asm \"$ctextureGrad($p, $2, $3, $4)$z\";\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" return spirv_asm\n") +SLANG_RAW(" {\n") +SLANG_RAW(" %sampledImage : __sampledImageType(this) = OpSampledImage $this $s;\n") +SLANG_RAW(" %sampled : __sampledType(T) = OpImageSampleExplicitLod %sampledImage $location None|Grad $gradX $gradY;\n") +SLANG_RAW(" __truncate $$T result __sampledType(T) %sampled;\n") +SLANG_RAW(" };\n") +SLANG_RAW(" case wgsl:\n") +SLANG_RAW(" static_assert(T is float || T is vector || T is vector || T is vector\n") 
+SLANG_RAW(" , \"WGSL supports only f32 type textures\");\n") +SLANG_RAW(" if (isArray == 1)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" switch (Shape.flavor)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case ") +SLANG_SPLICE(SLANG_TEXTURE_1D +) +SLANG_RAW(":\n") +SLANG_RAW(" __intrinsic_asm \"textureSampleGrad($0, $1, ($2).x, i32(($2).y), $3, $4)$z\";\n") +SLANG_RAW(" case ") +SLANG_SPLICE(SLANG_TEXTURE_2D +) +SLANG_RAW(":\n") +SLANG_RAW(" __intrinsic_asm \"textureSampleGrad($0, $1, ($2).xy, i32(($2).z), $3, $4)$z\";\n") +SLANG_RAW(" case ") +SLANG_SPLICE(SLANG_TEXTURE_CUBE +) +SLANG_RAW(":\n") +SLANG_RAW(" __intrinsic_asm \"textureSampleGrad($0, $1, ($2).xyz, i32(($2).w), $3, $4)$z\";\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW(" __intrinsic_asm \"textureSampleGrad($0, $1, $2, $3, $4)$z\";\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW("\n") +SLANG_RAW(" [__readNone]\n") +SLANG_RAW(" [ForceInline]\n") +SLANG_RAW(" [require(cpp_glsl_hlsl_metal_spirv_wgsl, texture_sm_4_0)]\n") +SLANG_RAW(" T SampleGrad(SamplerState s, vector location, vector gradX, vector gradY, constexpr vector offset)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case cpp:\n") +SLANG_RAW(" __intrinsic_asm \".SampleGrad\";\n") +SLANG_RAW(" case hlsl:\n") +SLANG_RAW(" static_assert(T is float || T is vector || T is vector || T is vector\n") +SLANG_RAW(" || T is half || T is vector || T is vector || T is vector\n") +SLANG_RAW(" , \"HLSL supports only float and half type textures\");\n") +SLANG_RAW(" __intrinsic_asm \".SampleGrad\";\n") +SLANG_RAW(" case metal:\n") +SLANG_RAW(" if (isArray == 1)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" switch (Shape.flavor)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case ") +SLANG_SPLICE(SLANG_TEXTURE_2D +) +SLANG_RAW(":\n") +SLANG_RAW(" __intrinsic_asm \"$c$0.sample($1, ($2).xy, uint(($2).z), gradient2d($3, $4), $5)$z\";\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW(" else\n") +SLANG_RAW(" {\n") +SLANG_RAW(" switch (Shape.flavor)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case ") +SLANG_SPLICE(SLANG_TEXTURE_2D +) +SLANG_RAW(":\n") +SLANG_RAW(" __intrinsic_asm \"$c$0.sample($1, $2, gradient2d($3, $4), $5)$z\";\n") +SLANG_RAW(" case ") +SLANG_SPLICE(SLANG_TEXTURE_3D +) +SLANG_RAW(":\n") +SLANG_RAW(" __intrinsic_asm \"$c$0.sample($1, $2, gradient3d($3, $4), $5)$z\";\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW(" // TODO: This needs to be handled by the capability system\n") +SLANG_RAW(" __intrinsic_asm \"\";\n") +SLANG_RAW(" case glsl:\n") +SLANG_RAW(" __intrinsic_asm \"$ctextureGradOffset($p, $2, $3, $4, $5)$z\";\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" return spirv_asm\n") +SLANG_RAW(" {\n") +SLANG_RAW(" %sampledImage : __sampledImageType(this) = OpSampledImage $this $s;\n") +SLANG_RAW(" %sampled : __sampledType(T) = OpImageSampleExplicitLod %sampledImage $location None|Grad|ConstOffset $gradX $gradY $offset;\n") +SLANG_RAW(" __truncate $$T result __sampledType(T) %sampled;\n") +SLANG_RAW("\n") +SLANG_RAW(" };\n") +SLANG_RAW(" case wgsl:\n") +SLANG_RAW(" static_assert(T is float || T is vector || T is vector || T is vector\n") +SLANG_RAW(" , \"WGSL supports only f32 type textures\");\n") +SLANG_RAW(" if (isArray == 1)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" switch (Shape.flavor)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case ") +SLANG_SPLICE(SLANG_TEXTURE_1D +) +SLANG_RAW(":\n") +SLANG_RAW(" __intrinsic_asm \"textureSampleGrad($0, $1, ($2).x, i32(($2).y), $3, $4, $5)$z\";\n") +SLANG_RAW(" case ") +SLANG_SPLICE(SLANG_TEXTURE_2D +) +SLANG_RAW(":\n") +SLANG_RAW(" 
__intrinsic_asm \"textureSampleGrad($0, $1, ($2).xy, i32(($2).z), $3, $4, $5)$z\";\n") +SLANG_RAW(" case ") +SLANG_SPLICE(SLANG_TEXTURE_CUBE +) +SLANG_RAW(":\n") +SLANG_RAW(" __intrinsic_asm \"textureSampleGrad($0, $1, ($2).xyz, i32(($2).w), $3, $4, $5)$z\";\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW(" __intrinsic_asm \"textureSampleGrad($0, $1, $2, $3, $4, $5)$z\";\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW("\n") +SLANG_RAW(" [__readNone]\n") +SLANG_RAW(" [ForceInline]\n") +SLANG_RAW(" __glsl_extension(GL_ARB_sparse_texture_clamp)\n") +SLANG_RAW(" [require(cpp_glsl_hlsl_metal_spirv, texture_sm_4_0)]\n") +SLANG_RAW(" T SampleGrad(SamplerState s, vector location, vector gradX, vector gradY, constexpr vector offset, float lodClamp)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case cpp:\n") +SLANG_RAW(" __intrinsic_asm \".SampleGrad\";\n") +SLANG_RAW(" case hlsl:\n") +SLANG_RAW(" static_assert(T is float || T is vector || T is vector || T is vector\n") +SLANG_RAW(" || T is half || T is vector || T is vector || T is vector\n") +SLANG_RAW(" , \"HLSL supports only float and half type textures\");\n") +SLANG_RAW(" __intrinsic_asm \".SampleGrad\";\n") +SLANG_RAW(" case metal:\n") +SLANG_RAW(" if (isArray == 1)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" switch (Shape.flavor)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case ") +SLANG_SPLICE(SLANG_TEXTURE_2D +) +SLANG_RAW(":\n") +SLANG_RAW(" __intrinsic_asm \"$c$0.sample($1, ($2).xy, uint(($2).z), gradient2d($3, $4), min_lod_clamp($6), $5)$z\";\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW(" else\n") +SLANG_RAW(" {\n") +SLANG_RAW(" switch (Shape.flavor)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case ") +SLANG_SPLICE(SLANG_TEXTURE_2D +) +SLANG_RAW(":\n") +SLANG_RAW(" __intrinsic_asm \"$c$0.sample($1, $2, gradient2d($3, $4), min_lod_clamp($6), $5)$z\";\n") +SLANG_RAW(" case ") +SLANG_SPLICE(SLANG_TEXTURE_3D +) +SLANG_RAW(":\n") +SLANG_RAW(" __intrinsic_asm \"$c$0.sample($1, $2, gradient3d($3, $4), min_lod_clamp($6), $5)$z\";\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW(" // TODO: This needs to be handled by the capability system\n") +SLANG_RAW(" __intrinsic_asm \"\";\n") +SLANG_RAW(" case glsl:\n") +SLANG_RAW(" __intrinsic_asm \"$ctextureGradOffsetClampARB($p, $2, $3, $4, $5, $6)$z\";\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" return spirv_asm\n") +SLANG_RAW(" {\n") +SLANG_RAW(" OpCapability MinLod;\n") +SLANG_RAW(" %sampledImage : __sampledImageType(this) = OpSampledImage $this $s;\n") +SLANG_RAW(" %sampled : __sampledType(T) = OpImageSampleExplicitLod %sampledImage $location None|Grad|ConstOffset|MinLod $gradX $gradY $offset $lodClamp;\n") +SLANG_RAW(" __truncate $$T result __sampledType(T) %sampled;\n") +SLANG_RAW(" };\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW("\n") +SLANG_RAW(" [__readNone]\n") +SLANG_RAW(" [ForceInline]\n") +SLANG_RAW(" [require(cpp_cuda_glsl_hlsl_metal_spirv_wgsl, texture_sm_4_0)]\n") +SLANG_RAW(" T SampleLevel(SamplerState s, vector location, float level)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case cpp:\n") +SLANG_RAW(" __intrinsic_asm \".SampleLevel\";\n") +SLANG_RAW(" case hlsl:\n") +SLANG_RAW(" static_assert(T is float || T is vector || T is vector || T is vector\n") +SLANG_RAW(" || T is half || T is vector || T is vector || T is vector\n") +SLANG_RAW(" , \"HLSL supports only float and half type textures\");\n") +SLANG_RAW(" __intrinsic_asm \".SampleLevel\";\n") +SLANG_RAW(" case metal:\n") +SLANG_RAW(" if 
(isArray == 1)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" switch (Shape.flavor)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case ") +SLANG_SPLICE(SLANG_TEXTURE_1D +) +SLANG_RAW(":\n") +SLANG_RAW(" __intrinsic_asm \"$c$0.sample($1, ($2).x, uint(($2).y))$z\";\n") +SLANG_RAW(" case ") +SLANG_SPLICE(SLANG_TEXTURE_2D +) +SLANG_RAW(":\n") +SLANG_RAW(" __intrinsic_asm \"$c$0.sample($1, ($2).xy, uint(($2).z), level($3))$z\";\n") +SLANG_RAW(" case ") +SLANG_SPLICE(SLANG_TEXTURE_CUBE +) +SLANG_RAW(":\n") +SLANG_RAW(" __intrinsic_asm \"$c$0.sample($1, ($2).xyz, uint(($2).w), level($3))$z\";\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW(" else\n") +SLANG_RAW(" {\n") +SLANG_RAW(" switch (Shape.flavor)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case ") +SLANG_SPLICE(SLANG_TEXTURE_1D +) +SLANG_RAW(":\n") +SLANG_RAW(" __intrinsic_asm \"$c$0.sample($1, $2)$z\";\n") +SLANG_RAW(" case ") +SLANG_SPLICE(SLANG_TEXTURE_2D +) +SLANG_RAW(":\n") +SLANG_RAW(" case ") +SLANG_SPLICE(SLANG_TEXTURE_3D +) +SLANG_RAW(":\n") +SLANG_RAW(" case ") +SLANG_SPLICE(SLANG_TEXTURE_CUBE +) +SLANG_RAW(":\n") +SLANG_RAW(" __intrinsic_asm \"$c$0.sample($1, $2, level($3))$z\";\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW(" // TODO: This needs to be handled by the capability system\n") +SLANG_RAW(" __intrinsic_asm \"\";\n") +SLANG_RAW(" case glsl:\n") +SLANG_RAW(" __intrinsic_asm \"$ctextureLod($p, $2, $3)$z\";\n") +SLANG_RAW(" case cuda:\n") +SLANG_RAW(" if (isArray != 0)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" switch(Shape.flavor)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case ") +SLANG_SPLICE(SLANG_TEXTURE_1D +) +SLANG_RAW(":\n") +SLANG_RAW(" __intrinsic_asm \"tex1DLayeredLod<$T0>($0, ($2).x, int(($2).y), ($3))\";\n") +SLANG_RAW(" case ") +SLANG_SPLICE(SLANG_TEXTURE_2D +) +SLANG_RAW(":\n") +SLANG_RAW(" __intrinsic_asm \"tex2DLayeredLod<$T0>($0, ($2).x, ($2).y, int(($2).z), ($3))\";\n") +SLANG_RAW(" case ") +SLANG_SPLICE(SLANG_TEXTURE_CUBE +) +SLANG_RAW(":\n") +SLANG_RAW(" __intrinsic_asm \"texCubemapLayeredLod<$T0>($0, ($2).x, ($2).y, ($2).z, int(($2).w), ($3))\";\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" __intrinsic_asm \"\";\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW(" else\n") +SLANG_RAW(" {\n") +SLANG_RAW(" switch(Shape.flavor)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case ") +SLANG_SPLICE(SLANG_TEXTURE_1D +) +SLANG_RAW(":\n") +SLANG_RAW(" __intrinsic_asm \"tex1DLod<$T0>($0, ($2), ($3))\";\n") +SLANG_RAW(" case ") +SLANG_SPLICE(SLANG_TEXTURE_2D +) +SLANG_RAW(":\n") +SLANG_RAW(" __intrinsic_asm \"tex2DLod<$T0>($0, ($2).x, ($2).y, ($3))\";\n") +SLANG_RAW(" case ") +SLANG_SPLICE(SLANG_TEXTURE_3D +) +SLANG_RAW(":\n") +SLANG_RAW(" __intrinsic_asm \"tex3DLod<$T0>($0, ($2).x, ($2).y, ($2).z, ($3))\";\n") +SLANG_RAW(" case ") +SLANG_SPLICE(SLANG_TEXTURE_CUBE +) +SLANG_RAW(":\n") +SLANG_RAW(" __intrinsic_asm \"texCubemapLod<$T0>($0, ($2).x, ($2).y, ($2).z, ($3))\";\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" __intrinsic_asm \"\";\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" return spirv_asm\n") +SLANG_RAW(" {\n") +SLANG_RAW(" %sampledImage : __sampledImageType(this) = OpSampledImage $this $s;\n") +SLANG_RAW(" %sampled : __sampledType(T) = OpImageSampleExplicitLod %sampledImage $location None|Lod $level;\n") +SLANG_RAW(" __truncate $$T result __sampledType(T) %sampled;\n") +SLANG_RAW(" };\n") +SLANG_RAW(" case wgsl:\n") +SLANG_RAW(" static_assert(T is float || T is vector || T is vector || T is vector\n") +SLANG_RAW(" , \"WGSL supports only f32 type textures\");\n") +SLANG_RAW(" if (isArray == 1)\n") +SLANG_RAW(" 
{\n") +SLANG_RAW(" switch (Shape.flavor)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case ") +SLANG_SPLICE(SLANG_TEXTURE_1D +) +SLANG_RAW(":\n") +SLANG_RAW(" __intrinsic_asm \"textureSampleLevel($0, $1, ($2).x, i32(($2).y), $3)$z\";\n") +SLANG_RAW(" case ") +SLANG_SPLICE(SLANG_TEXTURE_2D +) +SLANG_RAW(":\n") +SLANG_RAW(" __intrinsic_asm \"textureSampleLevel($0, $1, ($2).xy, i32(($2).z), $3)$z\";\n") +SLANG_RAW(" case ") +SLANG_SPLICE(SLANG_TEXTURE_CUBE +) +SLANG_RAW(":\n") +SLANG_RAW(" __intrinsic_asm \"textureSampleLevel($0, $1, ($2).xyz, i32(($2).w), $3)$z\";\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW(" __intrinsic_asm \"textureSampleLevel($0, $1, $2, $3)$z\";\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW("\n") +SLANG_RAW(" [__readNone]\n") +SLANG_RAW(" [ForceInline]\n") +SLANG_RAW("\n") +SLANG_RAW(" [require(cpp_glsl_hlsl_metal_spirv_wgsl, texture_sm_4_0)]\n") +SLANG_RAW(" T SampleLevel(SamplerState s, vector location, float level, constexpr vector offset)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case cpp:\n") +SLANG_RAW(" __intrinsic_asm \".SampleLevel\";\n") +SLANG_RAW(" case hlsl:\n") +SLANG_RAW(" static_assert(T is float || T is vector || T is vector || T is vector\n") +SLANG_RAW(" || T is half || T is vector || T is vector || T is vector\n") +SLANG_RAW(" , \"HLSL supports only float and half type textures\");\n") +SLANG_RAW(" __intrinsic_asm \".SampleLevel\";\n") +SLANG_RAW(" case metal:\n") +SLANG_RAW(" if (isArray == 1)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" switch (Shape.flavor)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case ") +SLANG_SPLICE(SLANG_TEXTURE_2D +) +SLANG_RAW(":\n") +SLANG_RAW(" __intrinsic_asm \"$c$0.sample($1, ($2).xy, uint(($2).z), level($3), $4)$z\";\n") +SLANG_RAW(" case ") +SLANG_SPLICE(SLANG_TEXTURE_CUBE +) +SLANG_RAW(":\n") +SLANG_RAW(" __intrinsic_asm \"$c$0.sample($1, ($2).xyz, uint(($2).w), level($3), $4)$z\";\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW(" else\n") +SLANG_RAW(" {\n") +SLANG_RAW(" switch (Shape.flavor)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case ") +SLANG_SPLICE(SLANG_TEXTURE_2D +) +SLANG_RAW(":\n") +SLANG_RAW(" case ") +SLANG_SPLICE(SLANG_TEXTURE_3D +) +SLANG_RAW(":\n") +SLANG_RAW(" case ") +SLANG_SPLICE(SLANG_TEXTURE_CUBE +) +SLANG_RAW(":\n") +SLANG_RAW(" __intrinsic_asm \"$c$0.sample($1, $2, level($3), $4)$z\";\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW(" __intrinsic_asm \"\";\n") +SLANG_RAW(" case glsl:\n") +SLANG_RAW(" __intrinsic_asm \"$ctextureLodOffset($p, $2, $3, $4)$z\";\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" return spirv_asm\n") +SLANG_RAW(" {\n") +SLANG_RAW(" %sampledImage : __sampledImageType(this) = OpSampledImage $this $s;\n") +SLANG_RAW(" %sampled : __sampledType(T) = OpImageSampleExplicitLod %sampledImage $location None|Lod|ConstOffset $level $offset;\n") +SLANG_RAW(" __truncate $$T result __sampledType(T) %sampled;\n") +SLANG_RAW(" };\n") +SLANG_RAW(" case wgsl:\n") +SLANG_RAW(" static_assert(T is float || T is vector || T is vector || T is vector\n") +SLANG_RAW(" , \"WGSL supports only f32 type textures\");\n") +SLANG_RAW(" if (isArray == 1)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" switch (Shape.flavor)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case ") +SLANG_SPLICE(SLANG_TEXTURE_1D +) +SLANG_RAW(":\n") +SLANG_RAW(" __intrinsic_asm \"textureSampleLevel($0, $1, ($2).x, i32(($2).y), $3, $4)$z\";\n") +SLANG_RAW(" case ") +SLANG_SPLICE(SLANG_TEXTURE_2D +) +SLANG_RAW(":\n") +SLANG_RAW(" __intrinsic_asm \"textureSampleLevel($0, $1, ($2).xy, i32(($2).z), $3, $4)$z\";\n") 
+SLANG_RAW(" case ") +SLANG_SPLICE(SLANG_TEXTURE_CUBE +) +SLANG_RAW(":\n") +SLANG_RAW(" __intrinsic_asm \"textureSampleLevel($0, $1, ($2).xyz, i32(($2).w), $3, $4)$z\";\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW(" __intrinsic_asm \"textureSampleLevel($0, $1, $2, $3, $4)$z\";\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("// Texture.GetDimensions and Sampler.GetDimensions\n") + +const char* kTextureShapeTypeNames[] = { + "__Shape1D", "__Shape2D", "__Shape3D", "__ShapeCube"}; +for (int shapeIndex = 0; shapeIndex < 4; shapeIndex++) +for (int isArray = 0; isArray <= 1; isArray++) +for (int isMS = 0; isMS <= 1; isMS++) { + if (isMS) + { + if (shapeIndex != kCoreModule_ShapeIndex2D) + continue; + } + if (isArray) + { + if (shapeIndex == kCoreModule_ShapeIndex3D) + continue; + } + auto shapeTypeName = kTextureShapeTypeNames[shapeIndex]; + TextureTypeInfo textureTypeInfo(kBaseTextureShapes[shapeIndex], isArray, isMS, 0, sb, path); +SLANG_RAW("#line 2423 \"hlsl.meta.slang\"") +SLANG_RAW("\n") +SLANG_RAW("\n") +SLANG_RAW("__generic\n") +SLANG_RAW("extension _Texture\n") +SLANG_RAW("{\n") + + textureTypeInfo.writeGetDimensionFunctions(); + +SLANG_RAW("#line 2430 \"hlsl.meta.slang\"") +SLANG_RAW("\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") + +} +SLANG_RAW("#line 2435 \"hlsl.meta.slang\"") +SLANG_RAW("\n") +SLANG_RAW("\n") +SLANG_RAW("// Texture.GetSamplePosition(int s);\n") +SLANG_RAW("__generic\n") +SLANG_RAW("extension _Texture\n") +SLANG_RAW("{\n") +SLANG_RAW(" [require(cpp_cuda_glsl_hlsl_spirv, texture_sm_4_1_vertex_fragment_geometry)]\n") +SLANG_RAW(" float2 GetSamplePosition(int s);\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("__intrinsic_op(") +SLANG_SPLICE(kIROp_MakeArray +) +SLANG_RAW(")\n") +SLANG_RAW("Array __makeArray(T v0, T v1, T v2, T v3);\n") +SLANG_RAW("\n") +SLANG_RAW("\n") +SLANG_RAW("// Beginning of Texture Gather\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[ForceInline]\n") +SLANG_RAW("[require(glsl_metal_spirv_wgsl, texture_gather)]\n") +SLANG_RAW("vector __texture_gather(\n") +SLANG_RAW(" _Texture texture,\n") +SLANG_RAW(" SamplerState s,\n") +SLANG_RAW(" vector location,\n") +SLANG_RAW(" int component)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case glsl:\n") +SLANG_RAW(" __intrinsic_asm \"textureGather($p, $2, $3)\";\n") +SLANG_RAW(" case metal:\n") +SLANG_RAW(" if (isArray == 1)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" switch (Shape.flavor)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case ") +SLANG_SPLICE(SLANG_TEXTURE_2D +) +SLANG_RAW(":\n") +SLANG_RAW(" // Tv gather(sampler s, float2 coord, uint array, int2 offset = int2(0), component c = component::x) const\n") +SLANG_RAW(" __intrinsic_asm \"$0.gather($1, ($2).xy, uint(($2).z), int2(0), metal::component($3))\";\n") +SLANG_RAW(" case ") +SLANG_SPLICE(SLANG_TEXTURE_CUBE +) +SLANG_RAW(":\n") +SLANG_RAW(" // Tv gather(sampler s, float3 coord, uint array, component c = component::x) const\n") +SLANG_RAW(" __intrinsic_asm \"$0.gather($1, ($2).xyz, uint(($2).w), metal::component($3))\";\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW(" if (Shape.flavor == ") +SLANG_SPLICE(SLANG_TEXTURE_CUBE +) +SLANG_RAW(")\n") +SLANG_RAW(" {\n") +SLANG_RAW(" // Tv gather(sampler s, float3 coord, component c = component::x) const\n") +SLANG_RAW(" __intrinsic_asm \"$0.gather($1, $2, metal::component($3))\";\n") +SLANG_RAW(" }\n") +SLANG_RAW(" // Tv gather(sampler s, float2 coord, int2 offset = int2(0), component c = component::x) const\n") +SLANG_RAW(" 
__intrinsic_asm \"$0.gather($1, $2, int2(0), metal::component($3))\";\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" return spirv_asm {\n") +SLANG_RAW(" %sampledImage : __sampledImageType(texture) = OpSampledImage $texture $s;\n") +SLANG_RAW(" result:$$vector = OpImageGather %sampledImage $location $component;\n") +SLANG_RAW(" };\n") +SLANG_RAW(" case wgsl:\n") +SLANG_RAW(" if (isShadow == 1)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" // If depth texture, `textureGather` doesn't take channel value, `$3`.\n") +SLANG_RAW(" if (isArray == 1)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" switch (Shape.flavor)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case ") +SLANG_SPLICE(SLANG_TEXTURE_2D +) +SLANG_RAW(":\n") +SLANG_RAW(" __intrinsic_asm \"textureGather($0, $1, ($2).xy, u32(($2).z))\";\n") +SLANG_RAW(" case ") +SLANG_SPLICE(SLANG_TEXTURE_CUBE +) +SLANG_RAW(":\n") +SLANG_RAW(" __intrinsic_asm \"textureGather($0, $1, ($2).xyz, u32(($2).w))\";\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW(" __intrinsic_asm \"textureGather($0, $1, $2)\";\n") +SLANG_RAW(" }\n") +SLANG_RAW("\n") +SLANG_RAW(" if (isArray == 1)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" switch (Shape.flavor)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case ") +SLANG_SPLICE(SLANG_TEXTURE_2D +) +SLANG_RAW(":\n") +SLANG_RAW(" __intrinsic_asm \"textureGather($3, $0, $1, ($2).xy, u32(($2).z))\";\n") +SLANG_RAW(" case ") +SLANG_SPLICE(SLANG_TEXTURE_CUBE +) +SLANG_RAW(":\n") +SLANG_RAW(" __intrinsic_asm \"textureGather($3, $0, $1, ($2).xyz, u32(($2).w))\";\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW(" __intrinsic_asm \"textureGather($3, $0, $1, $2)\";\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[ForceInline]\n") +SLANG_RAW("[require(glsl_spirv, texture_gather)]\n") +SLANG_RAW("vector __texture_gather(\n") +SLANG_RAW(" _Texture sampler,\n") +SLANG_RAW(" vector location,\n") +SLANG_RAW(" int component)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case glsl:\n") +SLANG_RAW(" __intrinsic_asm \"textureGather($0, $1, $2)\";\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" return spirv_asm {\n") +SLANG_RAW(" result:$$vector = OpImageGather $sampler $location $component;\n") +SLANG_RAW(" };\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[ForceInline]\n") +SLANG_RAW("[require(glsl_metal_spirv_wgsl, texture_gather)]\n") +SLANG_RAW("vector __texture_gather_offset(\n") +SLANG_RAW(" _Texture texture,\n") +SLANG_RAW(" SamplerState s,\n") +SLANG_RAW(" vector location,\n") +SLANG_RAW(" constexpr vector offset,\n") +SLANG_RAW(" int component)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case glsl:\n") +SLANG_RAW(" __intrinsic_asm \"textureGatherOffset($p, $2, $3, $4)\";\n") +SLANG_RAW(" case metal:\n") +SLANG_RAW(" static_assert(Shape.flavor == ") +SLANG_SPLICE(SLANG_TEXTURE_2D +) +SLANG_RAW(",\n") +SLANG_RAW(" \"Metal supports offset variant of Gather only for 2D textures\");\n") +SLANG_RAW("\n") +SLANG_RAW(" if (isArray == 1)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" // Tv gather(sampler s, float2 coord, uint array, int2 offset = int2(0), component c = component::x) const\n") +SLANG_RAW(" __intrinsic_asm \"$0.gather($1, ($2).xy, uint(($2).z), $3, metal::component($4))\";\n") +SLANG_RAW(" }\n") +SLANG_RAW(" // Tv gather(sampler s, float2 coord, int2 offset = int2(0), component c = component::x) const\n") +SLANG_RAW(" __intrinsic_asm \"$0.gather($1, $2, $3, metal::component($4))\";\n") +SLANG_RAW(" case 
spirv:\n") +SLANG_RAW(" return spirv_asm {\n") +SLANG_RAW(" OpCapability ImageGatherExtended;\n") +SLANG_RAW(" %sampledImage : __sampledImageType(texture) = OpSampledImage $texture $s;\n") +SLANG_RAW(" result:$$vector = OpImageGather %sampledImage $location $component Offset $offset;\n") +SLANG_RAW(" };\n") +SLANG_RAW(" case wgsl:\n") +SLANG_RAW(" if (isShadow == 1)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" // If depth texture, `textureGather` doesn't take channel value, `$4`.\n") +SLANG_RAW(" if (isArray == 1)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" switch (Shape.flavor)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case ") +SLANG_SPLICE(SLANG_TEXTURE_2D +) +SLANG_RAW(":\n") +SLANG_RAW(" __intrinsic_asm \"textureGather($0, $1, ($2).xy, u32(($2).z), $3)\";\n") +SLANG_RAW(" case ") +SLANG_SPLICE(SLANG_TEXTURE_CUBE +) +SLANG_RAW(":\n") +SLANG_RAW(" __intrinsic_asm \"textureGather($0, $1, ($2).xyz, u32(($2).w), $3)\";\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW(" __intrinsic_asm \"textureGather($0, $1, $2, $3)\";\n") +SLANG_RAW(" }\n") +SLANG_RAW("\n") +SLANG_RAW(" if (isArray == 1)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" switch (Shape.flavor)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case ") +SLANG_SPLICE(SLANG_TEXTURE_2D +) +SLANG_RAW(":\n") +SLANG_RAW(" __intrinsic_asm \"textureGather($4, $0, $1, ($2).xy, u32(($2).z), $3)\";\n") +SLANG_RAW(" case ") +SLANG_SPLICE(SLANG_TEXTURE_CUBE +) +SLANG_RAW(":\n") +SLANG_RAW(" __intrinsic_asm \"textureGather($4, $0, $1, ($2).xyz, u32(($2).w), $3)\";\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW(" __intrinsic_asm \"textureGather($4, $0, $1, $2, $3)\";\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[ForceInline]\n") +SLANG_RAW("[require(glsl_spirv, texture_gather)]\n") +SLANG_RAW("vector __texture_gather_offset(\n") +SLANG_RAW(" _Texture sampler,\n") +SLANG_RAW(" vector location,\n") +SLANG_RAW(" constexpr vector offset,\n") +SLANG_RAW(" int component)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case glsl:\n") +SLANG_RAW(" __intrinsic_asm \"textureGatherOffset($0, $1, $2, $3)\";\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" return spirv_asm {\n") +SLANG_RAW(" OpCapability ImageGatherExtended;\n") +SLANG_RAW(" result:$$vector = OpImageGather $sampler $location $component Offset $offset;\n") +SLANG_RAW(" };\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[ForceInline]\n") +SLANG_RAW("[require(glsl_spirv, texture_gather)]\n") +SLANG_RAW("vector __texture_gather_offsets(\n") +SLANG_RAW(" _Texture texture,\n") +SLANG_RAW(" SamplerState s,\n") +SLANG_RAW(" vector location,\n") +SLANG_RAW(" constexpr vector offset1,\n") +SLANG_RAW(" constexpr vector offset2,\n") +SLANG_RAW(" constexpr vector offset3,\n") +SLANG_RAW(" constexpr vector offset4,\n") +SLANG_RAW(" int component)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case glsl:\n") +SLANG_RAW(" __intrinsic_asm \"textureGatherOffsets($p, $2, $T3[]($3, $4, $5, $6)), $7\";\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" let offsets = __makeArray(offset1,offset2,offset3,offset4);\n") +SLANG_RAW(" return spirv_asm {\n") +SLANG_RAW(" OpCapability ImageGatherExtended;\n") +SLANG_RAW(" %sampledImage : __sampledImageType(texture) = OpSampledImage $texture $s;\n") +SLANG_RAW(" result:$$vector = OpImageGather %sampledImage $location $component ConstOffsets $offsets;\n") +SLANG_RAW(" };\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") 
+SLANG_RAW("__generic\n") +SLANG_RAW("[ForceInline]\n") +SLANG_RAW("[require(glsl_spirv, texture_gather)]\n") +SLANG_RAW("vector __texture_gather_offsets(\n") +SLANG_RAW(" _Texture sampler,\n") +SLANG_RAW(" vector location,\n") +SLANG_RAW(" constexpr vector offset1,\n") +SLANG_RAW(" constexpr vector offset2,\n") +SLANG_RAW(" constexpr vector offset3,\n") +SLANG_RAW(" constexpr vector offset4,\n") +SLANG_RAW(" int component)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case glsl:\n") +SLANG_RAW(" __intrinsic_asm \"textureGatherOffsets($0, $1, $T2[]($2, $3, $4, $5), $6)\";\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" let offsets = __makeArray(offset1,offset2,offset3,offset4);\n") +SLANG_RAW(" return spirv_asm {\n") +SLANG_RAW(" OpCapability ImageGatherExtended;\n") +SLANG_RAW(" result:$$vector = OpImageGather $sampler $location $component ConstOffsets $offsets;\n") +SLANG_RAW(" };\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[ForceInline]\n") +SLANG_RAW("[require(glsl_metal_spirv_wgsl, texture_gather)]\n") +SLANG_RAW("vector __texture_gatherCmp(\n") +SLANG_RAW(" _Texture texture,\n") +SLANG_RAW(" SamplerComparisonState s,\n") +SLANG_RAW(" vector location,\n") +SLANG_RAW(" TElement compareValue)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case glsl:\n") +SLANG_RAW(" __intrinsic_asm \"textureGather($p, $2, $3)\";\n") +SLANG_RAW(" case metal:\n") +SLANG_RAW(" if (isArray == 1)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" switch (Shape.flavor)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case ") +SLANG_SPLICE(SLANG_TEXTURE_2D +) +SLANG_RAW(":\n") +SLANG_RAW(" // Tv gather_compare(sampler s, float2 coord, uint array, float compare_value, int2 offset = int2(0)) const\n") +SLANG_RAW(" __intrinsic_asm \"$0.gather_compare($1, ($2).xy, uint(($2).z), $3)\";\n") +SLANG_RAW(" case ") +SLANG_SPLICE(SLANG_TEXTURE_CUBE +) +SLANG_RAW(":\n") +SLANG_RAW(" // Tv gather_compare(sampler s, float3 coord, uint array, float compare_value) const\n") +SLANG_RAW(" __intrinsic_asm \"$0.gather_compare($1, ($2).xyz, uint(($2).w), $3)\";\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW(" // Tv gather_compare(sampler s, float2 coord, float compare_value, int2 offset = int2(0)) const\n") +SLANG_RAW(" __intrinsic_asm \"$0.gather_compare($1, $2, $3)\";\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" return spirv_asm {\n") +SLANG_RAW(" %sampledImage : __sampledImageType(texture) = OpSampledImage $texture $s;\n") +SLANG_RAW(" result:$$vector = OpImageDrefGather %sampledImage $location $compareValue;\n") +SLANG_RAW(" };\n") +SLANG_RAW(" case wgsl:\n") +SLANG_RAW(" static_assert(isShadow == 1, \"WGSL supports textureGatherCompare only for depth textures.\");\n") +SLANG_RAW("\n") +SLANG_RAW(" if (isArray == 1)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" switch (Shape.flavor)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case ") +SLANG_SPLICE(SLANG_TEXTURE_2D +) +SLANG_RAW(":\n") +SLANG_RAW(" __intrinsic_asm \"textureGatherCompare($0, $1, ($2).xy, u32(($2).z), $3)\";\n") +SLANG_RAW(" case ") +SLANG_SPLICE(SLANG_TEXTURE_CUBE +) +SLANG_RAW(":\n") +SLANG_RAW(" __intrinsic_asm \"textureGatherCompare($0, $1, ($2).xyz, u32(($2).w), $3)\";\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW(" __intrinsic_asm \"textureGatherCompare($0, $1, $2, $3)\";\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[ForceInline]\n") +SLANG_RAW("[require(glsl_spirv, texture_gather)]\n") +SLANG_RAW("vector 
__texture_gatherCmp(\n") +SLANG_RAW(" _Texture sampler,\n") +SLANG_RAW(" vector location,\n") +SLANG_RAW(" TElement compareValue)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case glsl:\n") +SLANG_RAW(" __intrinsic_asm \"textureGather($0, $1, $2)\";\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" return spirv_asm {\n") +SLANG_RAW(" result:$$vector = OpImageDrefGather $sampler $location $compareValue;\n") +SLANG_RAW(" };\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[ForceInline]\n") +SLANG_RAW("[require(glsl_metal_spirv_wgsl, texture_gather)]\n") +SLANG_RAW("vector __texture_gatherCmp_offset(\n") +SLANG_RAW(" _Texture texture,\n") +SLANG_RAW(" SamplerComparisonState s,\n") +SLANG_RAW(" vector location,\n") +SLANG_RAW(" TElement compareValue,\n") +SLANG_RAW(" constexpr vector offset)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case glsl:\n") +SLANG_RAW(" __intrinsic_asm \"textureGatherOffset($p, $2, $3, $4)\";\n") +SLANG_RAW(" case metal:\n") +SLANG_RAW(" static_assert(Shape.flavor == ") +SLANG_SPLICE(SLANG_TEXTURE_2D +) +SLANG_RAW(",\n") +SLANG_RAW(" \"Metal supports depth compare Gather only for 2D texture\");\n") +SLANG_RAW("\n") +SLANG_RAW(" if (isArray == 1)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" // Tv gather_compare(sampler s, float2 coord, uint array, float compare_value, int2 offset = int2(0)) const\n") +SLANG_RAW(" __intrinsic_asm \"$0.gather_compare($1, ($2).xy, uint(($2).z), $3, $4)\";\n") +SLANG_RAW(" }\n") +SLANG_RAW(" // Tv gather_compare(sampler s, float2 coord, float compare_value, int2 offset = int2(0)) const\n") +SLANG_RAW(" __intrinsic_asm \"$0.gather_compare($1, $2, $3, $4)\";\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" return spirv_asm {\n") +SLANG_RAW(" %sampledImage : __sampledImageType(texture) = OpSampledImage $texture $s;\n") +SLANG_RAW(" result:$$vector = OpImageDrefGather %sampledImage $location $compareValue ConstOffset $offset;\n") +SLANG_RAW(" };\n") +SLANG_RAW(" case wgsl:\n") +SLANG_RAW(" static_assert(isShadow == 1, \"WGSL supports textureGatherCompare only for depth textures.\");\n") +SLANG_RAW("\n") +SLANG_RAW(" if (isArray == 1)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" switch (Shape.flavor)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case ") +SLANG_SPLICE(SLANG_TEXTURE_2D +) +SLANG_RAW(":\n") +SLANG_RAW(" __intrinsic_asm \"textureGatherCompare($0, $1, ($2).xy, u32(($2).z), $3, $4)\";\n") +SLANG_RAW(" case ") +SLANG_SPLICE(SLANG_TEXTURE_CUBE +) +SLANG_RAW(":\n") +SLANG_RAW(" __intrinsic_asm \"textureGatherCompare($0, $1, ($2).xyz, u32(($2).w), $3, $4)\";\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW(" __intrinsic_asm \"textureGatherCompare($0, $1, $2, $3, $4)\";\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[ForceInline]\n") +SLANG_RAW("[require(glsl_spirv, texture_gather)]\n") +SLANG_RAW("vector __texture_gatherCmp_offset(\n") +SLANG_RAW(" _Texture sampler,\n") +SLANG_RAW(" vector location,\n") +SLANG_RAW(" TElement compareValue,\n") +SLANG_RAW(" constexpr vector offset)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case glsl:\n") +SLANG_RAW(" __intrinsic_asm \"textureGatherOffset($0, $1, $2, $3)\";\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" return spirv_asm {\n") +SLANG_RAW(" result:$$vector = OpImageDrefGather $sampler $location $compareValue ConstOffset $offset;\n") +SLANG_RAW(" };\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") 
+SLANG_RAW("\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[ForceInline]\n") +SLANG_RAW("[require(glsl_spirv, texture_gather)]\n") +SLANG_RAW("vector __texture_gatherCmp_offsets(\n") +SLANG_RAW(" _Texture texture,\n") +SLANG_RAW(" SamplerComparisonState s,\n") +SLANG_RAW(" vector location,\n") +SLANG_RAW(" TElement compareValue,\n") +SLANG_RAW(" vector offset1,\n") +SLANG_RAW(" vector offset2,\n") +SLANG_RAW(" vector offset3,\n") +SLANG_RAW(" vector offset4)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case glsl:\n") +SLANG_RAW(" __intrinsic_asm \"textureGatherOffsets($p, $2, $3, $T4[]($4, $5, $6, $7))\";\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" let offsets = __makeArray(offset1,offset2,offset3,offset4);\n") +SLANG_RAW(" return spirv_asm {\n") +SLANG_RAW(" OpCapability ImageGatherExtended;\n") +SLANG_RAW(" %sampledImage : __sampledImageType(texture) = OpSampledImage $texture $s;\n") +SLANG_RAW(" result:$$vector = OpImageDrefGather %sampledImage $location $compareValue ConstOffsets $offsets;\n") +SLANG_RAW(" };\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[ForceInline]\n") +SLANG_RAW("[require(glsl_spirv, texture_gather)]\n") +SLANG_RAW("vector __texture_gatherCmp_offsets(\n") +SLANG_RAW(" _Texture sampler,\n") +SLANG_RAW(" vector location,\n") +SLANG_RAW(" TElement compareValue,\n") +SLANG_RAW(" vector offset1,\n") +SLANG_RAW(" vector offset2,\n") +SLANG_RAW(" vector offset3,\n") +SLANG_RAW(" vector offset4)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case glsl:\n") +SLANG_RAW(" __intrinsic_asm \"textureGatherOffsets($0, $1, $2, $T3[]($3, $4, $5, $6))\";\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" let offsets = __makeArray(offset1,offset2,offset3,offset4);\n") +SLANG_RAW(" return spirv_asm {\n") +SLANG_RAW(" OpCapability ImageGatherExtended;\n") +SLANG_RAW(" result:$$vector = OpImageDrefGather $sampler $location $compareValue ConstOffsets $offsets;\n") +SLANG_RAW(" };\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") + +for (int isCombined = 0; isCombined < 2; isCombined++) +for (int isScalarTexture = 0; isScalarTexture < 2; isScalarTexture++) +{ + const char* extSizeParam = isScalarTexture ? "" : ", let N:int"; + const char* extTexType = isScalarTexture ? "T" : "vector"; + +SLANG_RAW("#line 2868 \"hlsl.meta.slang\"") +SLANG_RAW("\n") +SLANG_RAW("// Gather for [TextureType = ") +SLANG_SPLICE(extTexType +) +SLANG_RAW(", isCombined = ") +SLANG_SPLICE(isCombined +) +SLANG_RAW("]\n") +SLANG_RAW("\n") +SLANG_RAW("__generic\n") +SLANG_RAW("extension _Texture<") +SLANG_SPLICE(extTexType +) +SLANG_RAW(",Shape,isArray,0,sampleCount,0,isShadow,") +SLANG_SPLICE(isCombined +) +SLANG_RAW(",format>\n") +SLANG_RAW("{\n") + + for (int isShadow = 0; isShadow < 2; isShadow++) + for (auto componentId = 0; componentId < 5; componentId++) + { + const char* compareFunc = isShadow ? "Cmp" : ""; + const char* compareParam = isShadow ? ", T compareValue" : ""; + const char* compareArg = isShadow ? ", compareValue" : ""; + + // Some targets support the combined texture natively + const char* samplerParam = isCombined ? "" : (isShadow ? "SamplerComparisonState s," : "SamplerState s,"); + const char* samplerArg = isCombined ? "" : ", s"; + const char* getTexture = isCombined ? "__getTexture()" : "this"; + const char* getSampler = isCombined ? (isShadow ? 
", __getComparisonSampler()" : ", __getSampler()") : samplerArg; + + const char* componentFuncString[] = { "", "Red", "Green", "Blue", "Alpha"}; + const char* componentArgString[] = { ", 0", ", 0", ", 1", ", 2", ", 3" }; + const char* componentFunc = componentFuncString[componentId]; + const char* componentArg = (isShadow ? "" : componentArgString[componentId]); +SLANG_RAW("#line 2892 \"hlsl.meta.slang\"") +SLANG_RAW("\n") +SLANG_RAW(" [ForceInline]\n") +SLANG_RAW(" [require(glsl_hlsl_metal_spirv_wgsl, texture_gather)]\n") +SLANG_RAW(" vector Gather") +SLANG_SPLICE(compareFunc +) +SLANG_SPLICE(componentFunc +) +SLANG_RAW("(\n") +SLANG_RAW(" ") +SLANG_SPLICE(samplerParam +) +SLANG_RAW("\n") +SLANG_RAW(" vector location\n") +SLANG_RAW(" ") +SLANG_SPLICE(compareParam +) +SLANG_RAW(")\n") +SLANG_RAW(" {\n") +SLANG_RAW(" static_assert(Shape.flavor == ") +SLANG_SPLICE(SLANG_TEXTURE_2D +) +SLANG_RAW(" || Shape.flavor == ") +SLANG_SPLICE(SLANG_TEXTURE_CUBE +) +SLANG_RAW(",\n") +SLANG_RAW(" \"Gather is supported only for 2D and 3D textures\");\n") +SLANG_RAW("\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \".Gather") +SLANG_SPLICE(compareFunc +) +SLANG_SPLICE(componentFunc +) +SLANG_RAW("\";\n") +SLANG_RAW(" case metal:\n") +SLANG_RAW(" case wgsl:\n") +SLANG_RAW(" return __texture_gather") +SLANG_SPLICE(compareFunc +) +SLANG_RAW("(") +SLANG_SPLICE(getTexture +) +SLANG_RAW(" ") +SLANG_SPLICE(getSampler +) +SLANG_RAW(", location ") +SLANG_SPLICE(compareArg +) +SLANG_RAW(" ") +SLANG_SPLICE(componentArg +) +SLANG_RAW(");\n") +SLANG_RAW(" case glsl:\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" return __texture_gather") +SLANG_SPLICE(compareFunc +) +SLANG_RAW("(this ") +SLANG_SPLICE(samplerArg +) +SLANG_RAW(", location ") +SLANG_SPLICE(compareArg +) +SLANG_RAW(" ") +SLANG_SPLICE(componentArg +) +SLANG_RAW(");\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW("\n") +SLANG_RAW(" [ForceInline]\n") +SLANG_RAW(" [require(hlsl, texture_gather)]\n") +SLANG_RAW(" vector Gather") +SLANG_SPLICE(compareFunc +) +SLANG_SPLICE(componentFunc +) +SLANG_RAW("(\n") +SLANG_RAW(" ") +SLANG_SPLICE(samplerParam +) +SLANG_RAW("\n") +SLANG_RAW(" vector location\n") +SLANG_RAW(" ") +SLANG_SPLICE(compareParam +) +SLANG_RAW(",\n") +SLANG_RAW(" out uint status)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" static_assert(Shape.flavor == ") +SLANG_SPLICE(SLANG_TEXTURE_2D +) +SLANG_RAW(" || Shape.flavor == ") +SLANG_SPLICE(SLANG_TEXTURE_CUBE +) +SLANG_RAW(",\n") +SLANG_RAW(" \"Gather is supported only for 2D and 3D textures\");\n") +SLANG_RAW("\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \".Gather") +SLANG_SPLICE(compareFunc +) +SLANG_SPLICE(componentFunc +) +SLANG_RAW("\";\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW("\n") +SLANG_RAW(" [ForceInline]\n") +SLANG_RAW(" [require(glsl_hlsl_metal_spirv_wgsl, texture_gather)]\n") +SLANG_RAW(" vector Gather") +SLANG_SPLICE(compareFunc +) +SLANG_SPLICE(componentFunc +) +SLANG_RAW("(\n") +SLANG_RAW(" ") +SLANG_SPLICE(samplerParam +) +SLANG_RAW("\n") +SLANG_RAW(" vector location\n") +SLANG_RAW(" ") +SLANG_SPLICE(compareParam +) +SLANG_RAW(",\n") +SLANG_RAW(" constexpr vector offset)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" static_assert(Shape.flavor == ") +SLANG_SPLICE(SLANG_TEXTURE_2D +) +SLANG_RAW(" || Shape.flavor == ") +SLANG_SPLICE(SLANG_TEXTURE_CUBE +) +SLANG_RAW(",\n") +SLANG_RAW(" \"Gather is supported only for 2D and 3D textures\");\n") +SLANG_RAW("\n") +SLANG_RAW(" __target_switch\n") 
+SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \".Gather") +SLANG_SPLICE(compareFunc +) +SLANG_SPLICE(componentFunc +) +SLANG_RAW("\";\n") +SLANG_RAW(" case metal:\n") +SLANG_RAW(" case wgsl:\n") +SLANG_RAW(" return __texture_gather") +SLANG_SPLICE(compareFunc +) +SLANG_RAW("_offset(") +SLANG_SPLICE(getTexture +) +SLANG_RAW(" ") +SLANG_SPLICE(getSampler +) +SLANG_RAW(", location ") +SLANG_SPLICE(compareArg +) +SLANG_RAW(", offset ") +SLANG_SPLICE(componentArg +) +SLANG_RAW(");\n") +SLANG_RAW(" case glsl:\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" return __texture_gather") +SLANG_SPLICE(compareFunc +) +SLANG_RAW("_offset(this ") +SLANG_SPLICE(samplerArg +) +SLANG_RAW(", location ") +SLANG_SPLICE(compareArg +) +SLANG_RAW(", offset ") +SLANG_SPLICE(componentArg +) +SLANG_RAW(");\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW("\n") +SLANG_RAW(" [ForceInline]\n") +SLANG_RAW(" [require(hlsl, texture_gather)]\n") +SLANG_RAW(" vector Gather") +SLANG_SPLICE(compareFunc +) +SLANG_SPLICE(componentFunc +) +SLANG_RAW("(\n") +SLANG_RAW(" ") +SLANG_SPLICE(samplerParam +) +SLANG_RAW("\n") +SLANG_RAW(" vector location\n") +SLANG_RAW(" ") +SLANG_SPLICE(compareParam +) +SLANG_RAW(",\n") +SLANG_RAW(" constexpr vector offset,\n") +SLANG_RAW(" out uint status)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" static_assert(Shape.flavor == ") +SLANG_SPLICE(SLANG_TEXTURE_2D +) +SLANG_RAW(" || Shape.flavor == ") +SLANG_SPLICE(SLANG_TEXTURE_CUBE +) +SLANG_RAW(",\n") +SLANG_RAW(" \"Gather is supported only for 2D and 3D textures\");\n") +SLANG_RAW("\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \".Gather") +SLANG_SPLICE(compareFunc +) +SLANG_SPLICE(componentFunc +) +SLANG_RAW("\";\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW("\n") +SLANG_RAW(" [ForceInline]\n") +SLANG_RAW(" [require(glsl_hlsl_spirv, texture_gather)]\n") +SLANG_RAW(" vector Gather") +SLANG_SPLICE(compareFunc +) +SLANG_SPLICE(componentFunc +) +SLANG_RAW("(\n") +SLANG_RAW(" ") +SLANG_SPLICE(samplerParam +) +SLANG_RAW("\n") +SLANG_RAW(" vector location\n") +SLANG_RAW(" ") +SLANG_SPLICE(compareParam +) +SLANG_RAW(",\n") +SLANG_RAW(" constexpr vector offset1,\n") +SLANG_RAW(" constexpr vector offset2,\n") +SLANG_RAW(" constexpr vector offset3,\n") +SLANG_RAW(" constexpr vector offset4)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" static_assert(Shape.flavor == ") +SLANG_SPLICE(SLANG_TEXTURE_2D +) +SLANG_RAW(" || Shape.flavor == ") +SLANG_SPLICE(SLANG_TEXTURE_CUBE +) +SLANG_RAW(",\n") +SLANG_RAW(" \"Gather is supported only for 2D and 3D textures\");\n") +SLANG_RAW("\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \".Gather") +SLANG_SPLICE(compareFunc +) +SLANG_SPLICE(componentFunc +) +SLANG_RAW("\";\n") +SLANG_RAW(" case glsl:\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" return __texture_gather") +SLANG_SPLICE(compareFunc +) +SLANG_RAW("_offsets(this ") +SLANG_SPLICE(samplerArg +) +SLANG_RAW(", location ") +SLANG_SPLICE(compareArg +) +SLANG_RAW(", offset1,offset2,offset3,offset4 ") +SLANG_SPLICE(componentArg +) +SLANG_RAW(");\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW("\n") +SLANG_RAW(" [ForceInline]\n") +SLANG_RAW(" [require(hlsl, texture_gather)]\n") +SLANG_RAW(" vector Gather") +SLANG_SPLICE(compareFunc +) +SLANG_SPLICE(componentFunc +) +SLANG_RAW("(\n") +SLANG_RAW(" ") +SLANG_SPLICE(samplerParam +) +SLANG_RAW("\n") +SLANG_RAW(" vector location\n") +SLANG_RAW(" ") +SLANG_SPLICE(compareParam +) +SLANG_RAW(",\n") +SLANG_RAW(" constexpr vector 
offset1,\n") +SLANG_RAW(" constexpr vector offset2,\n") +SLANG_RAW(" constexpr vector offset3,\n") +SLANG_RAW(" constexpr vector offset4,\n") +SLANG_RAW(" out uint status)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" static_assert(Shape.flavor == ") +SLANG_SPLICE(SLANG_TEXTURE_2D +) +SLANG_RAW(" || Shape.flavor == ") +SLANG_SPLICE(SLANG_TEXTURE_CUBE +) +SLANG_RAW(",\n") +SLANG_RAW(" \"Gather is supported only for 2D and 3D textures\");\n") +SLANG_RAW("\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \".Gather") +SLANG_SPLICE(compareFunc +) +SLANG_SPLICE(componentFunc +) +SLANG_RAW("\";\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW("\n") + + } // for (componentId) +SLANG_RAW("#line 3019 \"hlsl.meta.slang\"") +SLANG_RAW("\n") +SLANG_RAW("} // End of: Gather for [TextureType = ") +SLANG_SPLICE(extTexType +) +SLANG_RAW(", isCombined = ") +SLANG_SPLICE(isCombined +) +SLANG_RAW("]\n") +SLANG_RAW("\n") + +} // for (isScalarTexture) +SLANG_RAW("#line 3024 \"hlsl.meta.slang\"") +SLANG_RAW("\n") +SLANG_RAW("// End of all Texture Gather\n") +SLANG_RAW("\n") +SLANG_RAW("\n") +SLANG_RAW("// Load/Subscript for readonly, no MS textures\n") +SLANG_RAW("\n") +SLANG_RAW("__generic\n") +SLANG_RAW("extension _Texture\n") +SLANG_RAW("{\n") +SLANG_RAW("//@hidden:\n") +SLANG_RAW(" static const int isMS = 0;\n") +SLANG_RAW(" static const int access = ") +SLANG_SPLICE(kCoreModule_ResourceAccessReadOnly +) +SLANG_RAW(";\n") +SLANG_RAW("//@public:\n") +SLANG_RAW(" [__readNone]\n") +SLANG_RAW(" [ForceInline]\n") +SLANG_RAW(" [require(cpp_glsl_hlsl_metal_spirv_wgsl, texture_sm_4_1_samplerless)]\n") +SLANG_RAW(" T Load(vector location)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case cpp:\n") +SLANG_RAW(" case hlsl:\n") +SLANG_RAW(" __intrinsic_asm \".Load\";\n") +SLANG_RAW(" case metal:\n") +SLANG_RAW(" switch (Shape.flavor)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case ") +SLANG_SPLICE(SLANG_TEXTURE_1D +) +SLANG_RAW(":\n") +SLANG_RAW(" // lod is not supported for 1D texture\n") +SLANG_RAW(" if (isArray == 1)\n") +SLANG_RAW(" // Tv read(uint coord, uint array, uint lod = 0) const\n") +SLANG_RAW(" __intrinsic_asm \"$c$0.read(uint(($1).x), uint(($1).y))$z\";\n") +SLANG_RAW(" else\n") +SLANG_RAW(" // Tv read(uint coord, uint lod = 0) const\n") +SLANG_RAW(" __intrinsic_asm \"$c$0.read(uint(($1).x))$z\";\n") +SLANG_RAW(" break;\n") +SLANG_RAW(" case ") +SLANG_SPLICE(SLANG_TEXTURE_2D +) +SLANG_RAW(":\n") +SLANG_RAW(" if (isShadow == 1)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" if (isArray == 1)\n") +SLANG_RAW(" // T read(uint2 coord, uint array, uint lod = 0) const\n") +SLANG_RAW(" __intrinsic_asm \"$c$0.read(vec(($1).xy), uint(($1).z), uint(($1).w))$z\";\n") +SLANG_RAW(" else\n") +SLANG_RAW(" // T read(uint2 coord, uint lod = 0) const\n") +SLANG_RAW(" __intrinsic_asm \"$c$0.read(vec(($1).xy), uint(($1).z))$z\";\n") +SLANG_RAW(" }\n") +SLANG_RAW(" else\n") +SLANG_RAW(" {\n") +SLANG_RAW(" if (isArray == 1)\n") +SLANG_RAW(" // Tv read(uint2 coord, uint array, uint lod = 0) const\n") +SLANG_RAW(" __intrinsic_asm \"$c$0.read(vec(($1).xy), uint(($1).z), uint(($1).w))$z\";\n") +SLANG_RAW(" else\n") +SLANG_RAW(" // Tv read(uint2 coord, uint lod = 0) const\n") +SLANG_RAW(" __intrinsic_asm \"$c$0.read(vec(($1).xy), uint(($1).z))$z\";\n") +SLANG_RAW(" }\n") +SLANG_RAW(" break;\n") +SLANG_RAW(" case ") +SLANG_SPLICE(SLANG_TEXTURE_3D +) +SLANG_RAW(":\n") +SLANG_RAW(" if (isShadow == 0 && isArray == 0)\n") +SLANG_RAW(" // Tv read(uint3 coord, uint lod = 0) 
const\n") +SLANG_RAW(" __intrinsic_asm \"$c$0.read(vec(($1).xyz), uint(($1).w))$z\";\n") +SLANG_RAW(" break;\n") +SLANG_RAW(" case ") +SLANG_SPLICE(SLANG_TEXTURE_CUBE +) +SLANG_RAW(":\n") +SLANG_RAW(" static_assert(isArray == 0, \"Unsupported 'Load' of 'texture cube array' for 'metal' target\");\n") +SLANG_RAW(" if (isShadow == 1)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" if (isArray == 1)\n") +SLANG_RAW(" // T read(uint2 coord, uint face, uint array, uint lod = 0) const\n") +SLANG_RAW(" __intrinsic_asm \"\";\n") +SLANG_RAW(" else\n") +SLANG_RAW(" // T read(uint2 coord, uint face, uint lod = 0) const\n") +SLANG_RAW(" __intrinsic_asm \"$c$0.read(vec(($1).xy), uint(($1).z), uint(($1).w))$z\";\n") +SLANG_RAW(" }\n") +SLANG_RAW(" else\n") +SLANG_RAW(" {\n") +SLANG_RAW(" if (isArray == 1)\n") +SLANG_RAW(" // Tv read(uint2 coord, uint face, uint array, uint lod = 0) const\n") +SLANG_RAW(" __intrinsic_asm \"\";\n") +SLANG_RAW(" else\n") +SLANG_RAW(" // Tv read(uint2 coord, uint face, uint lod = 0) const\n") +SLANG_RAW(" __intrinsic_asm \"$c$0.read(vec(($1).xy), uint(($1).z), uint(($1).w))$z\";\n") +SLANG_RAW(" }\n") +SLANG_RAW(" break;\n") +SLANG_RAW(" }\n") +SLANG_RAW(" static_assert(false, \"Unsupported 'Load' of 'texture' for 'metal' target\");\n") +SLANG_RAW(" __intrinsic_asm \"\";\n") +SLANG_RAW(" case glsl:\n") +SLANG_RAW(" if (isCombined == 0)\n") +SLANG_RAW(" __requireGLSLExtension(\"GL_EXT_samplerless_texture_functions\");\n") +SLANG_RAW(" __intrinsic_asm \"$ctexelFetch($0, ($1).$w1b, ($1).$w1e)$z\";\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" const int lodLoc = Shape.dimensions+isArray;\n") +SLANG_RAW(" let coord = __vectorReshape(location);\n") +SLANG_RAW(" let lod = location[lodLoc];\n") +SLANG_RAW(" if (isCombined != 0)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" return spirv_asm\n") +SLANG_RAW(" {\n") +SLANG_RAW(" %image:__imageType(this) = OpImage $this;\n") +SLANG_RAW(" %sampled:__sampledType(T) = OpImageFetch %image $coord Lod $lod;\n") +SLANG_RAW(" __truncate $$T result __sampledType(T) %sampled;\n") +SLANG_RAW(" };\n") +SLANG_RAW(" }\n") +SLANG_RAW(" else\n") +SLANG_RAW(" {\n") +SLANG_RAW(" return spirv_asm\n") +SLANG_RAW(" {\n") +SLANG_RAW(" %sampled:__sampledType(T) = OpImageFetch $this $coord Lod $lod;\n") +SLANG_RAW(" __truncate $$T result __sampledType(T) %sampled;\n") +SLANG_RAW(" };\n") +SLANG_RAW(" }\n") +SLANG_RAW(" case wgsl:\n") +SLANG_RAW(" static_assert(Shape.flavor == ") +SLANG_SPLICE(SLANG_TEXTURE_1D +) +SLANG_RAW("\n") +SLANG_RAW(" || Shape.flavor == ") +SLANG_SPLICE(SLANG_TEXTURE_2D +) +SLANG_RAW("\n") +SLANG_RAW(" || Shape.flavor == ") +SLANG_SPLICE(SLANG_TEXTURE_3D +) +SLANG_RAW("\n") +SLANG_RAW(" , \"WGSL doesn't supports textureLoad for Cube texture.\");\n") +SLANG_RAW(" static_assert(isArray == 0 || Shape.flavor == ") +SLANG_SPLICE(SLANG_TEXTURE_2D +) +SLANG_RAW("\n") +SLANG_RAW(" , \"WGSL supports textureLoad for texture_2d_array but not for array of 1D, 3D or Cube.\");\n") +SLANG_RAW("\n") +SLANG_RAW(" if (isArray == 1)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" switch (Shape.flavor)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case ") +SLANG_SPLICE(SLANG_TEXTURE_2D +) +SLANG_RAW(":\n") +SLANG_RAW(" __intrinsic_asm \"textureLoad($0, ($1).xy, i32(($1).z), ($1).w)$z\";\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW(" switch (Shape.flavor)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case ") +SLANG_SPLICE(SLANG_TEXTURE_1D +) +SLANG_RAW(":\n") +SLANG_RAW(" __intrinsic_asm \"textureLoad($0, ($1).x, ($1).y)$z\";\n") +SLANG_RAW(" case ") +SLANG_SPLICE(SLANG_TEXTURE_2D +) +SLANG_RAW(":\n") 
+SLANG_RAW(" __intrinsic_asm \"textureLoad($0, ($1).xy, ($1).z)$z\";\n") +SLANG_RAW(" case ") +SLANG_SPLICE(SLANG_TEXTURE_3D +) +SLANG_RAW(":\n") +SLANG_RAW(" __intrinsic_asm \"textureLoad($0, ($1).xyz, ($1).w)$z\";\n") +SLANG_RAW(" }\n") +SLANG_RAW(" return T();\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW("\n") +SLANG_RAW(" [__readNone]\n") +SLANG_RAW(" [ForceInline]\n") +SLANG_RAW(" [require(cpp_glsl_hlsl_spirv, texture_sm_4_1_samplerless)]\n") +SLANG_RAW(" T Load(vector location, constexpr vector offset)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case cpp:\n") +SLANG_RAW(" case hlsl:\n") +SLANG_RAW(" __intrinsic_asm \".Load\";\n") +SLANG_RAW(" case glsl:\n") +SLANG_RAW(" if (isCombined == 0)\n") +SLANG_RAW(" __requireGLSLExtension(\"GL_EXT_samplerless_texture_functions\");\n") +SLANG_RAW(" __intrinsic_asm \"$ctexelFetchOffset($0, ($1).$w1b, ($1).$w1e, ($2))$z\";\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" const int lodLoc = Shape.dimensions+isArray;\n") +SLANG_RAW(" let coord = __vectorReshape(location);\n") +SLANG_RAW(" let lod = location[lodLoc];\n") +SLANG_RAW(" if (isCombined != 0)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" return spirv_asm\n") +SLANG_RAW(" {\n") +SLANG_RAW(" %image:__imageType(this) = OpImage $this;\n") +SLANG_RAW(" %sampled:__sampledType(T) = OpImageFetch %image $coord Lod|ConstOffset $lod $offset;\n") +SLANG_RAW(" __truncate $$T result __sampledType(T) %sampled;\n") +SLANG_RAW(" };\n") +SLANG_RAW(" }\n") +SLANG_RAW(" else\n") +SLANG_RAW(" {\n") +SLANG_RAW(" return spirv_asm\n") +SLANG_RAW(" {\n") +SLANG_RAW(" %sampled:__sampledType(T) = OpImageFetch $this $coord Lod|ConstOffset $lod $offset;\n") +SLANG_RAW(" __truncate $$T result __sampledType(T) %sampled;\n") +SLANG_RAW(" };\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW("\n") +SLANG_RAW(" [__readNone]\n") +SLANG_RAW(" [ForceInline]\n") +SLANG_RAW(" [require(hlsl, texture_sm_4_1_samplerless)]\n") +SLANG_RAW(" T Load(vector location, constexpr vector offset, out uint status)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \".Load\";\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" status = 0;\n") +SLANG_RAW(" return Load(location, offset);\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW("\n") +SLANG_RAW(" __subscript(vector location) -> T\n") +SLANG_RAW(" {\n") +SLANG_RAW(" [__readNone]\n") +SLANG_RAW(" [ForceInline]\n") +SLANG_RAW(" [require(cpp_glsl_hlsl_metal_spirv_wgsl, texture_sm_4_1_samplerless)]\n") +SLANG_RAW(" get\n") +SLANG_RAW(" {\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case cpp:\n") +SLANG_RAW(" case hlsl:\n") +SLANG_RAW(" __intrinsic_asm \".operator[]\";\n") +SLANG_RAW(" case metal:\n") +SLANG_RAW(" return Load(__makeVector(location, 0));\n") +SLANG_RAW(" case glsl:\n") +SLANG_RAW(" if (isCombined == 0)\n") +SLANG_RAW(" __requireGLSLExtension(\"GL_EXT_samplerless_texture_functions\");\n") +SLANG_RAW("\n") +SLANG_RAW(" return Load(__makeVector(location, 0));\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" if (isCombined != 0)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" return spirv_asm\n") +SLANG_RAW(" {\n") +SLANG_RAW(" %image:__imageType(this) = OpImage $this;\n") +SLANG_RAW(" %sampled:__sampledType(T) = OpImageFetch %image $location;\n") +SLANG_RAW(" __truncate $$T result __sampledType(T) %sampled;\n") +SLANG_RAW(" };\n") +SLANG_RAW(" }\n") +SLANG_RAW(" else\n") +SLANG_RAW(" {\n") +SLANG_RAW(" return spirv_asm\n") +SLANG_RAW(" {\n") 
+SLANG_RAW(" %sampled:__sampledType(T) = OpImageFetch $this $location;\n") +SLANG_RAW(" __truncate $$T result __sampledType(T) %sampled;\n") +SLANG_RAW(" };\n") +SLANG_RAW(" }\n") +SLANG_RAW(" case wgsl:\n") +SLANG_RAW(" return Load(__makeVector(location, 0));\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("// Texture Load/Subscript for readonly, MS textures\n") +SLANG_RAW("\n") +SLANG_RAW("__generic\n") +SLANG_RAW("extension _Texture\n") +SLANG_RAW("{\n") +SLANG_RAW("//@hidden:\n") +SLANG_RAW(" static const int access = ") +SLANG_SPLICE(kCoreModule_ResourceAccessReadOnly +) +SLANG_RAW(";\n") +SLANG_RAW(" static const int isMS = 1;\n") +SLANG_RAW("//@public:\n") +SLANG_RAW(" [__readNone]\n") +SLANG_RAW(" [ForceInline]\n") +SLANG_RAW(" [require(cpp_glsl_hlsl_metal_spirv_wgsl, texture_sm_4_1_samplerless)]\n") +SLANG_RAW(" T Load(vector location, int sampleIndex)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case cpp:\n") +SLANG_RAW(" case hlsl:\n") +SLANG_RAW(" __intrinsic_asm \".Load\";\n") +SLANG_RAW(" case metal:\n") +SLANG_RAW(" switch (Shape.flavor)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case ") +SLANG_SPLICE(SLANG_TEXTURE_2D +) +SLANG_RAW(":\n") +SLANG_RAW(" if (isShadow == 1)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" if (isArray == 1)\n") +SLANG_RAW(" // Document seems to have a typo. `lod` must be `sample`.\n") +SLANG_RAW(" // Tv read(uint2 coord, uint array, uint lod = 0) const\n") +SLANG_RAW(" __intrinsic_asm \"$c$0.read(($1).xy, ($1).z, uint($2))$z\";\n") +SLANG_RAW(" else\n") +SLANG_RAW(" // T read(uint2 coord, uint sample) const\n") +SLANG_RAW(" __intrinsic_asm \"$c$0.read($1, uint($2))$z\";\n") +SLANG_RAW(" }\n") +SLANG_RAW(" else\n") +SLANG_RAW(" {\n") +SLANG_RAW(" if (isArray == 1)\n") +SLANG_RAW(" // Document seems to have a typo. 
`lod` must be `sample`.\n") +SLANG_RAW(" // Tv read(uint2 coord, uint array, uint lod = 0) const\n") +SLANG_RAW(" __intrinsic_asm \"$c$0.read(($1).xy, ($1).z, uint($2))$z\";\n") +SLANG_RAW(" else\n") +SLANG_RAW(" // Tv read(uint2 coord, uint sample) const\n") +SLANG_RAW(" __intrinsic_asm \"$c$0.read($1, uint($2))$z\";\n") +SLANG_RAW(" }\n") +SLANG_RAW(" break;\n") +SLANG_RAW(" }\n") +SLANG_RAW(" // TODO: This needs to be handled by the capability system\n") +SLANG_RAW(" __intrinsic_asm \"\";\n") +SLANG_RAW(" case glsl:\n") +SLANG_RAW(" if (isCombined == 0)\n") +SLANG_RAW(" __requireGLSLExtension(\"GL_EXT_samplerless_texture_functions\");\n") +SLANG_RAW(" __intrinsic_asm \"$ctexelFetch($0, $1, ($2))$z\";\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" if (isCombined != 0)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" return spirv_asm\n") +SLANG_RAW(" {\n") +SLANG_RAW(" %image:__imageType(this) = OpImage $this;\n") +SLANG_RAW(" %sampled:__sampledType(T) = OpImageFetch %image $location Sample $sampleIndex;\n") +SLANG_RAW(" __truncate $$T result __sampledType(T) %sampled;\n") +SLANG_RAW(" };\n") +SLANG_RAW(" }\n") +SLANG_RAW(" else\n") +SLANG_RAW(" {\n") +SLANG_RAW(" return spirv_asm\n") +SLANG_RAW(" {\n") +SLANG_RAW(" %sampled:__sampledType(T) = OpImageFetch $this $location Sample $sampleIndex;\n") +SLANG_RAW(" __truncate $$T result __sampledType(T) %sampled;\n") +SLANG_RAW(" };\n") +SLANG_RAW(" }\n") +SLANG_RAW(" case wgsl:\n") +SLANG_RAW(" static_assert(Shape.flavor == ") +SLANG_SPLICE(SLANG_TEXTURE_2D +) +SLANG_RAW("\n") +SLANG_RAW(" , \"WGSL supports textureLoad for texture_multisampled_2d but not for multisampled of 1D, 3D or Cube.\");\n") +SLANG_RAW(" static_assert(isArray == 0\n") +SLANG_RAW(" , \"WGSL doesn't support array variants of multisampled textures for textureLoad.\");\n") +SLANG_RAW("\n") +SLANG_RAW(" __intrinsic_asm \"textureLoad($0, $1, $2)$z\";\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW("\n") +SLANG_RAW(" [__readNone]\n") +SLANG_RAW(" [ForceInline]\n") +SLANG_RAW(" [require(cpp_glsl_hlsl_spirv_wgsl, texture_sm_4_1_samplerless)]\n") +SLANG_RAW(" T Load(vector locationAndSampleIndex)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" return Load(__vectorReshape(locationAndSampleIndex), locationAndSampleIndex[Shape.dimensions + isArray]);\n") +SLANG_RAW(" }\n") +SLANG_RAW("\n") +SLANG_RAW(" [__readNone]\n") +SLANG_RAW(" [ForceInline]\n") +SLANG_RAW(" [require(cpp_glsl_hlsl_spirv, texture_sm_4_1_samplerless)]\n") +SLANG_RAW(" T Load(vector location, int sampleIndex, constexpr vector offset)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case cpp:\n") +SLANG_RAW(" case hlsl:\n") +SLANG_RAW(" __intrinsic_asm \".Load\";\n") +SLANG_RAW(" case glsl:\n") +SLANG_RAW(" if (isCombined == 0)\n") +SLANG_RAW(" __requireGLSLExtension(\"GL_EXT_samplerless_texture_functions\");\n") +SLANG_RAW(" __intrinsic_asm \"$ctexelFetchOffset($0, $1, ($2), ($3))$z\";\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" if (isCombined != 0)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" return spirv_asm\n") +SLANG_RAW(" {\n") +SLANG_RAW(" %image:__imageType(this) = OpImage $this;\n") +SLANG_RAW(" %sampled:__sampledType(T) = OpImageFetch %image $location ConstOffset|Sample $offset $sampleIndex;\n") +SLANG_RAW(" __truncate $$T result __sampledType(T) %sampled;\n") +SLANG_RAW(" };\n") +SLANG_RAW(" }\n") +SLANG_RAW(" else\n") +SLANG_RAW(" {\n") +SLANG_RAW(" return spirv_asm\n") +SLANG_RAW(" {\n") +SLANG_RAW(" %sampled:__sampledType(T) = OpImageFetch $this $location ConstOffset|Sample $offset 
$sampleIndex;\n") +SLANG_RAW(" __truncate $$T result __sampledType(T) %sampled;\n") +SLANG_RAW(" };\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW("\n") +SLANG_RAW(" [__readNone]\n") +SLANG_RAW(" [ForceInline]\n") +SLANG_RAW(" [require(hlsl, texture_sm_4_1_samplerless)]\n") +SLANG_RAW(" T Load(vector location, int sampleIndex, constexpr vector offset, out uint status)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \".Load\";\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" status = 0;\n") +SLANG_RAW(" return Load(location, sampleIndex, offset);\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW("\n") +SLANG_RAW(" __subscript(vector location) -> T\n") +SLANG_RAW(" {\n") +SLANG_RAW(" [__readNone]\n") +SLANG_RAW(" [ForceInline]\n") +SLANG_RAW(" [require(cpp_glsl_hlsl_metal_spirv_wgsl, texture_sm_4_1_samplerless)]\n") +SLANG_RAW(" get\n") +SLANG_RAW(" {\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case cpp:\n") +SLANG_RAW(" case hlsl:\n") +SLANG_RAW(" __intrinsic_asm \"($0).sample[$1]\";\n") +SLANG_RAW(" case metal:\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" case wgsl:\n") +SLANG_RAW(" return Load(location, 0);\n") +SLANG_RAW(" case glsl:\n") +SLANG_RAW(" if (isCombined == 0)\n") +SLANG_RAW(" __requireGLSLExtension(\"GL_EXT_samplerless_texture_functions\");\n") +SLANG_RAW(" return Load(location, 0);\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW(" __subscript(vector location, int sampleIndex) -> T\n") +SLANG_RAW(" {\n") +SLANG_RAW(" [__readNone]\n") +SLANG_RAW(" [ForceInline]\n") +SLANG_RAW(" [require(cpp_glsl_hlsl_metal_spirv_wgsl, texture_sm_4_1_samplerless)]\n") +SLANG_RAW(" get\n") +SLANG_RAW(" {\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case cpp:\n") +SLANG_RAW(" case hlsl:\n") +SLANG_RAW(" __intrinsic_asm \"($0).sample[$2][$1]\";\n") +SLANG_RAW(" case metal:\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" case wgsl:\n") +SLANG_RAW(" return Load(location, sampleIndex);\n") +SLANG_RAW(" case glsl:\n") +SLANG_RAW(" if (isCombined == 0)\n") +SLANG_RAW(" __requireGLSLExtension(\"GL_EXT_samplerless_texture_functions\");\n") +SLANG_RAW(" return Load(location, sampleIndex);\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("// Load/Subscript for readwrite textures\n") + + for (int access = kCoreModule_ResourceAccessReadWrite; access <= kCoreModule_ResourceAccessRasterizerOrdered; access++) { + const char* glslIntrinsic = "$cimageLoad($0, $1)$z"; + const char* glslIntrinsicOffset = "$cimageLoad($0, ($1)+($2))$z"; + const char* glslIntrinsicMS = "$cimageLoad($0, $1, $2)$z"; + const char* glslIntrinsicMSOffset = "$cimageLoad($0, ($1)+($3), $2)$z"; +SLANG_RAW("#line 3449 \"hlsl.meta.slang\"") +SLANG_RAW("\n") +SLANG_RAW("\n") +SLANG_RAW("__generic\n") +SLANG_RAW("extension _Texture\n") +SLANG_RAW("{\n") + + if (access != kCoreModule_ResourceAccessWriteOnly) + { + +SLANG_RAW("#line 3457 \"hlsl.meta.slang\"") +SLANG_RAW("\n") +SLANG_RAW(" [__readNone]\n") +SLANG_RAW(" [ForceInline]\n") +SLANG_RAW(" [require(cpp_cuda_glsl_hlsl_metal_spirv_wgsl, texture_sm_4_1)]\n") +SLANG_RAW(" T Load(vector location)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case cpp:\n") +SLANG_RAW(" case hlsl:\n") +SLANG_RAW(" __intrinsic_asm \".Load\";\n") +SLANG_RAW(" case glsl:\n") +SLANG_RAW(" __intrinsic_asm \"") +SLANG_SPLICE(glslIntrinsic +) +SLANG_RAW("\";\n") 
+SLANG_RAW(" case cuda:\n") +SLANG_RAW(" if (isArray != 0)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" switch(Shape.flavor)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case ") +SLANG_SPLICE(SLANG_TEXTURE_1D +) +SLANG_RAW(":\n") +SLANG_RAW(" __intrinsic_asm \"surf1DLayeredread$C<$T0>($0, ($1).x * $E, ($1).y, SLANG_CUDA_BOUNDARY_MODE)\";\n") +SLANG_RAW(" case ") +SLANG_SPLICE(SLANG_TEXTURE_2D +) +SLANG_RAW(":\n") +SLANG_RAW(" __intrinsic_asm \"surf2DLayeredread$C<$T0>($0, ($1).x * $E, ($1).y, ($1).z, SLANG_CUDA_BOUNDARY_MODE)\";\n") +SLANG_RAW(" case ") +SLANG_SPLICE(SLANG_TEXTURE_3D +) +SLANG_RAW(":\n") +SLANG_RAW(" __intrinsic_asm \"surf3DLayeredread$C<$T0>($0, ($1).x * $E, ($1).y, ($1).z, ($1).w, SLANG_CUDA_BOUNDARY_MODE)\";\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" __intrinsic_asm \"\";\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW(" else\n") +SLANG_RAW(" {\n") +SLANG_RAW(" switch(Shape.flavor)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case ") +SLANG_SPLICE(SLANG_TEXTURE_1D +) +SLANG_RAW(":\n") +SLANG_RAW(" __intrinsic_asm \"surf1Dread$C<$T0>($0, ($1) * $E, SLANG_CUDA_BOUNDARY_MODE)\";\n") +SLANG_RAW(" case ") +SLANG_SPLICE(SLANG_TEXTURE_2D +) +SLANG_RAW(":\n") +SLANG_RAW(" __intrinsic_asm \"surf2Dread$C<$T0>($0, ($1).x * $E, ($1).y, SLANG_CUDA_BOUNDARY_MODE)\";\n") +SLANG_RAW(" case ") +SLANG_SPLICE(SLANG_TEXTURE_3D +) +SLANG_RAW(":\n") +SLANG_RAW(" __intrinsic_asm \"surf3Dread$C<$T0>($0, ($1).x * $E, ($1).y, ($1).z, SLANG_CUDA_BOUNDARY_MODE)\";\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" __intrinsic_asm \"\";\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" return spirv_asm\n") +SLANG_RAW(" {\n") +SLANG_RAW(" %sampled:__sampledType(T) = OpImageRead $this $location;\n") +SLANG_RAW(" __truncate $$T result __sampledType(T) %sampled;\n") +SLANG_RAW(" };\n") +SLANG_RAW(" case metal:\n") +SLANG_RAW(" switch (Shape.flavor)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case ") +SLANG_SPLICE(SLANG_TEXTURE_1D +) +SLANG_RAW(":\n") +SLANG_RAW(" // lod is not supported for 1D texture\n") +SLANG_RAW(" if (isArray == 1)\n") +SLANG_RAW(" // Tv read(uint coord, uint array, uint lod = 0) const\n") +SLANG_RAW(" __intrinsic_asm \"$c$0.read(uint(($1).x), uint(($1).y))$z\";\n") +SLANG_RAW(" else\n") +SLANG_RAW(" // Tv read(uint coord, uint lod = 0) const\n") +SLANG_RAW(" __intrinsic_asm \"$c$0.read(uint($1))$z\";\n") +SLANG_RAW(" break;\n") +SLANG_RAW(" case ") +SLANG_SPLICE(SLANG_TEXTURE_2D +) +SLANG_RAW(":\n") +SLANG_RAW(" if (isShadow == 1)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" if (isArray == 1)\n") +SLANG_RAW(" // T read(uint2 coord, uint array, uint lod = 0) const\n") +SLANG_RAW(" __intrinsic_asm \"$c$0.read(vec(($1).xy), uint(($1).z))$z\";\n") +SLANG_RAW(" else\n") +SLANG_RAW(" // T read(uint2 coord, uint lod = 0) const\n") +SLANG_RAW(" __intrinsic_asm \"$c$0.read(vec(($1).xy))$z\";\n") +SLANG_RAW(" }\n") +SLANG_RAW(" else\n") +SLANG_RAW(" {\n") +SLANG_RAW(" if (isArray == 1)\n") +SLANG_RAW(" // Tv read(uint2 coord, uint array, uint lod = 0) const\n") +SLANG_RAW(" __intrinsic_asm \"$c$0.read(vec(($1).xy), uint(($1).z))$z\";\n") +SLANG_RAW(" else\n") +SLANG_RAW(" // Tv read(uint2 coord, uint lod = 0) const\n") +SLANG_RAW(" __intrinsic_asm \"$c$0.read(vec(($1).xy))$z\";\n") +SLANG_RAW(" }\n") +SLANG_RAW(" break;\n") +SLANG_RAW(" case ") +SLANG_SPLICE(SLANG_TEXTURE_3D +) +SLANG_RAW(":\n") +SLANG_RAW(" if (isShadow == 0 && isArray == 0)\n") +SLANG_RAW(" // Tv read(uint3 coord, uint lod = 0) const\n") +SLANG_RAW(" __intrinsic_asm \"$c$0.read(vec(($1).xyz))$z\";\n") +SLANG_RAW(" break;\n") 
+SLANG_RAW(" case ") +SLANG_SPLICE(SLANG_TEXTURE_CUBE +) +SLANG_RAW(":\n") +SLANG_RAW(" static_assert(isArray == 0, \"Unsupported 'Load' of 'texture cube array' for 'metal' target\");\n") +SLANG_RAW(" if (isShadow == 1)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" if (isArray == 1)\n") +SLANG_RAW(" // T read(uint2 coord, uint face, uint array, uint lod = 0) const\n") +SLANG_RAW(" __intrinsic_asm \"$0.read(vec(($1).xy), uint(($1).z)%6, uint(($1).z)/6, uint(($1).w))\";\n") +SLANG_RAW(" else\n") +SLANG_RAW(" // T read(uint2 coord, uint face, uint lod = 0) const\n") +SLANG_RAW(" __intrinsic_asm \"$c$0.read(vec(($1).xy), uint(($1).z), uint(($1).w))$z\";\n") +SLANG_RAW(" }\n") +SLANG_RAW(" else\n") +SLANG_RAW(" {\n") +SLANG_RAW(" if (isArray == 1)\n") +SLANG_RAW(" // Tv read(uint2 coord, uint face, uint array, uint lod = 0) const\n") +SLANG_RAW(" __intrinsic_asm \"$0.read(vec(($1).xy), uint(($1).z)%6, uint(($1).z)/6, uint(($1).w))\";\n") +SLANG_RAW(" else\n") +SLANG_RAW(" // Tv read(uint2 coord, uint face, uint lod = 0) const\n") +SLANG_RAW(" __intrinsic_asm \"$c$0.read(vec(($1).xy), uint(($1).z), uint(($1).w))$z\";\n") +SLANG_RAW(" }\n") +SLANG_RAW(" break;\n") +SLANG_RAW(" }\n") +SLANG_RAW(" static_assert(false, \"Unsupported 'Load' of 'texture' for 'metal' target\");\n") +SLANG_RAW(" __intrinsic_asm \"\";\n") +SLANG_RAW(" case wgsl:\n") +SLANG_RAW(" static_assert(Shape.flavor == ") +SLANG_SPLICE(SLANG_TEXTURE_1D +) +SLANG_RAW("\n") +SLANG_RAW(" || Shape.flavor == ") +SLANG_SPLICE(SLANG_TEXTURE_2D +) +SLANG_RAW("\n") +SLANG_RAW(" || Shape.flavor == ") +SLANG_SPLICE(SLANG_TEXTURE_3D +) +SLANG_RAW("\n") +SLANG_RAW(" , \"WGSL doesn't supports textureLoad for Cube texture.\");\n") +SLANG_RAW(" static_assert(isArray == 0 || Shape.flavor == ") +SLANG_SPLICE(SLANG_TEXTURE_2D +) +SLANG_RAW("\n") +SLANG_RAW(" , \"WGSL supports textureLoad for 2d_array but not for array of 1D, 3D or Cube.\");\n") +SLANG_RAW(" static_assert(isShadow == 0 || T is float\n") +SLANG_RAW(" , \"WGSL supports only f32 depth textures\");\n") +SLANG_RAW("\n") +SLANG_RAW(" if (isArray == 1)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" switch (Shape.flavor)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case ") +SLANG_SPLICE(SLANG_TEXTURE_2D +) +SLANG_RAW(":\n") +SLANG_RAW(" __intrinsic_asm \"textureLoad($0, ($1).xy, i32(($1).z))$z\";\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW(" __intrinsic_asm \"textureLoad($0, $1)$z\";\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW("\n") +SLANG_RAW(" [__readNone]\n") +SLANG_RAW(" [ForceInline]\n") +SLANG_RAW(" [require(cpp_glsl_hlsl_spirv, texture_sm_4_1)]\n") +SLANG_RAW(" T Load(vector location, vector offset)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case cpp:\n") +SLANG_RAW(" case hlsl:\n") +SLANG_RAW(" __intrinsic_asm \".Load\";\n") +SLANG_RAW(" case glsl:\n") +SLANG_RAW(" __intrinsic_asm \"") +SLANG_SPLICE(glslIntrinsicOffset +) +SLANG_RAW("\";\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" return spirv_asm\n") +SLANG_RAW(" {\n") +SLANG_RAW(" %sampled:__sampledType(T) = OpImageRead $this $location ConstOffset $offset;\n") +SLANG_RAW(" __truncate $$T result __sampledType(T) %sampled;\n") +SLANG_RAW(" };\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW("\n") +SLANG_RAW(" [__readNone]\n") +SLANG_RAW(" [ForceInline]\n") +SLANG_RAW(" T Load(vector location, vector offset, out uint status)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl:\n") +SLANG_RAW(" case cpp:\n") +SLANG_RAW(" __intrinsic_asm \".Load\";\n") 
+SLANG_RAW(" default:\n") +SLANG_RAW(" status = 0;\n") +SLANG_RAW(" return Load(location, offset);\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW("\n") + + } // if (access != kCoreModule_ResourceAccessWriteOnly) + +SLANG_RAW("#line 3626 \"hlsl.meta.slang\"") +SLANG_RAW("\n") +SLANG_RAW(" [require(glsl, texture_sm_4_1)]\n") +SLANG_RAW(" void __glslImageStore(vector location, T value)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" __intrinsic_asm \"imageStore($0, $1, $V2)\";\n") +SLANG_RAW(" }\n") +SLANG_RAW(" \n") +SLANG_RAW(" [require(metal, texture_sm_4_1)]\n") +SLANG_RAW(" __intrinsic_op(") +SLANG_SPLICE(kIROp_ImageStore +) +SLANG_RAW(")\n") +SLANG_RAW(" static void __metalImageStoreArray(This val, vector location, T value, uint arrayIndex);\n") +SLANG_RAW("\n") +SLANG_RAW(" [require(metal, texture_sm_4_1)]\n") +SLANG_RAW(" __intrinsic_op(") +SLANG_SPLICE(kIROp_ImageStore +) +SLANG_RAW(")\n") +SLANG_RAW(" static void __metalImageStore(This val, vector location, T value);\n") +SLANG_RAW("\n") +SLANG_RAW(" [ForceInline]\n") +SLANG_RAW(" void Store(vector location, T newValue)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case cpp:\n") +SLANG_RAW(" case hlsl:\n") +SLANG_RAW(" __intrinsic_asm \".operator[]\";\n") +SLANG_RAW(" case glsl:\n") +SLANG_RAW(" __glslImageStore(location, newValue);\n") +SLANG_RAW(" case cuda:\n") +SLANG_RAW(" if (isArray != 0)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" switch(Shape.flavor)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case ") +SLANG_SPLICE(SLANG_TEXTURE_1D +) +SLANG_RAW(":\n") +SLANG_RAW(" __intrinsic_asm \"surf1DLayeredwrite$C<$T0>($2, $0, ($1).x * $E, ($1).y, SLANG_CUDA_BOUNDARY_MODE)\";\n") +SLANG_RAW(" case ") +SLANG_SPLICE(SLANG_TEXTURE_2D +) +SLANG_RAW(":\n") +SLANG_RAW(" __intrinsic_asm \"surf2DLayeredwrite$C<$T0>($2, $0, ($1).x * $E, ($1).y, ($1).z, SLANG_CUDA_BOUNDARY_MODE)\";\n") +SLANG_RAW(" case ") +SLANG_SPLICE(SLANG_TEXTURE_3D +) +SLANG_RAW(":\n") +SLANG_RAW(" __intrinsic_asm \"surf3DLayeredwrite$C<$T0>($2, $0, ($1).x * $E, ($1).y, ($1).z, ($1).w, SLANG_CUDA_BOUNDARY_MODE)\";\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" __intrinsic_asm \"\";\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW(" else\n") +SLANG_RAW(" {\n") +SLANG_RAW(" switch(Shape.flavor)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case ") +SLANG_SPLICE(SLANG_TEXTURE_1D +) +SLANG_RAW(":\n") +SLANG_RAW(" __intrinsic_asm \"surf1Dwrite$C<$T0>($2, $0, ($1) * $E, SLANG_CUDA_BOUNDARY_MODE)\";\n") +SLANG_RAW(" case ") +SLANG_SPLICE(SLANG_TEXTURE_2D +) +SLANG_RAW(":\n") +SLANG_RAW(" __intrinsic_asm \"surf2Dwrite$C<$T0>($2, $0, ($1).x * $E, ($1).y, SLANG_CUDA_BOUNDARY_MODE)\";\n") +SLANG_RAW(" case ") +SLANG_SPLICE(SLANG_TEXTURE_3D +) +SLANG_RAW(":\n") +SLANG_RAW(" __intrinsic_asm \"surf3Dwrite$C<$T0>($2, $0, ($1).x * $E, ($1).y, ($1).z, SLANG_CUDA_BOUNDARY_MODE)\";\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" __intrinsic_asm \"\";\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" return spirv_asm\n") +SLANG_RAW(" {\n") +SLANG_RAW(" OpImageWrite $this $location __convertTexel(newValue);\n") +SLANG_RAW(" };\n") +SLANG_RAW(" case metal:\n") +SLANG_RAW(" if (isArray != 0)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" // last arg will be replaced with the split off array index\n") +SLANG_RAW(" __metalImageStoreArray(this, __vectorReshape(location), newValue, location[Shape.dimensions + isArray - 1]);\n") +SLANG_RAW(" }\n") +SLANG_RAW(" else\n") +SLANG_RAW(" {\n") +SLANG_RAW(" __metalImageStore(this, location, newValue);\n") +SLANG_RAW(" }\n") 
+SLANG_RAW(" case wgsl:\n") +SLANG_RAW(" static_assert(Shape.flavor == ") +SLANG_SPLICE(SLANG_TEXTURE_1D +) +SLANG_RAW("\n") +SLANG_RAW(" || Shape.flavor == ") +SLANG_SPLICE(SLANG_TEXTURE_2D +) +SLANG_RAW("\n") +SLANG_RAW(" || Shape.flavor == ") +SLANG_SPLICE(SLANG_TEXTURE_3D +) +SLANG_RAW("\n") +SLANG_RAW(" , \"WGSL doesn't supports textureStore for Cube texture.\");\n") +SLANG_RAW(" static_assert(isArray == 0 || Shape.flavor == ") +SLANG_SPLICE(SLANG_TEXTURE_2D +) +SLANG_RAW("\n") +SLANG_RAW(" , \"WGSL supports textureStore for texture_store_2d_array but not for array of 1D, 3D or Cube.\");\n") +SLANG_RAW("\n") +SLANG_RAW(" // WGSL requires the value type to be always `vec4`\n") +SLANG_RAW(" if (isArray == 1)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" if (T is int32_t || T is int16_t || T is int8_t) __intrinsic_asm \"textureStore($0, ($1).xy, i32(($1).z), vec4($V2)\";\n") +SLANG_RAW(" if (T is int32_t2 || T is int16_t2 || T is int8_t2) __intrinsic_asm \"textureStore($0, ($1).xy, i32(($1).z), vec4($2, 0, 1))\";\n") +SLANG_RAW(" if (T is int32_t3 || T is int16_t3 || T is int8_t3) __intrinsic_asm \"textureStore($0, ($1).xy, i32(($1).z), vec4($2, 1))\";\n") +SLANG_RAW(" if (T is uint32_t || T is uint16_t || T is uint8_t) __intrinsic_asm \"textureStore($0, ($1).xy, i32(($1).z), vec4($2, 0, 0, 1))\";\n") +SLANG_RAW(" if (T is uint32_t2 || T is uint16_t2 || T is uint8_t2) __intrinsic_asm \"textureStore($0, ($1).xy, i32(($1).z), vec4($2, 0, 1))\";\n") +SLANG_RAW(" if (T is uint32_t3 || T is uint16_t3 || T is uint8_t3) __intrinsic_asm \"textureStore($0, ($1).xy, i32(($1).z), vec4($2, 1))\";\n") +SLANG_RAW(" if (T is half) __intrinsic_asm \"textureStore($0, ($1).xy, i32(($1).z), vec4($2, 0, 0, 1))\";\n") +SLANG_RAW(" if (T is half2) __intrinsic_asm \"textureStore($0, ($1).xy, i32(($1).z), vec4($2, 0, 1))\";\n") +SLANG_RAW(" if (T is half3) __intrinsic_asm \"textureStore($0, ($1).xy, i32(($1).z), vec4($2, 1))\";\n") +SLANG_RAW(" if (T is float) __intrinsic_asm \"textureStore($0, ($1).xy, i32(($1).z), vec4($2, 0, 0, 1))\";\n") +SLANG_RAW(" if (T is float2) __intrinsic_asm \"textureStore($0, ($1).xy, i32(($1).z), vec4($2, 0, 1))\";\n") +SLANG_RAW(" if (T is float3) __intrinsic_asm \"textureStore($0, ($1).xy, i32(($1).z), vec4($2, 1))\";\n") +SLANG_RAW(" __intrinsic_asm \"textureStore($0, ($1).xy, i32(($1).z), $2)\";\n") +SLANG_RAW(" }\n") +SLANG_RAW(" if (T is int32_t || T is int16_t || T is int8_t) __intrinsic_asm \"textureStore($0, $1, vec4($2, 0, 0, 1))\";\n") +SLANG_RAW(" if (T is int32_t2 || T is int16_t2 || T is int8_t2) __intrinsic_asm \"textureStore($0, $1, vec4($2, 0, 1))\";\n") +SLANG_RAW(" if (T is int32_t3 || T is int16_t3 || T is int8_t3) __intrinsic_asm \"textureStore($0, $1, vec4($2, 1))\";\n") +SLANG_RAW(" if (T is uint32_t || T is uint16_t || T is uint8_t) __intrinsic_asm \"textureStore($0, $1, vec4($2, 0, 0, 1))\";\n") +SLANG_RAW(" if (T is uint32_t2 || T is uint16_t2 || T is uint8_t2) __intrinsic_asm \"textureStore($0, $1, vec4($2, 0, 1))\";\n") +SLANG_RAW(" if (T is uint32_t3 || T is uint16_t3 || T is uint8_t3) __intrinsic_asm \"textureStore($0, $1, vec4($2, 1))\";\n") +SLANG_RAW(" if (T is half) __intrinsic_asm \"textureStore($0, $1, vec4($2, 0, 0, 1))\";\n") +SLANG_RAW(" if (T is half2) __intrinsic_asm \"textureStore($0, $1, vec4($2, 0, 1))\";\n") +SLANG_RAW(" if (T is half3) __intrinsic_asm \"textureStore($0, $1, vec4($2, 1))\";\n") +SLANG_RAW(" if (T is float) __intrinsic_asm \"textureStore($0, $1, vec4($2, 0, 0, 1))\";\n") +SLANG_RAW(" if (T is float2) __intrinsic_asm 
\"textureStore($0, $1, vec4($2, 0, 1))\";\n") +SLANG_RAW(" if (T is float3) __intrinsic_asm \"textureStore($0, $1, vec4($2, 1))\";\n") +SLANG_RAW(" __intrinsic_asm \"textureStore($0, $1, $2)\";\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW("\n") + + if (access != kCoreModule_ResourceAccessWriteOnly) + { + +SLANG_RAW("#line 3739 \"hlsl.meta.slang\"") +SLANG_RAW("\n") +SLANG_RAW(" __subscript(vector location) -> T\n") +SLANG_RAW(" {\n") +SLANG_RAW(" [__readNone]\n") +SLANG_RAW(" [ForceInline]\n") +SLANG_RAW(" [require(cpp_cuda_glsl_hlsl_metal_spirv_wgsl, texture_sm_4_1)]\n") +SLANG_RAW(" get\n") +SLANG_RAW(" {\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case cpp:\n") +SLANG_RAW(" case hlsl:\n") +SLANG_RAW(" __intrinsic_asm \".operator[]\";\n") +SLANG_RAW(" case glsl:\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" case cuda:\n") +SLANG_RAW(" case metal:\n") +SLANG_RAW(" case wgsl:\n") +SLANG_RAW(" return Load(location);\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW("\n") +SLANG_RAW(" [nonmutating]\n") +SLANG_RAW(" [ForceInline]\n") +SLANG_RAW(" [require(cpp_cuda_glsl_hlsl_metal_spirv_wgsl, texture_sm_4_1)]\n") +SLANG_RAW(" set(T newValue)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" Store(location, newValue);\n") +SLANG_RAW(" }\n") +SLANG_RAW("\n") +SLANG_RAW(" // If a 'Texture[location]' is referred to by a '__ref', call 'kIROp_ImageSubscript(location)'.\n") +SLANG_RAW(" // This allows call's to stay aware that the input is from a 'Texture'.\n") +SLANG_RAW(" __intrinsic_op(") +SLANG_SPLICE(kIROp_ImageSubscript +) +SLANG_RAW(") \n") +SLANG_RAW(" ref;\n") +SLANG_RAW(" }\n") + + } // if (access != kCoreModule_ResourceAccessWriteOnly) + +SLANG_RAW("#line 3776 \"hlsl.meta.slang\"") +SLANG_RAW("\n") +SLANG_RAW("\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") + +if (access == kCoreModule_ResourceAccessReadWrite) { +SLANG_RAW("#line 3782 \"hlsl.meta.slang\"") +SLANG_RAW("\n") +SLANG_RAW("\n") +SLANG_RAW("// RW MS textures.\n") +SLANG_RAW("\n") +SLANG_RAW("__generic\n") +SLANG_RAW("extension _Texture\n") +SLANG_RAW("{\n") +SLANG_RAW(" [__readNone]\n") +SLANG_RAW(" [ForceInline]\n") +SLANG_RAW(" [require(cpp_glsl_hlsl_metal_spirv_wgsl, texture_sm_4_1_compute_fragment)]\n") +SLANG_RAW(" T Load(vector location, int sampleIndex)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case cpp:\n") +SLANG_RAW(" case hlsl:\n") +SLANG_RAW(" __intrinsic_asm \".Load\";\n") +SLANG_RAW(" case metal:\n") +SLANG_RAW(" switch (Shape.flavor)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case ") +SLANG_SPLICE(SLANG_TEXTURE_2D +) +SLANG_RAW(":\n") +SLANG_RAW(" if (isShadow == 1)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" if (isArray == 1)\n") +SLANG_RAW(" // The document seems to have a typo. `lod` must mean `sample`.\n") +SLANG_RAW(" // Tv read(uint2 coord, uint array, uint lod = 0) const\n") +SLANG_RAW(" __intrinsic_asm \"$c$0.read(vec(($1).xy), uint(($1).z), $2)$z\";\n") +SLANG_RAW(" else\n") +SLANG_RAW(" // T read(uint2 coord, uint sample) const\n") +SLANG_RAW(" __intrinsic_asm \"$c$0.read(vec(($1).xy), $2)$z\";\n") +SLANG_RAW(" }\n") +SLANG_RAW(" else\n") +SLANG_RAW(" {\n") +SLANG_RAW(" if (isArray == 1)\n") +SLANG_RAW(" // The document seems to have a typo. 
`lod` must mean `sample`.\n") +SLANG_RAW(" // Tv read(uint2 coord, uint array, uint lod = 0) const\n") +SLANG_RAW(" __intrinsic_asm \"$c$0.read(vec(($1).xy), uint(($1).z), $2)$z\";\n") +SLANG_RAW(" else\n") +SLANG_RAW(" // Tv read(uint2 coord, uint sample) const\n") +SLANG_RAW(" __intrinsic_asm \"$c$0.read(vec(($1).xy), $2)$z\";\n") +SLANG_RAW(" }\n") +SLANG_RAW(" break;\n") +SLANG_RAW(" }\n") +SLANG_RAW(" // TODO: This needs to be handled by the capability system\n") +SLANG_RAW(" __intrinsic_asm \"\";\n") +SLANG_RAW(" case glsl:\n") +SLANG_RAW(" __intrinsic_asm \"") +SLANG_SPLICE(glslIntrinsicMS +) +SLANG_RAW("\";\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" return spirv_asm\n") +SLANG_RAW(" {\n") +SLANG_RAW(" %sampled:__sampledType(T) = OpImageRead $this $location Sample $sampleIndex;\n") +SLANG_RAW(" __truncate $$T result __sampledType(T) %sampled;\n") +SLANG_RAW(" };\n") +SLANG_RAW(" case wgsl:\n") +SLANG_RAW(" static_assert(Shape.flavor == ") +SLANG_SPLICE(SLANG_TEXTURE_2D +) +SLANG_RAW("\n") +SLANG_RAW(" , \"WGSL supports textureLoad for texture_multisampled_2d but not for multisampled of 1D, 3D or Cube.\");\n") +SLANG_RAW(" static_assert(isArray == 0\n") +SLANG_RAW(" , \"WGSL doesn't support array variants of multisampled textures for textureLoad.\");\n") +SLANG_RAW("\n") +SLANG_RAW(" __intrinsic_asm \"textureLoad($0, $1, $2)$z\";\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW("\n") +SLANG_RAW(" [__readNone]\n") +SLANG_RAW(" [ForceInline]\n") +SLANG_RAW(" [require(cpp_glsl_hlsl_spirv, texture_sm_4_1_compute_fragment)]\n") +SLANG_RAW(" T Load(vector location, int sampleIndex, vector offset)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case cpp:\n") +SLANG_RAW(" case hlsl:\n") +SLANG_RAW(" __intrinsic_asm \".Load\";\n") +SLANG_RAW(" case glsl:\n") +SLANG_RAW(" __intrinsic_asm \"") +SLANG_SPLICE(glslIntrinsicMSOffset +) +SLANG_RAW("\";\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" return spirv_asm\n") +SLANG_RAW(" {\n") +SLANG_RAW(" %sampled:__sampledType(T) = OpImageRead $this $location ConstOffset|Sample $offset $sampleIndex;\n") +SLANG_RAW(" __truncate $$T result __sampledType(T) %sampled;\n") +SLANG_RAW(" };\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW("\n") +SLANG_RAW(" [__readNone]\n") +SLANG_RAW(" [ForceInline]\n") +SLANG_RAW(" T Load(vector location, int sampleIndex, vector offset, out uint status)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl:\n") +SLANG_RAW(" case cpp:\n") +SLANG_RAW(" __intrinsic_asm \".Load\";\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" status = 0;\n") +SLANG_RAW(" return Load(location, sampleIndex, offset);\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW("\n") +SLANG_RAW(" [require(glsl, texture_sm_4_1_compute_fragment)]\n") +SLANG_RAW(" void __glslImageStore(vector location, int sampleIndex, T value)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" __intrinsic_asm \"imageStore($0, $1, $2, $V3)\";\n") +SLANG_RAW(" }\n") +SLANG_RAW("\n") +SLANG_RAW(" __subscript(vector location, int sampleIndex) -> T\n") +SLANG_RAW(" {\n") +SLANG_RAW(" [__readNone]\n") +SLANG_RAW(" [ForceInline]\n") +SLANG_RAW(" [require(cpp_glsl_hlsl_spirv_wgsl, texture_sm_4_1_compute_fragment)]\n") +SLANG_RAW(" get\n") +SLANG_RAW(" {\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case cpp:\n") +SLANG_RAW(" case hlsl:\n") +SLANG_RAW(" __intrinsic_asm \"$0.sample[$2][$1]\";\n") +SLANG_RAW(" case glsl:\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" case wgsl:\n") 
+SLANG_RAW(" return Load(location, sampleIndex);\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW("\n") +SLANG_RAW(" [nonmutating]\n") +SLANG_RAW(" [ForceInline]\n") +SLANG_RAW(" [require(cpp_glsl_hlsl_spirv, texture_sm_4_1_compute_fragment)]\n") +SLANG_RAW(" set(T newValue)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case cpp:\n") +SLANG_RAW(" case hlsl:\n") +SLANG_RAW(" __intrinsic_asm \"$0.sample[$2][$1]\";\n") +SLANG_RAW(" case glsl:\n") +SLANG_RAW(" __glslImageStore(location, sampleIndex, newValue);\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" return spirv_asm\n") +SLANG_RAW(" {\n") +SLANG_RAW(" OpImageWrite $this $location __convertTexel(newValue) Sample $sampleIndex;\n") +SLANG_RAW(" };\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW("\n") +SLANG_RAW(" // If a 'Texture[location, sampleIndex]' is referred to by a '__ref', call 'kIROp_ImageSubscript(location, sampleIndex)'.\n") +SLANG_RAW(" // This allows call's to stay aware that the input is from a 'Texture'.\n") +SLANG_RAW(" __intrinsic_op(") +SLANG_SPLICE(kIROp_ImageSubscript +) +SLANG_RAW(")\n") +SLANG_RAW(" ref;\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") + +} // if (access == kCoreModule_ResourceAccessReadWrite) // for RW MS textures. +} // for (access). +SLANG_RAW("#line 3936 \"hlsl.meta.slang\"") +SLANG_RAW("\n") +SLANG_RAW("\n") +SLANG_RAW("// Definitions to support the legacy texture .mips[][] operator.\n") +SLANG_RAW("struct __TextureMip\n") +SLANG_RAW("{\n") +SLANG_RAW(" _Texture tex;\n") +SLANG_RAW(" int mip;\n") +SLANG_RAW(" __subscript(vector pos)->T\n") +SLANG_RAW(" {\n") +SLANG_RAW(" [__unsafeForceInlineEarly]\n") +SLANG_RAW(" get { return tex.Load(__makeVector(pos, mip)); }\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("struct __TextureMips\n") +SLANG_RAW("{\n") +SLANG_RAW(" _Texture tex;\n") +SLANG_RAW(" __subscript(int mip)->__TextureMip\n") +SLANG_RAW(" {\n") +SLANG_RAW(" [__unsafeForceInlineEarly]\n") +SLANG_RAW(" get { return { tex, mip }; }\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("//@hidden:\n") +SLANG_RAW("__generic\n") +SLANG_RAW("extension _Texture\n") +SLANG_RAW("{\n") +SLANG_RAW(" property __TextureMips mips\n") +SLANG_RAW(" {\n") +SLANG_RAW(" [__unsafeForceInlineEarly]\n") +SLANG_RAW(" get { return { this }; }\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("// Definitions to support the .sample[][] operator.\n") +SLANG_RAW("struct __TextureSample\n") +SLANG_RAW("{\n") +SLANG_RAW(" _Texture tex;\n") +SLANG_RAW(" int sample;\n") +SLANG_RAW(" __subscript(vector pos)->T\n") +SLANG_RAW(" {\n") +SLANG_RAW(" [__unsafeForceInlineEarly]\n") +SLANG_RAW(" get { return tex[pos, sample]; }\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("struct __TextureSampleMS\n") +SLANG_RAW("{\n") +SLANG_RAW(" _Texture tex;\n") +SLANG_RAW(" __subscript(int sample)->__TextureSample\n") +SLANG_RAW(" {\n") +SLANG_RAW(" [__unsafeForceInlineEarly]\n") +SLANG_RAW(" get { return { tex, sample }; }\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("__generic\n") +SLANG_RAW("extension _Texture\n") +SLANG_RAW("{\n") +SLANG_RAW(" property __TextureSampleMS sample\n") +SLANG_RAW(" {\n") +SLANG_RAW(" [__unsafeForceInlineEarly]\n") +SLANG_RAW(" get { return { this }; }\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("//@public:\n") +SLANG_RAW("\n") +SLANG_RAW("// Texture type aliases.\n") +SLANG_RAW("// T, Shape: __ITextureShape, let 
isArray:int, let isMS:int, let sampleCount:int, let access:int, let isShadow:int, let isCombined:int, let format:int\n") + + const char* shapeTypeNames[] = {"1D", "2D", "3D", "Cube"}; + const char* accessPrefix[] = {"", "RW", "W", "RasterizerOrdered", "Feedback"}; + const char* accessDocumentation[] = {"read-only", "read-write", "write-only", "rasterizer-ordered", "feedback"}; + const char* arrayPostFix[] = {"", "Array"}; + const char* msPostFix[] = {"", "MS"}; + for (int shape = 0; shape < 4; shape++) + for (int isArray = 0; isArray<=1; isArray++) + for (int isMS = 0; isMS<=1; isMS++) + for (int isCombined = 0; isCombined<=1; isCombined++) + for (int access = kCoreModule_ResourceAccessReadOnly; access <= kCoreModule_ResourceAccessFeedback; access++) { + if (access != kCoreModule_ResourceAccessReadOnly) + { + // No RW Cube. + if (shape == kCoreModule_ShapeIndexCube) continue; + } + if (access == kCoreModule_ResourceAccessFeedback) + { + // Feedback only defined for Texture2D and Texture2DArray. + if (shape != 1) continue; + if (isMS) continue; + if (isCombined) continue; + } + if (isMS) + { + // Only Texture2DMS. + if (shape != kCoreModule_ShapeIndex2D) + continue; + // Only Texture2DMS or RWTexture2DMS. + if (access >= kCoreModule_ShapeIndex3D) + continue; + } + // No 3D Array. + if (shape == kCoreModule_ShapeIndex3D && isArray == 1) + continue; + const char* textureTypeName = isCombined ? "Sampler" : "Texture"; +SLANG_RAW("#line 4043 \"hlsl.meta.slang\"") +SLANG_RAW("\n") +SLANG_RAW("/// Represents a handle to a ") +SLANG_SPLICE(accessDocumentation[access] +) +SLANG_SPLICE(isMS?", multisampled": "" +) +SLANG_RAW(" ") +SLANG_SPLICE(shapeTypeNames[shape] +) +SLANG_RAW(" ") +SLANG_SPLICE(isCombined?"combined texture-sampler": "texture" +) +SLANG_SPLICE(isArray?" 
array":"" +) +SLANG_RAW(".\n") +SLANG_RAW("/// @param T The texel type of the texture.\n") +SLANG_RAW("/// @param sampleCount The number of samples in the texture, when the texture is multisampled.\n") +SLANG_RAW("/// @param format The storage format of the texture.\n") +SLANG_RAW("/// @see Please refer to `_Texture` for more information about texture types.\n") +SLANG_RAW("/// @category texture_types\n") +SLANG_RAW("typealias ") +SLANG_SPLICE(accessPrefix[access] +) +SLANG_SPLICE(textureTypeName +) +SLANG_SPLICE(shapeTypeNames[shape] +) +SLANG_SPLICE(msPostFix[isMS] +) +SLANG_SPLICE(arrayPostFix[isArray] +) +SLANG_RAW(" = _Texture;\n") + +} +SLANG_RAW("#line 4053 \"hlsl.meta.slang\"") +SLANG_RAW("\n") +SLANG_RAW("\n") +SLANG_RAW("\n") +SLANG_RAW("// Atomic intrinsic insts.\n") +SLANG_RAW("\n") +SLANG_RAW("__intrinsic_op(") +SLANG_SPLICE(kIROp_AtomicExchange +) +SLANG_RAW(")\n") +SLANG_RAW("T __atomic_exchange(__ref T val, T newValue, MemoryOrder order = MemoryOrder.Relaxed);\n") +SLANG_RAW("__intrinsic_op(") +SLANG_SPLICE(kIROp_AtomicCompareExchange +) +SLANG_RAW(")\n") +SLANG_RAW("T __atomic_compare_exchange(\n") +SLANG_RAW(" __ref T val,\n") +SLANG_RAW(" T compareValue,\n") +SLANG_RAW(" T newValue,\n") +SLANG_RAW(" MemoryOrder successOrder = MemoryOrder.Relaxed,\n") +SLANG_RAW(" MemoryOrder failOrder = MemoryOrder.Relaxed);\n") +SLANG_RAW("__intrinsic_op(") +SLANG_SPLICE(kIROp_AtomicAdd +) +SLANG_RAW(")\n") +SLANG_RAW("T __atomic_add(__ref T val, T value, MemoryOrder order = MemoryOrder.Relaxed);\n") +SLANG_RAW("__intrinsic_op(") +SLANG_SPLICE(kIROp_AtomicSub +) +SLANG_RAW(")\n") +SLANG_RAW("T __atomic_sub(__ref T val, T value, MemoryOrder order = MemoryOrder.Relaxed);\n") +SLANG_RAW("__intrinsic_op(") +SLANG_SPLICE(kIROp_AtomicMax +) +SLANG_RAW(")\n") +SLANG_RAW("T __atomic_max(__ref T val, T value, MemoryOrder order = MemoryOrder.Relaxed);\n") +SLANG_RAW("__intrinsic_op(") +SLANG_SPLICE(kIROp_AtomicMin +) +SLANG_RAW(")\n") +SLANG_RAW("T __atomic_min(__ref T val, T value, MemoryOrder order = MemoryOrder.Relaxed);\n") +SLANG_RAW("__intrinsic_op(") +SLANG_SPLICE(kIROp_AtomicAnd +) +SLANG_RAW(")\n") +SLANG_RAW("T __atomic_and(__ref T val, T value, MemoryOrder order = MemoryOrder.Relaxed);\n") +SLANG_RAW("__intrinsic_op(") +SLANG_SPLICE(kIROp_AtomicOr +) +SLANG_RAW(")\n") +SLANG_RAW("T __atomic_or(__ref T val, T value, MemoryOrder order = MemoryOrder.Relaxed);\n") +SLANG_RAW("__intrinsic_op(") +SLANG_SPLICE(kIROp_AtomicXor +) +SLANG_RAW(")\n") +SLANG_RAW("T __atomic_xor(__ref T val, T value, MemoryOrder order = MemoryOrder.Relaxed);\n") +SLANG_RAW("__intrinsic_op(") +SLANG_SPLICE(kIROp_AtomicInc +) +SLANG_RAW(")\n") +SLANG_RAW("T __atomic_increment(__ref T val, MemoryOrder order = MemoryOrder.Relaxed);\n") +SLANG_RAW("__intrinsic_op(") +SLANG_SPLICE(kIROp_AtomicDec +) +SLANG_RAW(")\n") +SLANG_RAW("T __atomic_decrement(__ref T val, MemoryOrder order = MemoryOrder.Relaxed);\n") +SLANG_RAW("\n") +SLANG_RAW("// Conversion between uint64_t and uint2\n") +SLANG_RAW("\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_spirv, sm_4_0_version)]\n") +SLANG_RAW("uint2 __asuint2(uint64_t i)\n") +SLANG_RAW("{\n") +SLANG_RAW(" return uint2(uint(i), uint(uint64_t(i) >> 32));\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_spirv, sm_4_0_version)]\n") +SLANG_RAW("uint64_t __asuint64(uint2 i)\n") +SLANG_RAW("{\n") +SLANG_RAW(" return (uint64_t(i.y) << 32) | i.x;\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("//\n") +SLANG_RAW("\n") +SLANG_RAW("__intrinsic_op(") 
+SLANG_SPLICE(kIROp_ByteAddressBufferLoad +) +SLANG_RAW(")\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_metal_spirv_wgsl, byteaddressbuffer)]\n") +SLANG_RAW("T __byteAddressBufferLoad(ByteAddressBuffer buffer, int offset, int alignment);\n") +SLANG_RAW("\n") +SLANG_RAW("__intrinsic_op(") +SLANG_SPLICE(kIROp_ByteAddressBufferLoad +) +SLANG_RAW(")\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_metal_spirv_wgsl, byteaddressbuffer_rw)]\n") +SLANG_RAW("T __byteAddressBufferLoad(RWByteAddressBuffer buffer, int offset, int alignment);\n") +SLANG_RAW("\n") +SLANG_RAW("__intrinsic_op(") +SLANG_SPLICE(kIROp_ByteAddressBufferLoad +) +SLANG_RAW(")\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_metal_spirv_wgsl, byteaddressbuffer_rw)]\n") +SLANG_RAW("T __byteAddressBufferLoad(RasterizerOrderedByteAddressBuffer buffer, int offset, int alignment);\n") +SLANG_RAW("\n") +SLANG_RAW("__intrinsic_op(") +SLANG_SPLICE(kIROp_ByteAddressBufferStore +) +SLANG_RAW(")\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_metal_spirv, byteaddressbuffer_rw)]\n") +SLANG_RAW("void __byteAddressBufferStore(RWByteAddressBuffer buffer, int offset, int alignment, T value);\n") +SLANG_RAW("\n") +SLANG_RAW("__intrinsic_op(") +SLANG_SPLICE(kIROp_ByteAddressBufferStore +) +SLANG_RAW(")\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_metal_spirv, byteaddressbuffer_rw)]\n") +SLANG_RAW("void __byteAddressBufferStore(RasterizerOrderedByteAddressBuffer buffer, int offset, int alignment, T value);\n") +SLANG_RAW("\n") +SLANG_RAW("/**\n") +SLANG_RAW("Represents an opaque handle to a read-only structured buffer allocated in global memory.\n") +SLANG_RAW("A structured buffer can be viewed as an array of the specified element type.\n") +SLANG_RAW("@param T The element type of the buffer.\n") +SLANG_RAW("@param L The memory layout of the buffer.\n") +SLANG_RAW("@remarks\n") +SLANG_RAW("The `L` generic parameter is used to specify the memory layout of the buffer when\n") +SLANG_RAW("generating SPIRV.\n") +SLANG_RAW("`L` must be one of `DefaultDataLayout`, `Std140DataLayout`, `Std430DataLayout` or `ScalarDataLayout`.\n") +SLANG_RAW("The default value is `DefaultDataLayout`.\n") +SLANG_RAW("When generating code for other targets, this parameter is ignored and has no effect on the generated code.\n") +SLANG_RAW("@see `RWStructuredBuffer`, `AppendStructuredBuffer`, `ConsumeStructuredBuffer`, `RasterizerOrderedStructuredBuffer`.\n") +SLANG_RAW("@category buffer_types Buffer types\n") +SLANG_RAW("**/\n") +SLANG_RAW("__generic\n") +SLANG_RAW("__magic_type(HLSLStructuredBufferType)\n") +SLANG_RAW("__intrinsic_type(") +SLANG_SPLICE(kIROp_HLSLStructuredBufferType +) +SLANG_RAW(")\n") +SLANG_RAW("struct StructuredBuffer\n") +SLANG_RAW("{\n") +SLANG_RAW("\n") +SLANG_RAW(" /// Get the dimensions of the buffer.\n") +SLANG_RAW(" /// @param numStructs The number of structures in the buffer.\n") +SLANG_RAW(" /// @param stride The stride, in bytes, of each structure element.\n") +SLANG_RAW(" [__readNone]\n") +SLANG_RAW(" [ForceInline]\n") +SLANG_RAW(" void GetDimensions(\n") +SLANG_RAW(" out uint numStructs,\n") +SLANG_RAW(" out uint stride)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" let rs = __structuredBufferGetDimensions(this);\n") +SLANG_RAW(" numStructs = rs.x;\n") +SLANG_RAW(" stride = rs.y;\n") +SLANG_RAW(" }\n") +SLANG_RAW("\n") +SLANG_RAW(" /// Load a element from the buffer at the specified location.\n") +SLANG_RAW(" /// @param TIndex Type of the index.\n") +SLANG_RAW(" /// @param location The index of buffer.\n") +SLANG_RAW(" /// @param[out] status The status of the 
operation.\n") +SLANG_RAW(" /// @return The element at the specified index.\n") +SLANG_RAW(" ///\n") +SLANG_RAW(" /// @remarks\n") +SLANG_RAW(" /// You can't access the output parameter `status` directly; instead,\n") +SLANG_RAW(" /// pass the status to the `CheckAccessFullyMapped` intrinsic function.\n") +SLANG_RAW(" /// `CheckAccessFullyMapped` returns TRUE if all values from the corresponding Sample,\n") +SLANG_RAW(" /// Gather, or Load operation accessed mapped tiles in a tiled resource.\n") +SLANG_RAW(" /// If any values were taken from an unmapped tile, `CheckAccessFullyMapped` returns FALSE.\n") +SLANG_RAW(" __intrinsic_op(") +SLANG_SPLICE(kIROp_StructuredBufferLoad +) +SLANG_RAW(")\n") +SLANG_RAW(" [__readNone]\n") +SLANG_RAW(" [require(cpp_cuda_glsl_hlsl_metal_spirv_wgsl, structuredbuffer)]\n") +SLANG_RAW(" T Load(TIndex location);\n") +SLANG_RAW("\n") +SLANG_RAW(" __intrinsic_op(") +SLANG_SPLICE(kIROp_StructuredBufferLoadStatus +) +SLANG_RAW(")\n") +SLANG_RAW(" [require(hlsl, structuredbuffer)]\n") +SLANG_RAW(" T Load(TIndex location, out uint status);\n") +SLANG_RAW("\n") +SLANG_RAW(" /// Load a element from the buffer at the specified location.\n") +SLANG_RAW(" /// @param TIndex Type of the index.\n") +SLANG_RAW(" /// @param index The index of buffer.\n") +SLANG_RAW(" /// @return The element at the specified index.\n") +SLANG_RAW(" __generic\n") +SLANG_RAW(" __subscript(TIndex index) -> T\n") +SLANG_RAW(" {\n") +SLANG_RAW(" [__readNone]\n") +SLANG_RAW(" __intrinsic_op(") +SLANG_SPLICE(kIROp_StructuredBufferLoad +) +SLANG_RAW(")\n") +SLANG_RAW(" [require(cpp_cuda_glsl_hlsl_spirv, structuredbuffer)]\n") +SLANG_RAW(" get;\n") +SLANG_RAW(" };\n") +SLANG_RAW("};\n") +SLANG_RAW("\n") +SLANG_RAW("/**\n") +SLANG_RAW("Represents an opaque handle to a consume structured buffer allocated in global memory.\n") +SLANG_RAW("A structured buffer can be viewed as an array of the specified element type.\n") +SLANG_RAW("An append structure buffer internally maintains an atomic counter to keep track of the number of elements in the buffer,\n") +SLANG_RAW("and provide an atomic operation to append a new element to the buffer.\n") +SLANG_RAW("@param T The element type of the buffer.\n") +SLANG_RAW("@param L The memory layout of the buffer.\n") +SLANG_RAW("@remarks\n") +SLANG_RAW("This type is supported natively when targeting HLSL.\n") +SLANG_RAW("When generating code for other targets, this type is translated into a pair or an ordinary `StructuredBuffer` and\n") +SLANG_RAW("a separate `RWStructuredBuffer` that holds the atomic counter.\n") +SLANG_RAW("The `L` generic parameter is used to specify the memory layout of the buffer when\n") +SLANG_RAW("generating SPIRV.\n") +SLANG_RAW("`L` must be one of `DefaultDataLayout`, `Std140DataLayout`, `Std430DataLayout` or `ScalarDataLayout`.\n") +SLANG_RAW("The default value is `DefaultDataLayout`.\n") +SLANG_RAW("When generating code for other targets, this parameter is ignored and has no effect on the generated code.\n") +SLANG_RAW("@see `StructuredBuffer`, `AppendStructuredBuffer`, `RWStructuredBuffer`, `RasterizerOrderedStructuredBuffer`.\n") +SLANG_RAW("@category buffer_types\n") +SLANG_RAW("*/\n") +SLANG_RAW("__generic\n") +SLANG_RAW("__magic_type(HLSLConsumeStructuredBufferType)\n") +SLANG_RAW("__intrinsic_type(") +SLANG_SPLICE(kIROp_HLSLConsumeStructuredBufferType +) +SLANG_RAW(")\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_spirv, consumestructuredbuffer)]\n") +SLANG_RAW("struct ConsumeStructuredBuffer\n") +SLANG_RAW("{\n") +SLANG_RAW(" /// Reading 
the element at the end of the buffer indicated by the associated atomic counter\n") +SLANG_RAW(" /// and decrement the builtin atomic counter by 1.\n") +SLANG_RAW(" ///@return The element read from the buffer, it can be a structure.\n") +SLANG_RAW(" __intrinsic_op(") +SLANG_SPLICE(kIROp_StructuredBufferConsume +) +SLANG_RAW(")\n") +SLANG_RAW(" T Consume();\n") +SLANG_RAW("\n") +SLANG_RAW(" ///Gets the dimensions of the resource.\n") +SLANG_RAW(" ///@param[out] numStructs The number of structures in the buffer.\n") +SLANG_RAW(" ///@param[out] stride The stride, in bytes, of each element\n") +SLANG_RAW("\n") +SLANG_RAW(" [ForceInline]\n") +SLANG_RAW(" void GetDimensions(\n") +SLANG_RAW(" out uint numStructs,\n") +SLANG_RAW(" out uint stride)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" let result = __structuredBufferGetDimensions(this);\n") +SLANG_RAW(" numStructs = result.x;\n") +SLANG_RAW(" stride = result.y;\n") +SLANG_RAW(" }\n") +SLANG_RAW("};\n") +SLANG_RAW("\n") +SLANG_RAW("__intrinsic_op(") +SLANG_SPLICE(kIROp_GetElement +) +SLANG_RAW(")\n") +SLANG_RAW("T __getElement(U collection, I index);\n") +SLANG_RAW("\n") +SLANG_RAW("/// @category stage_io Stage IO types\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[require(glsl_hlsl_spirv, hull)]\n") +SLANG_RAW("__magic_type(HLSLInputPatchType)\n") +SLANG_RAW("__intrinsic_type(") +SLANG_SPLICE(kIROp_HLSLInputPatchType +) +SLANG_RAW(")\n") +SLANG_RAW("struct InputPatch\n") +SLANG_RAW("{\n") +SLANG_RAW(" __generic\n") +SLANG_RAW(" __subscript(TIndex index)->T\n") +SLANG_RAW(" {\n") +SLANG_RAW(" [__unsafeForceInlineEarly]\n") +SLANG_RAW(" get\n") +SLANG_RAW(" {\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl:\n") +SLANG_RAW(" __intrinsic_asm \".operator[]\";\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" return __getElement(this, index);\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW("};\n") +SLANG_RAW("\n") +SLANG_RAW("/// @category stage_io\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[require(glsl_hlsl_spirv, domain_hull)]\n") +SLANG_RAW("__magic_type(HLSLOutputPatchType)\n") +SLANG_RAW("__intrinsic_type(") +SLANG_SPLICE(kIROp_HLSLOutputPatchType +) +SLANG_RAW(")\n") +SLANG_RAW("struct OutputPatch\n") +SLANG_RAW("{\n") +SLANG_RAW(" __generic\n") +SLANG_RAW(" __subscript(TIndex index)->T\n") +SLANG_RAW(" {\n") +SLANG_RAW(" [__unsafeForceInlineEarly]\n") +SLANG_RAW(" get\n") +SLANG_RAW(" {\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl:\n") +SLANG_RAW(" __intrinsic_asm \".operator[]\";\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" return __getElement(this, index);\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW("};\n") +SLANG_RAW("\n") + +static const struct { + IROp op; + char const* name; +} kMutableByteAddressBufferCases[] = +{ + { kIROp_HLSLRWByteAddressBufferType, "RWByteAddressBuffer" }, + { kIROp_HLSLRasterizerOrderedByteAddressBufferType, "RasterizerOrderedByteAddressBuffer" }, +}; +for(auto item : kMutableByteAddressBufferCases) { +SLANG_RAW("#line 4298 \"hlsl.meta.slang\"") +SLANG_RAW("\n") +SLANG_RAW("\n") +SLANG_RAW("//@public:\n") +SLANG_RAW("/**\n") +SLANG_RAW("Represents an opaque handle to a read-write buffer allocated in global memory that is indexed in bytes.\n") +SLANG_RAW("This type can be used when working with raw buffers. 
Raw buffer can be viewed as a bag of bits to\n") +SLANG_RAW("which you want raw access, that is, a buffer that you can conveniently access through chunks of one to\n") +SLANG_RAW("four 32-bit typeless address values.\n") +SLANG_RAW(" @remarks\n") +SLANG_RAW("This type is supported natively when targeting HLSL.\n") +SLANG_RAW(" @category buffer_types\n") +SLANG_RAW("*/\n") +SLANG_RAW("__magic_type(HLSL") +SLANG_SPLICE(item.name +) +SLANG_RAW("Type)\n") +SLANG_RAW("__intrinsic_type(") +SLANG_SPLICE(item.op +) +SLANG_RAW(")\n") +SLANG_RAW("struct ") +SLANG_SPLICE(item.name +) +SLANG_RAW("\n") +SLANG_RAW("{\n") +SLANG_RAW(" // Note(tfoley): supports all operations from `ByteAddressBuffer`\n") +SLANG_RAW(" // TODO(tfoley): can this be made a sub-type?\n") +SLANG_RAW("\n") +SLANG_RAW(" /// Get the number of bytes in the buffer.\n") +SLANG_RAW(" ///@param[out] dim The number of bytes in the buffer.\n") +SLANG_RAW(" [ForceInline]\n") +SLANG_RAW(" [require(cpp_cuda_glsl_hlsl_spirv, structuredbuffer_rw)]\n") +SLANG_RAW(" void GetDimensions(out uint dim)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case cpp: __intrinsic_asm \".GetDimensions\";\n") +SLANG_RAW(" case cuda: __intrinsic_asm \".GetDimensions\";\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \".GetDimensions\";\n") +SLANG_RAW(" case glsl:\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" dim = __structuredBufferGetDimensions(__getEquivalentStructuredBuffer(this)).x*4;\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW("\n") +SLANG_RAW(" /// Load a 32-bit unsigned integer or value with type of `T` from the buffer at the specified location.\n") +SLANG_RAW(" ///@param T The type of the value to load from the buffer.\n") +SLANG_RAW(" ///@param location The input address in bytes, which must be a multiple of 4.\n") +SLANG_RAW(" ///@param alignment Specifies the alignment of the location, which must be a multiple of 4.\n") +SLANG_RAW(" ///@param[out] status The status of the operation.\n") +SLANG_RAW(" ///@return The value loaded from the buffer.\n") +SLANG_RAW(" ///\n") +SLANG_RAW(" ///@remarks\n") +SLANG_RAW(" /// You can't access the output parameter `status` directly; instead,\n") +SLANG_RAW(" /// pass the status to the `CheckAccessFullyMapped` intrinsic function.\n") +SLANG_RAW(" /// `CheckAccessFullyMapped` returns TRUE if all values from the corresponding Sample,\n") +SLANG_RAW(" /// Gather, or Load operation accessed mapped tiles in a tiled resource.\n") +SLANG_RAW(" /// If any values were taken from an unmapped tile, `CheckAccessFullyMapped` returns FALSE.\n") +SLANG_RAW(" /// When targeting non-HLSL, the status is always 0.\n") +SLANG_RAW(" [__NoSideEffect]\n") +SLANG_RAW(" [ForceInline]\n") +SLANG_RAW(" [require(cpp_cuda_glsl_hlsl_metal_spirv_wgsl, byteaddressbuffer_rw)]\n") +SLANG_RAW(" uint Load(int location)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \".Load\";\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" return __byteAddressBufferLoad(this, location, 0);\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW("\n") +SLANG_RAW(" [__NoSideEffect]\n") +SLANG_RAW(" [ForceInline]\n") +SLANG_RAW(" [require(hlsl, byteaddressbuffer_rw)]\n") +SLANG_RAW(" uint Load(int location, out uint status)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \".Load\";\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW("\n") +SLANG_RAW(" /// Load two 32-bit unsigned integers from the 
buffer at the specified location\n") +SLANG_RAW(" /// with additional alignment.\n") +SLANG_RAW(" ///@param location The input address in bytes.\n") +SLANG_RAW(" ///@param alignment Specifies the alignment of the location, which must be a multiple of 4.\n") +SLANG_RAW(" ///@param[out] status The status of the operation.\n") +SLANG_RAW(" ///@return Two 32-bit unsigned integers loaded from the buffer.\n") +SLANG_RAW(" ///\n") +SLANG_RAW(" ///@remarks\n") +SLANG_RAW(" /// This function only supports when targeting HLSL.\n") +SLANG_RAW(" /// You can't access the output parameter `status` directly; instead,\n") +SLANG_RAW(" /// pass the status to the `CheckAccessFullyMapped` intrinsic function.\n") +SLANG_RAW(" /// `CheckAccessFullyMapped` returns TRUE if all values from the corresponding Sample,\n") +SLANG_RAW(" /// Gather, or Load operation accessed mapped tiles in a tiled resource.\n") +SLANG_RAW(" /// If any values were taken from an unmapped tile, `CheckAccessFullyMapped` returns FALSE.\n") +SLANG_RAW(" /// When targeting non-HLSL, the status is always 0.\n") +SLANG_RAW(" [__NoSideEffect]\n") +SLANG_RAW(" [ForceInline]\n") +SLANG_RAW(" [require(cpp_cuda_glsl_hlsl_metal_spirv, byteaddressbuffer_rw)]\n") +SLANG_RAW(" uint2 Load2(int location)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \".Load2\";\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" return __byteAddressBufferLoad(this, location, 0);\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW("\n") +SLANG_RAW(" [__NoSideEffect]\n") +SLANG_RAW(" [ForceInline]\n") +SLANG_RAW(" [require(cpp_cuda_glsl_hlsl_metal_spirv, byteaddressbuffer_rw)]\n") +SLANG_RAW(" uint2 Load2(int location, int alignment)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \".Load2\";\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" return __byteAddressBufferLoad(this, location, alignment);\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW("\n") +SLANG_RAW(" /// Load two 32-bit unsigned integers from the buffer at the specified location with alignment\n") +SLANG_RAW(" /// of `uint2`, which is 8.\n") +SLANG_RAW(" ///@param location The input address in bytes, which must be a multiple of alignment of 8.\n") +SLANG_RAW(" ///@return `uint2` Two 32-bit unsigned integers loaded from the buffer.\n") +SLANG_RAW(" [__NoSideEffect]\n") +SLANG_RAW(" [ForceInline]\n") +SLANG_RAW(" [require(cpp_cuda_glsl_hlsl_metal_spirv, byteaddressbuffer_rw)]\n") +SLANG_RAW(" uint2 Load2Aligned(int location)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \".Load2\";\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" return __byteAddressBufferLoad(this, location, __naturalStrideOf());\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW("\n") +SLANG_RAW(" [__NoSideEffect]\n") +SLANG_RAW(" [ForceInline]\n") +SLANG_RAW(" [require(hlsl, byteaddressbuffer_rw)]\n") +SLANG_RAW(" uint2 Load2(int location, out uint status)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \".Load2\";\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW("\n") +SLANG_RAW(" /// Load three 32-bit unsigned integers from the buffer at the specified location.\n") +SLANG_RAW(" ///@param location The input address in bytes, which must be a multiple of 4.\n") +SLANG_RAW(" ///@param alignment Specifies the alignment of the location, which must be a multiple of 4.\n") +SLANG_RAW(" 
///@param[out] status The status of the operation.\n") +SLANG_RAW(" ///@return `uint3` Three 32-bit unsigned integer value loaded from the buffer.\n") +SLANG_RAW(" ///\n") +SLANG_RAW(" ///@remarks\n") +SLANG_RAW(" /// This function only supports when targeting HLSL.\n") +SLANG_RAW(" /// You can't access the output parameter `status` directly; instead,\n") +SLANG_RAW(" /// pass the status to the `CheckAccessFullyMapped` intrinsic function.\n") +SLANG_RAW(" /// `CheckAccessFullyMapped` returns TRUE if all values from the corresponding Sample,\n") +SLANG_RAW(" /// Gather, or Load operation accessed mapped tiles in a tiled resource.\n") +SLANG_RAW(" /// If any values were taken from an unmapped tile, `CheckAccessFullyMapped` returns FALSE.\n") +SLANG_RAW(" /// When targeting non-HLSL, the status is always 0.\n") +SLANG_RAW(" [__NoSideEffect]\n") +SLANG_RAW(" [ForceInline]\n") +SLANG_RAW(" [require(cpp_cuda_glsl_hlsl_metal_spirv, byteaddressbuffer_rw)]\n") +SLANG_RAW(" uint3 Load3(int location)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \".Load3\";\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" return __byteAddressBufferLoad(this, location, 0);\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW("\n") +SLANG_RAW(" [__NoSideEffect]\n") +SLANG_RAW(" [ForceInline]\n") +SLANG_RAW(" [require(cpp_cuda_glsl_hlsl_metal_spirv, byteaddressbuffer_rw)]\n") +SLANG_RAW(" uint3 Load3(int location, int alignment)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \".Load3\";\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" return __byteAddressBufferLoad(this, location, alignment);\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW("\n") +SLANG_RAW(" /// Load three 32-bit unsigned integers from the buffer at the specified location with alignment\n") +SLANG_RAW(" /// of `uint3`, which is 12.\n") +SLANG_RAW(" ///@param location The input address in bytes which must be a multiple of alignment of 12.\n") +SLANG_RAW(" ///@return `uint3` Three 32-bit unsigned integer value loaded from the buffer.\n") +SLANG_RAW(" [__NoSideEffect]\n") +SLANG_RAW(" [ForceInline]\n") +SLANG_RAW(" [require(cpp_cuda_glsl_hlsl_metal_spirv, byteaddressbuffer_rw)]\n") +SLANG_RAW(" uint3 Load3Aligned(int location)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \".Load3\";\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" return __byteAddressBufferLoad(this, location, __naturalStrideOf());\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW("\n") +SLANG_RAW(" [__NoSideEffect]\n") +SLANG_RAW(" [ForceInline]\n") +SLANG_RAW(" [require(hlsl, byteaddressbuffer_rw)]\n") +SLANG_RAW(" uint3 Load3(int location, out uint status)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \".Load3\";\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW("\n") +SLANG_RAW(" /// Load four 32-bit unsigned integers from the buffer at the specified location.\n") +SLANG_RAW(" ///@param location The input address in bytes which must be a multiple of alignment of 4.\n") +SLANG_RAW(" ///@param alignment Specifies the alignment of the location, which must be a multiple of 4.\n") +SLANG_RAW(" ///@param[out] status The status of the operation.\n") +SLANG_RAW(" ///@return `uint4` Four 32-bit unsigned integer value loaded from the buffer.\n") +SLANG_RAW(" ///\n") +SLANG_RAW(" ///@remarks\n") +SLANG_RAW(" /// This function only 
supports when targeting HLSL.\n") +SLANG_RAW(" /// You can't access the output parameter `status` directly; instead,\n") +SLANG_RAW(" /// pass the status to the `CheckAccessFullyMapped` intrinsic function.\n") +SLANG_RAW(" /// `CheckAccessFullyMapped` returns TRUE if all values from the corresponding Sample,\n") +SLANG_RAW(" /// Gather, or Load operation accessed mapped tiles in a tiled resource.\n") +SLANG_RAW(" /// If any values were taken from an unmapped tile, `CheckAccessFullyMapped` returns FALSE.\n") +SLANG_RAW(" [__NoSideEffect]\n") +SLANG_RAW(" [ForceInline]\n") +SLANG_RAW(" [require(cpp_cuda_glsl_hlsl_metal_spirv, byteaddressbuffer_rw)]\n") +SLANG_RAW(" uint4 Load4(int location)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \".Load4\";\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" return __byteAddressBufferLoad(this, location, 0);\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW("\n") +SLANG_RAW(" [__NoSideEffect]\n") +SLANG_RAW(" [ForceInline]\n") +SLANG_RAW(" [require(cpp_cuda_glsl_hlsl_metal_spirv, byteaddressbuffer_rw)]\n") +SLANG_RAW(" uint4 Load4(int location, int alignment)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \".Load4\";\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" return __byteAddressBufferLoad(this, location, alignment);\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW("\n") +SLANG_RAW(" /// Load four 32-bit unsigned integers from the buffer at the specified location with alignment\n") +SLANG_RAW(" /// of `uint4`, which is 16.\n") +SLANG_RAW(" ///@param location The input address in bytes which must be a multiple of alignment of 16.\n") +SLANG_RAW(" ///@return `uint4` Four 32-bit unsigned integer value loaded from the buffer.\n") +SLANG_RAW(" [__NoSideEffect]\n") +SLANG_RAW(" [ForceInline]\n") +SLANG_RAW(" [require(cpp_cuda_glsl_hlsl_metal_spirv, byteaddressbuffer_rw)]\n") +SLANG_RAW(" uint4 Load4Aligned(int location)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \".Load4\";\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" return __byteAddressBufferLoad(this, location, __naturalStrideOf());\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW("\n") +SLANG_RAW(" [__NoSideEffect]\n") +SLANG_RAW(" [ForceInline]\n") +SLANG_RAW(" [require(hlsl, byteaddressbuffer_rw)]\n") +SLANG_RAW(" uint4 Load4(int location, out uint status)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \".Load4\";\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW("\n") +SLANG_RAW(" [__NoSideEffect]\n") +SLANG_RAW(" [ForceInline]\n") +SLANG_RAW(" [require(cpp_cuda_glsl_hlsl_metal_spirv, byteaddressbuffer_rw)]\n") +SLANG_RAW(" T Load(int location)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" return __byteAddressBufferLoad(this, location, 0);\n") +SLANG_RAW(" }\n") +SLANG_RAW("\n") +SLANG_RAW(" [__NoSideEffect]\n") +SLANG_RAW(" [ForceInline]\n") +SLANG_RAW(" [require(cpp_cuda_glsl_hlsl_metal_spirv, byteaddressbuffer_rw)]\n") +SLANG_RAW(" T Load(int location, int alignment)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" return __byteAddressBufferLoad(this, location, alignment);\n") +SLANG_RAW(" }\n") +SLANG_RAW("\n") +SLANG_RAW(" /// Load an element with type `T` from the buffer at the specified location with alignment of `T`.\n") +SLANG_RAW(" ///@param location The input address in bytes which must be a multiple of size of `T`.\n") +SLANG_RAW(" 
///@return T value with type `T` loaded from the buffer.\n") +SLANG_RAW(" ///@remarks\n") +SLANG_RAW(" ///Currently, this function only supports when `T` is scalar, vector, or matrix type.\n") +SLANG_RAW(" [__NoSideEffect]\n") +SLANG_RAW(" [ForceInline]\n") +SLANG_RAW(" [require(cpp_cuda_glsl_hlsl_metal_spirv, byteaddressbuffer_rw)]\n") +SLANG_RAW(" T LoadAligned(int location)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" return __byteAddressBufferLoad(this, location, __naturalStrideOf());\n") +SLANG_RAW(" }\n") +SLANG_RAW("\n") + + struct BufferAtomicOps + { + const char* name; + const char* internalName; + }; + const BufferAtomicOps bufferAtomicOps[] = { + {"Max", "max"}, + {"Min", "min"}, + {"Add", "add"}, + {"And", "and"}, + {"Or", "or"}, + {"Xor", "xor"}, + {"Exchange", "exchange"} + }; + if (item.op == kIROp_HLSLRWByteAddressBufferType) + { +SLANG_RAW("#line 4622 \"hlsl.meta.slang\"") +SLANG_RAW("\n") +SLANG_RAW("\n") +SLANG_RAW(" // float32 and int64 atomic support. This is a Slang specific extension, it uses\n") +SLANG_RAW(" // GL_EXT_shader_atomic_float on Vulkan\n") +SLANG_RAW(" // NvAPI support on DX\n") +SLANG_RAW(" // NOTE! To use this feature on HLSL based targets the path to 'nvHLSLExtns.h' from the NvAPI SDK must\n") +SLANG_RAW(" // be set. That this include will be added to the *output* that is passed to a downstram compiler.\n") +SLANG_RAW(" // Also note that you *can* include NVAPI headers in your Slang source, and directly use NVAPI functions\n") +SLANG_RAW(" // Directly using NVAPI functions does *not* add the #include on the output\n") +SLANG_RAW(" // Finally note you can *mix* NVAPI direct calls, and use of NVAPI intrinsics below. This doesn't cause\n") +SLANG_RAW(" // any clashes, as Slang will emit any NVAPI function it parsed (say via a include in Slang source) with\n") +SLANG_RAW(" // unique functions.\n") +SLANG_RAW(" //\n") +SLANG_RAW(" // https://www.khronos.org/registry/vulkan/specs/1.2-extensions/html/vkspec.html#VK_EXT_shader_atomic_float\n") +SLANG_RAW(" // https://htmlpreview.github.io/?https://github.com/KhronosGroup/SPIRV-Registry/blob/master/extensions/EXT/SPV_EXT_shader_atomic_float_add.html\n") +SLANG_RAW("\n") +SLANG_RAW(" // F32 Add\n") +SLANG_RAW("\n") +SLANG_RAW(" /// Perform a 32-bit floating point atomic add operation at `byteAddress`.\n") +SLANG_RAW(" /// @param byteAddress The address at which to perform the atomic add operation.\n") +SLANG_RAW(" /// @param valueToAdd The value to add to the value at `byteAddress`.\n") +SLANG_RAW(" /// @param originalValue The original value at `byteAddress` before the add operation.\n") +SLANG_RAW(" /// @remarks For SPIR-V, this function maps to `OpAtomicFAdd`. For HLSL, this function translates to an NVAPI call\n") +SLANG_RAW(" /// due to lack of native HLSL intrinsic for floating point atomic add. 
For CUDA, this function\n") +SLANG_RAW(" /// maps to `atomicAdd`.\n") +SLANG_RAW(" __cuda_sm_version(2.0)\n") +SLANG_RAW(" [__requiresNVAPI]\n") +SLANG_RAW(" [ForceInline]\n") +SLANG_RAW(" [require(cuda_glsl_hlsl_metal_spirv, atomic_glsl_hlsl_nvapi_cuda_metal_float1)]\n") +SLANG_RAW(" void InterlockedAddF32(uint byteAddress, float valueToAdd, out float originalValue)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"($3 = NvInterlockedAddFp32($0, $1, $2))\";\n") +SLANG_RAW(" case cuda: __intrinsic_asm \"(*$3 = atomicAdd($0._getPtrAt($1), $2))\";\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" {\n") +SLANG_RAW(" let buf = __getEquivalentStructuredBuffer(this);\n") +SLANG_RAW(" originalValue = __atomic_add(buf[byteAddress / 4], valueToAdd);\n") +SLANG_RAW(" return;\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW("\n") +SLANG_RAW(" [require(cuda, cuda_sm_6_0)]\n") +SLANG_RAW(" [require(spirv, spvAtomicFloat64AddEXT)]\n") +SLANG_RAW(" void InterlockedAddF64(uint byteAddress, double valueToAdd, out double originalValue)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case cuda: __intrinsic_asm \"(*$3 = atomicAdd($0._getPtrAt($1), $2))\";\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" {\n") +SLANG_RAW(" let buf = __getEquivalentStructuredBuffer(this);\n") +SLANG_RAW(" originalValue = __atomic_add(buf[byteAddress / 8], valueToAdd);\n") +SLANG_RAW(" return;\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW(" // FP16x2\n") +SLANG_RAW("\n") +SLANG_RAW(" ///@internal\n") +SLANG_RAW(" /// Maps to the `NvInterlockedAddFp16x2` NVAPI function.\n") +SLANG_RAW(" /// Perform 2 16-bit floating point atomic add operations at `byteAddress`.\n") +SLANG_RAW(" /// @param byteAddress The address at which to perform the atomic add operation.\n") +SLANG_RAW(" /// @param fp16x2Value Two 16-bit floating point values are packed into a 32-bit unsigned integer.\n") +SLANG_RAW(" /// @return The 2 16-bit floating point values packed into a 32-bit unsigned integer.\n") +SLANG_RAW(" [__requiresNVAPI]\n") +SLANG_RAW(" [ForceInline]\n") +SLANG_RAW(" [require(cuda_hlsl_spirv)]\n") +SLANG_RAW(" uint _NvInterlockedAddFp16x2(uint byteAddress, uint fp16x2Value)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl:\n") +SLANG_RAW(" __intrinsic_asm \"NvInterlockedAddFp16x2($0, $1, $2)\";\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" let buf = __getEquivalentStructuredBuffer(this);\n") +SLANG_RAW(" return bit_cast(__atomic_add(buf[byteAddress / 4], bit_cast(fp16x2Value)));\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW("\n") +SLANG_RAW("\n") +SLANG_RAW(" /// Perform a 16-bit floating point atomic add operation at `byteAddress`.\n") +SLANG_RAW(" /// @param byteAddress The address at which to perform the atomic add operation.\n") +SLANG_RAW(" /// @param value The value to add to the value at `byteAddress`.\n") +SLANG_RAW(" /// @param originalValue The original value at `byteAddress` before the add operation.\n") +SLANG_RAW(" /// @remarks For SPIR-V, this function maps to `OpAtomicFAdd` and requires `SPV_EXT_shader_atomic_float16_add` extension.\n") +SLANG_RAW(" ///\n") +SLANG_RAW(" /// For HLSL, this function translates to an NVAPI call\n") +SLANG_RAW(" /// due to lack of native HLSL intrinsic for floating point atomic add. 
For CUDA, this function\n") +SLANG_RAW(" /// maps to `atomicAdd`.\n") +SLANG_RAW(" [__requiresNVAPI]\n") +SLANG_RAW(" [ForceInline]\n") +SLANG_RAW(" void InterlockedAddF16(uint byteAddress, half value, out half originalValue)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl:\n") +SLANG_RAW(" if ((byteAddress & 2) == 0)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" uint packedInput = asuint16(value);\n") +SLANG_RAW(" originalValue = asfloat16((uint16_t)_NvInterlockedAddFp16x2(byteAddress, packedInput));\n") +SLANG_RAW(" }\n") +SLANG_RAW(" else\n") +SLANG_RAW(" {\n") +SLANG_RAW(" byteAddress = byteAddress & ~3;\n") +SLANG_RAW(" uint packedInput = ((uint)asuint16(value)) << 16;\n") +SLANG_RAW(" originalValue = asfloat16((uint16_t)(_NvInterlockedAddFp16x2(byteAddress, packedInput) >> 16));\n") +SLANG_RAW(" }\n") +SLANG_RAW(" return;\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" {\n") +SLANG_RAW(" let buf = __getEquivalentStructuredBuffer(this);\n") +SLANG_RAW(" originalValue = __atomic_add(buf[byteAddress/2], value);\n") +SLANG_RAW(" return;\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW("\n") +SLANG_RAW(" /// Perform a 16-bit floating point atomic add operation at `byteAddress` through emulation using `half2` atomics.\n") +SLANG_RAW(" /// @param byteAddress The address at which to perform the atomic add operation.\n") +SLANG_RAW(" /// @param value The value to add to the value at `byteAddress`.\n") +SLANG_RAW(" /// @param originalValue The original value at `byteAddress` before the add operation.\n") +SLANG_RAW(" /// @remarks For SPIR-V, this function maps to `OpAtomicFAdd` on a `half2` vector with the correct part set to `value`\n") +SLANG_RAW(" /// and the remaining part set to 0. This requires the `AtomicFloat16VectorNV` capability introduced by the `SPV_NV_shader_atomic_fp16_vector`\n") +SLANG_RAW(" /// extension.\n") +SLANG_RAW(" ///\n") +SLANG_RAW(" /// For HLSL, this function translates to an equivalent NVAPI call\n") +SLANG_RAW(" /// due to lack of native HLSL intrinsic for floating point atomic add. 
For CUDA, this function\n") +SLANG_RAW(" /// maps to `atomicAdd`.\n") +SLANG_RAW(" [__requiresNVAPI]\n") +SLANG_RAW(" [ForceInline]\n") +SLANG_RAW(" void InterlockedAddF16Emulated(uint byteAddress, half value, out half originalValue)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl:\n") +SLANG_RAW(" if ((byteAddress & 2) == 0)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" uint packedInput = asuint16(value);\n") +SLANG_RAW(" originalValue = asfloat16((uint16_t)_NvInterlockedAddFp16x2(byteAddress, packedInput));\n") +SLANG_RAW(" }\n") +SLANG_RAW(" else\n") +SLANG_RAW(" {\n") +SLANG_RAW(" byteAddress = byteAddress & ~3;\n") +SLANG_RAW(" uint packedInput = ((uint)asuint16(value)) << 16;\n") +SLANG_RAW(" originalValue = asfloat16((uint16_t)(_NvInterlockedAddFp16x2(byteAddress, packedInput) >> 16));\n") +SLANG_RAW(" }\n") +SLANG_RAW(" return;\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" {\n") +SLANG_RAW(" let buf = __getEquivalentStructuredBuffer(this);\n") +SLANG_RAW(" if ((byteAddress & 2) == 0)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" originalValue = __atomic_add(buf[byteAddress/4], half2(value, half(0.0))).x;\n") +SLANG_RAW(" }\n") +SLANG_RAW(" else\n") +SLANG_RAW(" {\n") +SLANG_RAW(" originalValue = __atomic_add(buf[byteAddress/4], half2(half(0.0), value)).y;\n") +SLANG_RAW(" }\n") +SLANG_RAW(" return;\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW("\n") +SLANG_RAW(" // Without returning original value\n") +SLANG_RAW("\n") +SLANG_RAW(" [__requiresNVAPI]\n") +SLANG_RAW(" [ForceInline]\n") +SLANG_RAW(" __cuda_sm_version(2.0)\n") +SLANG_RAW(" [require(cuda_glsl_hlsl_metal_spirv, atomic_glsl_hlsl_nvapi_cuda_metal_float1)]\n") +SLANG_RAW(" void InterlockedAddF32(uint byteAddress, float valueToAdd)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"(NvInterlockedAddFp32($0, $1, $2))\";\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" {\n") +SLANG_RAW(" let buf = __getEquivalentStructuredBuffer(this);\n") +SLANG_RAW(" __atomic_add(buf[byteAddress / 4], valueToAdd);\n") +SLANG_RAW(" return;\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW("\n") +SLANG_RAW(" // Int64 Add\n") +SLANG_RAW("\n") +SLANG_RAW(" /// Perform a 64-bit integer atomic add operation at `byteAddress`.\n") +SLANG_RAW(" /// @param byteAddress The address at which to perform the atomic add operation.\n") +SLANG_RAW(" /// @param valueToAdd The value to add to the value at `byteAddress`.\n") +SLANG_RAW(" /// @param originalValue The original value at `byteAddress` before the add operation.\n") +SLANG_RAW(" /// @remarks For SPIR-V, this function maps to `OpAtomicAdd`. 
For HLSL, this function\n") +SLANG_RAW(" /// translates to `InterlockedAdd64` and requires shader model 6.6.\n") +SLANG_RAW(" /// For CUDA, this function maps to `atomicAdd`.\n") +SLANG_RAW(" [ForceInline]\n") +SLANG_RAW(" [require(cuda_glsl_hlsl_spirv, atomic_glsl_hlsl_cuda9_int64)]\n") +SLANG_RAW(" void InterlockedAddI64(uint byteAddress, int64_t valueToAdd, out int64_t originalValue)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" InterlockedAdd64(byteAddress, valueToAdd, originalValue);\n") +SLANG_RAW(" }\n") +SLANG_RAW("\n") +SLANG_RAW(" // Without returning original value\n") +SLANG_RAW(" [ForceInline]\n") +SLANG_RAW(" [require(cuda_glsl_hlsl_spirv, atomic_glsl_hlsl_cuda9_int64)]\n") +SLANG_RAW(" void InterlockedAddI64(uint byteAddress, int64_t valueToAdd)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" InterlockedAdd64(byteAddress, valueToAdd);\n") +SLANG_RAW(" }\n") +SLANG_RAW("\n") +SLANG_RAW(" // Cas uint64_t\n") +SLANG_RAW("\n") +SLANG_RAW(" /// Perform a 64-bit integer atomic compare-and-exchange operation at `byteAddress`.\n") +SLANG_RAW(" /// @param byteAddress The address at which to perform the atomic compare-and-exchange operation.\n") +SLANG_RAW(" /// @param compareValue The value to compare to the value at `byteAddress`.\n") +SLANG_RAW(" /// @param value The value to store at `byteAddress` if the comparison is successful.\n") +SLANG_RAW(" /// @param outOriginalValue The original value at `byteAddress` before the add operation.\n") +SLANG_RAW(" /// @remarks For SPIR-V, this function maps to `OpAtomicCompareExchange`. For HLSL, this function\n") +SLANG_RAW(" /// translates to `InterlockedCompareExchange64` and requires shader model 6.6.\n") +SLANG_RAW(" /// For CUDA, this function maps to `atomicCAS`.\n") +SLANG_RAW(" [require(cuda_glsl_hlsl_spirv, atomic_glsl_hlsl_cuda9_int64)]\n") +SLANG_RAW(" void InterlockedCompareExchangeU64(uint byteAddress, uint64_t compareValue, uint64_t value, out uint64_t outOriginalValue)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case cuda: __intrinsic_asm \"(*$4 = atomicCAS($0._getPtrAt($1), $2, $3))\";\n") +SLANG_RAW(" case hlsl:\n") +SLANG_RAW(" __intrinsic_asm \".InterlockedCompareExchange64\";\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" let buf = __getEquivalentStructuredBuffer(this);\n") +SLANG_RAW(" outOriginalValue = __atomic_compare_exchange(buf[byteAddress / 8], compareValue, value);\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW("\n") +SLANG_RAW(" // SM6.6 6 64bit atomics.\n") +SLANG_RAW("\n") +SLANG_RAW(" // InterlockedMax64, InterlockedMin64, InterlockedAdd64, InterlockedAnd64, InterlockedOr64, InterlockedXor64, InterlockedExchange64\n") + + for (auto op : bufferAtomicOps) { +SLANG_RAW("#line 4861 \"hlsl.meta.slang\"") +SLANG_RAW("\n") +SLANG_RAW("\n") +SLANG_RAW(" /// Perform a 64-bit unsigned integer atomic ") +SLANG_SPLICE(op.internalName +) +SLANG_RAW(" operation at `byteAddress`.\n") +SLANG_RAW(" /// @param byteAddress The address at which to perform the atomic ") +SLANG_SPLICE(op.internalName +) +SLANG_RAW(" operation.\n") +SLANG_RAW(" /// @param value The operand for the ") +SLANG_SPLICE(op.internalName +) +SLANG_RAW(" operation.\n") +SLANG_RAW(" [ForceInline]\n") +SLANG_RAW(" [require(cuda_glsl_hlsl_spirv, atomic_glsl_hlsl_cuda9_int64)]\n") +SLANG_RAW(" uint64_t Interlocked") +SLANG_SPLICE(op.name +) +SLANG_RAW("U64(uint byteAddress, uint64_t value)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" uint64_t originalValue;\n") +SLANG_RAW(" Interlocked") +SLANG_SPLICE(op.name +) +SLANG_RAW("64(byteAddress, value, 
originalValue);\n") +SLANG_RAW(" return originalValue;\n") +SLANG_RAW(" }\n") +SLANG_RAW("\n") +SLANG_RAW(" [ForceInline]\n") +SLANG_RAW(" [require(cuda_glsl_hlsl_spirv, atomic_glsl_hlsl_cuda9_int64)]\n") +SLANG_RAW(" void Interlocked") +SLANG_SPLICE(op.name +) +SLANG_RAW("64(uint byteAddress, int64_t value)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" int64_t oldValue;\n") +SLANG_RAW(" Interlocked") +SLANG_SPLICE(op.name +) +SLANG_RAW("64(byteAddress, value, oldValue);\n") +SLANG_RAW(" }\n") +SLANG_RAW("\n") +SLANG_RAW(" /// Perform a 64-bit integer atomic ") +SLANG_SPLICE(op.internalName +) +SLANG_RAW(" operation at `byteAddress`.\n") +SLANG_RAW(" /// @param byteAddress The address at which to perform the atomic ") +SLANG_SPLICE(op.internalName +) +SLANG_RAW(" operation.\n") +SLANG_RAW(" /// @param value The operand for the ") +SLANG_SPLICE(op.internalName +) +SLANG_RAW(" operation.\n") +SLANG_RAW(" /// @param outOriginalValue The original value at `byteAddress` before the ") +SLANG_SPLICE(op.internalName +) +SLANG_RAW(" operation.\n") +SLANG_RAW(" [ForceInline]\n") +SLANG_RAW(" [require(cuda_glsl_hlsl_spirv, atomic_glsl_hlsl_cuda9_int64)]\n") +SLANG_RAW(" void Interlocked") +SLANG_SPLICE(op.name +) +SLANG_RAW("64(uint byteAddress, T value, out T outOriginalValue)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \".Interlocked") +SLANG_SPLICE(op.name +) +SLANG_RAW("64\";\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" let buf = __getEquivalentStructuredBuffer(this);\n") +SLANG_RAW(" outOriginalValue = __atomic_") +SLANG_SPLICE(op.internalName +) +SLANG_RAW("(buf[byteAddress / 8], value);\n") +SLANG_RAW(" return;\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") + +} // for (each bufferOps) +SLANG_RAW("#line 4902 \"hlsl.meta.slang\"") +SLANG_RAW("\n") +SLANG_RAW("\n") +SLANG_RAW(" /// Perform a 64-bit integer atomic compare-and-exchange operation at `byteAddress`.\n") +SLANG_RAW(" /// @param byteAddress The address at which to perform the atomic compare-and-exchange operation.\n") +SLANG_RAW(" /// @param compareValue The value to compare to the value at `byteAddress`.\n") +SLANG_RAW(" /// @param value The value to store at `byteAddress` if the comparison is successful.\n") +SLANG_RAW(" /// @param outOriginalValue The original value at `byteAddress` before the add operation.\n") +SLANG_RAW(" /// @remarks For SPIR-V, this function maps to `OpAtomicCompareExchange`. 
For HLSL, this function\n") +SLANG_RAW(" /// translates to `InterlockedCompareExchange64` and requires shader model 6.6.\n") +SLANG_RAW(" /// For CUDA, this function maps to `atomicCAS`.\n") +SLANG_RAW(" [ForceInline]\n") +SLANG_RAW(" [require(cuda_glsl_hlsl_spirv, atomic_glsl_hlsl_cuda9_int64)]\n") +SLANG_RAW(" void InterlockedCompareExchange64(uint byteAddress, T compareValue, T value, out T outOriginalValue)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl:\n") +SLANG_RAW(" __intrinsic_asm \".InterlockedCompareExchange64\";\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" let buf = __getEquivalentStructuredBuffer(this);\n") +SLANG_RAW(" outOriginalValue = __atomic_compare_exchange(buf[byteAddress / 8], compareValue, value);\n") +SLANG_RAW(" return;\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW("\n") +SLANG_RAW(" /// Perform a floating-point atomic bitwise compare-and-exchange operation at `byteAddress`.\n") +SLANG_RAW(" /// @param byteAddress The address at which to perform the atomic exchange operation.\n") +SLANG_RAW(" /// @param compareValue The value to compare to the value at `byteAddress`.\n") +SLANG_RAW(" /// @param value The value to store at `byteAddress`.\n") +SLANG_RAW(" /// @param [out] outOriginalValue The original value at `byteAddress` before the exchange operation.\n") +SLANG_RAW(" /// @remarks For SPIR-V, this function maps to `OpAtomicCompareExchange`. For HLSL, this function\n") +SLANG_RAW(" /// translates to `InterlockedCompareExchangeFloatBitwise` and requires shader model 6.6.\n") +SLANG_RAW(" /// For CUDA, this function maps to `atomicCAS`.\n") +SLANG_RAW(" [ForceInline]\n") +SLANG_RAW(" [require(cuda_glsl_hlsl_spirv, atomic_glsl_hlsl_cuda9_int64)]\n") +SLANG_RAW(" void InterlockedCompareExchangeFloatBitwise(uint byteAddress, float compareValue, float value, out float outOriginalValue)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \".InterlockedCompareExchangeFloatBitwise\";\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" let buf = __getEquivalentStructuredBuffer(this);\n") +SLANG_RAW(" outOriginalValue = __atomic_compare_exchange(buf[byteAddress / 4], compareValue, value);\n") +SLANG_RAW(" return;\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW("\n") +SLANG_RAW(" /// Perform a floating-point atomic bitwise exchange operation at `byteAddress`.\n") +SLANG_RAW(" /// @param byteAddress The address at which to perform the atomic exchange operation.\n") +SLANG_RAW(" /// @param value The value to store at `byteAddress`.\n") +SLANG_RAW(" /// @param [out] outOriginalValue The original value at `byteAddress` before the exchange operation.\n") +SLANG_RAW(" /// @remarks For SPIR-V, this function maps to `OpAtomicExchange`. 
For HLSL, this function\n") +SLANG_RAW(" /// translates to `InterlockedExchangeFloat` and requires shader model 6.6.\n") +SLANG_RAW(" /// For CUDA, this function maps to `atomicExch`.\n") +SLANG_RAW(" [ForceInline]\n") +SLANG_RAW(" [require(cuda_glsl_hlsl_spirv, atomic_glsl_hlsl_cuda9_int64)]\n") +SLANG_RAW(" void InterlockedExchangeFloat(uint byteAddress, float value, out float outOriginalValue)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \".InterlockedExchangeFloat\";\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" let buf = __getEquivalentStructuredBuffer(this);\n") +SLANG_RAW(" outOriginalValue = __atomic_exchange(buf[byteAddress / 4], value);\n") +SLANG_RAW(" return;\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW("\n") +SLANG_RAW(" /// Perform a 64-bit integer atomic compare-and-store operation at `byteAddress`.\n") +SLANG_RAW(" /// @param byteAddress The address at which to perform the atomic store operation.\n") +SLANG_RAW(" /// @param compareValue The value to compare to the value at `byteAddress`.\n") +SLANG_RAW(" /// @param value The value to store at `byteAddress` if the the value at address is equal to `compareValue`.\n") +SLANG_RAW(" /// @param [out] outOriginalValue The original value at `byteAddress` before the store operation.\n") +SLANG_RAW(" /// @remarks For SPIR-V, this function maps to `OpAtomicCompareExchange`. For HLSL, this function\n") +SLANG_RAW(" /// translates to `InterlockedCompareStore64` and requires shader model 6.6.\n") +SLANG_RAW(" /// For CUDA, this function maps to `atomicCAS`.\n") +SLANG_RAW(" [ForceInline]\n") +SLANG_RAW(" [ForceInline]\n") +SLANG_RAW(" [require(cuda_glsl_hlsl_spirv, atomic_glsl_hlsl_cuda9_int64)]\n") +SLANG_RAW(" void InterlockedCompareStore64(uint byteAddress, T compareValue, T value)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \".InterlockedCompareStore64\";\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" let buf = __getEquivalentStructuredBuffer(this);\n") +SLANG_RAW(" __atomic_compare_exchange(buf[byteAddress / 4], compareValue, value);\n") +SLANG_RAW(" return;\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW(" \n") +SLANG_RAW(" /// Perform a floating-point atomic bitwise compare-and-store operation at `byteAddress`.\n") +SLANG_RAW(" /// @param byteAddress The address at which to perform the atomic compare-and-exchange operation.\n") +SLANG_RAW(" /// @param compareValue The value to perform bitwise comparison to the value at `byteAddress`.\n") +SLANG_RAW(" /// @param value The value to store at `byteAddress` if the comparison is successful.\n") +SLANG_RAW(" /// @param [out] outOriginalValue The original value at `byteAddress` before the compare-and-exchange operation.\n") +SLANG_RAW(" /// @remarks For SPIR-V, this function maps to `OpAtomicCompareExchange`. 
For HLSL, this function\n") +SLANG_RAW(" /// translates to `InterlockedCompareStoreFloatBitwise` and requires shader model 6.6.\n") +SLANG_RAW(" /// For CUDA, this function maps to `atomicCAS`.\n") +SLANG_RAW(" [ForceInline]\n") +SLANG_RAW(" [require(cuda_glsl_hlsl_spirv, atomic_glsl_hlsl_cuda9_int64)]\n") +SLANG_RAW(" void InterlockedCompareStoreFloatBitwise(uint byteAddress, float compareValue, float value)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \".InterlockedCompareStoreFloatBitwise\";\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" let buf = __getEquivalentStructuredBuffer(this);\n") +SLANG_RAW(" __atomic_compare_exchange(buf[byteAddress / 4], compareValue, value);\n") +SLANG_RAW(" return;\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW("\n") + + } // endif (type == RWByteAddressBuffer) +SLANG_RAW("#line 5017 \"hlsl.meta.slang\"") +SLANG_RAW("\n") +SLANG_RAW("\n") +SLANG_RAW(" // 32-bit atomic operations:\n") +SLANG_RAW(" // InterlockedMax, InterlockedMin, InterlockedAdd, InterlockedAnd, InterlockedOr, InterlockedXor, InterlockedExchange\n") + + for (auto op : bufferAtomicOps) { +SLANG_RAW("#line 5023 \"hlsl.meta.slang\"") +SLANG_RAW("\n") +SLANG_RAW("\n") +SLANG_RAW(" /// Perform an atomic ") +SLANG_SPLICE(op.internalName +) +SLANG_RAW(" operation at the specified byte\n") +SLANG_RAW(" /// location of the byte address buffer.\n") +SLANG_RAW(" /// @param dest The byte address at which to perform the atomic ") +SLANG_SPLICE(op.internalName +) +SLANG_RAW(" operation.\n") +SLANG_RAW(" /// @param value The operand of the atomic operation.\n") +SLANG_RAW(" /// @param original_value The original value at `dest` before the ") +SLANG_SPLICE(op.internalName +) +SLANG_RAW(" operation.\n") +SLANG_RAW(" [ForceInline]\n") +SLANG_RAW(" [require(cuda_glsl_hlsl_metal_spirv, atomic_glsl_hlsl_cuda_metal, byteaddressbuffer_rw)]\n") +SLANG_RAW(" void Interlocked") +SLANG_SPLICE(op.name +) +SLANG_RAW("(\n") +SLANG_RAW(" UINT dest,\n") +SLANG_RAW(" UINT value,\n") +SLANG_RAW(" out UINT original_value)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \".Interlocked") +SLANG_SPLICE(op.name +) +SLANG_RAW("\";\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" let buf = __getEquivalentStructuredBuffer(this);\n") +SLANG_RAW(" ::Interlocked") +SLANG_SPLICE(op.name +) +SLANG_RAW("(buf[dest / 4], value, original_value);\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW("\n") +SLANG_RAW(" [ForceInline]\n") +SLANG_RAW(" [require(cuda_glsl_hlsl_metal_spirv, atomic_glsl_hlsl_cuda_metal, byteaddressbuffer_rw)]\n") +SLANG_RAW(" void Interlocked") +SLANG_SPLICE(op.name +) +SLANG_RAW("(\n") +SLANG_RAW(" UINT dest,\n") +SLANG_RAW(" UINT value)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \".Interlocked") +SLANG_SPLICE(op.name +) +SLANG_RAW("\";\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" let buf = __getEquivalentStructuredBuffer(this);\n") +SLANG_RAW(" ::Interlocked") +SLANG_SPLICE(op.name +) +SLANG_RAW("(buf[dest / 4], value);\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") + +} // for (buffer atomic ops) +SLANG_RAW("#line 5062 \"hlsl.meta.slang\"") +SLANG_RAW("\n") +SLANG_RAW("\n") +SLANG_RAW(" /// Perform a 32-bit integer atomic compare-and-exchange operation at\n") +SLANG_RAW(" /// the specified byte address within the `RWByteAddressBuffer`.\n") +SLANG_RAW(" /// @param dest The address at which to perform the atomic 
compare-and-exchange operation.\n") +SLANG_RAW(" /// @param compare_value The value to perform bitwise comparison to the value at `byteAddress`.\n") +SLANG_RAW(" /// @param value The value to store at `byteAddress` if the comparison is successful.\n") +SLANG_RAW(" /// @param original_value The original value at `byteAddress` before the compare-and-exchange operation.\n") +SLANG_RAW(" /// @remarks For SPIR-V, this function maps to `OpAtomicCompareExchange`. For HLSL, this function\n") +SLANG_RAW(" /// translates to `InterlockedCompareExchange`.\n") +SLANG_RAW(" /// For CUDA, this function maps to `atomicCAS`.\n") +SLANG_RAW(" [ForceInline]\n") +SLANG_RAW(" [require(cuda_glsl_hlsl_metal_spirv, atomic_glsl_hlsl_cuda_metal, byteaddressbuffer_rw)]\n") +SLANG_RAW(" void InterlockedCompareExchange(\n") +SLANG_RAW(" UINT dest,\n") +SLANG_RAW(" UINT compare_value,\n") +SLANG_RAW(" UINT value,\n") +SLANG_RAW(" out UINT original_value)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \".InterlockedCompareExchange\";\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" let buf = __getEquivalentStructuredBuffer(this);\n") +SLANG_RAW(" ::InterlockedCompareExchange(buf[dest / 4], compare_value, value, original_value);\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW("\n") +SLANG_RAW(" /// Perform a 32-bit integer atomic compare-and-store operation at\n") +SLANG_RAW(" /// the specified byte address within the `RWByteAddressBuffer`.\n") +SLANG_RAW(" /// @param dest The address at which to perform the atomic add operation.\n") +SLANG_RAW(" /// @param compare_value The value to perform comparison to the value at `byteAddress`.\n") +SLANG_RAW(" /// @param value The value to store at `byteAddress` if the comparison is successful.\n") +SLANG_RAW(" /// @remarks For SPIR-V, this function maps to `OpAtomicCompareExchange`. 
For HLSL, this function\n") +SLANG_RAW(" /// translates to `InterlockedCompareStore`.\n") +SLANG_RAW(" /// For CUDA, this function maps to `atomicCAS`.\n") +SLANG_RAW(" [ForceInline]\n") +SLANG_RAW(" [require(cuda_glsl_hlsl_metal_spirv, atomic_glsl_hlsl_cuda_metal, byteaddressbuffer_rw)]\n") +SLANG_RAW(" void InterlockedCompareStore(\n") +SLANG_RAW(" UINT dest,\n") +SLANG_RAW(" UINT compare_value,\n") +SLANG_RAW(" UINT value)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \".InterlockedCompareStore\";\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" let buf = __getEquivalentStructuredBuffer(this);\n") +SLANG_RAW(" ::InterlockedCompareStore(buf[dest / 4], compare_value, value);\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW("\n") +SLANG_RAW(" /// Set one value to the buffer at the specified location.\n") +SLANG_RAW(" ///@param T The type of the value to load from the buffer.\n") +SLANG_RAW(" ///@param value The input value.\n") +SLANG_RAW(" ///@param address The input address in bytes, which must be a multiple of 4.\n") +SLANG_RAW(" ///@param alignment Specifies the alignment of the location, which must be a multiple of 4.\n") +SLANG_RAW(" [ForceInline]\n") +SLANG_RAW(" [require(cpp_cuda_glsl_hlsl_metal_spirv, byteaddressbuffer_rw)]\n") +SLANG_RAW(" void Store(uint address, uint value)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \".Store\";\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" __byteAddressBufferStore(this, address, 0, value);\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW("\n") +SLANG_RAW("\n") +SLANG_RAW(" /// Set two values to the buffer at the specified location.\n") +SLANG_RAW(" ///@param address The input address in bytes, which must be a multiple of 4.\n") +SLANG_RAW(" ///@param value Two input values.\n") +SLANG_RAW(" ///@param alignment Specifies the alignment of the location, which must be a multiple of 4.\n") +SLANG_RAW(" [ForceInline]\n") +SLANG_RAW(" [require(cpp_cuda_glsl_hlsl_metal_spirv, byteaddressbuffer_rw)]\n") +SLANG_RAW(" void Store2(uint address, uint2 value)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \".Store2\";\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" __byteAddressBufferStore(this, address, 0, value);\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW("\n") +SLANG_RAW("\n") +SLANG_RAW(" [ForceInline]\n") +SLANG_RAW(" [require(cpp_cuda_glsl_hlsl_metal_spirv, byteaddressbuffer_rw)]\n") +SLANG_RAW(" void Store2(uint address, uint2 value, uint alignment)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \".Store2\";\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" __byteAddressBufferStore(this, address, alignment, value);\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW("\n") +SLANG_RAW(" /// Set two values to the buffer at the specified location, the address will be aligned\n") +SLANG_RAW(" /// to the alignment of `uint2`, which is 8.\n") +SLANG_RAW(" ///@param address The input address in bytes, which must be a multiple of 8.\n") +SLANG_RAW(" ///@param value Two input values.\n") +SLANG_RAW(" [ForceInline]\n") +SLANG_RAW(" [require(cpp_cuda_glsl_hlsl_metal_spirv, byteaddressbuffer_rw)]\n") +SLANG_RAW(" void Store2Aligned(uint address, uint2 value)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \".Store2\";\n") 
+SLANG_RAW(" default:\n") +SLANG_RAW(" __byteAddressBufferStore(this, address, __naturalStrideOf(), value);\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW("\n") +SLANG_RAW(" /// Set three values to the buffer at the specified location.\n") +SLANG_RAW(" ///@param address The input address in bytes, which must be a multiple of 4.\n") +SLANG_RAW(" ///@param value Three input values.\n") +SLANG_RAW(" ///@param alignment Specifies the alignment of the location, which must be a multiple of 4.\n") +SLANG_RAW(" [ForceInline]\n") +SLANG_RAW(" [require(cpp_cuda_glsl_hlsl_metal_spirv, byteaddressbuffer_rw)]\n") +SLANG_RAW(" void Store3(uint address, uint3 value)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \".Store3\";\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" __byteAddressBufferStore(this, address, 0, value);\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW("\n") +SLANG_RAW(" [ForceInline]\n") +SLANG_RAW(" [require(cpp_cuda_glsl_hlsl_metal_spirv, byteaddressbuffer_rw)]\n") +SLANG_RAW(" void Store3(uint address, uint3 value, uint alignment)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \".Store3\";\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" __byteAddressBufferStore(this, address, alignment, value);\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW("\n") +SLANG_RAW(" /// Set three values to the buffer at the specified location, the address will be aligned\n") +SLANG_RAW(" /// to the alignment of `uint3`, which is 12.\n") +SLANG_RAW(" ///@param address The input address in bytes, which must be a multiple of 12.\n") +SLANG_RAW(" ///@param value Three input values.\n") +SLANG_RAW(" [ForceInline]\n") +SLANG_RAW(" [require(cpp_cuda_glsl_hlsl_spirv, byteaddressbuffer_rw)]\n") +SLANG_RAW(" void Store3Aligned(uint address, uint3 value)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \".Store3\";\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" __byteAddressBufferStore(this, address, __naturalStrideOf(), value);\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW("\n") +SLANG_RAW(" /// Set four values to the buffer at the specified location.\n") +SLANG_RAW(" ///@param address The input address in bytes, which must be a multiple of 4.\n") +SLANG_RAW(" ///@param value Four input values.\n") +SLANG_RAW(" ///@param alignment Specifies the alignment of the location, which must be a multiple of 4.\n") +SLANG_RAW(" [ForceInline]\n") +SLANG_RAW(" [require(cpp_cuda_glsl_hlsl_spirv, byteaddressbuffer_rw)]\n") +SLANG_RAW(" void Store4(uint address, uint4 value)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \".Store4\";\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" __byteAddressBufferStore(this, address, 0, value);\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW("\n") +SLANG_RAW("\n") +SLANG_RAW(" [ForceInline]\n") +SLANG_RAW(" [require(cpp_cuda_glsl_hlsl_metal_spirv, byteaddressbuffer_rw)]\n") +SLANG_RAW(" void Store4(uint address, uint4 value, uint alignment)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \".Store4\";\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" __byteAddressBufferStore(this, address, alignment, value);\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW("\n") +SLANG_RAW(" /// Set four values to the buffer at the specified location, the address will be 
aligned\n") +SLANG_RAW(" /// to the alignment of `uint4`, which is 16.\n") +SLANG_RAW(" ///@param address The input address in bytes, which must be a multiple of 16.\n") +SLANG_RAW(" ///@param value Four input values.\n") +SLANG_RAW(" [ForceInline]\n") +SLANG_RAW(" [require(cpp_cuda_glsl_hlsl_metal_spirv, byteaddressbuffer_rw)]\n") +SLANG_RAW(" void Store4Aligned(uint address, uint4 value)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \".Store4\";\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" __byteAddressBufferStore(this, address, __naturalStrideOf(), value);\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW("\n") +SLANG_RAW(" [ForceInline]\n") +SLANG_RAW(" void Store(uint address, T value)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" __byteAddressBufferStore(this, address, 0, value);\n") +SLANG_RAW(" }\n") +SLANG_RAW("\n") +SLANG_RAW(" [ForceInline]\n") +SLANG_RAW(" void Store(uint address, T value, uint alignment)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" __byteAddressBufferStore(this, address, alignment, value);\n") +SLANG_RAW(" }\n") +SLANG_RAW("\n") +SLANG_RAW(" /// Set four values to the buffer at the specified location, the address will be aligned\n") +SLANG_RAW(" /// to the alignment of `T`.\n") +SLANG_RAW(" ///@param T The type of the input value.\n") +SLANG_RAW(" ///@param address The input address in bytes, which must be a multiple of size of `T`.\n") +SLANG_RAW(" ///@param value The input value.\n") +SLANG_RAW(" [ForceInline]\n") +SLANG_RAW(" void StoreAligned(uint address, T value)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" __byteAddressBufferStore(this, address, __naturalStrideOf(), value);\n") +SLANG_RAW(" }\n") +SLANG_RAW("};\n") +SLANG_RAW("\n") + +} +SLANG_RAW("#line 5292 \"hlsl.meta.slang\"") +SLANG_RAW("\n") +SLANG_RAW("\n") + +static const struct { + IROp op; + char const* name; +} kMutableStructuredBufferCases[] = +{ + { kIROp_HLSLRWStructuredBufferType, "RWStructuredBuffer" }, + { kIROp_HLSLRasterizerOrderedStructuredBufferType, "RasterizerOrderedStructuredBuffer" }, +}; +for(auto item : kMutableStructuredBufferCases) { +SLANG_RAW("#line 5304 \"hlsl.meta.slang\"") +SLANG_RAW("\n") +SLANG_RAW("\n") +SLANG_RAW("__generic\n") +SLANG_RAW("__magic_type(HLSL") +SLANG_SPLICE(item.name +) +SLANG_RAW("Type)\n") +SLANG_RAW("__intrinsic_type(") +SLANG_SPLICE(item.op +) +SLANG_RAW(")\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_metal_spirv_wgsl, structuredbuffer_rw)]\n") +SLANG_RAW("/**\n") +SLANG_RAW("Represents an opaque handle to a mutable structured buffer allocated in global memory.\n") +SLANG_RAW("A structured buffer can be viewed as an array of the specified element type.\n") +SLANG_RAW(" @param T The element type of the buffer.\n") +SLANG_RAW(" @param L The memory layout of the buffer.\n") +SLANG_RAW(" @remarks\n") +SLANG_RAW("The `L` generic parameter is used to specify the memory layout of the buffer when\n") +SLANG_RAW("generating SPIRV.\n") +SLANG_RAW("`L` must be one of `DefaultDataLayout`, `Std140DataLayout`, `Std430DataLayout` or `ScalarDataLayout`.\n") +SLANG_RAW("The default value is `DefaultDataLayout`.\n") +SLANG_RAW("When generating code for other targets, this parameter is ignored and has no effect on the generated code.\n") +SLANG_RAW(" @see `StructuredBuffer`, `AppendStructuredBuffer`, `ConsumeStructuredBuffer`\n") +SLANG_RAW(" @category buffer_types\n") +SLANG_RAW("**/\n") +SLANG_RAW("struct ") +SLANG_SPLICE(item.name +) +SLANG_RAW("\n") +SLANG_RAW("{\n") +SLANG_RAW(" /// Decrements the object's hidden 
counter.\n") +SLANG_RAW(" /// @return The post-decremented counter value.\n") +SLANG_RAW(" /// @remarks\n") +SLANG_RAW(" /// This function is not implemented when targeting non-HLSL.\n") +SLANG_RAW(" uint DecrementCounter();\n") +SLANG_RAW("\n") +SLANG_RAW(" /// Get the dimensions of the buffer.\n") +SLANG_RAW(" /// @param numStructs The number of structures in the buffer.\n") +SLANG_RAW(" /// @param stride The stride, in bytes, of each structure element.\n") +SLANG_RAW(" [__readNone]\n") +SLANG_RAW(" [ForceInline]\n") +SLANG_RAW(" [require(cpp_cuda_glsl_hlsl_metal_spirv, structuredbuffer_rw)]\n") +SLANG_RAW(" void GetDimensions(\n") +SLANG_RAW(" out uint numStructs,\n") +SLANG_RAW(" out uint stride)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \".GetDimensions\";\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" let rs = __structuredBufferGetDimensions(this);\n") +SLANG_RAW(" numStructs = rs.x;\n") +SLANG_RAW(" stride = rs.y;\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW("\n") +SLANG_RAW(" /// Increment the object's hidden counter.\n") +SLANG_RAW(" /// @return The pre-incremented counter value.\n") +SLANG_RAW(" /// @remarks\n") +SLANG_RAW(" /// This function is not implemented when targeting non-HLSL.\n") +SLANG_RAW(" uint IncrementCounter();\n") +SLANG_RAW("\n") +SLANG_RAW(" /// Load a element from the buffer at the specified location.\n") +SLANG_RAW(" /// @param TIndex Type of the index.\n") +SLANG_RAW(" /// @param location The index of buffer.\n") +SLANG_RAW(" /// @param[out] status The status of the operation.\n") +SLANG_RAW(" /// @return The element at the specified index.\n") +SLANG_RAW(" ///\n") +SLANG_RAW(" /// @remarks\n") +SLANG_RAW(" /// You can't access the output parameter `status` directly; instead,\n") +SLANG_RAW(" /// pass the status to the `CheckAccessFullyMapped` intrinsic function.\n") +SLANG_RAW(" /// `CheckAccessFullyMapped` returns TRUE if all values from the corresponding Sample,\n") +SLANG_RAW(" /// Gather, or Load operation accessed mapped tiles in a tiled resource.\n") +SLANG_RAW(" /// If any values were taken from an unmapped tile, `CheckAccessFullyMapped` returns FALSE.\n") +SLANG_RAW(" /// When targeting non-HLSL, the status is always 0.\n") +SLANG_RAW(" [__NoSideEffect]\n") +SLANG_RAW(" __intrinsic_op(") +SLANG_SPLICE(kIROp_RWStructuredBufferLoad +) +SLANG_RAW(")\n") +SLANG_RAW(" T Load(TIndex location);\n") +SLANG_RAW("\n") +SLANG_RAW(" [__NoSideEffect]\n") +SLANG_RAW(" __intrinsic_op(") +SLANG_SPLICE(kIROp_RWStructuredBufferLoadStatus +) +SLANG_RAW(")\n") +SLANG_RAW(" T Load(TIndex location, out uint status);\n") +SLANG_RAW("\n") +SLANG_RAW(" /// Load a element from the buffer at the specified location.\n") +SLANG_RAW(" /// @param TIndex Type of the index.\n") +SLANG_RAW(" /// @param index The index of buffer.\n") +SLANG_RAW(" /// @return The element at the specified index.\n") +SLANG_RAW(" __generic\n") +SLANG_RAW(" __subscript(TIndex index) -> T\n") +SLANG_RAW(" {\n") +SLANG_RAW(" // If a 'Buffer[index]' is referred to by a '__ref', call 'kIROp_RWStructuredBufferGetElementPtr(index)'.\n") +SLANG_RAW(" //\n") +SLANG_RAW(" // This allows call's to stay aware that the input is from a 'Buffer'.\n") +SLANG_RAW(" [__NoSideEffect]\n") +SLANG_RAW(" [nonmutating]\n") +SLANG_RAW(" __intrinsic_op(") +SLANG_SPLICE(kIROp_RWStructuredBufferGetElementPtr +) +SLANG_RAW(")\n") +SLANG_RAW(" ref;\n") +SLANG_RAW(" }\n") +SLANG_RAW("};\n") +SLANG_RAW("\n") + +} +SLANG_RAW("#line 5398 \"hlsl.meta.slang\"") 
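A minimal usage sketch (not part of the generated header) of how the byte-address and structured-buffer operations declared above are intended to be called from user code; the buffer names, bindings, and entry point below are hypothetical, shown only to illustrate the declarations:

    // Hypothetical Slang compute kernel exercising RWByteAddressBuffer / RWStructuredBuffer.
    RWByteAddressBuffer gCounters;      // assumed binding
    RWStructuredBuffer<uint> gSlots;    // assumed binding

    [numthreads(64, 1, 1)]
    void countMain(uint3 tid : SV_DispatchThreadID)
    {
        uint previous;
        // 32-bit atomic add at byte offset 0; per the definitions above, non-HLSL targets
        // lower this to ::InterlockedAdd over __getEquivalentStructuredBuffer(this).
        gCounters.InterlockedAdd(0, 1, previous);
        // Subscript access on RWStructuredBuffer resolves through
        // kIROp_RWStructuredBufferGetElementPtr when the element is used as an l-value.
        gSlots[tid.x] = previous;
    }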
+SLANG_RAW("\n") +SLANG_RAW("\n") +SLANG_RAW("/// @category stage_io\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[require(glsl_hlsl_spirv, geometry)]\n") +SLANG_RAW("__magic_type(HLSLPointStreamType)\n") +SLANG_RAW("__intrinsic_type(") +SLANG_SPLICE(kIROp_HLSLPointStreamType +) +SLANG_RAW(")\n") +SLANG_RAW("struct PointStream\n") +SLANG_RAW("{\n") +SLANG_RAW(" [KnownBuiltin(\"GeometryStreamAppend\")]\n") +SLANG_RAW(" void Append(T value)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"EmitVertex()\";\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \".Append\";\n") +SLANG_RAW(" case spirv: spirv_asm { OpEmitVertex; };\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW("\n") +SLANG_RAW(" [KnownBuiltin(\"GeometryStreamRestart\")]\n") +SLANG_RAW(" void RestartStrip()\n") +SLANG_RAW(" {\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"EndPrimitive()\";\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \".RestartStrip\";\n") +SLANG_RAW(" case spirv: spirv_asm { OpEndPrimitive; };\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW("};\n") +SLANG_RAW("\n") +SLANG_RAW("/// @category stage_io\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[require(glsl_hlsl_spirv, geometry)]\n") +SLANG_RAW("__magic_type(HLSLLineStreamType)\n") +SLANG_RAW("__intrinsic_type(") +SLANG_SPLICE(kIROp_HLSLLineStreamType +) +SLANG_RAW(")\n") +SLANG_RAW("struct LineStream\n") +SLANG_RAW("{\n") +SLANG_RAW(" [KnownBuiltin(\"GeometryStreamAppend\")]\n") +SLANG_RAW(" void Append(T value)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"EmitVertex()\";\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \".Append\";\n") +SLANG_RAW(" case spirv: spirv_asm { OpEmitVertex; };\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW("\n") +SLANG_RAW(" [KnownBuiltin(\"GeometryStreamRestart\")]\n") +SLANG_RAW(" void RestartStrip()\n") +SLANG_RAW(" {\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"EndPrimitive()\";\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \".RestartStrip\";\n") +SLANG_RAW(" case spirv: spirv_asm { OpEndPrimitive; };\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW("};\n") +SLANG_RAW("\n") +SLANG_RAW("/// @category stage_io\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[require(glsl_hlsl_spirv, geometry)]\n") +SLANG_RAW("__magic_type(HLSLTriangleStreamType)\n") +SLANG_RAW("__intrinsic_type(") +SLANG_SPLICE(kIROp_HLSLTriangleStreamType +) +SLANG_RAW(")\n") +SLANG_RAW("struct TriangleStream\n") +SLANG_RAW("{\n") +SLANG_RAW(" [KnownBuiltin(\"GeometryStreamAppend\")]\n") +SLANG_RAW(" void Append(T value)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"EmitVertex()\";\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \".Append\";\n") +SLANG_RAW(" case spirv: spirv_asm { OpEmitVertex; };\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW("\n") +SLANG_RAW(" [KnownBuiltin(\"GeometryStreamRestart\")]\n") +SLANG_RAW(" void RestartStrip()\n") +SLANG_RAW(" {\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"EndPrimitive()\";\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \".RestartStrip\";\n") +SLANG_RAW(" case spirv: spirv_asm { OpEndPrimitive; };\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW("};\n") +SLANG_RAW("\n") +SLANG_RAW("#define VECTOR_MAP_UNARY(TYPE, COUNT, FUNC, VALUE) \\\n") +SLANG_RAW(" vector 
result; for(int i = 0; i < COUNT; ++i) { result[i] = FUNC(VALUE[i]); } return result\n") +SLANG_RAW("\n") +SLANG_RAW("#define MATRIX_MAP_UNARY(TYPE, ROWS, COLS, FUNC, VALUE) \\\n") +SLANG_RAW(" matrix result; for(int i = 0; i < ROWS; ++i) { result[i] = FUNC(VALUE[i]); } return result\n") +SLANG_RAW("\n") +SLANG_RAW("#define VECTOR_MAP_BINARY(TYPE, COUNT, FUNC, LEFT, RIGHT) \\\n") +SLANG_RAW(" vector result; for(int i = 0; i < COUNT; ++i) { result[i] = FUNC(LEFT[i], RIGHT[i]); } return result\n") +SLANG_RAW("\n") +SLANG_RAW("#define MATRIX_MAP_BINARY(TYPE, ROWS, COLS, FUNC, LEFT, RIGHT) \\\n") +SLANG_RAW(" matrix result; for(int i = 0; i < ROWS; ++i) { result[i] = FUNC(LEFT[i], RIGHT[i]); } return result\n") +SLANG_RAW("\n") +SLANG_RAW("#define VECTOR_MAP_TRINARY(TYPE, COUNT, FUNC, A, B, C) \\\n") +SLANG_RAW(" vector result; for(int i = 0; i < COUNT; ++i) { result[i] = FUNC(A[i], B[i], C[i]); } return result\n") +SLANG_RAW("\n") +SLANG_RAW("#define MATRIX_MAP_TRINARY(TYPE, ROWS, COLS, FUNC, A, B, C) \\\n") +SLANG_RAW(" matrix result; for(int i = 0; i < ROWS; ++i) { result[i] = FUNC(A[i], B[i], C[i]); } return result\n") +SLANG_RAW("\n") +SLANG_RAW("//@public:\n") +SLANG_RAW("\n") +SLANG_RAW("/// Try to terminate the current draw or dispatch call (HLSL SM 4.0)\n") +SLANG_RAW("void abort();\n") +SLANG_RAW("\n") +SLANG_RAW("/// The abs function returns the absolute value of x.\n") +SLANG_RAW("/// @param x The input value.\n") +SLANG_RAW("/// @return The absolute value of x.\n") +SLANG_RAW("/// @category math\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_metal_spirv_wgsl, sm_4_0_version)]\n") +SLANG_RAW("T abs(T x)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"abs\";\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"abs\";\n") +SLANG_RAW(" case metal: __intrinsic_asm \"abs\";\n") +SLANG_RAW(" case cuda: __intrinsic_asm \"$P_abs($0)\";\n") +SLANG_RAW(" case cpp: __intrinsic_asm \"$P_abs($0)\";\n") +SLANG_RAW(" case spirv: return spirv_asm {\n") +SLANG_RAW(" result:$$T = OpExtInst glsl450 SAbs $x\n") +SLANG_RAW(" };\n") +SLANG_RAW(" case wgsl: __intrinsic_asm \"abs\";\n") +SLANG_RAW(" //default:\n") +SLANG_RAW(" // Note: this simple definition may not be appropriate for floating-point inputs\n") +SLANG_RAW(" // return x < 0 ? 
-x : x;\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_metal_spirv_wgsl, sm_4_0_version)]\n") +SLANG_RAW("vector abs(vector x)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"abs\";\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"abs\";\n") +SLANG_RAW(" case metal: __intrinsic_asm \"abs\";\n") +SLANG_RAW(" case spirv: return spirv_asm {\n") +SLANG_RAW(" result:$$vector = OpExtInst glsl450 SAbs $x;\n") +SLANG_RAW(" };\n") +SLANG_RAW(" case wgsl: __intrinsic_asm \"abs\";\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" VECTOR_MAP_UNARY(T, N, abs, x);\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_metal_spirv_wgsl, sm_4_0_version)]\n") +SLANG_RAW("matrix abs(matrix x)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"abs\";\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" MATRIX_MAP_UNARY(T, N, M, abs, x);\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_metal_spirv_wgsl, sm_4_0_version)]\n") +SLANG_RAW("T abs(T x)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"abs\";\n") +SLANG_RAW(" case metal: __intrinsic_asm \"abs\";\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"abs\";\n") +SLANG_RAW(" case cuda: __intrinsic_asm \"$P_abs($0)\";\n") +SLANG_RAW(" case cpp: __intrinsic_asm \"$P_abs($0)\";\n") +SLANG_RAW(" case spirv: return spirv_asm {\n") +SLANG_RAW(" result:$$T = OpExtInst glsl450 FAbs $x;\n") +SLANG_RAW(" };\n") +SLANG_RAW(" case wgsl: __intrinsic_asm \"abs\";\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_metal_spirv_wgsl, sm_4_0_version)]\n") +SLANG_RAW("vector abs(vector x)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"abs\";\n") +SLANG_RAW(" case metal: __intrinsic_asm \"abs\";\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"abs\";\n") +SLANG_RAW(" case spirv: return spirv_asm {\n") +SLANG_RAW(" result:$$vector = OpExtInst glsl450 FAbs $x;\n") +SLANG_RAW(" };\n") +SLANG_RAW(" case wgsl: __intrinsic_asm \"abs\";\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" VECTOR_MAP_UNARY(T, N, abs, x);\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_metal_spirv_wgsl, sm_4_0_version)]\n") +SLANG_RAW("matrix abs(matrix x)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"abs\";\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" MATRIX_MAP_UNARY(T, N, M, abs, x);\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("/// Float-point absolute value.\n") +SLANG_RAW("/// @param x The input value.\n") +SLANG_RAW("/// @return The absolute value of `x`.\n") +SLANG_RAW("/// @remarks For metal targets, this function is equivalent to the `fabs` metal intrinsic.\n") +SLANG_RAW("/// For other targets, this function is equivalent to the `abs` slang function.\n") +SLANG_RAW("/// @category math\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__readNone]\n") 
+SLANG_RAW("[ForceInline]\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_metal_spirv_wgsl, sm_4_0_version)]\n") +SLANG_RAW("T fabs(T x)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case metal: __intrinsic_asm \"fabs\";\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" return abs(x);\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[ForceInline]\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_metal_spirv_wgsl, sm_4_0_version)]\n") +SLANG_RAW("vector fabs(vector x)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case metal: __intrinsic_asm \"fabs\";\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" return abs(x);\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("\n") +SLANG_RAW("/// Arc cosine. Returns the angle whose cosine is the specified number.\n") +SLANG_RAW("/// @param x The cosine value.\n") +SLANG_RAW("/// @return The angle in radians, in the range of [0, pi].\n") +SLANG_RAW("/// @category math\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_metal_spirv_wgsl, sm_4_0_version)]\n") +SLANG_RAW("T acos(T x)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case cpp: __intrinsic_asm \"$P_acos($0)\";\n") +SLANG_RAW(" case cuda: __intrinsic_asm \"$P_acos($0)\";\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"acos\";\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"acos\";\n") +SLANG_RAW(" case metal: __intrinsic_asm \"acos\";\n") +SLANG_RAW(" case spirv: return spirv_asm {\n") +SLANG_RAW(" OpExtInst $$T result glsl450 Acos $x\n") +SLANG_RAW(" };\n") +SLANG_RAW(" case wgsl: __intrinsic_asm \"acos\";\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_metal_spirv_wgsl, sm_4_0_version)]\n") +SLANG_RAW("vector acos(vector x)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"acos\";\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"acos\";\n") +SLANG_RAW(" case metal: __intrinsic_asm \"acos\";\n") +SLANG_RAW(" case spirv: return spirv_asm {\n") +SLANG_RAW(" OpExtInst $$vector result glsl450 Acos $x\n") +SLANG_RAW(" };\n") +SLANG_RAW(" case wgsl: __intrinsic_asm \"acos\";\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" VECTOR_MAP_UNARY(T, N, acos, x);\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_metal_spirv_wgsl, sm_4_0_version)]\n") +SLANG_RAW("matrix acos(matrix x)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"acos\";\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" MATRIX_MAP_UNARY(T, N, M, acos, x);\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("/// Arc hyperbolic cosine. 
Returns the arc hyperbolic cosine of the specified value.\n") +SLANG_RAW("/// @param x The value.\n") +SLANG_RAW("/// @return The arc hyperbolic cosine of the specified value.\n") +SLANG_RAW("/// @category math\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[ForceInline]\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_metal_spirv_wgsl, sm_4_0_version)]\n") +SLANG_RAW("T acosh(T x)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case cpp: __intrinsic_asm \"$P_acosh($0)\";\n") +SLANG_RAW(" case cuda: __intrinsic_asm \"$P_acosh($0)\";\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"acosh\";\n") +SLANG_RAW(" case metal: __intrinsic_asm \"acosh\";\n") +SLANG_RAW(" case spirv: return spirv_asm {\n") +SLANG_RAW(" OpExtInst $$T result glsl450 Acosh $x\n") +SLANG_RAW(" };\n") +SLANG_RAW(" case wgsl: __intrinsic_asm \"acosh\";\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" return log(x + sqrt( x * x - T(1)));\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[ForceInline]\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_metal_spirv_wgsl, sm_4_0_version)]\n") +SLANG_RAW("vector acosh(vector x)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"acosh\";\n") +SLANG_RAW(" case metal: __intrinsic_asm \"acosh\";\n") +SLANG_RAW(" case spirv: return spirv_asm {\n") +SLANG_RAW(" OpExtInst $$vector result glsl450 Acosh $x\n") +SLANG_RAW(" };\n") +SLANG_RAW(" case wgsl: __intrinsic_asm \"acosh\";\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" VECTOR_MAP_UNARY(T, N, acosh, x);\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("\n") +SLANG_RAW("// Test if all components are non-zero.\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_metal_spirv_wgsl)]\n") +SLANG_RAW("bool all(T x)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" __intrinsic_asm \"bool($0)\";\n") +SLANG_RAW(" case hlsl:\n") +SLANG_RAW(" __intrinsic_asm \"all\";\n") +SLANG_RAW(" case metal:\n") +SLANG_RAW(" __intrinsic_asm \"all\";\n") +SLANG_RAW(" case wgsl:\n") +SLANG_RAW(" __intrinsic_asm \"all\";\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" let zero = __default();\n") +SLANG_RAW(" if (__isInt())\n") +SLANG_RAW(" return spirv_asm\n") +SLANG_RAW(" {\n") +SLANG_RAW(" OpINotEqual $$bool result $x $zero\n") +SLANG_RAW(" };\n") +SLANG_RAW(" else if (__isFloat())\n") +SLANG_RAW(" return spirv_asm\n") +SLANG_RAW(" {\n") +SLANG_RAW(" OpFUnordNotEqual $$bool result $x $zero\n") +SLANG_RAW(" };\n") +SLANG_RAW(" else if (__isBool())\n") +SLANG_RAW(" return __slang_noop_cast(x);\n") +SLANG_RAW(" else\n") +SLANG_RAW(" return false;\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_metal_spirv_wgsl)]\n") +SLANG_RAW("bool all(vector x)\n") +SLANG_RAW("{\n") +SLANG_RAW(" if(N == 1)\n") +SLANG_RAW(" return all(x[0]);\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl:\n") +SLANG_RAW(" __intrinsic_asm \"all\";\n") +SLANG_RAW(" case metal:\n") +SLANG_RAW(" __intrinsic_asm \"all\";\n") +SLANG_RAW(" case glsl:\n") +SLANG_RAW(" __intrinsic_asm \"all(bvec$N0($0))\";\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" if (__isBool())\n") +SLANG_RAW(" return spirv_asm\n") +SLANG_RAW(" {\n") +SLANG_RAW(" OpAll $$bool result $x\n") 
+SLANG_RAW(" };\n") +SLANG_RAW(" else if (__isInt())\n") +SLANG_RAW(" {\n") +SLANG_RAW(" let zero = __default>();\n") +SLANG_RAW(" return spirv_asm\n") +SLANG_RAW(" {\n") +SLANG_RAW(" OpINotEqual $$vector %castResult $x $zero;\n") +SLANG_RAW(" OpAll $$bool result %castResult\n") +SLANG_RAW(" };\n") +SLANG_RAW(" }\n") +SLANG_RAW(" else\n") +SLANG_RAW(" {\n") +SLANG_RAW(" let zero = __default>();\n") +SLANG_RAW(" return spirv_asm\n") +SLANG_RAW(" {\n") +SLANG_RAW(" OpFUnordNotEqual $$vector %castResult $x $zero;\n") +SLANG_RAW(" OpAll $$bool result %castResult\n") +SLANG_RAW(" };\n") +SLANG_RAW(" }\n") +SLANG_RAW(" case wgsl:\n") +SLANG_RAW(" __intrinsic_asm \"all\";\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" bool result = true;\n") +SLANG_RAW(" for(int i = 0; i < N; ++i)\n") +SLANG_RAW(" result = result && all(x[i]);\n") +SLANG_RAW(" return result;\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_metal_spirv)]\n") +SLANG_RAW("bool all(matrix x)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"all\";\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" bool result = true;\n") +SLANG_RAW(" for(int i = 0; i < N; ++i)\n") +SLANG_RAW(" result = result && all(x[i]);\n") +SLANG_RAW(" return result;\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("/// Barrier for writes to all memory spaces.\n") +SLANG_RAW("/// @category barrier Memory and control barriers\n") +SLANG_RAW("__glsl_extension(GL_KHR_memory_scope_semantics)\n") +SLANG_RAW("[require(cuda_glsl_hlsl_metal_spirv_wgsl, memorybarrier)]\n") +SLANG_RAW("void AllMemoryBarrier()\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"AllMemoryBarrier\";\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"memoryBarrier(gl_ScopeDevice, (gl_StorageSemanticsShared|gl_StorageSemanticsImage|gl_StorageSemanticsBuffer), gl_SemanticsAcquireRelease)\";\n") +SLANG_RAW(" case cuda: __intrinsic_asm \"__threadfence()\";\n") +SLANG_RAW(" case metal: __intrinsic_asm \"threadgroup_barrier(mem_flags::mem_device | mem_flags::mem_threadgroup | mem_flags::mem_texture | mem_flags::mem_threadgroup_imageblock)\";\n") +SLANG_RAW(" case spirv: spirv_asm\n") +SLANG_RAW(" {\n") +SLANG_RAW(" OpMemoryBarrier Device AcquireRelease|UniformMemory|WorkgroupMemory|ImageMemory;\n") +SLANG_RAW(" };\n") +SLANG_RAW(" case wgsl: __intrinsic_asm \"storageBarrier(); textureBarrier(); workgroupBarrier();\";\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("/// Thread-group sync and barrier for writes to all memory spaces.\n") +SLANG_RAW("/// @category barrier\n") +SLANG_RAW("__glsl_extension(GL_KHR_memory_scope_semantics)\n") +SLANG_RAW("[require(cuda_glsl_hlsl_metal_spirv_wgsl, memorybarrier)]\n") +SLANG_RAW("void AllMemoryBarrierWithGroupSync()\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"AllMemoryBarrierWithGroupSync\";\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"controlBarrier(gl_ScopeWorkgroup, gl_ScopeDevice, (gl_StorageSemanticsShared|gl_StorageSemanticsImage|gl_StorageSemanticsBuffer), gl_SemanticsAcquireRelease)\";\n") +SLANG_RAW(" case cuda: __intrinsic_asm \"__syncthreads()\";\n") +SLANG_RAW(" case metal: __intrinsic_asm \"threadgroup_barrier(mem_flags::mem_device | mem_flags::mem_threadgroup | mem_flags::mem_texture | 
mem_flags::mem_threadgroup_imageblock)\";\n") +SLANG_RAW(" case spirv: spirv_asm\n") +SLANG_RAW(" {\n") +SLANG_RAW(" OpControlBarrier Workgroup Device AcquireRelease|UniformMemory|WorkgroupMemory|ImageMemory;\n") +SLANG_RAW(" };\n") +SLANG_RAW(" case wgsl: __intrinsic_asm \"storageBarrier(); textureBarrier(); workgroupBarrier();\";\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("// Returns the workgroup size of the calling entry point.\n") +SLANG_RAW("[require(compute)]\n") +SLANG_RAW("__intrinsic_op(") +SLANG_SPLICE(kIROp_GetWorkGroupSize +) +SLANG_RAW(")\n") +SLANG_RAW("int3 WorkgroupSize();\n") +SLANG_RAW("\n") +SLANG_RAW("// Test if any components is non-zero.\n") +SLANG_RAW("\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_metal_spirv_wgsl)]\n") +SLANG_RAW("bool any(T x)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" __intrinsic_asm \"bool($0)\";\n") +SLANG_RAW(" case hlsl:\n") +SLANG_RAW(" __intrinsic_asm \"any\";\n") +SLANG_RAW(" case metal:\n") +SLANG_RAW(" __intrinsic_asm \"any\";\n") +SLANG_RAW(" case wgsl:\n") +SLANG_RAW(" __intrinsic_asm \"any\";\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" let zero = __default();\n") +SLANG_RAW(" if (__isInt())\n") +SLANG_RAW(" return spirv_asm\n") +SLANG_RAW(" {\n") +SLANG_RAW(" OpINotEqual $$bool result $x $zero\n") +SLANG_RAW(" };\n") +SLANG_RAW(" else if (__isFloat())\n") +SLANG_RAW(" return spirv_asm\n") +SLANG_RAW(" {\n") +SLANG_RAW(" OpFUnordNotEqual $$bool result $x $zero\n") +SLANG_RAW(" };\n") +SLANG_RAW(" else if (__isBool())\n") +SLANG_RAW(" return __slang_noop_cast(x);\n") +SLANG_RAW(" return false;\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_metal_spirv_wgsl)]\n") +SLANG_RAW("bool any(vector x)\n") +SLANG_RAW("{\n") +SLANG_RAW(" if(N == 1)\n") +SLANG_RAW(" return any(x[0]);\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl:\n") +SLANG_RAW(" __intrinsic_asm \"any\";\n") +SLANG_RAW(" case metal:\n") +SLANG_RAW(" __intrinsic_asm \"any\";\n") +SLANG_RAW(" case glsl:\n") +SLANG_RAW(" __intrinsic_asm \"any(bvec$N0($0))\";\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" if (__isBool())\n") +SLANG_RAW(" return spirv_asm\n") +SLANG_RAW(" {\n") +SLANG_RAW(" OpAny $$bool result $x\n") +SLANG_RAW(" };\n") +SLANG_RAW(" else if (__isInt())\n") +SLANG_RAW(" {\n") +SLANG_RAW(" let zero = __default>();\n") +SLANG_RAW(" return spirv_asm\n") +SLANG_RAW(" {\n") +SLANG_RAW(" OpINotEqual $$vector %castResult $x $zero;\n") +SLANG_RAW(" OpAny $$bool result %castResult\n") +SLANG_RAW(" };\n") +SLANG_RAW(" }\n") +SLANG_RAW(" else\n") +SLANG_RAW(" {\n") +SLANG_RAW(" let zero = __default>();\n") +SLANG_RAW(" return spirv_asm\n") +SLANG_RAW(" {\n") +SLANG_RAW(" OpFUnordNotEqual $$vector %castResult $x $zero;\n") +SLANG_RAW(" OpAny $$bool result %castResult\n") +SLANG_RAW(" };\n") +SLANG_RAW(" }\n") +SLANG_RAW(" case wgsl:\n") +SLANG_RAW(" __intrinsic_asm \"any\";\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" bool result = false;\n") +SLANG_RAW(" for(int i = 0; i < N; ++i)\n") +SLANG_RAW(" result = result || any(x[i]);\n") +SLANG_RAW(" return result;\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_spirv)]\n") +SLANG_RAW("bool any(matrix x)\n") +SLANG_RAW("{\n") +SLANG_RAW(" 
__target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"any\";\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" bool result = false;\n") +SLANG_RAW(" for(int i = 0; i < N; ++i)\n") +SLANG_RAW(" result = result || any(x[i]);\n") +SLANG_RAW(" return result;\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("\n") +SLANG_RAW("/// Reinterpret bits as a double.\n") +SLANG_RAW("/// @category conversion\n") +SLANG_RAW("__glsl_extension(GL_ARB_gpu_shader5)\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_spirv, shader5_sm_5_0)]\n") +SLANG_RAW("double asdouble(uint lowbits, uint highbits)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"asdouble\";\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"packDouble2x32(uvec2($0, $1))\";\n") +SLANG_RAW(" case cpp: __intrinsic_asm \"$P_asdouble($0, $1)\";\n") +SLANG_RAW(" case cuda: __intrinsic_asm \"$P_asdouble($0, $1)\";\n") +SLANG_RAW(" case spirv: return spirv_asm {\n") +SLANG_RAW(" %v:$$uint2 = OpCompositeConstruct $lowbits $highbits;\n") +SLANG_RAW(" result:$$double = OpExtInst glsl450 59 %v\n") +SLANG_RAW(" };\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("__glsl_extension(GL_ARB_gpu_shader5)\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_spirv, shader5_sm_5_0)]\n") +SLANG_RAW("double2 asdouble(uint2 lowbits, uint2 highbits)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl:\n") +SLANG_RAW(" __intrinsic_asm \"asdouble($0, $1)\";\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" return double2(asdouble(lowbits.x, highbits.x), asdouble(lowbits.y, highbits.y));\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("/// Reinterpret bits as a float.\n") +SLANG_RAW("/// @category conversion\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_metal_spirv_wgsl, shader5_sm_4_0)]\n") +SLANG_RAW("float asfloat(int x)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case cpp: __intrinsic_asm \"$P_asfloat($0)\";\n") +SLANG_RAW(" case cuda: __intrinsic_asm \"$P_asfloat($0)\";\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"intBitsToFloat\";\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"asfloat\";\n") +SLANG_RAW(" case metal: __intrinsic_asm \"as_type<$TR>($0)\";\n") +SLANG_RAW(" case spirv: return spirv_asm {\n") +SLANG_RAW(" OpBitcast $$float result $x\n") +SLANG_RAW(" };\n") +SLANG_RAW(" case wgsl: __intrinsic_asm \"bitcast<$TR>($0)\";\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_metal_spirv_wgsl, shader5_sm_4_0)]\n") +SLANG_RAW("float asfloat(uint x)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case cpp: __intrinsic_asm \"$P_asfloat($0)\";\n") +SLANG_RAW(" case cuda: __intrinsic_asm \"$P_asfloat($0)\";\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"uintBitsToFloat\";\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"asfloat\";\n") +SLANG_RAW(" case metal: __intrinsic_asm \"as_type<$TR>($0)\";\n") +SLANG_RAW(" case spirv: return spirv_asm {\n") +SLANG_RAW(" OpBitcast $$float result $x\n") +SLANG_RAW(" };\n") +SLANG_RAW(" case wgsl: __intrinsic_asm \"bitcast<$TR>($0)\";\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_metal_spirv_wgsl, shader5_sm_4_0)]\n") 
+SLANG_RAW("vector asfloat(vector< int, N> x)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"intBitsToFloat\";\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"asfloat\";\n") +SLANG_RAW(" case metal: __intrinsic_asm \"as_type<$TR>($0)\";\n") +SLANG_RAW(" case spirv: return spirv_asm {\n") +SLANG_RAW(" OpBitcast $$vector result $x\n") +SLANG_RAW(" };\n") +SLANG_RAW(" case wgsl: __intrinsic_asm \"bitcast<$TR>($0)\";\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" VECTOR_MAP_UNARY(float, N, asfloat, x);\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_metal_spirv_wgsl, shader5_sm_4_0)]\n") +SLANG_RAW("vector asfloat(vector x)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"uintBitsToFloat\";\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"asfloat\";\n") +SLANG_RAW(" case metal: __intrinsic_asm \"as_type<$TR>($0)\";\n") +SLANG_RAW(" case spirv: return spirv_asm {\n") +SLANG_RAW(" OpBitcast $$vector result $x\n") +SLANG_RAW(" };\n") +SLANG_RAW(" case wgsl: __intrinsic_asm \"bitcast<$TR>($0)\";\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" VECTOR_MAP_UNARY(float, N, asfloat, x);\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_spirv, shader5_sm_5_0)]\n") +SLANG_RAW("matrix asfloat(matrix< int,N,M> x)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"asfloat\";\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" MATRIX_MAP_UNARY(float, N, M, asfloat, x);\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_spirv, shader5_sm_5_0)]\n") +SLANG_RAW("matrix asfloat(matrix x)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"asfloat\";\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" MATRIX_MAP_UNARY(float, N, M, asfloat, x);\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("[__unsafeForceInlineEarly]\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("float asfloat(float x)\n") +SLANG_RAW("{ return x; }\n") +SLANG_RAW("\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__unsafeForceInlineEarly]\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("vector asfloat(vector x)\n") +SLANG_RAW("{ return x; }\n") +SLANG_RAW("\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__unsafeForceInlineEarly]\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("matrix asfloat(matrix x)\n") +SLANG_RAW("{ return x; }\n") +SLANG_RAW("\n") +SLANG_RAW("/// Arc sine. 
Returns the angle whose sine is the specified number.\n") +SLANG_RAW("/// @param x The sine value.\n") +SLANG_RAW("/// @return The angle in radians, in the range of [-pi/2, pi/2].\n") +SLANG_RAW("/// @category math\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_metal_spirv_wgsl, sm_4_0_version)]\n") +SLANG_RAW("T asin(T x)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case cpp: __intrinsic_asm \"$P_asin($0)\";\n") +SLANG_RAW(" case cuda: __intrinsic_asm \"$P_asin($0)\";\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"asin\";\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"asin\";\n") +SLANG_RAW(" case metal: __intrinsic_asm \"asin\";\n") +SLANG_RAW(" case spirv: return spirv_asm {\n") +SLANG_RAW(" OpExtInst $$T result glsl450 Asin $x\n") +SLANG_RAW(" };\n") +SLANG_RAW(" case wgsl: __intrinsic_asm \"asin\";\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_metal_spirv_wgsl, sm_4_0_version)]\n") +SLANG_RAW("vector asin(vector x)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"asin\";\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"asin\";\n") +SLANG_RAW(" case metal: __intrinsic_asm \"asin\";\n") +SLANG_RAW(" case spirv: return spirv_asm {\n") +SLANG_RAW(" OpExtInst $$vector result glsl450 Asin $x\n") +SLANG_RAW(" };\n") +SLANG_RAW(" case wgsl: __intrinsic_asm \"asin\";\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" VECTOR_MAP_UNARY(T,N,asin,x);\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_metal_spirv_wgsl, sm_4_0_version)]\n") +SLANG_RAW("matrix asin(matrix x)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"asin\";\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" MATRIX_MAP_UNARY(T,N,M,asin,x);\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("/// Arc hyperbolic sine. 
Returns the arc hyperbolic sine of the specified value.\n") +SLANG_RAW("/// @param x The hyperbolic sine value.\n") +SLANG_RAW("/// @return The arc hyperbolic sine of the specified value.\n") +SLANG_RAW("/// @category math\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[ForceInline]\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_metal_spirv_wgsl, sm_4_0_version)]\n") +SLANG_RAW("T asinh(T x)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case cpp: __intrinsic_asm \"$P_asinh($0)\";\n") +SLANG_RAW(" case cuda: __intrinsic_asm \"$P_asinh($0)\";\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"asinh\";\n") +SLANG_RAW(" case metal: __intrinsic_asm \"asinh\";\n") +SLANG_RAW(" case spirv: return spirv_asm {\n") +SLANG_RAW(" OpExtInst $$T result glsl450 Asinh $x\n") +SLANG_RAW(" };\n") +SLANG_RAW(" case wgsl: __intrinsic_asm \"asinh\";\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" return log(x + sqrt(x * x + T(1)));\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[ForceInline]\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_metal_spirv_wgsl, sm_4_0_version)]\n") +SLANG_RAW("vector asinh(vector x)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"asinh\";\n") +SLANG_RAW(" case metal: __intrinsic_asm \"asinh\";\n") +SLANG_RAW(" case spirv: return spirv_asm {\n") +SLANG_RAW(" OpExtInst $$vector result glsl450 Asinh $x\n") +SLANG_RAW(" };\n") +SLANG_RAW(" case wgsl: __intrinsic_asm \"asinh\";\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" VECTOR_MAP_UNARY(T, N, asinh, x);\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("/// Reinterpret bits as an int.\n") +SLANG_RAW("/// @category conversion\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_metal_spirv_wgsl, shader5_sm_4_0)]\n") +SLANG_RAW("int asint(float x)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case cpp: __intrinsic_asm \"$P_asint($0)\";\n") +SLANG_RAW(" case cuda: __intrinsic_asm \"$P_asint($0)\";\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"floatBitsToInt\";\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"asint\";\n") +SLANG_RAW(" case metal: __intrinsic_asm \"as_type<$TR>($0)\";\n") +SLANG_RAW(" case wgsl: __intrinsic_asm \"bitcast<$TR>($0)\";\n") +SLANG_RAW(" case spirv: return spirv_asm {\n") +SLANG_RAW(" OpBitcast $$int result $x\n") +SLANG_RAW(" };\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_metal_spirv_wgsl, shader5_sm_4_0)]\n") +SLANG_RAW("int asint(uint x)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case cpp: __intrinsic_asm \"$P_asint($0)\";\n") +SLANG_RAW(" case cuda: __intrinsic_asm \"$P_asint($0)\";\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"int($0)\";\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"asint\";\n") +SLANG_RAW(" case metal: __intrinsic_asm \"as_type<$TR>($0)\";\n") +SLANG_RAW(" case spirv: return spirv_asm {\n") +SLANG_RAW(" OpBitcast $$int result $x\n") +SLANG_RAW(" };\n") +SLANG_RAW(" case wgsl: __intrinsic_asm \"bitcast<$TR>($0)\";\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_metal_spirv_wgsl, shader5_sm_4_0)]\n") +SLANG_RAW("vector asint(vector x)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") 
+SLANG_RAW(" {\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"floatBitsToInt\";\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"asint\";\n") +SLANG_RAW(" case metal: __intrinsic_asm \"as_type<$TR>($0)\";\n") +SLANG_RAW(" case spirv: return spirv_asm {\n") +SLANG_RAW(" OpBitcast $$vector result $x\n") +SLANG_RAW(" };\n") +SLANG_RAW(" case wgsl: __intrinsic_asm \"bitcast<$TR>($0)\";\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" VECTOR_MAP_UNARY(int, N, asint, x);\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_metal_spirv_wgsl, shader5_sm_4_0)]\n") +SLANG_RAW("vector asint(vector x)\n") +SLANG_RAW("{\n") +SLANG_RAW(" if(N == 1)\n") +SLANG_RAW(" return vector(asint(x[0]));\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"ivec$N0($0)\";\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"asint\";\n") +SLANG_RAW(" case metal: __intrinsic_asm \"as_type<$TR>($0)\";\n") +SLANG_RAW(" case spirv: return spirv_asm {\n") +SLANG_RAW(" OpBitcast $$vector result $x\n") +SLANG_RAW(" };\n") +SLANG_RAW(" case wgsl: __intrinsic_asm \"bitcast<$TR>($0)\";\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" VECTOR_MAP_UNARY(int, N, asint, x);\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_spirv, shader5_sm_4_0)]\n") +SLANG_RAW("matrix asint(matrix x)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"asint\";\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" MATRIX_MAP_UNARY(int, N, M, asint, x);\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_spirv, shader5_sm_4_0)]\n") +SLANG_RAW("matrix asint(matrix x)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"asint\";\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" MATRIX_MAP_UNARY(int, N, M, asint, x);\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("// No op\n") +SLANG_RAW("[__unsafeForceInlineEarly]\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("int asint(int x)\n") +SLANG_RAW("{ return x; }\n") +SLANG_RAW("\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__unsafeForceInlineEarly]\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("vector asint(vector x)\n") +SLANG_RAW("{ return x; }\n") +SLANG_RAW("\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__unsafeForceInlineEarly]\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("matrix asint(matrix x)\n") +SLANG_RAW("{ return x; }\n") +SLANG_RAW("\n") +SLANG_RAW("/// Reinterpret bits of double as a uint.\n") +SLANG_RAW("/// @category conversion\n") +SLANG_RAW("__glsl_extension(GL_ARB_gpu_shader5)\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_spirv, shader5_sm_4_0)]\n") +SLANG_RAW("void asuint(double value, out uint lowbits, out uint highbits)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"asuint\";\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"{ uvec2 v = unpackDouble2x32($0); $1 = v.x; $2 = v.y; }\";\n") +SLANG_RAW(" case cpp:\n") +SLANG_RAW(" case cuda:\n") +SLANG_RAW(" __intrinsic_asm \"$P_asuint($0, $1, $2)\";\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" let uv = spirv_asm\n") +SLANG_RAW(" {\n") +SLANG_RAW(" result : $$uint2 = OpBitcast $value;\n") +SLANG_RAW(" };\n") 
+SLANG_RAW(" lowbits = uv.x;\n") +SLANG_RAW(" highbits = uv.y;\n") +SLANG_RAW(" return;\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("// Reinterpret bits as a uint.\n") +SLANG_RAW("\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_metal_spirv_wgsl, shader5_sm_4_0)]\n") +SLANG_RAW("uint asuint(float x)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case cpp: __intrinsic_asm \"$P_asuint($0)\";\n") +SLANG_RAW(" case cuda: __intrinsic_asm \"$P_asuint($0)\";\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"floatBitsToUint\";\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"asuint\";\n") +SLANG_RAW(" case metal: __intrinsic_asm \"as_type<$TR>($0)\";\n") +SLANG_RAW(" case wgsl: __intrinsic_asm \"bitcast<$TR>($0)\";\n") +SLANG_RAW(" case spirv: return spirv_asm {\n") +SLANG_RAW(" OpBitcast $$uint result $x\n") +SLANG_RAW(" };\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_metal_spirv_wgsl, shader5_sm_4_0)]\n") +SLANG_RAW("uint asuint(int x)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case cpp: __intrinsic_asm \"$P_asuint($0)\";\n") +SLANG_RAW(" case cuda: __intrinsic_asm \"$P_asuint($0)\";\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"uint($0)\";\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"asuint\";\n") +SLANG_RAW(" case metal: __intrinsic_asm \"as_type<$TR>($0)\";\n") +SLANG_RAW(" case spirv: return spirv_asm {\n") +SLANG_RAW(" OpBitcast $$uint result $x\n") +SLANG_RAW(" };\n") +SLANG_RAW(" case wgsl: __intrinsic_asm \"bitcast<$TR>($0)\";\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_metal_spirv_wgsl, shader5_sm_4_0)]\n") +SLANG_RAW("vector asuint(vector x)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"floatBitsToUint\";\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"asuint\";\n") +SLANG_RAW(" case metal: __intrinsic_asm \"as_type<$TR>($0)\";\n") +SLANG_RAW(" case spirv: return spirv_asm {\n") +SLANG_RAW(" OpBitcast $$vector result $x\n") +SLANG_RAW(" };\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" VECTOR_MAP_UNARY(uint, N, asuint, x);\n") +SLANG_RAW(" case wgsl: __intrinsic_asm \"bitcast<$TR>($0)\";\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_metal_spirv_wgsl, shader5_sm_4_0)]\n") +SLANG_RAW("vector asuint(vector x)\n") +SLANG_RAW("{\n") +SLANG_RAW(" if(N == 1)\n") +SLANG_RAW(" return vector(asuint(x[0]));\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"uvec$N0($0)\";\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"asuint\";\n") +SLANG_RAW(" case metal: __intrinsic_asm \"as_type<$TR>($0)\";\n") +SLANG_RAW(" case spirv: return spirv_asm {\n") +SLANG_RAW(" OpBitcast $$vector result $x\n") +SLANG_RAW(" };\n") +SLANG_RAW(" case wgsl: __intrinsic_asm \"bitcast<$TR>($0)\";\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" VECTOR_MAP_UNARY(uint, N, asuint, x);\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_spirv, shader5_sm_4_0)]\n") +SLANG_RAW("matrix asuint(matrix x)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl: 
__intrinsic_asm \"asuint\";\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" MATRIX_MAP_UNARY(uint, N, M, asuint, x);\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_spirv, shader5_sm_4_0)]\n") +SLANG_RAW("matrix asuint(matrix x)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"asuint\";\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" MATRIX_MAP_UNARY(uint, N, M, asuint, x);\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("[__unsafeForceInlineEarly]\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("uint asuint(uint x)\n") +SLANG_RAW("{ return x; }\n") +SLANG_RAW("\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__unsafeForceInlineEarly]\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("vector asuint(vector x)\n") +SLANG_RAW("{ return x; }\n") +SLANG_RAW("\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__unsafeForceInlineEarly]\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("matrix asuint(matrix x)\n") +SLANG_RAW("{ return x; }\n") +SLANG_RAW("\n") +SLANG_RAW("\n") +SLANG_RAW("// 16-bit bitcast ops (HLSL SM 6.2)\n") +SLANG_RAW("//\n") +SLANG_RAW("// TODO: We need to map these to GLSL/SPIR-V\n") +SLANG_RAW("// operations that don't require an intermediate\n") +SLANG_RAW("// conversion to fp32.\n") +SLANG_RAW("\n") +SLANG_RAW("// Identity cases:\n") +SLANG_RAW("\n") +SLANG_RAW("/// Reinterpret bits as a float16 (HLSL SM 6.2).\n") +SLANG_RAW("/// @category conversion\n") +SLANG_RAW("[__unsafeForceInlineEarly][__readNone] float16_t asfloat16(float16_t value) { return value; }\n") +SLANG_RAW("[__unsafeForceInlineEarly][__readNone] vector asfloat16(vector value) { return value; }\n") +SLANG_RAW("[__unsafeForceInlineEarly][__readNone] matrix asfloat16(matrix value) { return value; }\n") +SLANG_RAW("\n") +SLANG_RAW("/// Reinterpret bits as a int16_t (HLSL SM 6.2).\n") +SLANG_RAW("/// @category conversion\n") +SLANG_RAW("[__unsafeForceInlineEarly][__readNone] int16_t asint16(int16_t value) { return value; }\n") +SLANG_RAW("[__unsafeForceInlineEarly][__readNone] vector asint16(vector value) { return value; }\n") +SLANG_RAW("[__unsafeForceInlineEarly][__readNone] matrix asint16(matrix value) { return value; }\n") +SLANG_RAW("\n") +SLANG_RAW("/// Reinterpret bits as a uint16_t (HLSL SM 6.2).\n") +SLANG_RAW("/// @category conversion\n") +SLANG_RAW("[__unsafeForceInlineEarly][__readNone] uint16_t asuint16(uint16_t value) { return value; }\n") +SLANG_RAW("[__unsafeForceInlineEarly][__readNone] vector asuint16(vector value) { return value; }\n") +SLANG_RAW("[__unsafeForceInlineEarly][__readNone] matrix asuint16(matrix value) { return value; }\n") +SLANG_RAW("\n") +SLANG_RAW("// Signed<->unsigned cases:\n") +SLANG_RAW("\n") +SLANG_RAW("[__unsafeForceInlineEarly][__readNone] int16_t asint16(uint16_t value) { return value; }\n") +SLANG_RAW("[__unsafeForceInlineEarly][__readNone] vector asint16(vector value) { return value; }\n") +SLANG_RAW("[__unsafeForceInlineEarly][__readNone] matrix asint16(matrix value) { return value; }\n") +SLANG_RAW("\n") +SLANG_RAW("[__unsafeForceInlineEarly][__readNone] uint16_t asuint16(int16_t value) { return value; }\n") +SLANG_RAW("[__unsafeForceInlineEarly][__readNone] vector asuint16(vector value) { return value; }\n") +SLANG_RAW("[__unsafeForceInlineEarly][__readNone] matrix asuint16(matrix value) { return value; }\n") +SLANG_RAW("\n") +SLANG_RAW("// Float->unsigned cases:\n") +SLANG_RAW("\n") 
+SLANG_RAW("[__readNone]\n") +SLANG_RAW("[require(cuda_glsl_hlsl_spirv, shader5_sm_5_0)]\n") +SLANG_RAW("uint16_t asuint16(float16_t value)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case cuda: __intrinsic_asm \"__half_as_ushort\";\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"uint16_t(packHalf2x16(vec2($0, 0.0)))\";\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"asuint16\";\n") +SLANG_RAW(" case spirv: return spirv_asm {\n") +SLANG_RAW(" OpBitcast $$uint16_t result $value\n") +SLANG_RAW(" };\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[require(cuda_glsl_hlsl_spirv, shader5_sm_5_0)]\n") +SLANG_RAW("vector asuint16(vector value)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"asuint16\";\n") +SLANG_RAW(" case spirv: return spirv_asm {\n") +SLANG_RAW(" result:$$vector = OpBitcast $value\n") +SLANG_RAW(" };\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" VECTOR_MAP_UNARY(uint16_t, N, asuint16, value);\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[require(cuda_glsl_hlsl_spirv, shader5_sm_5_0)]\n") +SLANG_RAW("matrix asuint16(matrix value)\n") +SLANG_RAW("{ MATRIX_MAP_UNARY(uint16_t, R, C, asuint16, value); }\n") +SLANG_RAW("\n") +SLANG_RAW("// Unsigned->float cases:\n") +SLANG_RAW("\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[require(cuda_glsl_hlsl_spirv, shader5_sm_5_0)]\n") +SLANG_RAW("float16_t asfloat16(uint16_t value)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case cuda: __intrinsic_asm \"__ushort_as_half\";\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"float16_t(unpackHalf2x16($0).x)\";\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"asfloat16\";\n") +SLANG_RAW(" case spirv: return spirv_asm {\n") +SLANG_RAW(" OpBitcast $$float16_t result $value\n") +SLANG_RAW(" };\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[require(cuda_glsl_hlsl_spirv, shader5_sm_5_0)]\n") +SLANG_RAW("vector asfloat16(vector value)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"asfloat16\";\n") +SLANG_RAW(" case spirv: return spirv_asm {\n") +SLANG_RAW(" result:$$vector = OpBitcast $value\n") +SLANG_RAW(" };\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" VECTOR_MAP_UNARY(float16_t, N, asfloat16, value);\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("matrix asfloat16(matrix value)\n") +SLANG_RAW("{ MATRIX_MAP_UNARY(float16_t, R, C, asfloat16, value); }\n") +SLANG_RAW("\n") +SLANG_RAW("// Float<->signed cases:\n") +SLANG_RAW("\n") +SLANG_RAW("[__unsafeForceInlineEarly]\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[require(cuda_hlsl_metal_spirv, shader5_sm_5_0)]\n") +SLANG_RAW("int16_t asint16(float16_t value) \n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case cuda: __intrinsic_asm \"__half_as_short\";\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"asint16\";\n") +SLANG_RAW(" case metal: __intrinsic_asm \"as_type<$TR>($0)\";\n") +SLANG_RAW(" case spirv: return spirv_asm {\n") +SLANG_RAW(" OpBitcast $$int16_t result $value\n") +SLANG_RAW(" };\n") +SLANG_RAW(" default: return asuint16(value);\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("[__unsafeForceInlineEarly]\n") +SLANG_RAW("[__readNone] \n") 
+SLANG_RAW("[require(cuda_hlsl_metal_spirv, shader5_sm_5_0)]\n") +SLANG_RAW("vector asint16(vector value) \n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"asint16\";\n") +SLANG_RAW(" case metal: __intrinsic_asm \"as_type<$TR>($0)\";\n") +SLANG_RAW(" default: return asuint16(value);\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("[__unsafeForceInlineEarly]\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[require(cuda_hlsl_spirv, shader5_sm_5_0)]\n") +SLANG_RAW("matrix asint16(matrix value) \n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"asint16\";\n") +SLANG_RAW(" default: return asuint16(value);\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[__unsafeForceInlineEarly] \n") +SLANG_RAW("[require(cuda_hlsl_metal_spirv, shader5_sm_5_0)]\n") +SLANG_RAW("float16_t asfloat16(int16_t value) \n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case cuda: __intrinsic_asm \"__short_as_half\";\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"asfloat16\";\n") +SLANG_RAW(" case metal: __intrinsic_asm \"as_type<$TR>($0)\";\n") +SLANG_RAW(" case spirv: return spirv_asm {\n") +SLANG_RAW(" OpBitcast $$float16_t result $value\n") +SLANG_RAW(" };\n") +SLANG_RAW(" default: return asfloat16(asuint16(value));\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("[__unsafeForceInlineEarly]\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[require(cuda_hlsl_metal_spirv, shader5_sm_5_0)]\n") +SLANG_RAW("vector asfloat16(vector value) \n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"asfloat16\";\n") +SLANG_RAW(" case metal: __intrinsic_asm \"as_type<$TR>($0)\";\n") +SLANG_RAW(" case spirv: return spirv_asm {\n") +SLANG_RAW(" OpBitcast $$vector result $value\n") +SLANG_RAW(" };\n") +SLANG_RAW(" default: return asfloat16(asuint16(value));\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("[__unsafeForceInlineEarly]\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[require(cuda_hlsl_spirv, shader5_sm_5_0)]\n") +SLANG_RAW("matrix asfloat16(matrix value) \n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"asfloat16\";\n") +SLANG_RAW(" default: return asfloat16(asuint16(value));\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("/// Arc tangent. 
Returns the angle whose tangent is the specified number.\n") +SLANG_RAW("/// @param x The tangent value.\n") +SLANG_RAW("/// @return The angle in radians, in the range of [-pi/2, pi/2].\n") +SLANG_RAW("/// @category math\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_metal_spirv_wgsl, sm_4_0_version)]\n") +SLANG_RAW("T atan(T x)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case cpp: __intrinsic_asm \"$P_atan($0)\";\n") +SLANG_RAW(" case cuda: __intrinsic_asm \"$P_atan($0)\";\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"atan\";\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"atan\";\n") +SLANG_RAW(" case metal: __intrinsic_asm \"atan\";\n") +SLANG_RAW(" case spirv: return spirv_asm {\n") +SLANG_RAW(" OpExtInst $$T result glsl450 Atan $x\n") +SLANG_RAW(" };\n") +SLANG_RAW(" case wgsl: __intrinsic_asm \"atan\";\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_metal_spirv_wgsl, sm_4_0_version)]\n") +SLANG_RAW("vector atan(vector x)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"atan\";\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"atan\";\n") +SLANG_RAW(" case metal: __intrinsic_asm \"atan\";\n") +SLANG_RAW(" case spirv: return spirv_asm {\n") +SLANG_RAW(" OpExtInst $$vector result glsl450 Atan $x\n") +SLANG_RAW(" };\n") +SLANG_RAW(" case wgsl: __intrinsic_asm \"atan\";\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" VECTOR_MAP_UNARY(T, N, atan, x);\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_metal_spirv_wgsl, sm_4_0_version)]\n") +SLANG_RAW("matrix atan(matrix x)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"atan\";\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" MATRIX_MAP_UNARY(T, N, M, atan, x);\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("/// Arc tangent of y/x. 
Returns the angle whose tangent is the quotient of two specified numbers.\n") +SLANG_RAW("/// @param y The numerator.\n") +SLANG_RAW("/// @param x The denominator.\n") +SLANG_RAW("/// @return The angle in radians, in the range of [-pi, pi].\n") +SLANG_RAW("/// @category math\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_metal_spirv_wgsl, sm_4_0_version)]\n") +SLANG_RAW("T atan2(T y, T x)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case cpp: __intrinsic_asm \"$P_atan2($0, $1)\";\n") +SLANG_RAW(" case cuda: __intrinsic_asm \"$P_atan2($0, $1)\";\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"atan($0,$1)\";\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"atan2\";\n") +SLANG_RAW(" case metal: __intrinsic_asm \"atan2\";\n") +SLANG_RAW(" case spirv: return spirv_asm {\n") +SLANG_RAW(" OpExtInst $$T result glsl450 Atan2 $y $x\n") +SLANG_RAW(" };\n") +SLANG_RAW(" case wgsl: __intrinsic_asm \"atan2\";\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_metal_spirv_wgsl, sm_4_0_version)]\n") +SLANG_RAW("vector atan2(vector y, vector x)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"atan($0,$1)\";\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"atan2\";\n") +SLANG_RAW(" case metal: __intrinsic_asm \"atan2\";\n") +SLANG_RAW(" case spirv: return spirv_asm {\n") +SLANG_RAW(" OpExtInst $$vector result glsl450 Atan2 $y $x\n") +SLANG_RAW(" };\n") +SLANG_RAW(" case wgsl: __intrinsic_asm \"atan2\";\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" VECTOR_MAP_BINARY(T, N, atan2, y, x);\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_metal_spirv_wgsl, sm_4_0_version)]\n") +SLANG_RAW("matrix atan2(matrix y, matrix x)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"atan2\";\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" MATRIX_MAP_BINARY(T, N, M, atan2, y, x);\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("/// Hyperbolic arc tangent. 
Returns the hyperbolic arc tangent of the specified value.\n") +SLANG_RAW("/// @param x The value.\n") +SLANG_RAW("/// @return The hyperbolic arc tangent of the specified value.\n") +SLANG_RAW("/// @category math\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[ForceInline]\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_metal_spirv_wgsl, sm_4_0_version)]\n") +SLANG_RAW("T atanh(T x)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case cpp: __intrinsic_asm \"$P_atanh($0)\";\n") +SLANG_RAW(" case cuda: __intrinsic_asm \"$P_atanh($0)\";\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"atanh\";\n") +SLANG_RAW(" case metal: __intrinsic_asm \"atanh\";\n") +SLANG_RAW(" case spirv: return spirv_asm {\n") +SLANG_RAW(" OpExtInst $$T result glsl450 Atanh $x\n") +SLANG_RAW(" };\n") +SLANG_RAW(" case wgsl: __intrinsic_asm \"atanh\";\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" return T(0.5) * log((T(1) + x) / (T(1) - x));\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[ForceInline]\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_metal_spirv_wgsl, sm_4_0_version)]\n") +SLANG_RAW("vector atanh(vector x)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"atanh\";\n") +SLANG_RAW(" case metal: __intrinsic_asm \"atanh\";\n") +SLANG_RAW(" case spirv: return spirv_asm {\n") +SLANG_RAW(" OpExtInst $$vector result glsl450 Atanh $x\n") +SLANG_RAW(" };\n") +SLANG_RAW(" case wgsl: __intrinsic_asm \"atanh\";\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" VECTOR_MAP_UNARY(T, N, atanh, x);\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("/// Ceiling. Returns the smallest integer that is greater than or equal to the specified value.\n") +SLANG_RAW("/// @param x The value.\n") +SLANG_RAW("/// @return The smallest integer that is greater than or equal to the specified value.\n") +SLANG_RAW("/// @category math\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_metal_spirv_wgsl, sm_4_0_version)]\n") +SLANG_RAW("T ceil(T x)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case cpp: __intrinsic_asm \"$P_ceil($0)\";\n") +SLANG_RAW(" case cuda: __intrinsic_asm \"$P_ceil($0)\";\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"ceil\";\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"ceil\";\n") +SLANG_RAW(" case metal: __intrinsic_asm \"ceil\";\n") +SLANG_RAW(" case spirv: return spirv_asm {\n") +SLANG_RAW(" OpExtInst $$T result glsl450 Ceil $x\n") +SLANG_RAW(" };\n") +SLANG_RAW(" case wgsl: __intrinsic_asm \"ceil\";\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_metal_spirv_wgsl, sm_4_0_version)]\n") +SLANG_RAW("vector ceil(vector x)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"ceil\";\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"ceil\";\n") +SLANG_RAW(" case metal: __intrinsic_asm \"ceil\";\n") +SLANG_RAW(" case spirv: return spirv_asm {\n") +SLANG_RAW(" OpExtInst $$vector result glsl450 Ceil $x\n") +SLANG_RAW(" };\n") +SLANG_RAW(" case wgsl: __intrinsic_asm \"ceil\";\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" VECTOR_MAP_UNARY(T, N, ceil, x);\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__readNone]\n") 
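The default branch of `atanh` above falls back to the identity atanh(x) = 0.5 * log((1 + x) / (1 - x)); a short illustrative Slang sketch (helper name assumed, valid for |x| < 1) comparing the intrinsic with that form.

    // Illustrative sketch only: the intrinsic and the log-based fallback should
    // agree up to rounding for |x| < 1.
    float atanhIdentityDemo(float x)
    {
        float viaIntrinsic = atanh(x);
        float viaIdentity  = 0.5 * log((1.0 + x) / (1.0 - x));
        return abs(viaIntrinsic - viaIdentity);
    }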
+SLANG_RAW("[require(cpp_cuda_glsl_hlsl_metal_spirv_wgsl, sm_4_0_version)]\n") +SLANG_RAW("matrix ceil(matrix x)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"ceil\";\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" MATRIX_MAP_UNARY(T, N, M, ceil, x);\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("/// Copy-sign. Returns a value whose magnitude is from one operand and whose sign is from another operand.\n") +SLANG_RAW("/// @param x The value to use as the magnitude.\n") +SLANG_RAW("/// @param y The value to use as the sign.\n") +SLANG_RAW("/// @return A value whose magnitude is from `x` and whose sign is from `y`.\n") +SLANG_RAW("/// @category math\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_metal_spirv)]\n") +SLANG_RAW("vector copysign_half(vector x, vector y)\n") +SLANG_RAW("{\n") +SLANG_RAW(" let ux = reinterpret>(x);\n") +SLANG_RAW(" let uy = reinterpret>(y);\n") +SLANG_RAW(" vector signY = (uy & (uint16_t(1) << uint16_t(15)));\n") +SLANG_RAW(" vector newX = (ux & ((uint16_t(1) << uint16_t(15)) - uint16_t(1))) + signY;\n") +SLANG_RAW(" return reinterpret>(newX);\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("/// Copy-sign. Returns a value whose magnitude is from one operand and whose sign is from another operand.\n") +SLANG_RAW("/// @param x The value to use as the magnitude.\n") +SLANG_RAW("/// @param y The value to use as the sign.\n") +SLANG_RAW("/// @return A value whose magnitude is from `x` and whose sign is from `y`.\n") +SLANG_RAW("/// @category math\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_metal_spirv)]\n") +SLANG_RAW("vector copysign_float(vector x, vector y)\n") +SLANG_RAW("{\n") +SLANG_RAW(" let ux = reinterpret>(x);\n") +SLANG_RAW(" let uy = reinterpret>(y);\n") +SLANG_RAW(" vector signY = (uy & (uint32_t(1) << uint32_t(31)));\n") +SLANG_RAW(" vector newX = (ux & ((uint32_t(1) << uint32_t(31)) - uint32_t(1))) + signY;\n") +SLANG_RAW(" return reinterpret>(newX);\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("/// Copy-sign. Returns a value whose magnitude is from one operand and whose sign is from another operand.\n") +SLANG_RAW("/// @param x The value to use as the magnitude.\n") +SLANG_RAW("/// @param y The value to use as the sign.\n") +SLANG_RAW("/// @return A value whose magnitude is from `x` and whose sign is from `y`.\n") +SLANG_RAW("/// @category math\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_metal_spirv)]\n") +SLANG_RAW("vector copysign_double(vector x, vector y)\n") +SLANG_RAW("{\n") +SLANG_RAW(" let ux = reinterpret>(x);\n") +SLANG_RAW(" let uy = reinterpret>(y);\n") +SLANG_RAW(" vector signY = (uy & (uint64_t(1) << uint64_t(63)));\n") +SLANG_RAW(" vector newX = (ux & ((uint64_t(1) << uint64_t(63)) - uint64_t(1))) + signY;\n") +SLANG_RAW(" return reinterpret>(newX);\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("__generic\n") +SLANG_RAW("__intrinsic_op(") +SLANG_SPLICE(kIROp_FloatCast +) +SLANG_RAW(")\n") +SLANG_RAW("vector __real_cast(vector val);\n") +SLANG_RAW("\n") +SLANG_RAW("/// Copy-sign. 
Returns a value whose magnitude is from one operand and whose sign is from another operand.\n") +SLANG_RAW("/// @param x The value to use as the magnitude.\n") +SLANG_RAW("/// @param y The value to use as the sign.\n") +SLANG_RAW("/// @return A value whose magnitude is from x and whose sign is from y.\n") +SLANG_RAW("/// @category math\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_metal_spirv)]\n") +SLANG_RAW("vector copysign(vector x, vector y)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case metal: __intrinsic_asm \"copysign\";\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" {\n") +SLANG_RAW(" // sign of -0.0 needs to be respected.\n") +SLANG_RAW(" if (T is half)\n") +SLANG_RAW(" return __real_cast(copysign_half(\n") +SLANG_RAW(" __real_cast(x),\n") +SLANG_RAW(" __real_cast(y)));\n") +SLANG_RAW(" if (T is float)\n") +SLANG_RAW(" return __real_cast(copysign_float(\n") +SLANG_RAW(" __real_cast(x),\n") +SLANG_RAW(" __real_cast(y)));\n") +SLANG_RAW(" return __real_cast(copysign_double(\n") +SLANG_RAW(" __real_cast(x),\n") +SLANG_RAW(" __real_cast(y)));\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_metal_spirv)]\n") +SLANG_RAW("T copysign(T x, T y)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case metal: __intrinsic_asm \"copysign\";\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" return copysign(vector(x), vector(y))[0];\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("\n") +SLANG_RAW("// Check access status to tiled resource\n") +SLANG_RAW("[ForceInline]\n") +SLANG_RAW("[require(hlsl, sm_5_0)]\n") +SLANG_RAW("bool CheckAccessFullyMapped(out uint status)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"CheckAccessFullyMapped\";\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("/// Clamp. 
Returns the specified value clamped to the specified minimum and maximum bounds.\n") +SLANG_RAW("/// @param x The value to clamp.\n") +SLANG_RAW("/// @param minBound The minimum bound.\n") +SLANG_RAW("/// @param maxBound The maximum bound.\n") +SLANG_RAW("/// @return The clamped value.\n") +SLANG_RAW("/// @category math\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_metal_spirv_wgsl, sm_4_0_version)]\n") +SLANG_RAW("T clamp(T x, T minBound, T maxBound)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"clamp\";\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"clamp\";\n") +SLANG_RAW(" case metal: __intrinsic_asm \"clamp\";\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" if (__isSignedInt())\n") +SLANG_RAW(" return spirv_asm {\n") +SLANG_RAW(" result:$$T = OpExtInst glsl450 SClamp $x $minBound $maxBound\n") +SLANG_RAW(" };\n") +SLANG_RAW(" else\n") +SLANG_RAW(" return spirv_asm {\n") +SLANG_RAW(" result:$$T = OpExtInst glsl450 UClamp $x $minBound $maxBound\n") +SLANG_RAW(" };\n") +SLANG_RAW(" case wgsl: __intrinsic_asm \"clamp\";\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" return min(max(x, minBound), maxBound);\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_metal_spirv_wgsl, sm_4_0_version)]\n") +SLANG_RAW("vector clamp(vector x, vector minBound, vector maxBound)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"clamp\";\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"clamp\";\n") +SLANG_RAW(" case metal: __intrinsic_asm \"clamp\";\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" if (__isSignedInt())\n") +SLANG_RAW(" return spirv_asm {\n") +SLANG_RAW(" result:$$vector = OpExtInst glsl450 SClamp $x $minBound $maxBound\n") +SLANG_RAW(" };\n") +SLANG_RAW(" else\n") +SLANG_RAW(" return spirv_asm {\n") +SLANG_RAW(" result:$$vector = OpExtInst glsl450 UClamp $x $minBound $maxBound\n") +SLANG_RAW(" };\n") +SLANG_RAW(" case wgsl: __intrinsic_asm \"clamp\";\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" return min(max(x, minBound), maxBound);\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_metal_spirv_wgsl, sm_4_0_version)]\n") +SLANG_RAW("matrix clamp(matrix x, matrix minBound, matrix maxBound)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"clamp\";\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" return min(max(x, minBound), maxBound);\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_metal_spirv_wgsl, sm_4_0_version)]\n") +SLANG_RAW("T clamp(T x, T minBound, T maxBound)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"clamp\";\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"clamp\";\n") +SLANG_RAW(" case metal: __intrinsic_asm \"clamp\";\n") +SLANG_RAW(" case spirv: return spirv_asm {\n") +SLANG_RAW(" result:$$T = OpExtInst glsl450 FClamp $x $minBound $maxBound\n") +SLANG_RAW(" };\n") +SLANG_RAW(" case wgsl: __intrinsic_asm \"clamp\";\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" return min(max(x, minBound), maxBound);\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") 
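A short illustrative Slang sketch of the `copysign` and scalar `clamp` overloads defined above; the values are arbitrary, and the comments restate the lowering described in the listing.

    // Illustrative sketch only, not part of the generated library.
    float clampAndCopysignDemo()
    {
        // copysign takes the magnitude of the first operand and the sign of the
        // second; the sign of -0.0 is respected via the bit manipulation above.
        float s = copysign(2.5, -0.0);      // -2.5
        // Integer clamp lowers to SClamp/UClamp and float clamp to FClamp on
        // SPIR-V; every fallback path is min(max(x, minBound), maxBound).
        int   ci = clamp(7, 0, 5);          // 5
        float cf = clamp(-0.25, 0.0, 1.0);  // 0.0
        return s + float(ci) + cf;
    }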
+SLANG_RAW("__generic\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_metal_spirv_wgsl, sm_4_0_version)]\n") +SLANG_RAW("vector clamp(vector x, vector minBound, vector maxBound)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"clamp\";\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"clamp\";\n") +SLANG_RAW(" case metal: __intrinsic_asm \"clamp\";\n") +SLANG_RAW(" case spirv: return spirv_asm {\n") +SLANG_RAW(" result:$$vector = OpExtInst glsl450 FClamp $x $minBound $maxBound\n") +SLANG_RAW(" };\n") +SLANG_RAW(" case wgsl: __intrinsic_asm \"clamp\";\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" return min(max(x, minBound), maxBound);\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_metal_spirv_wgsl, sm_4_0_version)]\n") +SLANG_RAW("matrix clamp(matrix x, matrix minBound, matrix maxBound)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"clamp\";\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" return min(max(x, minBound), maxBound);\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("/// Clip (discard) fragment conditionally\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_spirv, fragment)]\n") +SLANG_RAW("void clip(T x)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"clip\";\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" if(x < T(0)) discard;\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_spirv, fragment)]\n") +SLANG_RAW("void clip(vector x)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"clip\";\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" if(any(x < T(0))) discard;\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_spirv, fragment)]\n") +SLANG_RAW("void clip(matrix x)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"clip\";\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" if(any(x < T(0))) discard;\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("/// Cosine. 
Returns the cosine of the specified angle.\n") +SLANG_RAW("/// @param x The angle in radians.\n") +SLANG_RAW("/// @return The cosine of the specified angle.\n") +SLANG_RAW("/// @category math\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_metal_spirv_wgsl, sm_4_0_version)]\n") +SLANG_RAW("T cos(T x)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case cpp: __intrinsic_asm \"$P_cos($0)\";\n") +SLANG_RAW(" case cuda: __intrinsic_asm \"$P_cos($0)\";\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"cos\";\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"cos\";\n") +SLANG_RAW(" case metal: __intrinsic_asm \"cos\";\n") +SLANG_RAW(" case spirv: return spirv_asm {\n") +SLANG_RAW(" OpExtInst $$T result glsl450 Cos $x\n") +SLANG_RAW(" };\n") +SLANG_RAW(" case wgsl: __intrinsic_asm \"cos\";\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_metal_spirv_wgsl, sm_4_0_version)]\n") +SLANG_RAW("vector cos(vector x)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"cos\";\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"cos\";\n") +SLANG_RAW(" case metal: __intrinsic_asm \"cos\";\n") +SLANG_RAW(" case spirv: return spirv_asm {\n") +SLANG_RAW(" OpExtInst $$vector result glsl450 Cos $x\n") +SLANG_RAW(" };\n") +SLANG_RAW(" case wgsl: __intrinsic_asm \"cos\";\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" VECTOR_MAP_UNARY(T,N, cos, x);\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_metal_spirv_wgsl, sm_4_0_version)]\n") +SLANG_RAW("matrix cos(matrix x)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"cos\";\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" MATRIX_MAP_UNARY(T, N, M, cos, x);\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("/// Hyperbolic cosine. 
Returns the hyperbolic cosine of the specified value.\n") +SLANG_RAW("/// @param x The specified value.\n") +SLANG_RAW("/// @return The hyperbolic cosine of the specified value.\n") +SLANG_RAW("/// @category math\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_metal_spirv_wgsl, sm_4_0_version)]\n") +SLANG_RAW("T cosh(T x)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case cpp: __intrinsic_asm \"$P_cosh($0)\";\n") +SLANG_RAW(" case cuda: __intrinsic_asm \"$P_cosh($0)\";\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"cosh\";\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"cosh\";\n") +SLANG_RAW(" case metal: __intrinsic_asm \"cosh\";\n") +SLANG_RAW(" case spirv: return spirv_asm {\n") +SLANG_RAW(" OpExtInst $$T result glsl450 Cosh $x\n") +SLANG_RAW(" };\n") +SLANG_RAW(" case wgsl: __intrinsic_asm \"cosh\";\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_metal_spirv_wgsl, sm_4_0_version)]\n") +SLANG_RAW("vector cosh(vector x)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"cosh\";\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"cosh\";\n") +SLANG_RAW(" case metal: __intrinsic_asm \"cosh\";\n") +SLANG_RAW(" case spirv: return spirv_asm {\n") +SLANG_RAW(" OpExtInst $$vector result glsl450 Cosh $x\n") +SLANG_RAW(" };\n") +SLANG_RAW(" case wgsl: __intrinsic_asm \"cosh\";\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" VECTOR_MAP_UNARY(T,N, cosh, x);\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_metal_spirv_wgsl, sm_4_0_version)]\n") +SLANG_RAW("matrix cosh(matrix x)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"cosh\";\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" MATRIX_MAP_UNARY(T, N, M, cosh, x);\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("/// Compute the cosine of pi times the input.\n") +SLANG_RAW("/// @param x The input value.\n") +SLANG_RAW("/// @return The cosine of pi times the input.\n") +SLANG_RAW("/// @remarks This function is equivalent to `cos(PI * x)`. 
On Metal, this function is implemented using the `cospi` intrinsic.\n") +SLANG_RAW("/// @category math\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_metal_spirv_wgsl, sm_4_0_version)]\n") +SLANG_RAW("T cospi(T x)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case metal: __intrinsic_asm \"cospi\";\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" return cos(T.getPi() * x);\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_metal_spirv_wgsl, sm_4_0_version)]\n") +SLANG_RAW("vector cospi(vector x)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case metal: __intrinsic_asm \"cospi\";\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" return cos(T.getPi() * x);\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("\n") +SLANG_RAW("/// Population count.\n") +SLANG_RAW("/// Counts the number of set bits in the binary representation of a value.\n") +SLANG_RAW("/// @param value The value to count bits in.\n") +SLANG_RAW("/// @return The number of bits in the binary representation of `value` that are set to one.\n") +SLANG_RAW("/// @remarks For SPIR-V, this function maps to `OpBitCount`.\n") +SLANG_RAW("/// @category bitops\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[ForceInline]\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_metal_spirv_wgsl, shader5_sm_5_0)]\n") +SLANG_RAW("uint countbits(uint value)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl:\n") +SLANG_RAW(" __intrinsic_asm \"countbits\";\n") +SLANG_RAW(" case glsl:\n") +SLANG_RAW(" __intrinsic_asm \"bitCount\";\n") +SLANG_RAW(" case metal:\n") +SLANG_RAW(" __intrinsic_asm \"popcount\";\n") +SLANG_RAW(" case cuda:\n") +SLANG_RAW(" case cpp:\n") +SLANG_RAW(" __intrinsic_asm \"$P_countbits($0)\";\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" return spirv_asm {OpBitCount $$uint result $value};\n") +SLANG_RAW(" case wgsl:\n") +SLANG_RAW(" __intrinsic_asm \"countOneBits\";\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("__generic \n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[ForceInline]\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_metal_spirv_wgsl, shader5_sm_5_0)]\n") +SLANG_RAW("vector countbits(vector value)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl:\n") +SLANG_RAW(" __intrinsic_asm \"countbits\";\n") +SLANG_RAW(" case glsl:\n") +SLANG_RAW(" __intrinsic_asm \"bitCount\";\n") +SLANG_RAW(" case metal:\n") +SLANG_RAW(" __intrinsic_asm \"popcount\";\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" return spirv_asm {OpBitCount $$vector result $value};\n") +SLANG_RAW(" case wgsl:\n") +SLANG_RAW(" __intrinsic_asm \"countOneBits\";\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" VECTOR_MAP_UNARY(uint, N, countbits, value);\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("/// Cross product. 
Returns the cross product of two 3D vectors.\n") +SLANG_RAW("/// @param left The first vector.\n") +SLANG_RAW("/// @param right The second vector.\n") +SLANG_RAW("/// @return The cross product of `left` and `right`.\n") +SLANG_RAW("/// @category math\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_metal_spirv_wgsl, sm_4_0_version)]\n") +SLANG_RAW("vector cross(vector left, vector right)\n") +SLANG_RAW("{\n") +SLANG_RAW(" // TODO: SPIRV does not support integer vectors.\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"cross\";\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"cross\";\n") +SLANG_RAW(" case metal: __intrinsic_asm \"cross\";\n") +SLANG_RAW(" case spirv: return spirv_asm {\n") +SLANG_RAW(" OpExtInst $$vector result glsl450 Cross $left $right\n") +SLANG_RAW(" };\n") +SLANG_RAW(" case wgsl: __intrinsic_asm \"cross\";\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" return vector(\n") +SLANG_RAW(" left.y * right.z - left.z * right.y,\n") +SLANG_RAW(" left.z * right.x - left.x * right.z,\n") +SLANG_RAW(" left.x * right.y - left.y * right.x);\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_metal_spirv_wgsl, sm_4_0_version)]\n") +SLANG_RAW("vector cross(vector left, vector right)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"cross\";\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"cross\";\n") +SLANG_RAW(" case spirv: return spirv_asm {\n") +SLANG_RAW(" OpExtInst $$vector result glsl450 Cross $left $right\n") +SLANG_RAW(" };\n") +SLANG_RAW(" case wgsl: __intrinsic_asm \"cross\";\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" return vector(\n") +SLANG_RAW(" left.y * right.z - left.z * right.y,\n") +SLANG_RAW(" left.z * right.x - left.x * right.z,\n") +SLANG_RAW(" left.x * right.y - left.y * right.x);\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("// Convert encoded color\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_metal_spirv_wgsl, sm_4_0_version)]\n") +SLANG_RAW("int4 D3DCOLORtoUBYTE4(float4 color)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"D3DCOLORtoUBYTE4\";\n") +SLANG_RAW(" case wgsl: __intrinsic_asm \"bitcast(pack4x8unorm($0)).zyxw\";\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" let scaled = color.zyxw * 255.001999f;\n") +SLANG_RAW(" return int4(scaled);\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("// Partial-difference derivatives\n") + +const char* diffDimensions[2] = {"x", "y"}; +for (auto xOrY : diffDimensions) { +SLANG_RAW("#line 7462 \"hlsl.meta.slang\"") +SLANG_RAW("\n") +SLANG_RAW("/// Take the partial derivative of `p` with respect to ") +SLANG_SPLICE(xOrY +) +SLANG_RAW(" in screen space.\n") +SLANG_RAW("/// @param p The value to take partial derivative for.\n") +SLANG_RAW("/// @return The partial derivative of `p`.\n") +SLANG_RAW("/// @remarks For SPIR-V, this function maps to `OpDPd") +SLANG_SPLICE(xOrY +) +SLANG_RAW("`.\n") +SLANG_RAW("/// @category derivative Derivative functions\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_metal_spirv_wgsl, fragmentprocessing)]\n") +SLANG_RAW("T dd") +SLANG_SPLICE(xOrY +) +SLANG_RAW("(T p)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __requireComputeDerivative();\n") +SLANG_RAW(" 
__target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl:\n") +SLANG_RAW(" case cpp:\n") +SLANG_RAW(" case cuda:\n") +SLANG_RAW(" __intrinsic_asm \"dd") +SLANG_SPLICE(xOrY +) +SLANG_RAW("\";\n") +SLANG_RAW(" case glsl:\n") +SLANG_RAW(" __intrinsic_asm \"dFd") +SLANG_SPLICE(xOrY +) +SLANG_RAW("\";\n") +SLANG_RAW(" case metal:\n") +SLANG_RAW(" __intrinsic_asm \"dfd") +SLANG_SPLICE(xOrY +) +SLANG_RAW("\";\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" return spirv_asm {OpDPd") +SLANG_SPLICE(xOrY +) +SLANG_RAW(" $$T result $p};\n") +SLANG_RAW(" case wgsl:\n") +SLANG_RAW(" __intrinsic_asm \"dpd") +SLANG_SPLICE(xOrY +) +SLANG_RAW("\";\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_metal_spirv_wgsl, fragmentprocessing)]\n") +SLANG_RAW("vector dd") +SLANG_SPLICE(xOrY +) +SLANG_RAW("(vector p)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __requireComputeDerivative();\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl:\n") +SLANG_RAW(" case cpp:\n") +SLANG_RAW(" case cuda:\n") +SLANG_RAW(" __intrinsic_asm \"dd") +SLANG_SPLICE(xOrY +) +SLANG_RAW("\";\n") +SLANG_RAW(" case glsl:\n") +SLANG_RAW(" __intrinsic_asm \"dFd") +SLANG_SPLICE(xOrY +) +SLANG_RAW("\";\n") +SLANG_RAW(" case metal:\n") +SLANG_RAW(" __intrinsic_asm \"dfd") +SLANG_SPLICE(xOrY +) +SLANG_RAW("\";\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" return spirv_asm {OpDPd") +SLANG_SPLICE(xOrY +) +SLANG_RAW(" $$vector result $p};\n") +SLANG_RAW(" case wgsl:\n") +SLANG_RAW(" __intrinsic_asm \"dpd") +SLANG_SPLICE(xOrY +) +SLANG_RAW("\";\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_metal_spirv_wgsl, fragmentprocessing)]\n") +SLANG_RAW("matrix dd") +SLANG_SPLICE(xOrY +) +SLANG_RAW("(matrix p)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __requireComputeDerivative();\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl:\n") +SLANG_RAW(" __intrinsic_asm \"dd") +SLANG_SPLICE(xOrY +) +SLANG_RAW("\";\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" MATRIX_MAP_UNARY(T, N, M, dd") +SLANG_SPLICE(xOrY +) +SLANG_RAW(", p);\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("/// Take the coarse partial derivative of `p` with respect to ") +SLANG_SPLICE(xOrY +) +SLANG_RAW(" in screen space.\n") +SLANG_RAW("/// @param p The value to take partial derivative for.\n") +SLANG_RAW("/// @return The partial derivative of `p`.\n") +SLANG_RAW("/// @remarks For SPIR-V, this function maps to `OpDPd") +SLANG_SPLICE(xOrY +) +SLANG_RAW("Coarse`.\n") +SLANG_RAW("/// @category derivative\n") +SLANG_RAW("__generic\n") +SLANG_RAW("__glsl_extension(GL_ARB_derivative_control)\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[require(glsl_hlsl_spirv, fragmentprocessing_derivativecontrol)]\n") +SLANG_RAW("T dd") +SLANG_SPLICE(xOrY +) +SLANG_RAW("_coarse(T p)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __requireComputeDerivative();\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"dd") +SLANG_SPLICE(xOrY +) +SLANG_RAW("_coarse\";\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"dFd") +SLANG_SPLICE(xOrY +) +SLANG_RAW("Coarse\";\n") +SLANG_RAW(" case spirv: return spirv_asm {OpCapability DerivativeControl; result:$$T = OpDPd") +SLANG_SPLICE(xOrY +) +SLANG_RAW("Coarse $p};\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("__generic\n") 
+SLANG_RAW("__glsl_extension(GL_ARB_derivative_control)\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[require(glsl_hlsl_spirv, fragmentprocessing_derivativecontrol)]\n") +SLANG_RAW("vector dd") +SLANG_SPLICE(xOrY +) +SLANG_RAW("_coarse(vector p)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __requireComputeDerivative();\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"dd") +SLANG_SPLICE(xOrY +) +SLANG_RAW("_coarse\";\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"dFd") +SLANG_SPLICE(xOrY +) +SLANG_RAW("Coarse\";\n") +SLANG_RAW(" case spirv: return spirv_asm {OpCapability DerivativeControl; result:$$vector = OpDPd") +SLANG_SPLICE(xOrY +) +SLANG_RAW("Coarse $p};\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[require(glsl_hlsl_spirv, fragmentprocessing_derivativecontrol)]\n") +SLANG_RAW("matrix dd") +SLANG_SPLICE(xOrY +) +SLANG_RAW("_coarse(matrix p)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __requireComputeDerivative();\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl:\n") +SLANG_RAW(" __intrinsic_asm \"dd") +SLANG_SPLICE(xOrY +) +SLANG_RAW("_coarse\";\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" MATRIX_MAP_UNARY(T, N, M, dd") +SLANG_SPLICE(xOrY +) +SLANG_RAW("_coarse, p);\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("/// Take the fine partial derivative of `p` with respect to ") +SLANG_SPLICE(xOrY +) +SLANG_RAW(" in screen space.\n") +SLANG_RAW("/// @param p The value to take partial derivative for.\n") +SLANG_RAW("/// @return The partial derivative of `p`.\n") +SLANG_RAW("/// @remarks For SPIR-V, this function maps to `OpDPd") +SLANG_SPLICE(xOrY +) +SLANG_RAW("Fine`.\n") +SLANG_RAW("/// @category derivative\n") +SLANG_RAW("__generic\n") +SLANG_RAW("__glsl_extension(GL_ARB_derivative_control)\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[require(glsl_hlsl_spirv, fragmentprocessing_derivativecontrol)]\n") +SLANG_RAW("T dd") +SLANG_SPLICE(xOrY +) +SLANG_RAW("_fine(T p)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __requireComputeDerivative();\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"dd") +SLANG_SPLICE(xOrY +) +SLANG_RAW("_fine\";\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"dFd") +SLANG_SPLICE(xOrY +) +SLANG_RAW("Fine\";\n") +SLANG_RAW(" case spirv: return spirv_asm {OpCapability DerivativeControl; result:$$T = OpDPd") +SLANG_SPLICE(xOrY +) +SLANG_RAW("Fine $p};\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("__generic\n") +SLANG_RAW("__glsl_extension(GL_ARB_derivative_control)\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[require(glsl_hlsl_spirv, fragmentprocessing_derivativecontrol)]\n") +SLANG_RAW("vector dd") +SLANG_SPLICE(xOrY +) +SLANG_RAW("_fine(vector p)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __requireComputeDerivative();\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"dd") +SLANG_SPLICE(xOrY +) +SLANG_RAW("_fine\";\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"dFd") +SLANG_SPLICE(xOrY +) +SLANG_RAW("Fine\";\n") +SLANG_RAW(" case spirv: return spirv_asm {OpCapability DerivativeControl; result:$$vector = OpDPd") +SLANG_SPLICE(xOrY +) +SLANG_RAW("Fine $p};\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[require(glsl_hlsl_spirv, fragmentprocessing_derivativecontrol)]\n") +SLANG_RAW("matrix dd") +SLANG_SPLICE(xOrY +) 
+SLANG_RAW("_fine(matrix p)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __requireComputeDerivative();\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl:\n") +SLANG_RAW(" __intrinsic_asm \"dd") +SLANG_SPLICE(xOrY +) +SLANG_RAW("_fine\";\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" MATRIX_MAP_UNARY(T, N, M, dd") +SLANG_SPLICE(xOrY +) +SLANG_RAW("_fine, p);\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") + +} // for (xOrY) +SLANG_RAW("#line 7631 \"hlsl.meta.slang\"") +SLANG_RAW("\n") +SLANG_RAW("\n") +SLANG_RAW("\n") +SLANG_RAW("/// Convert radians to degrees.\n") +SLANG_RAW("/// @param x The angle in radians.\n") +SLANG_RAW("/// @return The angle in degrees.\n") +SLANG_RAW("/// @category math\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_metal_spirv_wgsl)]\n") +SLANG_RAW("T degrees(T x)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"degrees\";\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"degrees\";\n") +SLANG_RAW(" case spirv: return spirv_asm {\n") +SLANG_RAW(" OpExtInst $$T result glsl450 Degrees $x\n") +SLANG_RAW(" };\n") +SLANG_RAW(" case wgsl: __intrinsic_asm \"degrees\";\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" return x * (T(180) / T.getPi());\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_metal_spirv_wgsl)]\n") +SLANG_RAW("vector degrees(vector x)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"degrees\";\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"degrees\";\n") +SLANG_RAW(" case spirv: return spirv_asm {\n") +SLANG_RAW(" OpExtInst $$vector result glsl450 Degrees $x\n") +SLANG_RAW(" };\n") +SLANG_RAW(" case wgsl: __intrinsic_asm \"degrees\";\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" VECTOR_MAP_UNARY(T, N, degrees, x);\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_metal_spirv_wgsl)]\n") +SLANG_RAW("matrix degrees(matrix x)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"degrees\";\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" MATRIX_MAP_UNARY(T, N, M, degrees, x);\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("/// Compute matrix determinant.\n") +SLANG_RAW("/// @param m The matrix.\n") +SLANG_RAW("/// @return The determinant of the matrix.\n") +SLANG_RAW("/// @category math\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[PreferCheckpoint]\n") +SLANG_RAW("[require(glsl_hlsl_metal_spirv_wgsl)]\n") +SLANG_RAW("T determinant(matrix m)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"determinant\";\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"determinant\";\n") +SLANG_RAW(" case metal: __intrinsic_asm \"determinant\";\n") +SLANG_RAW(" case spirv: return spirv_asm {\n") +SLANG_RAW(" OpExtInst $$T result glsl450 Determinant $m\n") +SLANG_RAW(" };\n") +SLANG_RAW(" case wgsl: __intrinsic_asm \"determinant\";\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("/// Barrier for device memory.\n") +SLANG_RAW("/// @category barrier\n") +SLANG_RAW("__glsl_extension(GL_KHR_memory_scope_semantics)\n") +SLANG_RAW("[require(cuda_glsl_hlsl_metal_spirv_wgsl, memorybarrier)]\n") 
+SLANG_RAW("void DeviceMemoryBarrier()\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"DeviceMemoryBarrier\";\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"memoryBarrier(gl_ScopeDevice, (gl_StorageSemanticsImage|gl_StorageSemanticsBuffer), gl_SemanticsAcquireRelease)\";\n") +SLANG_RAW(" case cuda: __intrinsic_asm \"__threadfence()\";\n") +SLANG_RAW(" case metal: __intrinsic_asm \"threadgroup_barrier(mem_flags::mem_device | mem_flags::mem_threadgroup | mem_flags::mem_texture | mem_flags::mem_threadgroup_imageblock)\";\n") +SLANG_RAW(" case spirv: spirv_asm\n") +SLANG_RAW(" {\n") +SLANG_RAW(" OpMemoryBarrier Device AcquireRelease|UniformMemory|ImageMemory;\n") +SLANG_RAW(" };\n") +SLANG_RAW(" case wgsl: __intrinsic_asm \"storageBarrier(); textureBarrier(); workgroupBarrier();\";\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("/// Barrier for device memory with group synchronization.\n") +SLANG_RAW("/// @category barrier\n") +SLANG_RAW("__glsl_extension(GL_KHR_memory_scope_semantics)\n") +SLANG_RAW("[require(cuda_glsl_hlsl_metal_spirv_wgsl, memorybarrier)]\n") +SLANG_RAW("void DeviceMemoryBarrierWithGroupSync()\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"DeviceMemoryBarrierWithGroupSync\";\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"controlBarrier(gl_ScopeWorkgroup, gl_ScopeDevice, (gl_StorageSemanticsImage|gl_StorageSemanticsBuffer), gl_SemanticsAcquireRelease)\";\n") +SLANG_RAW(" case cuda: __intrinsic_asm \"__syncthreads()\";\n") +SLANG_RAW(" case metal: __intrinsic_asm \"threadgroup_barrier(mem_flags::mem_device | mem_flags::mem_threadgroup | mem_flags::mem_texture | mem_flags::mem_threadgroup_imageblock)\";\n") +SLANG_RAW(" case spirv: spirv_asm\n") +SLANG_RAW(" {\n") +SLANG_RAW(" OpControlBarrier Workgroup Device AcquireRelease|UniformMemory|ImageMemory;\n") +SLANG_RAW(" };\n") +SLANG_RAW(" case wgsl: __intrinsic_asm \"storageBarrier(); textureBarrier(); workgroupBarrier();\";\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("/// Vector distance. Returns the distance between two points.\n") +SLANG_RAW("/// @param x The first point.\n") +SLANG_RAW("/// @param y The second point.\n") +SLANG_RAW("/// @return The distance between `x` and `y`.\n") +SLANG_RAW("/// @remarks This function is equivalent to `length(x - y)`. 
When `x` and `y` are scalars, this function is equivalent to `abs(x - y)`.\n") +SLANG_RAW("/// @category math\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_metal_spirv_wgsl, sm_4_0_version)]\n") +SLANG_RAW("T distance(vector x, vector y)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"distance\";\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"distance\";\n") +SLANG_RAW(" case metal: __intrinsic_asm \"distance\";\n") +SLANG_RAW(" case spirv: return spirv_asm {\n") +SLANG_RAW(" OpExtInst $$T result glsl450 Distance $x $y\n") +SLANG_RAW(" };\n") +SLANG_RAW(" case wgsl: __intrinsic_asm \"distance\";\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" return length(x - y);\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_metal_spirv_wgsl, sm_4_0_version)]\n") +SLANG_RAW("T distance(T x, T y)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"distance\";\n") +SLANG_RAW(" case spirv: return spirv_asm {\n") +SLANG_RAW(" OpExtInst $$T result glsl450 Distance $x $y\n") +SLANG_RAW(" };\n") +SLANG_RAW(" case wgsl: __intrinsic_asm \"distance\";\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" return length(x - y);\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("/// Computes `max(0, x-y)`.\n") +SLANG_RAW("/// @param x The first value.\n") +SLANG_RAW("/// @param y The second value.\n") +SLANG_RAW("/// @return The result of `max(0, x-y)`.\n") +SLANG_RAW("/// @category math\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_metal_spirv, sm_4_0_version)]\n") +SLANG_RAW("T fdim(T x, T y)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case metal: __intrinsic_asm \"fdim\";\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" return max(T(0), x - y);\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_metal_spirv, sm_4_0_version)]\n") +SLANG_RAW("vector fdim(vector x, vector y)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case metal: __intrinsic_asm \"fdim\";\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" return max(T(0), x - y);\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("/// Divide values.\n") +SLANG_RAW("/// @param x The dividend.\n") +SLANG_RAW("/// @param y The divisor.\n") +SLANG_RAW("/// @return The result of dividing `x` by `y`, element-wise for vector types.\n") +SLANG_RAW("/// @category math\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_metal_spirv)]\n") +SLANG_RAW("T divide(T x, T y)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case metal: __intrinsic_asm \"divide\";\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" return x / y;\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_metal_spirv)]\n") +SLANG_RAW("vector divide(vector x, vector y)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case metal: __intrinsic_asm \"divide\";\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" return x / y;\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") 
+SLANG_RAW("/// Vector dot product. Returns the dot product of two vectors.\n") +SLANG_RAW("/// @param x The first vector.\n") +SLANG_RAW("/// @param y The second vector.\n") +SLANG_RAW("/// @return The dot product of `x` and `y`.\n") +SLANG_RAW("/// @remarks When `x` and `y` are scalars, this function is equivalent to `x * y`.\n") +SLANG_RAW("/// @category math\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[ForceInline]\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_metal_spirv_wgsl, sm_4_0_version)]\n") +SLANG_RAW("T dot(T x, T y)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"dot\";\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"dot\";\n") +SLANG_RAW(" case wgsl: __intrinsic_asm \"dot\";\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" return x * y;\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_metal_spirv_wgsl, sm_4_0_version)]\n") +SLANG_RAW("T dot(vector x, vector y)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"dot\";\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"dot\";\n") +SLANG_RAW(" case metal: __intrinsic_asm \"dot\";\n") +SLANG_RAW(" case spirv: return spirv_asm {\n") +SLANG_RAW(" OpDot $$T result $x $y\n") +SLANG_RAW(" };\n") +SLANG_RAW(" case wgsl: __intrinsic_asm \"dot\";\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" T result = T(0);\n") +SLANG_RAW(" for(int i = 0; i < N; ++i)\n") +SLANG_RAW(" result += x[i] * y[i];\n") +SLANG_RAW(" return result;\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_metal_spirv_wgsl, sm_4_0_version)]\n") +SLANG_RAW("T dot(vector x, vector y)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"dot\";\n") +SLANG_RAW(" case wgsl: __intrinsic_asm \"dot\";\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" T result = T(0);\n") +SLANG_RAW(" for(int i = 0; i < N; ++i)\n") +SLANG_RAW(" result += x[i] * y[i];\n") +SLANG_RAW(" return result;\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("/// Helper for computing distance terms for lighting (obsolete).\n") +SLANG_RAW("/// Use the subtraction operator '-' instead.\n") +SLANG_RAW("/// @deprecated\n") +SLANG_RAW("/// @category math\n") +SLANG_RAW("__generic vector dst(vector x, vector y);\n") +SLANG_RAW("\n") +SLANG_RAW("// Given a RWByteAddressBuffer allow it to be interpreted as a RWStructuredBuffer\n") +SLANG_RAW("__intrinsic_op(") +SLANG_SPLICE(kIROp_GetEquivalentStructuredBuffer +) +SLANG_RAW(")\n") +SLANG_RAW("RWStructuredBuffer __getEquivalentStructuredBuffer(RWByteAddressBuffer b);\n") +SLANG_RAW("\n") +SLANG_RAW("__intrinsic_op(") +SLANG_SPLICE(kIROp_GetEquivalentStructuredBuffer +) +SLANG_RAW(")\n") +SLANG_RAW("StructuredBuffer __getEquivalentStructuredBuffer(ByteAddressBuffer b);\n") +SLANG_RAW("\n") +SLANG_RAW("__intrinsic_op(") +SLANG_SPLICE(kIROp_GetEquivalentStructuredBuffer +) +SLANG_RAW(")\n") +SLANG_RAW("RasterizerOrderedStructuredBuffer __getEquivalentStructuredBuffer(RasterizerOrderedByteAddressBuffer b);\n") +SLANG_RAW("\n") +SLANG_RAW("// Error message\n") +SLANG_RAW("\n") +SLANG_RAW("// void errorf( string format, ... 
);\n") +SLANG_RAW("\n") +SLANG_RAW("// Attribute evaluation\n") +SLANG_RAW("\n") +SLANG_RAW("// TODO: The matrix cases of these functions won't actuall work\n") +SLANG_RAW("// when compiled to GLSL, since they only support scalar/vector\n") +SLANG_RAW("\n") +SLANG_RAW("// TODO: Should these be constrains to `__BuiltinFloatingPointType`?\n") +SLANG_RAW("// TODO: SPIRV-direct does not support non-floating-point types.\n") +SLANG_RAW("\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[require(glsl_spirv, fragmentprocessing)]\n") +SLANG_RAW("T EvaluateAttributeAtCentroid(T x)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"interpolateAtCentroid\";\n") +SLANG_RAW(" case spirv: return spirv_asm {\n") +SLANG_RAW(" OpExtInst $$T result glsl450 InterpolateAtCentroid $x\n") +SLANG_RAW(" };\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[require(glsl_spirv, fragmentprocessing)]\n") +SLANG_RAW("vector EvaluateAttributeAtCentroid(vector x)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"interpolateAtCentroid\";\n") +SLANG_RAW(" case spirv: return spirv_asm {\n") +SLANG_RAW(" OpExtInst $$vector result glsl450 InterpolateAtCentroid $x\n") +SLANG_RAW(" };\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[require(glsl_spirv, fragmentprocessing)]\n") +SLANG_RAW("matrix EvaluateAttributeAtCentroid(matrix x)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"interpolateAtCentroid\";\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" MATRIX_MAP_UNARY(T, N, M, EvaluateAttributeAtCentroid, x);\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[require(glsl_spirv, fragmentprocessing)]\n") +SLANG_RAW("T EvaluateAttributeAtSample(T x, uint sampleindex)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"interpolateAtSample($0, int($1))\";\n") +SLANG_RAW(" case spirv: return spirv_asm {\n") +SLANG_RAW(" OpExtInst $$T result glsl450 InterpolateAtSample $x $sampleindex\n") +SLANG_RAW(" };\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[require(glsl_spirv, fragmentprocessing)]\n") +SLANG_RAW("vector EvaluateAttributeAtSample(vector x, uint sampleindex)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"interpolateAtSample($0, int($1))\";\n") +SLANG_RAW(" case spirv: return spirv_asm {\n") +SLANG_RAW(" OpExtInst $$vector result glsl450 InterpolateAtSample $x $sampleindex\n") +SLANG_RAW(" };\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[require(glsl_spirv, fragmentprocessing)]\n") +SLANG_RAW("matrix EvaluateAttributeAtSample(matrix x, uint sampleindex)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"interpolateAtSample($0, int($1))\";\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" matrix result;\n") +SLANG_RAW(" for(int i = 0; i < N; ++i)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" result[i] = EvaluateAttributeAtSample(x[i], sampleindex);\n") 
+SLANG_RAW(" }\n") +SLANG_RAW(" return result;\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[require(glsl_spirv, fragmentprocessing)]\n") +SLANG_RAW("T EvaluateAttributeSnapped(T x, int2 offset)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"interpolateAtOffset($0, vec2($1) / 16.0f)\";\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" {\n") +SLANG_RAW(" const float2 tmp = float2(16.f, 16.f);\n") +SLANG_RAW(" return spirv_asm {\n") +SLANG_RAW(" %foffset:$$float2 = OpConvertSToF $offset;\n") +SLANG_RAW(" %offsetdiv16:$$float2 = OpFDiv %foffset $tmp;\n") +SLANG_RAW(" result:$$T = OpExtInst glsl450 InterpolateAtOffset $x %offsetdiv16\n") +SLANG_RAW(" };\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[require(glsl_spirv, fragmentprocessing)]\n") +SLANG_RAW("vector EvaluateAttributeSnapped(vector x, int2 offset)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"interpolateAtOffset($0, vec2($1) / 16.0f)\";\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" {\n") +SLANG_RAW(" const float2 tmp = float2(16.f, 16.f);\n") +SLANG_RAW(" return spirv_asm {\n") +SLANG_RAW(" %foffset:$$float2 = OpConvertSToF $offset;\n") +SLANG_RAW(" %offsetdiv16:$$float2 = OpFDiv %foffset $tmp;\n") +SLANG_RAW(" result:$$vector = OpExtInst glsl450 InterpolateAtOffset $x %offsetdiv16\n") +SLANG_RAW(" };\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[require(glsl_spirv, fragmentprocessing)]\n") +SLANG_RAW("matrix EvaluateAttributeSnapped(matrix x, int2 offset)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"interpolateAtOffset($0, vec2($1) / 16.0f)\";\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" matrix result;\n") +SLANG_RAW(" for(int i = 0; i < N; ++i)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" result[i] = EvaluateAttributeSnapped(x[i], offset);\n") +SLANG_RAW(" }\n") +SLANG_RAW(" return result;\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("/// Computes base-e exponent.\n") +SLANG_RAW("/// @param x The input value.\n") +SLANG_RAW("/// @return The base-e exponent of `x`.\n") +SLANG_RAW("/// @category math\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_metal_spirv_wgsl, sm_4_0_version)]\n") +SLANG_RAW("T exp(T x)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case cpp: __intrinsic_asm \"$P_exp($0)\";\n") +SLANG_RAW(" case cuda: __intrinsic_asm \"$P_exp($0)\";\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"exp\";\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"exp\";\n") +SLANG_RAW(" case metal: __intrinsic_asm \"exp\";\n") +SLANG_RAW(" case spirv: return spirv_asm {\n") +SLANG_RAW(" OpExtInst $$T result glsl450 Exp $x\n") +SLANG_RAW(" };\n") +SLANG_RAW(" case wgsl: __intrinsic_asm \"exp\";\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("/// @category math\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_metal_spirv_wgsl, sm_4_0_version)]\n") +SLANG_RAW("vector exp(vector x)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case glsl: __intrinsic_asm 
\"exp\";\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"exp\";\n") +SLANG_RAW(" case metal: __intrinsic_asm \"exp\";\n") +SLANG_RAW(" case spirv: return spirv_asm {\n") +SLANG_RAW(" OpExtInst $$vector result glsl450 Exp $x\n") +SLANG_RAW(" };\n") +SLANG_RAW(" case wgsl: __intrinsic_asm \"exp\";\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" VECTOR_MAP_UNARY(T, N, exp, x);\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("/// @category math\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_metal_spirv_wgsl, sm_4_0_version)]\n") +SLANG_RAW("matrix exp(matrix x)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"exp\";\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" MATRIX_MAP_UNARY(T, N, M, exp, x);\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("/// Computes base-2 exponent.\n") +SLANG_RAW("/// @param x The input value.\n") +SLANG_RAW("/// @return The base-2 exponent of `x`.\n") +SLANG_RAW("/// @category math\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_metal_spirv_wgsl, sm_4_0_version)]\n") +SLANG_RAW("T exp2(T x)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case glsl:\n") +SLANG_RAW(" __intrinsic_asm \"exp2($0)\";\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" if (__isHalf())\n") +SLANG_RAW(" {\n") +SLANG_RAW(" return spirv_asm { OpExtInst $$T result glsl450 Exp2 $x };\n") +SLANG_RAW(" }\n") +SLANG_RAW(" else\n") +SLANG_RAW(" {\n") +SLANG_RAW(" float xf = __realCast(x);\n") +SLANG_RAW(" return T(spirv_asm {\n") +SLANG_RAW(" result:$$float = OpExtInst glsl450 Exp2 $xf\n") +SLANG_RAW(" });\n") +SLANG_RAW(" }\n") +SLANG_RAW(" case hlsl:\n") +SLANG_RAW(" __intrinsic_asm \"exp2($0)\";\n") +SLANG_RAW(" case metal: __intrinsic_asm \"exp2\";\n") +SLANG_RAW(" case cpp: \n") +SLANG_RAW(" __intrinsic_asm \"$P_exp2($0)\";\n") +SLANG_RAW(" case cuda: \n") +SLANG_RAW(" __intrinsic_asm \"$P_exp2($0)\";\n") +SLANG_RAW(" case wgsl:\n") +SLANG_RAW(" __intrinsic_asm \"exp2\";\n") +SLANG_RAW(" }\n") +SLANG_RAW(" \n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_metal_spirv_wgsl, sm_4_0_version)]\n") +SLANG_RAW("vector exp2(vector x)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case glsl:\n") +SLANG_RAW(" __intrinsic_asm \"exp2($0)\";\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"exp2\";\n") +SLANG_RAW(" case metal: __intrinsic_asm \"exp2\";\n") +SLANG_RAW(" case spirv: return spirv_asm {\n") +SLANG_RAW(" OpExtInst $$vector result glsl450 Exp2 $x\n") +SLANG_RAW(" };\n") +SLANG_RAW(" case wgsl: __intrinsic_asm \"exp2\";\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" VECTOR_MAP_UNARY(T, N, exp2, x);\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_metal_spirv_wgsl, sm_4_0_version)]\n") +SLANG_RAW("matrix exp2(matrix x)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"exp2\";\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" MATRIX_MAP_UNARY(T, N, M, exp2, x);\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("/// Computes base-10 exponent.\n") +SLANG_RAW("/// @param x The input value.\n") +SLANG_RAW("/// @return The base-10 exponent of `x`.\n") +SLANG_RAW("/// @category 
math\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_metal_spirv_wgsl, sm_4_0_version)]\n") +SLANG_RAW("T exp10(T x)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case metal: __intrinsic_asm \"exp10\";\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" const T ln10 = T(2.302585092994045901); // ln(10)\n") +SLANG_RAW(" return exp(x * ln10);\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_metal_spirv_wgsl, sm_4_0_version)]\n") +SLANG_RAW("vector exp10(vector x)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case metal: __intrinsic_asm \"exp10\";\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" const T ln10 = T(2.30258509299); // ln(10)\n") +SLANG_RAW(" return exp(x * ln10);\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_metal_spirv_wgsl, sm_4_0_version)]\n") +SLANG_RAW("matrix exp10(matrix x)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" MATRIX_MAP_UNARY(T, N, M, exp10, x);\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("\n") +SLANG_RAW("/// Convert 16-bit float stored in low bits of integer\n") +SLANG_RAW("/// @category conversion Conversion functions\n") +SLANG_RAW("__glsl_version(420)\n") +SLANG_RAW("__cuda_sm_version(6.0)\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_metal_spirv_wgsl, shader5_sm_5_0)]\n") +SLANG_RAW("float f16tof32(uint value)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"unpackHalf2x16($0).x\";\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"f16tof32($0)\";\n") +SLANG_RAW(" case cuda: __intrinsic_asm \"__half2float(__ushort_as_half($0))\";\n") +SLANG_RAW(" case cpp: __intrinsic_asm \"f16tof32($0)\";\n") +SLANG_RAW(" case metal: __intrinsic_asm \"as_type((ushort)($0))\";\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" {\n") +SLANG_RAW(" return spirv_asm {\n") +SLANG_RAW(" %lowBits = OpUConvert $$uint16_t $value;\n") +SLANG_RAW(" %half = OpBitcast $$half %lowBits;\n") +SLANG_RAW(" result:$$float = OpFConvert %half\n") +SLANG_RAW(" };\n") +SLANG_RAW(" }\n") +SLANG_RAW(" case wgsl: __intrinsic_asm \"unpack2x16float($0).x\";\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_metal_spirv_wgsl, shader5_sm_5_0)]\n") +SLANG_RAW("vector f16tof32(vector value)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"f16tof32\";\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" {\n") +SLANG_RAW(" return spirv_asm {\n") +SLANG_RAW(" %lowBits = OpUConvert $$vector $value;\n") +SLANG_RAW(" %half = OpBitcast $$vector %lowBits;\n") +SLANG_RAW(" result:$$vector = OpFConvert %half\n") +SLANG_RAW(" };\n") +SLANG_RAW(" }\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" VECTOR_MAP_UNARY(float, N, f16tof32, value);\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("/// Convert to 16-bit float stored in low bits of integer.\n") +SLANG_RAW("/// @category conversion\n") +SLANG_RAW("__glsl_version(420)\n") +SLANG_RAW("__cuda_sm_version(6.0)\n") +SLANG_RAW("[__readNone]\n") 
+SLANG_RAW("[require(cpp_cuda_glsl_hlsl_metal_spirv_wgsl, shader5_sm_5_0)]\n") +SLANG_RAW("uint f32tof16(float value)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"packHalf2x16(vec2($0,0.0))\";\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"f32tof16($0)\";\n") +SLANG_RAW(" case cuda: __intrinsic_asm \"__half_as_ushort(__float2half($0))\";\n") +SLANG_RAW(" case cpp: __intrinsic_asm \"f32tof16($0)\";\n") +SLANG_RAW(" case metal: __intrinsic_asm \"as_type((half)($0))\";\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" {\n") +SLANG_RAW(" return spirv_asm {\n") +SLANG_RAW(" %half = OpFConvert $$half $value;\n") +SLANG_RAW(" %lowBits = OpBitcast $$uint16_t %half;\n") +SLANG_RAW(" result:$$uint = OpUConvert %lowBits\n") +SLANG_RAW(" };\n") +SLANG_RAW(" }\n") +SLANG_RAW(" case wgsl: __intrinsic_asm \"pack2x16float(vec2f($0,0.0))\";\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_metal_spirv_wgsl, shader5_sm_5_0)]\n") +SLANG_RAW("vector f32tof16(vector value)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"f32tof16\";\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" {\n") +SLANG_RAW(" return spirv_asm {\n") +SLANG_RAW(" %half = OpFConvert $$vector $value;\n") +SLANG_RAW(" %lowBits = OpBitcast $$vector %half;\n") +SLANG_RAW(" result:$$vector = OpUConvert %lowBits\n") +SLANG_RAW(" };\n") +SLANG_RAW(" }\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" VECTOR_MAP_UNARY(uint, N, f32tof16, value);\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("// !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n") +SLANG_RAW("// The following is Slang specific and NOT part of standard HLSL\n") +SLANG_RAW("// It's not clear what happens with float16 time in HLSL -> can the float16 coerce to uint for example? 
If so that would\n") +SLANG_RAW("// give the wrong result\n") +SLANG_RAW("\n") +SLANG_RAW("__glsl_version(420)\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_metal_spirv_wgsl, shader5_sm_5_0)]\n") +SLANG_RAW("float f16tof32(float16_t value)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"unpackHalf2x16($0).x\";\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"f16tof32($0)\";\n") +SLANG_RAW(" case cuda: __intrinsic_asm \"__half2float($0)\";\n") +SLANG_RAW(" case cpp: __intrinsic_asm \"f16tof32($0)\";\n") +SLANG_RAW(" case metal: __intrinsic_asm \"float($0)\";\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" {\n") +SLANG_RAW(" return spirv_asm {\n") +SLANG_RAW(" result:$$float = OpFConvert $value\n") +SLANG_RAW(" };\n") +SLANG_RAW(" }\n") +SLANG_RAW(" case wgsl: __intrinsic_asm \"f32($0)\";\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_metal_spirv_wgsl, shader5_sm_5_0)]\n") +SLANG_RAW("vector f16tof32(vector value)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case cuda: __intrinsic_asm \"__half2float\";\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"f16tof32\";\n") +SLANG_RAW(" case metal: __intrinsic_asm \"$TR($0)\";\n") +SLANG_RAW(" case spirv: return spirv_asm {\n") +SLANG_RAW(" OpFConvert $$vector result $value\n") +SLANG_RAW(" };\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" VECTOR_MAP_UNARY(float, N, f16tof32, value);\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("/// Convert to float16_t.\n") +SLANG_RAW("/// @category conversion\n") +SLANG_RAW("__glsl_version(420)\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[require(cuda_glsl_metal_spirv_wgsl, shader5_sm_5_0)]\n") +SLANG_RAW("float16_t f32tof16_(float value)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case cuda: __intrinsic_asm \"__float2half\";\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"packHalf2x16(vec2($0,0.0))\";\n") +SLANG_RAW(" case metal: __intrinsic_asm \"half($0)\";\n") +SLANG_RAW(" case spirv: return spirv_asm {\n") +SLANG_RAW(" OpFConvert $$float16_t result $value\n") +SLANG_RAW(" };\n") +SLANG_RAW(" case wgsl: __intrinsic_asm \"f16($0)\";\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[require(cuda_glsl_metal_spirv_wgsl, shader5_sm_5_0)]\n") +SLANG_RAW("vector f32tof16_(vector value)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case cuda: __intrinsic_asm \"__float2half\";\n") +SLANG_RAW(" case metal: __intrinsic_asm \"$TR($0)\";\n") +SLANG_RAW(" case spirv: return spirv_asm {\n") +SLANG_RAW(" OpFConvert $$vector result $value\n") +SLANG_RAW(" };\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" VECTOR_MAP_UNARY(float16_t, N, f32tof16_, value);\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("// !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n") +SLANG_RAW("\n") +SLANG_RAW("/// Flip vector to face forward, if needed.\n") +SLANG_RAW("/// @param n The vector to orient.\n") +SLANG_RAW("/// @param i The incident vector.\n") +SLANG_RAW("/// @param ng The geometric normal vector.\n") +SLANG_RAW("/// @return `n` if the dot product of `ng` and `i` is less than 0, otherwise `-n`.\n") +SLANG_RAW("/// @category math\n") +SLANG_RAW("__generic\n") 
+SLANG_RAW("[__readNone]\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_metal_spirv_wgsl, sm_4_0_version)]\n") +SLANG_RAW("vector faceforward(vector n, vector i, vector ng)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"faceforward\";\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"faceforward\";\n") +SLANG_RAW(" case metal: __intrinsic_asm \"faceforward\";\n") +SLANG_RAW(" case spirv: return spirv_asm {\n") +SLANG_RAW(" OpExtInst $$vector result glsl450 FaceForward $n $i $ng\n") +SLANG_RAW(" };\n") +SLANG_RAW(" case wgsl: __intrinsic_asm \"faceForward\";\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" return dot(ng, i) < T(0.0f) ? n : -n;\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("/// Find first set bit starting at high bit and working down.\n") +SLANG_RAW("/// @param value The value to find set bits in.\n") +SLANG_RAW("/// @return The bit index number of the most significant bit,\n") +SLANG_RAW("/// or returns -1 if `value` is either 0 if `value is\n") +SLANG_RAW("/// a signed type and equal to -1.\n") +SLANG_RAW("/// @remarks If `value` is unsigned, or signed with positive value, the bit index returned is the highest 1-bit.\n") +SLANG_RAW("/// If `value` is signed with negative value, the bit index returned is the highest 0-bit.\n") +SLANG_RAW("/// For SPIR-V, this function maps to GLSL extended instruction `FindSMsb` if `value` is signed,\n") +SLANG_RAW("/// or `FindUMsb` if `value` is unsigned.\n") +SLANG_RAW("/// @category bitops Bit operation functions\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_metal_spirv_wgsl, shader5_sm_5_0)]\n") +SLANG_RAW("int firstbithigh(int value)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case cpp: __intrinsic_asm \"$P_firstbithigh($0)\";\n") +SLANG_RAW(" case cuda: __intrinsic_asm \"$P_firstbithigh($0)\";\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"findMSB\";\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"firstbithigh\";\n") +SLANG_RAW(" case metal: __intrinsic_asm \"clz\";\n") +SLANG_RAW(" case spirv: return spirv_asm {\n") +SLANG_RAW(" OpExtInst $$int result glsl450 FindSMsb $value\n") +SLANG_RAW(" };\n") +SLANG_RAW(" case wgsl: __intrinsic_asm \"firstLeadingBit\";\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_metal_spirv_wgsl, shader5_sm_5_0)]\n") +SLANG_RAW("vector firstbithigh(vector value)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"findMSB\";\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"firstbithigh\";\n") +SLANG_RAW(" case metal: __intrinsic_asm \"clz\";\n") +SLANG_RAW(" case spirv: return spirv_asm {\n") +SLANG_RAW(" OpExtInst $$vector result glsl450 FindSMsb $value\n") +SLANG_RAW(" };\n") +SLANG_RAW(" case wgsl: __intrinsic_asm \"firstLeadingBit\";\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" VECTOR_MAP_UNARY(int, N, firstbithigh, value);\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_metal_spirv_wgsl, shader5_sm_5_0)]\n") +SLANG_RAW("uint firstbithigh(uint value)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case cpp: __intrinsic_asm \"$P_firstbithigh($0)\";\n") +SLANG_RAW(" case cuda: __intrinsic_asm \"$P_firstbithigh($0)\";\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"findMSB\";\n") 
+SLANG_RAW(" case hlsl: __intrinsic_asm \"firstbithigh\";\n") +SLANG_RAW(" case metal: __intrinsic_asm \"clz\";\n") +SLANG_RAW(" case spirv: return spirv_asm {\n") +SLANG_RAW(" OpExtInst $$uint result glsl450 FindUMsb $value\n") +SLANG_RAW(" };\n") +SLANG_RAW(" case wgsl: __intrinsic_asm \"firstLeadingBit\";\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_metal_spirv_wgsl, shader5_sm_5_0)]\n") +SLANG_RAW("vector firstbithigh(vector value)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"findMSB\";\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"firstbithigh\";\n") +SLANG_RAW(" case metal: __intrinsic_asm \"clz\";\n") +SLANG_RAW(" case spirv: return spirv_asm {\n") +SLANG_RAW(" OpExtInst $$vector result glsl450 FindUMsb $value\n") +SLANG_RAW(" };\n") +SLANG_RAW(" case wgsl: __intrinsic_asm \"firstLeadingBit\";\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" VECTOR_MAP_UNARY(uint, N, firstbithigh, value);\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("/// Find first set bit starting at low bit and working up.\n") +SLANG_RAW("/// @param value The value to find set bits in.\n") +SLANG_RAW("/// @return The bit index number of the least significant set bit,\n") +SLANG_RAW("/// or all ones (-1 when interpretted as signed) if `value` is 0.\n") +SLANG_RAW("/// @remarks For SPIR-V, this function maps to GLSL extended instruction `FindILsb`.\n") +SLANG_RAW("/// @category bitops\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_metal_spirv_wgsl, shader5_sm_5_0)]\n") +SLANG_RAW("int firstbitlow(int value)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case cpp: __intrinsic_asm \"$P_firstbitlow($0)\";\n") +SLANG_RAW(" case cuda: __intrinsic_asm \"$P_firstbitlow($0)\";\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"findLSB\";\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"firstbitlow\";\n") +SLANG_RAW(" case metal: __intrinsic_asm \"ctz\";\n") +SLANG_RAW(" case spirv: return spirv_asm {\n") +SLANG_RAW(" OpExtInst $$int result glsl450 FindILsb $value\n") +SLANG_RAW(" };\n") +SLANG_RAW(" case wgsl: __intrinsic_asm \"firstTrailingBit\";\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_metal_spirv_wgsl, shader5_sm_5_0)]\n") +SLANG_RAW("vector firstbitlow(vector value)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"findLSB\";\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"firstbitlow\";\n") +SLANG_RAW(" case metal: __intrinsic_asm \"ctz\";\n") +SLANG_RAW(" case spirv: return spirv_asm {\n") +SLANG_RAW(" OpExtInst $$vector result glsl450 FindILsb $value\n") +SLANG_RAW(" };\n") +SLANG_RAW(" case wgsl: __intrinsic_asm \"firstTrailingBit\";\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" VECTOR_MAP_UNARY(int, N, firstbitlow, value);\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_metal_spirv_wgsl, shader5_sm_5_0)]\n") +SLANG_RAW("uint firstbitlow(uint value)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case cpp: __intrinsic_asm \"$P_firstbitlow($0)\";\n") +SLANG_RAW(" case cuda: __intrinsic_asm \"$P_firstbitlow($0)\";\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"findLSB\";\n") 
+SLANG_RAW(" case hlsl: __intrinsic_asm \"firstbitlow\";\n") +SLANG_RAW(" case metal: __intrinsic_asm \"ctz\";\n") +SLANG_RAW(" case spirv: return spirv_asm {\n") +SLANG_RAW(" OpExtInst $$uint result glsl450 FindILsb $value\n") +SLANG_RAW(" };\n") +SLANG_RAW(" case wgsl: __intrinsic_asm \"firstTrailingBit\";\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_metal_spirv_wgsl, shader5_sm_5_0)]\n") +SLANG_RAW("vector firstbitlow(vector value)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"findLSB\";\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"firstbitlow\";\n") +SLANG_RAW(" case metal: __intrinsic_asm \"ctz\";\n") +SLANG_RAW(" case spirv: return spirv_asm {\n") +SLANG_RAW(" OpExtInst $$vector result glsl450 FindILsb $value\n") +SLANG_RAW(" };\n") +SLANG_RAW(" case wgsl: __intrinsic_asm \"firstTrailingBit\";\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" VECTOR_MAP_UNARY(uint, N, firstbitlow, value);\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("/// Floor. Returns the largest integer value not greater than `x`.\n") +SLANG_RAW("/// @param x The input value.\n") +SLANG_RAW("/// @return The largest integer value not greater than `x`.\n") +SLANG_RAW("/// @category math\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_metal_spirv_wgsl, sm_4_0_version)]\n") +SLANG_RAW("T floor(T x)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case cpp: __intrinsic_asm \"$P_floor($0)\";\n") +SLANG_RAW(" case cuda: __intrinsic_asm \"$P_floor($0)\";\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"floor\";\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"floor\";\n") +SLANG_RAW(" case metal: __intrinsic_asm \"floor\";\n") +SLANG_RAW(" case spirv: return spirv_asm {\n") +SLANG_RAW(" OpExtInst $$T result glsl450 Floor $x\n") +SLANG_RAW(" };\n") +SLANG_RAW(" case wgsl: __intrinsic_asm \"floor\";\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_metal_spirv_wgsl, sm_4_0_version)]\n") +SLANG_RAW("vector floor(vector x)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"floor\";\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"floor\";\n") +SLANG_RAW(" case metal: __intrinsic_asm \"floor\";\n") +SLANG_RAW(" case spirv: return spirv_asm {\n") +SLANG_RAW(" OpExtInst $$vector result glsl450 Floor $x\n") +SLANG_RAW(" };\n") +SLANG_RAW(" case wgsl: __intrinsic_asm \"floor\";\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" VECTOR_MAP_UNARY(T, N, floor, x);\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_metal_spirv_wgsl, sm_4_0_version)]\n") +SLANG_RAW("matrix floor(matrix x)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"floor\";\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" MATRIX_MAP_UNARY(T, N, M, floor, x);\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("/// Fused multiply-add.\n") +SLANG_RAW("/// @param a The first value to multiply.\n") +SLANG_RAW("/// @param b The second value to multiply.\n") +SLANG_RAW("/// @param c The value to add to the product of `a` and `b`.\n") +SLANG_RAW("/// @return The 
result of `a * b + c`.\n") +SLANG_RAW("/// @category math\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_metal_spirv_wgsl, shader5_sm_5_0)]\n") +SLANG_RAW("T fma(T a, T b, T c)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case cpp: __intrinsic_asm \"$P_fma($0, $1, $2)\";\n") +SLANG_RAW(" case cuda: __intrinsic_asm \"$P_fma($0, $1, $2)\";\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"fma\";\n") +SLANG_RAW(" case hlsl:\n") +SLANG_RAW(" if (__isFloat() || __isHalf())\n") +SLANG_RAW(" return mad(a, b, c);\n") +SLANG_RAW(" else\n") +SLANG_RAW(" __intrinsic_asm \"fma($0, $1, $2)\";\n") +SLANG_RAW(" case metal: __intrinsic_asm \"fma\";\n") +SLANG_RAW(" case spirv: return spirv_asm {\n") +SLANG_RAW(" OpExtInst $$T result glsl450 Fma $a $b $c\n") +SLANG_RAW(" };\n") +SLANG_RAW(" case wgsl: __intrinsic_asm \"fma\";\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" return a*b + c;\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_metal_spirv_wgsl, shader5_sm_5_0)]\n") +SLANG_RAW("vector fma(vector a, vector b, vector c)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"fma\";\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"fma\";\n") +SLANG_RAW(" case metal: __intrinsic_asm \"fma\";\n") +SLANG_RAW(" case spirv: return spirv_asm {\n") +SLANG_RAW(" OpExtInst $$vector result glsl450 Fma $a $b $c\n") +SLANG_RAW(" };\n") +SLANG_RAW(" case wgsl: __intrinsic_asm \"fma\";\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" VECTOR_MAP_TRINARY(T, N, fma, a, b, c);\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_metal_spirv_wgsl, shader5_sm_5_0)]\n") +SLANG_RAW("matrix fma(matrix a, matrix b, matrix c)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"fma\";\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" MATRIX_MAP_TRINARY(T, N, M, fma, a, b, c);\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("/// Floating point remainder of x/y.\n") +SLANG_RAW("/// The floating-point remainder is calculated such that x = i * y + f,\n") +SLANG_RAW("/// where i is an integer, f has the same sign as x, and the absolute value\n") +SLANG_RAW("/// of f is less than the absolute value of y.\n") +SLANG_RAW("/// @param x The dividend.\n") +SLANG_RAW("/// @param y The divisor.\n") +SLANG_RAW("/// @return The floating-point remainder of x/y.\n") +SLANG_RAW("/// @category math\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[ForceInline]\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_metal_spirv_wgsl, sm_4_0_version)]\n") +SLANG_RAW("T fmod(T x, T y)\n") +SLANG_RAW("{\n") +SLANG_RAW(" // In HLSL, `fmod` returns a remainder.\n") +SLANG_RAW(" // Definition of `fmod` in HLSL is,\n") +SLANG_RAW(" // \"The floating-point remainder is calculated such that x = i * y + f,\n") +SLANG_RAW(" // where i is an integer, f has the same sign as x, and the absolute value\n") +SLANG_RAW(" // of f is less than the absolute value of y.\"\n") +SLANG_RAW(" //\n") +SLANG_RAW(" // In GLSL, `mod` is a Modulus function.\n") +SLANG_RAW(" // OpenGL document defines \"Modulus\" as \"Returns x - y * floor(x / y)\".\n") +SLANG_RAW(" // The use of \"Floor()\" makes the difference.\n") +SLANG_RAW(" //\n") +SLANG_RAW(" // In 
Metal, `fmod` is a Modulus function.\n") +SLANG_RAW(" // Metal document defines it as \"Returns x - y * trunc(x/y)\".\n") +SLANG_RAW(" // Note that the function name is the same as in HLSL, but it behaves differently.\n") +SLANG_RAW(" //\n") +SLANG_RAW(" // The tricky ones are when x or y is a negative value.\n") +SLANG_RAW(" //\n") +SLANG_RAW(" // | Remainder | Modulus\n") +SLANG_RAW(" // x y | x= i*y +f | x-y*floor(x/y)\n") +SLANG_RAW(" // ------+-----------+------------------------------\n") +SLANG_RAW(" // 4 3 | 4= 1*3 +1 | 4-3*floor( 4/3) = 4-3* 1 = 1\n") +SLANG_RAW(" // 3 3 | 3= 1*3 +0 | 3-3*floor( 3/3) = 3-3* 1 = 0\n") +SLANG_RAW(" // 2 3 | 2= 0*3 +2 | 2-3*floor( 2/3) = 2-3* 0 = 2\n") +SLANG_RAW(" // 1 3 | 1= 0*3 +1 | 1-3*floor( 1/3) = 1-3* 0 = 1\n") +SLANG_RAW(" // 0 3 | 0= 0*3 +0 | 0-3*floor( 0/3) = 0-3* 0 = 0\n") +SLANG_RAW(" // -1 3 |-1= 0*3 -1 |-1-3*floor(-1/3) =-1-3*-1 = 2\n") +SLANG_RAW(" // -2 3 |-2= 0*3 -2 |-2-3*floor(-2/3) =-2-3*-1 = 1\n") +SLANG_RAW(" // -3 3 |-3=-1*3 0 |-3-3*floor(-3/3) =-3-3*-1 = 0\n") +SLANG_RAW(" // -4 3 |-4=-1*3 -1 |-4-3*floor(-4/3) =-4-3*-2 = 2\n") +SLANG_RAW(" //\n") +SLANG_RAW(" // When y is a negative value,\n") +SLANG_RAW(" //\n") +SLANG_RAW(" // | Remainder | Modulus\n") +SLANG_RAW(" // x y | x= i*y +f | x-y*floor(x/y)\n") +SLANG_RAW(" // ------+-----------+------------------------------\n") +SLANG_RAW(" // 4 -3 | 4=-1*-3+1 | 4+3*floor( 4/-3) = 4+3*-2 =-2\n") +SLANG_RAW(" // 3 -3 | 3=-1*-3+0 | 3+3*floor( 3/-3) = 3+3*-1 = 0\n") +SLANG_RAW(" // 2 -3 | 2= 0*-3+2 | 2+3*floor( 2/-3) = 2+3*-1 =-1\n") +SLANG_RAW(" // 1 -3 | 1= 0*-3+1 | 1+3*floor( 1/-3) = 1+3*-1 =-2\n") +SLANG_RAW(" // 0 -3 | 0= 0*-3+0 | 0+3*floor( 0/-3) = 0+3* 0 = 0\n") +SLANG_RAW(" // -1 -3 |-1= 0*-3-1 |-1+3*floor(-1/-3) =-1+3* 0 =-1\n") +SLANG_RAW(" // -2 -3 |-2= 0*-3-2 |-2+3*floor(-2/-3) =-2+3* 0 =-2\n") +SLANG_RAW(" // -3 -3 |-3= 1*-3 0 |-3+3*floor(-3/-3) =-3+3* 1 = 0\n") +SLANG_RAW(" // -4 -3 |-4= 1*-3-1 |-4+3*floor(-4/-3) =-4+3* 1 =-1\n") +SLANG_RAW("\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case cpp: __intrinsic_asm \"$P_fmod($0, $1)\";\n") +SLANG_RAW(" case cuda: __intrinsic_asm \"$P_fmod($0, $1)\";\n") +SLANG_RAW(" case glsl:\n") +SLANG_RAW(" // GLSL doesn't have a function for remainder.\n") +SLANG_RAW(" __intrinsic_asm \"(($0 < 0.0) ? -mod(-$0,abs($1)) : mod($0,abs($1)))\";\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"fmod\";\n") +SLANG_RAW(" case metal:\n") +SLANG_RAW(" // Metal doesn't have a function for remainder.\n") +SLANG_RAW(" __intrinsic_asm \"(($0 < 0.0) ? 
-fmod(-$0,abs($1)) : fmod($0,abs($1)))\";\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" // OpFRem return \"The floating-point remainder whose sign\n") +SLANG_RAW(" // matches the sign of Operand 1\", where Operand 1 is \"x\".\n") +SLANG_RAW(" return spirv_asm\n") +SLANG_RAW(" {\n") +SLANG_RAW(" result:$$T = OpFRem $x $y\n") +SLANG_RAW(" };\n") +SLANG_RAW(" case wgsl:\n") +SLANG_RAW(" __intrinsic_asm \"(($0) % ($1))\";\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[ForceInline]\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_metal_spirv_wgsl, sm_4_0_version)]\n") +SLANG_RAW("vector fmod(vector x, vector y)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"fmod\";\n") +SLANG_RAW(" case spirv: return spirv_asm {\n") +SLANG_RAW(" result:$$vector = OpFRem $x $y\n") +SLANG_RAW(" };\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" VECTOR_MAP_BINARY(T, N, fmod, x, y);\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[ForceInline]\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_metal_spirv_wgsl, sm_4_0_version)]\n") +SLANG_RAW("matrix fmod(matrix x, matrix y)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"fmod\";\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" MATRIX_MAP_BINARY(T, N, M, fmod, x, y);\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("/// Extract the fractional part of a floating-point number.\n") +SLANG_RAW("/// @param x The input value.\n") +SLANG_RAW("/// @return The fractional part of `x`.\n") +SLANG_RAW("/// @category math\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_metal_spirv_wgsl, sm_4_0_version)]\n") +SLANG_RAW("T frac(T x)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case cpp: __intrinsic_asm \"$P_frac($0)\";\n") +SLANG_RAW(" case cuda: __intrinsic_asm \"$P_frac($0)\";\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"fract\";\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"frac\";\n") +SLANG_RAW(" case metal: __intrinsic_asm \"fract\";\n") +SLANG_RAW(" case spirv: return spirv_asm {\n") +SLANG_RAW(" OpExtInst $$T result glsl450 Fract $x\n") +SLANG_RAW(" };\n") +SLANG_RAW(" case wgsl: __intrinsic_asm \"fract\";\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_metal_spirv_wgsl, sm_4_0_version)]\n") +SLANG_RAW("vector frac(vector x)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"fract\";\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"frac\";\n") +SLANG_RAW(" case metal: __intrinsic_asm \"fract\";\n") +SLANG_RAW(" case spirv: return spirv_asm {\n") +SLANG_RAW(" OpExtInst $$vector result glsl450 Fract $x\n") +SLANG_RAW(" };\n") +SLANG_RAW(" case wgsl: __intrinsic_asm \"fract\";\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" VECTOR_MAP_UNARY(T, N, frac, x);\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("matrix frac(matrix x)\n") +SLANG_RAW("{\n") +SLANG_RAW(" MATRIX_MAP_UNARY(T, N, M, frac, x);\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("/// Extract the fractional part of a floating-point number.\n") +SLANG_RAW("/// @param x The input value.\n") 
+SLANG_RAW("/// @return The fractional part of `x`.\n") +SLANG_RAW("/// @category math\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[ForceInline]\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_metal_spirv_wgsl, sm_4_0_version)]\n") +SLANG_RAW("T fract(T x)\n") +SLANG_RAW("{\n") +SLANG_RAW(" return frac(x);\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[ForceInline]\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_metal_spirv_wgsl, sm_4_0_version)]\n") +SLANG_RAW("vector fract(vector x)\n") +SLANG_RAW("{\n") +SLANG_RAW(" return frac(x);\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("/// Split float into mantissa and exponent.\n") +SLANG_RAW("/// @param x The input value.\n") +SLANG_RAW("/// @param[out] exp The output exponent.\n") +SLANG_RAW("/// @return The mantissa of `x`.\n") +SLANG_RAW("/// @category math\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[ForceInline]\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_metal_spirv_wgsl, sm_4_0_version)]\n") +SLANG_RAW("T frexp(T x, out int exp)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case cpp: __intrinsic_asm \"$P_frexp($0, $1)\";\n") +SLANG_RAW(" case cuda: __intrinsic_asm \"$P_frexp($0, $1)\";\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"frexp\";\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"frexp\";\n") +SLANG_RAW(" case metal: __intrinsic_asm \"frexp($0, *($1))\";\n") +SLANG_RAW(" case spirv: return spirv_asm {\n") +SLANG_RAW(" result:$$T = OpExtInst glsl450 Frexp $x &exp\n") +SLANG_RAW(" };\n") +SLANG_RAW(" case wgsl:\n") +SLANG_RAW(" T fract;\n") +SLANG_RAW(" __wgsl_frexp(x, fract, exp);\n") +SLANG_RAW(" return fract;\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[ForceInline]\n") +SLANG_RAW("[require(wgsl)]\n") +SLANG_RAW("void __wgsl_frexp(T x, out T fract, out int exp)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __intrinsic_asm \"{ var s = frexp($0); ($1) = s.fract; ($2) = s.exp; }\";\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[ForceInline]\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_metal_spirv_wgsl, sm_4_0_version)]\n") +SLANG_RAW("vector frexp(vector x, out vector exp)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"frexp\";\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"frexp\";\n") +SLANG_RAW(" case metal: __intrinsic_asm \"frexp($0, *($1))\";\n") +SLANG_RAW(" case spirv: return spirv_asm {\n") +SLANG_RAW(" result:$$vector = OpExtInst glsl450 Frexp $x &exp\n") +SLANG_RAW(" };\n") +SLANG_RAW(" case wgsl:\n") +SLANG_RAW(" vector fract;\n") +SLANG_RAW(" __wgsl_frexp(x, fract, exp);\n") +SLANG_RAW(" return fract;\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" VECTOR_MAP_BINARY(T, N, frexp, x, exp);\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[ForceInline]\n") +SLANG_RAW("[require(wgsl)]\n") +SLANG_RAW("void __wgsl_frexp(vector x, out vector fract, out vector exp)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __intrinsic_asm \"{ var s = frexp($0); ($1) = s.fract; ($2) = s.exp; }\";\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[ForceInline]\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_metal_spirv_wgsl, sm_4_0_version)]\n") +SLANG_RAW("matrix frexp(matrix x, out matrix 
exp)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"frexp\";\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" MATRIX_MAP_BINARY(T, N, M, frexp, x, exp);\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("/// Texture filter width.\n") +SLANG_RAW("/// Calculates the sum abs(ddx(`p`)) + abs(ddy(`p`)).\n") +SLANG_RAW("/// @param p The value to sum x and y partial derivative magnitudes for.\n") +SLANG_RAW("/// @return The sum of abs(ddx(`p`)) and abs(ddy(`p`)).\n") +SLANG_RAW("/// @remarks For SPIR-V, this function maps to `OpFwidth`.\n") +SLANG_RAW("/// @category derivative\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[require(glsl_hlsl_metal_spirv_wgsl, fragmentprocessing)]\n") +SLANG_RAW("T fwidth(T p)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __requireComputeDerivative();\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl:\n") +SLANG_RAW(" __intrinsic_asm \"fwidth($0)\";\n") +SLANG_RAW(" case glsl:\n") +SLANG_RAW(" __intrinsic_asm \"fwidth($0)\";\n") +SLANG_RAW(" case metal:\n") +SLANG_RAW(" __intrinsic_asm \"fwidth($0)\";\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" return spirv_asm\n") +SLANG_RAW(" {\n") +SLANG_RAW(" OpFwidth $$T result $p;\n") +SLANG_RAW(" };\n") +SLANG_RAW(" case wgsl:\n") +SLANG_RAW(" __intrinsic_asm \"fwidth($0)\";\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[require(glsl_hlsl_spirv_wgsl, fragmentprocessing)]\n") +SLANG_RAW("vector fwidth(vector x)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __requireComputeDerivative();\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl:\n") +SLANG_RAW(" __intrinsic_asm \"fwidth($0)\";\n") +SLANG_RAW(" case glsl:\n") +SLANG_RAW(" __intrinsic_asm \"fwidth($0)\";\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" return spirv_asm\n") +SLANG_RAW(" {\n") +SLANG_RAW(" OpFwidth $$vector result $x;\n") +SLANG_RAW(" };\n") +SLANG_RAW(" case wgsl:\n") +SLANG_RAW(" __intrinsic_asm \"fwidth($0)\";\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[require(glsl_hlsl_spirv, fragmentprocessing)]\n") +SLANG_RAW("matrix fwidth(matrix x)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl:\n") +SLANG_RAW(" __intrinsic_asm \"fwidth($0)\";\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" MATRIX_MAP_UNARY(T, N, M, fwidth, x);\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("__intrinsic_op(") +SLANG_SPLICE(kIROp_GetPerVertexInputArray +) +SLANG_RAW(")\n") +SLANG_RAW("Array __GetPerVertexInputArray(T attribute);\n") +SLANG_RAW("\n") +SLANG_RAW("/// Get the value of a vertex attribute at a specific vertex.\n") +SLANG_RAW("///\n") +SLANG_RAW("/// The `GetAttributeAtVertex()` function can be used in a fragment shader\n") +SLANG_RAW("/// to get the value of the given `attribute` at the vertex of the primitive\n") +SLANG_RAW("/// that corresponds to the given `vertexIndex`.\n") +SLANG_RAW("///\n") +SLANG_RAW("/// Note that the `attribute` must have been a declared varying input to\n") +SLANG_RAW("/// the fragment shader with the `nointerpolation` modifier.\n") +SLANG_RAW("///\n") +SLANG_RAW("/// This function can be applied to scalars, vectors, and matrices of\n") +SLANG_RAW("/// built-in scalar types.\n") +SLANG_RAW("///\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__readNone]\n") 
+SLANG_RAW("__glsl_version(450)\n") +SLANG_RAW("__glsl_extension(GL_EXT_fragment_shader_barycentric)\n") +SLANG_RAW("[require(glsl_hlsl_spirv, getattributeatvertex)]\n") +SLANG_RAW("[KnownBuiltin(\"GetAttributeAtVertex\")]\n") +SLANG_RAW("[__unsafeForceInlineEarly]\n") +SLANG_RAW("T GetAttributeAtVertex(T attribute, uint vertexIndex)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl:\n") +SLANG_RAW(" __intrinsic_asm \"GetAttributeAtVertex\";\n") +SLANG_RAW(" case glsl:\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" return __GetPerVertexInputArray(attribute)[vertexIndex];\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("/// Get the value of a vertex attribute at a specific vertex.\n") +SLANG_RAW("///\n") +SLANG_RAW("/// The `GetAttributeAtVertex()` function can be used in a fragment shader\n") +SLANG_RAW("/// to get the value of the given `attribute` at the vertex of the primitive\n") +SLANG_RAW("/// that corresponds to the given `vertexIndex`.\n") +SLANG_RAW("///\n") +SLANG_RAW("/// Note that the `attribute` must have been a declared varying input to\n") +SLANG_RAW("/// the fragment shader with the `nointerpolation` modifier.\n") +SLANG_RAW("///\n") +SLANG_RAW("/// This function can be applied to scalars, vectors, and matrices of\n") +SLANG_RAW("/// built-in scalar types.\n") +SLANG_RAW("///\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("__glsl_version(450)\n") +SLANG_RAW("__glsl_extension(GL_EXT_fragment_shader_barycentric)\n") +SLANG_RAW("[require(glsl_hlsl_spirv, getattributeatvertex)]\n") +SLANG_RAW("vector GetAttributeAtVertex(vector attribute, uint vertexIndex)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl:\n") +SLANG_RAW(" __intrinsic_asm \"GetAttributeAtVertex\";\n") +SLANG_RAW(" case glsl: \n") +SLANG_RAW(" __intrinsic_asm \"$0[$1]\";\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" return spirv_asm {\n") +SLANG_RAW(" %_ptr_Input_vectorT = OpTypePointer Input $$vector;\n") +SLANG_RAW(" %addr = OpAccessChain %_ptr_Input_vectorT $attribute $vertexIndex;\n") +SLANG_RAW(" result:$$vector = OpLoad %addr;\n") +SLANG_RAW(" };\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("/// Get the value of a vertex attribute at a specific vertex.\n") +SLANG_RAW("///\n") +SLANG_RAW("/// The `GetAttributeAtVertex()` function can be used in a fragment shader\n") +SLANG_RAW("/// to get the value of the given `attribute` at the vertex of the primitive\n") +SLANG_RAW("/// that corresponds to the given `vertexIndex`.\n") +SLANG_RAW("///\n") +SLANG_RAW("/// Note that the `attribute` must have been a declared varying input to\n") +SLANG_RAW("/// the fragment shader with the `nointerpolation` modifier.\n") +SLANG_RAW("///\n") +SLANG_RAW("/// This function can be applied to scalars, vectors, and matrices of\n") +SLANG_RAW("/// built-in scalar types.\n") +SLANG_RAW("///\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("__glsl_version(450)\n") +SLANG_RAW("__glsl_extension(GL_EXT_fragment_shader_barycentric)\n") +SLANG_RAW("[require(glsl_hlsl_spirv, getattributeatvertex)]\n") +SLANG_RAW("matrix GetAttributeAtVertex(matrix attribute, uint vertexIndex)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl:\n") +SLANG_RAW(" __intrinsic_asm \"GetAttributeAtVertex\";\n") +SLANG_RAW(" case glsl: \n") +SLANG_RAW(" __intrinsic_asm \"$0[$1]\";\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" return 
spirv_asm {\n") +SLANG_RAW(" %_ptr_Input_matrixT = OpTypePointer Input $$matrix;\n") +SLANG_RAW(" %addr = OpAccessChain %_ptr_Input_matrixT $attribute $vertexIndex;\n") +SLANG_RAW(" result:$$matrix = OpLoad %addr;\n") +SLANG_RAW(" };\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("// Get number of samples in render target\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[require(hlsl, sm_4_0)]\n") +SLANG_RAW("[require(metal)]\n") +SLANG_RAW("uint GetRenderTargetSampleCount()\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"GetRenderTargetSampleCount\";\n") +SLANG_RAW(" case metal: __intrinsic_asm \"get_num_samples\";\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("// Get position of given sample\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[require(hlsl, sm_4_0)]\n") +SLANG_RAW("[require(metal)]\n") +SLANG_RAW("float2 GetRenderTargetSamplePosition(int Index)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"GetRenderTargetSamplePosition\";\n") +SLANG_RAW(" case metal: __intrinsic_asm \"get_sample_position\";\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("/// Group memory barrier. Ensures that all memory accesses in the group are visible to all threads in the group.\n") +SLANG_RAW("/// @category barrier\n") +SLANG_RAW("__glsl_extension(GL_KHR_memory_scope_semantics)\n") +SLANG_RAW("[require(cuda_glsl_hlsl_metal_spirv_wgsl, memorybarrier)]\n") +SLANG_RAW("void GroupMemoryBarrier()\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"memoryBarrier(gl_ScopeWorkgroup, gl_StorageSemanticsShared, gl_SemanticsAcquireRelease)\";\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"GroupMemoryBarrier\";\n") +SLANG_RAW(" case cuda: __intrinsic_asm \"__threadfence_block\";\n") +SLANG_RAW(" case metal: __intrinsic_asm \"threadgroup_barrier(mem_flags::mem_threadgroup)\";\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" spirv_asm\n") +SLANG_RAW(" {\n") +SLANG_RAW(" OpMemoryBarrier Workgroup AcquireRelease|WorkgroupMemory\n") +SLANG_RAW(" };\n") +SLANG_RAW(" case wgsl: __intrinsic_asm \"workgroupBarrier\";\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("[require(cuda_glsl_hlsl_metal_spirv, memorybarrier)]\n") +SLANG_RAW("void __subgroupBarrier()\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"subgroupBarrier\";\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"GroupMemoryBarrierWithGroupSync\";\n") +SLANG_RAW(" case cuda: __intrinsic_asm \"__syncthreads()\";\n") +SLANG_RAW(" case metal: __intrinsic_asm \"threadgroup_barrier(mem_flags::mem_threadgroup)\";\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" spirv_asm\n") +SLANG_RAW(" {\n") +SLANG_RAW(" OpControlBarrier Subgroup Subgroup AcquireRelease|WorkgroupMemory|ImageMemory|UniformMemory\n") +SLANG_RAW(" };\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("/// Group memory barrier. 
Ensures that all memory accesses in the group are visible to all threads in the group.\n") +SLANG_RAW("/// @category barrier\n") +SLANG_RAW("__glsl_extension(GL_KHR_memory_scope_semantics)\n") +SLANG_RAW("[require(cuda_glsl_hlsl_metal_spirv_wgsl, memorybarrier)]\n") +SLANG_RAW("void GroupMemoryBarrierWithGroupSync()\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"controlBarrier(gl_ScopeWorkgroup, gl_ScopeWorkgroup, gl_StorageSemanticsShared, gl_SemanticsAcquireRelease)\";\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"GroupMemoryBarrierWithGroupSync\";\n") +SLANG_RAW(" case cuda: __intrinsic_asm \"__syncthreads()\";\n") +SLANG_RAW(" case metal: __intrinsic_asm \"threadgroup_barrier(mem_flags::mem_threadgroup)\";\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" spirv_asm\n") +SLANG_RAW(" {\n") +SLANG_RAW(" OpControlBarrier Workgroup Workgroup AcquireRelease|WorkgroupMemory\n") +SLANG_RAW(" };\n") +SLANG_RAW(" case wgsl: __intrinsic_asm \"workgroupBarrier\";\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("// Atomics\n") +SLANG_RAW("\n") +SLANG_RAW("// Accepts an ImageSubscript\n") +SLANG_RAW("// Gets Texture used with ImageSubscript.\n") +SLANG_RAW("__generic\n") +SLANG_RAW("__intrinsic_op(") +SLANG_SPLICE(kIROp_ExtractTextureFromTextureAccess +) +SLANG_RAW(")\n") +SLANG_RAW("TextureAccess* __extractTextureFromTextureAccess(__ref TextureAccess x);\n") +SLANG_RAW("\n") +SLANG_RAW("// Accepts an ImageSubscript\n") +SLANG_RAW("// Gets Coord from ImageSubscript. Swizzles out ArrayCoord if applicable\n") +SLANG_RAW("__generic\n") +SLANG_RAW("__intrinsic_op(") +SLANG_SPLICE(kIROp_ExtractCoordFromTextureAccess +) +SLANG_RAW(")\n") +SLANG_RAW("uint __extractCoordFromTextureAccess(__ref TextureAccess x);\n") +SLANG_RAW("\n") +SLANG_RAW("// Accepts an ImageSubscript\n") +SLANG_RAW("// Gets ArrayCoord from ImageSubscript\n") +SLANG_RAW("__generic\n") +SLANG_RAW("__intrinsic_op(") +SLANG_SPLICE(kIROp_ExtractArrayCoordFromTextureAccess +) +SLANG_RAW(")\n") +SLANG_RAW("uint __extractArrayCoordFromTextureAccess(__ref TextureAccess x);\n") +SLANG_RAW("\n") + +// Generates code for: +// InterlockedAdd, InterlockedAnd, InterlockedOr, InterlockedXor, +// InterlockedMax, InterlockedMin, InterlockedExchange +struct SlangAtomicOperationInfo +{ + const char* slangCallSuffix; + const char* internalCallSuffix; + const char* interface; +}; + +SlangAtomicOperationInfo slangAtomicOperationInfo[7] = { + { "Add", "add", "IArithmeticAtomicable" }, + { "And", "and", "IArithmeticAtomicable" }, + { "Or", "or", "IArithmeticAtomicable" }, + { "Xor", "xor", "IArithmeticAtomicable" }, + { "Max", "max", "IArithmeticAtomicable" }, + { "Min", "min", "IArithmeticAtomicable" }, + { "Exchange", "exchange", "IAtomicable" }, +}; + +for (SlangAtomicOperationInfo atomicOp : slangAtomicOperationInfo) +{ +SLANG_RAW("#line 9308 \"hlsl.meta.slang\"") +SLANG_RAW("\n") +SLANG_RAW("\n") +SLANG_RAW("/// Perform an atomic ") +SLANG_SPLICE(atomicOp.internalCallSuffix +) +SLANG_RAW(" operation on `dest`.\n") +SLANG_RAW("/// @param T The type of the value to perform the atomic operation on.\n") +SLANG_RAW("/// @param dest The value to perform the atomic operation on.\n") +SLANG_RAW("/// @param value The operand to the atomic operation.\n") +SLANG_RAW("/// @param original_value The value of `dest` before the operation.\n") +SLANG_RAW("/// @remarks When targeting HLSL, it is invalid to call this function with `T` being a floating-point type, since\n") +SLANG_RAW("/// 
HLSL does not allow atomic operations on floating point types. For `InterlockedAdd`, consider using\n") +SLANG_RAW("/// `RWByteAddressBuffer.InterlockedAddF32` or `RWByteAddressBuffer.InterlockedAddF16` instead when NVAPI is available.\n") +SLANG_RAW("/// On SPIR-V (Vulkan), all integer and floating point types are supported.\n") +SLANG_RAW("/// On Metal and WGSL, all floating-point types are not supported.\n") +SLANG_RAW("/// @category atomic Atomic functions\n") +SLANG_RAW("[ForceInline]\n") +SLANG_RAW("__glsl_version(430)\n") +SLANG_RAW("[require(cuda_glsl_hlsl_metal_spirv, atomic_glsl_hlsl_cuda_metal)]\n") +SLANG_RAW("void Interlocked") +SLANG_SPLICE(atomicOp.slangCallSuffix +) +SLANG_RAW("(__ref T dest, T value)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __atomic_") +SLANG_SPLICE(atomicOp.internalCallSuffix +) +SLANG_RAW("(dest, value);\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("[ForceInline]\n") +SLANG_RAW("__glsl_version(430)\n") +SLANG_RAW("[require(cuda_glsl_hlsl_metal_spirv, atomic_glsl_hlsl_cuda_metal)]\n") +SLANG_RAW("void Interlocked") +SLANG_SPLICE(atomicOp.slangCallSuffix +) +SLANG_RAW("(__ref T dest, T value, out T original_value)\n") +SLANG_RAW("{\n") +SLANG_RAW(" original_value = __atomic_") +SLANG_SPLICE(atomicOp.internalCallSuffix +) +SLANG_RAW("(dest, value);\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("[ForceInline]\n") +SLANG_RAW("__glsl_version(430)\n") +SLANG_RAW("[require(cuda_glsl_hlsl_metal_spirv, atomic_glsl_hlsl_cuda_metal)]\n") +SLANG_RAW("void Interlocked") +SLANG_SPLICE(atomicOp.slangCallSuffix +) +SLANG_RAW("(__ref uint dest, int value)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __atomic_") +SLANG_SPLICE(atomicOp.internalCallSuffix +) +SLANG_RAW("(dest, (uint)value);\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") + +} // for (SlangAtomicOperationInfo atomicOp : slangAtomicOperationInfo) +SLANG_RAW("#line 9347 \"hlsl.meta.slang\"") +SLANG_RAW("\n") +SLANG_RAW("\n") +SLANG_RAW("/// Perform an atomic compare and exchange operation on `dest`.\n") +SLANG_RAW("/// @param T The type of the value to perform the atomic operation on.\n") +SLANG_RAW("/// @param dest The value to perform the atomic operation on.\n") +SLANG_RAW("/// @param compare_value The value to compare `dest` with.\n") +SLANG_RAW("/// @param value The value to store into `dest` if the compare result is equal.\n") +SLANG_RAW("/// @param original_value The value of `dest` before the operation.\n") +SLANG_RAW("/// @remarks When targeting HLSL, a call to this function with `T` being `float` will translate to a call to\n") +SLANG_RAW("/// `InterlockedCompareExchangeFloatBitwise`, which means the comparison is done as a bitwise comparison.\n") +SLANG_RAW("///\n") +SLANG_RAW("/// On SPIR-V (Vulkan), this function maps to `OpAtomicCompareExchange`.\n") +SLANG_RAW("///\n") +SLANG_RAW("/// On Metal and WGSL, all floating-point types are not supported.\n") +SLANG_RAW("///\n") +SLANG_RAW("/// On CUDA, this function maps to `atomicCAS`.\n") +SLANG_RAW("/// @category atomic\n") +SLANG_RAW("[ForceInline]\n") +SLANG_RAW("[require(cuda_glsl_hlsl_metal_spirv, atomic_glsl_hlsl_cuda_metal)]\n") +SLANG_RAW("void InterlockedCompareExchange(__ref T dest, T compare_value, T value, out T original_value)\n") +SLANG_RAW("{\n") +SLANG_RAW(" original_value = __atomic_compare_exchange(dest, compare_value, value);\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("/// Perform an atomic compare and exchange operation on `dest`.\n") +SLANG_RAW("/// @param T The type of the value to perform the atomic operation on.\n") +SLANG_RAW("/// 
@param dest The value to perform the atomic operation on.\n") +SLANG_RAW("/// @param compare_value The value to compare `dest` with.\n") +SLANG_RAW("/// @param value The value to store into `dest` if the compare result is equal.\n") +SLANG_RAW("/// @param original_value The value of `dest` before the operation.\n") +SLANG_RAW("/// @remarks When targeting HLSL, a call to this function will translate to a call to\n") +SLANG_RAW("/// `InterlockedCompareExchangeFloatBitwise`, which means the comparison is done as a bitwise comparison.\n") +SLANG_RAW("///\n") +SLANG_RAW("/// On SPIR-V (Vulkan), this function maps to `OpAtomicCompareExchange`.\n") +SLANG_RAW("///\n") +SLANG_RAW("/// On Metal and WGSL, this function is not available.\n") +SLANG_RAW("///\n") +SLANG_RAW("/// On CUDA, this function maps to `atomicCAS`.\n") +SLANG_RAW("/// @category atomic\n") +SLANG_RAW("[ForceInline]\n") +SLANG_RAW("void InterlockedCompareExchangeFloatBitwise(__ref float dest, float compare_value, float value)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __atomic_compare_exchange(dest, compare_value, value);\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("[ForceInline]\n") +SLANG_RAW("void InterlockedCompareExchangeFloatBitwise(__ref float dest, float compare_value, float value, out float original_value)\n") +SLANG_RAW("{\n") +SLANG_RAW(" original_value = __atomic_compare_exchange(dest, compare_value, value);\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("/// Perform an atomic compare and store operation on `dest`.\n") +SLANG_RAW("/// @param T The type of the value to perform the atomic operation on.\n") +SLANG_RAW("/// @param dest The value to perform the atomic operation on.\n") +SLANG_RAW("/// @param compare_value The value to compare `dest` with.\n") +SLANG_RAW("/// @param value The value to store into `dest` if the compare result is equal.\n") +SLANG_RAW("/// @remarks When targeting HLSL, a call to this function with `T` being `float` will translate to a call to\n") +SLANG_RAW("/// `InterlockedCompareStoreFloatBitwise`, which means the comparison is done as a bitwise comparison.\n") +SLANG_RAW("///\n") +SLANG_RAW("/// On SPIR-V (Vulkan), this function maps to `OpAtomicCompareExchange`.\n") +SLANG_RAW("///\n") +SLANG_RAW("/// On Metal and WGSL, this function is not available.\n") +SLANG_RAW("///\n") +SLANG_RAW("/// On CUDA, this function maps to `atomicCAS`.\n") +SLANG_RAW("/// @category atomic\n") +SLANG_RAW("[ForceInline]\n") +SLANG_RAW("__glsl_version(430)\n") +SLANG_RAW("[require(cuda_glsl_hlsl_metal_spirv, atomic_glsl_hlsl_cuda_metal)]\n") +SLANG_RAW("void InterlockedCompareStore(__ref T dest, T compare_value, T value)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"InterlockedCompareStore\";\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" __atomic_compare_exchange(dest, compare_value, value);\n") +SLANG_RAW(" return;\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("/// Perform an atomic compare and store operation on `dest`.\n") +SLANG_RAW("/// @param T The type of the value to perform the atomic operation on.\n") +SLANG_RAW("/// @param dest The value to perform the atomic operation on.\n") +SLANG_RAW("/// @param compare_value The value to compare `dest` with.\n") +SLANG_RAW("/// @param value The value to store into `dest` if the compare result is equal.\n") +SLANG_RAW("/// @remarks When targeting HLSL, a call to this function will translate to a call to\n") +SLANG_RAW("/// `InterlockedCompareStoreFloatBitwise`, which means 
the comparison is done as a bitwise comparison.\n") +SLANG_RAW("///\n") +SLANG_RAW("/// On SPIR-V (Vulkan), this function maps to `OpAtomicCompareExchange`.\n") +SLANG_RAW("///\n") +SLANG_RAW("/// On Metal and WGSL, this function is not available.\n") +SLANG_RAW("///\n") +SLANG_RAW("/// On CUDA, this function maps to `atomicCAS`.\n") +SLANG_RAW("/// @category atomic\n") +SLANG_RAW("[ForceInline]\n") +SLANG_RAW("void InterlockedCompareStoreFloatBitwise(__ref T dest, T compare_value, T value)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"InterlockedCompareStoreFloatBitwise\";\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" __atomic_compare_exchange(dest, compare_value, value);\n") +SLANG_RAW(" return;\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("\n") +SLANG_RAW("/// Test if a floating-point value is finite.\n") +SLANG_RAW("/// @param x The input value.\n") +SLANG_RAW("/// @return `true` if `x` is finite, `false` otherwise.\n") +SLANG_RAW("/// @category math\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_metal_spirv_wgsl, sm_4_0_version)]\n") +SLANG_RAW("bool isfinite(T x)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"isfinite\";\n") +SLANG_RAW(" case cuda:\n") +SLANG_RAW(" case cpp:\n") +SLANG_RAW(" __intrinsic_asm \"$P_isfinite($0)\";\n") +SLANG_RAW(" case metal:\n") +SLANG_RAW(" __intrinsic_asm \"isfinite\";\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" return !(isinf(x) || isnan(x));\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_metal_spirv_wgsl, sm_4_0_version)]\n") +SLANG_RAW("vector isfinite(vector x)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"isfinite\";\n") +SLANG_RAW(" case glsl:\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" return !(isinf(x) || isnan(x));\n") +SLANG_RAW(" case metal:\n") +SLANG_RAW(" __intrinsic_asm \"isfinite\";\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" VECTOR_MAP_UNARY(bool, N, isfinite, x);\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_metal_spirv_wgsl, sm_4_0_version)]\n") +SLANG_RAW("matrix isfinite(matrix x)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"isfinite\";\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" MATRIX_MAP_UNARY(bool, N, M, isfinite, x);\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("/// Test if a floating-point value is infinite.\n") +SLANG_RAW("/// @param x The input value.\n") +SLANG_RAW("/// @return `true` if `x` is infinite, `false` otherwise.\n") +SLANG_RAW("/// @category math\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_metal_spirv_wgsl, sm_4_0_version)]\n") +SLANG_RAW("bool isinf(T x)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl:\n") +SLANG_RAW(" case glsl:\n") +SLANG_RAW(" case metal:\n") +SLANG_RAW(" __intrinsic_asm \"isinf\";\n") +SLANG_RAW(" case cuda:\n") +SLANG_RAW(" case cpp:\n") +SLANG_RAW(" __intrinsic_asm \"$P_isinf($0)\";\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" return spirv_asm { result:$$bool = OpIsInf $x};\n") +SLANG_RAW(" case wgsl:\n") 
+SLANG_RAW(" __intrinsic_asm \"($0 > 0x1.fffffep+127f) || ($0 < -0x1.fffffep+127f)\";\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_metal_spirv_wgsl, sm_4_0_version)]\n") +SLANG_RAW("vector isinf(vector x)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl:\n") +SLANG_RAW(" case glsl:\n") +SLANG_RAW(" case metal:\n") +SLANG_RAW(" __intrinsic_asm \"isinf\";\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" return spirv_asm { result:$$vector = OpIsInf $x};\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" VECTOR_MAP_UNARY(bool, N, isinf, x);\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_metal_spirv_wgsl, sm_4_0_version)]\n") +SLANG_RAW("matrix isinf(matrix x)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"isinf\";\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" MATRIX_MAP_UNARY(bool, N, M, isinf, x);\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("/// Test if a floating-point value is not-a-number.\n") +SLANG_RAW("/// @param x The input value.\n") +SLANG_RAW("/// @return `true` if `x` is not-a-number, `false` otherwise.\n") +SLANG_RAW("/// @category math\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_metal_spirv_wgsl, sm_4_0_version)]\n") +SLANG_RAW("bool isnan(T x)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl:\n") +SLANG_RAW(" case glsl:\n") +SLANG_RAW(" case metal:\n") +SLANG_RAW(" __intrinsic_asm \"isnan\";\n") +SLANG_RAW(" case cuda:\n") +SLANG_RAW(" case cpp:\n") +SLANG_RAW(" __intrinsic_asm \"$P_isnan($0)\";\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" return spirv_asm { result:$$bool = OpIsNan $x};\n") +SLANG_RAW(" case wgsl:\n") +SLANG_RAW(" __intrinsic_asm \"$0 != $0\";\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_metal_spirv_wgsl, sm_4_0_version)]\n") +SLANG_RAW("vector isnan(vector x)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl:\n") +SLANG_RAW(" case glsl:\n") +SLANG_RAW(" case metal:\n") +SLANG_RAW(" __intrinsic_asm \"isnan\";\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" return spirv_asm { result:$$vector = OpIsNan $x};\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" VECTOR_MAP_UNARY(bool, N, isnan, x);\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_metal_spirv_wgsl, sm_4_0_version)]\n") +SLANG_RAW("matrix isnan(matrix x)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"isnan\";\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" MATRIX_MAP_UNARY(bool, N, M, isnan, x);\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("/// Construct float from mantissa and exponent.\n") +SLANG_RAW("/// @param x The significand.\n") +SLANG_RAW("/// @param exp The exponent.\n") +SLANG_RAW("/// @return The floating-point number constructed from `x` and `exp`.\n") +SLANG_RAW("/// @category math\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_metal_spirv_wgsl, sm_4_0_version)]\n") 
+SLANG_RAW("T ldexp(T x, T exp)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"ldexp\";\n") +SLANG_RAW(" // In WGSL spec, ldexp can only take integer as the exponent.\n") +SLANG_RAW(" case wgsl: __intrinsic_asm \"($0 * exp2($1))\";\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" return x * exp2(exp);\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_metal_spirv_wgsl, sm_4_0_version)]\n") +SLANG_RAW("vector ldexp(vector x, vector exp)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"ldexp\";\n") +SLANG_RAW(" // In WGSL spec, ldexp can only take integer as the exponent.\n") +SLANG_RAW(" case wgsl: __intrinsic_asm \"($0 * exp2($1))\";\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" return x * exp2(exp);\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_metal_spirv_wgsl, sm_4_0_version)]\n") +SLANG_RAW("matrix ldexp(matrix x, matrix exp)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"ldexp\";\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" MATRIX_MAP_BINARY(T, N, M, ldexp, x, exp);\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_metal_spirv_wgsl, sm_4_0_version)]\n") +SLANG_RAW("T ldexp(T x, E exp)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"ldexp\";\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"ldexp\";\n") +SLANG_RAW(" case metal: __intrinsic_asm \"ldexp\";\n") +SLANG_RAW(" case spirv: return spirv_asm {\n") +SLANG_RAW(" OpExtInst $$T result glsl450 Ldexp $x $exp\n") +SLANG_RAW(" };\n") +SLANG_RAW(" case wgsl: __intrinsic_asm \"ldexp\";\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" return ldexp(x, __realCast(exp));\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_metal_spirv_wgsl, sm_4_0_version)]\n") +SLANG_RAW("vector ldexp(vector x, vector exp)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"ldexp\";\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"ldexp\";\n") +SLANG_RAW(" case metal: __intrinsic_asm \"ldexp\";\n") +SLANG_RAW(" case spirv: return spirv_asm {\n") +SLANG_RAW(" OpExtInst $$vector result glsl450 Ldexp $x $exp\n") +SLANG_RAW(" };\n") +SLANG_RAW(" case wgsl: __intrinsic_asm \"ldexp\";\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" vector temp;\n") +SLANG_RAW(" [ForceUnroll]\n") +SLANG_RAW(" for (int i = 0; i < N; ++i)\n") +SLANG_RAW(" temp[i] = __realCast(exp[i]);\n") +SLANG_RAW(" return ldexp(x, temp);\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("\n") +SLANG_RAW("/// Compute the length of a vector.\n") +SLANG_RAW("/// @param x The input vector.\n") +SLANG_RAW("/// @return The length of `x`.\n") +SLANG_RAW("/// @category math\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_metal_spirv_wgsl, sm_4_0_version)]\n") +SLANG_RAW("T length(vector x)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"length\";\n") +SLANG_RAW(" 
case hlsl: __intrinsic_asm \"length\";\n") +SLANG_RAW(" case metal: __intrinsic_asm \"length\";\n") +SLANG_RAW(" case spirv: return spirv_asm {\n") +SLANG_RAW(" OpExtInst $$T result glsl450 Length $x\n") +SLANG_RAW(" };\n") +SLANG_RAW(" case wgsl: __intrinsic_asm \"length\";\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" return sqrt(dot(x, x));\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_metal_spirv_wgsl, sm_4_0_version)]\n") +SLANG_RAW("T length(T x)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"length\"; \n") +SLANG_RAW(" case spirv: return spirv_asm {\n") +SLANG_RAW(" OpExtInst $$T result glsl450 Length $x\n") +SLANG_RAW(" };\n") +SLANG_RAW(" case wgsl: __intrinsic_asm \"length\"; \n") +SLANG_RAW(" default:\n") +SLANG_RAW(" return abs(x);\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("/// Computes linear interpolation.\n") +SLANG_RAW("/// @param x The starting value.\n") +SLANG_RAW("/// @param y The ending value.\n") +SLANG_RAW("/// @param s The interpolation factor.\n") +SLANG_RAW("/// @return Returns `x+(y-x)*s`.\n") +SLANG_RAW("/// @category math\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("T lerp(T x, T y, T s)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"mix\";\n") +SLANG_RAW(" case wgsl: __intrinsic_asm \"mix\";\n") +SLANG_RAW(" case metal: __intrinsic_asm \"mix\";\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"lerp\";\n") +SLANG_RAW(" case spirv: return spirv_asm {\n") +SLANG_RAW(" OpExtInst $$T result glsl450 FMix $x $y $s\n") +SLANG_RAW(" };\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" return x + (y - x) * s;\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("vector lerp(vector x, vector y, vector s)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"mix\";\n") +SLANG_RAW(" case wgsl: __intrinsic_asm \"mix\";\n") +SLANG_RAW(" case metal: __intrinsic_asm \"mix\";\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"lerp\";\n") +SLANG_RAW(" case spirv: return spirv_asm {\n") +SLANG_RAW(" OpExtInst $$vector result glsl450 FMix $x $y $s\n") +SLANG_RAW(" };\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" return x + (y - x) * s;\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("matrix lerp(matrix x, matrix y, matrix s)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"lerp\";\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" MATRIX_MAP_TRINARY(T, N, M, lerp, x, y, s);\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("/// Legacy lighting function (obsolete).\n") +SLANG_RAW("/// @param n_dot_l The dot product of the normal and light vectors.\n") +SLANG_RAW("/// @param n_dot_h The dot product of the normal and half-angle vectors.\n") +SLANG_RAW("/// @param m The material shininess factor.\n") +SLANG_RAW("/// @return The lighting coefficients, (ambient, diffuse, specular, 1.0).\n") +SLANG_RAW("/// @remarks In HLSL, this function is implemented as an intrinsic. 
It is emulated for other targets.\n") +SLANG_RAW("/// @deprecated\n") +SLANG_RAW("/// @category math\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_metal_spirv, sm_4_0_version)]\n") +SLANG_RAW("float4 lit(float n_dot_l, float n_dot_h, float m)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"lit\";\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" let ambient = 1.0f;\n") +SLANG_RAW(" let diffuse = max(n_dot_l, 0.0f);\n") +SLANG_RAW(" let specular = step(0.0f, n_dot_l) * max(pow(n_dot_h, m), 0.0f);\n") +SLANG_RAW(" return float4(ambient, diffuse, specular, 1.0f);\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("/// Compute base-e logarithm.\n") +SLANG_RAW("/// @param x The input value.\n") +SLANG_RAW("/// @return The natural logarithm of `x`.\n") +SLANG_RAW("/// @category math\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_metal_spirv_wgsl, sm_4_0_version)]\n") +SLANG_RAW("T log(T x)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case cpp: __intrinsic_asm \"$P_log($0)\";\n") +SLANG_RAW(" case cuda: __intrinsic_asm \"$P_log($0)\";\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"log\";\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"log\";\n") +SLANG_RAW(" case metal: __intrinsic_asm \"log\";\n") +SLANG_RAW(" case spirv: return spirv_asm {\n") +SLANG_RAW(" OpExtInst $$T result glsl450 Log $x\n") +SLANG_RAW(" };\n") +SLANG_RAW(" case wgsl: __intrinsic_asm \"log\";\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_metal_spirv_wgsl, sm_4_0_version)]\n") +SLANG_RAW("vector log(vector x)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"log\";\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"log\";\n") +SLANG_RAW(" case metal: __intrinsic_asm \"log\";\n") +SLANG_RAW(" case spirv: return spirv_asm {\n") +SLANG_RAW(" OpExtInst $$vector result glsl450 Log $x\n") +SLANG_RAW(" };\n") +SLANG_RAW(" case wgsl: __intrinsic_asm \"log\";\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" VECTOR_MAP_UNARY(T, N, log, x);\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_metal_spirv_wgsl, sm_4_0_version)]\n") +SLANG_RAW("matrix log(matrix x)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"log\";\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" MATRIX_MAP_UNARY(T, N, M, log, x);\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("/// Compute base-10 logarithm.\n") +SLANG_RAW("/// @param x The input value.\n") +SLANG_RAW("/// @return The base-10 logarithm of `x`.\n") +SLANG_RAW("/// @category math\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_metal_spirv_wgsl, sm_4_0_version)]\n") +SLANG_RAW("T log10(T x)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"log10\";\n") +SLANG_RAW(" case metal: __intrinsic_asm \"log10\";\n") +SLANG_RAW(" case wgsl: __intrinsic_asm \"(log( $0 ) * $S0( 0.43429448190325182765112891891661) )\";\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"(log( $0 ) * $S0( 0.43429448190325182765112891891661) )\";\n") +SLANG_RAW(" case cuda: __intrinsic_asm 
\"$P_log10($0)\";\n") +SLANG_RAW(" case cpp: __intrinsic_asm \"$P_log10($0)\";\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" {\n") +SLANG_RAW(" const T tmp = T(0.43429448190325182765112891891661);\n") +SLANG_RAW(" return spirv_asm {\n") +SLANG_RAW(" %baseElog:$$T = OpExtInst glsl450 Log $x;\n") +SLANG_RAW(" result:$$T = OpFMul %baseElog $tmp\n") +SLANG_RAW(" };\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_metal_spirv_wgsl, sm_4_0_version)]\n") +SLANG_RAW("vector log10(vector x)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"log10\";\n") +SLANG_RAW(" case metal: __intrinsic_asm \"log10\";\n") +SLANG_RAW(" case wgsl: __intrinsic_asm \"(log( $0 ) * $S0(0.43429448190325182765112891891661) )\";\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"(log( $0 ) * $S0(0.43429448190325182765112891891661) )\";\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" {\n") +SLANG_RAW(" const T tmp = T(0.43429448190325182765112891891661);\n") +SLANG_RAW(" return spirv_asm {\n") +SLANG_RAW(" %baseElog:$$vector = OpExtInst glsl450 Log $x;\n") +SLANG_RAW(" result:$$vector = OpVectorTimesScalar %baseElog $tmp\n") +SLANG_RAW(" };\n") +SLANG_RAW(" }\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" VECTOR_MAP_UNARY(T, N, log10, x);\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_metal_spirv, sm_4_0_version)]\n") +SLANG_RAW("matrix log10(matrix x)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"log10\";\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" MATRIX_MAP_UNARY(T, N, M, log10, x);\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("/// Compute base-2 logarithm.\n") +SLANG_RAW("/// @param x The input value.\n") +SLANG_RAW("/// @return The base-2 logarithm of `x`.\n") +SLANG_RAW("/// @category math\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_metal_spirv_wgsl, sm_4_0_version)]\n") +SLANG_RAW("T log2(T x)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case cpp: __intrinsic_asm \"$P_log2($0)\";\n") +SLANG_RAW(" case cuda: __intrinsic_asm \"$P_log2($0)\";\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"log2\";\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"log2\";\n") +SLANG_RAW(" case metal: __intrinsic_asm \"log2\";\n") +SLANG_RAW(" case spirv: return spirv_asm {\n") +SLANG_RAW(" OpExtInst $$T result glsl450 Log2 $x\n") +SLANG_RAW(" };\n") +SLANG_RAW(" case wgsl: __intrinsic_asm \"log2\";\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_metal_spirv_wgsl, sm_4_0_version)]\n") +SLANG_RAW("vector log2(vector x)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"log2\";\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"log2\";\n") +SLANG_RAW(" case metal: __intrinsic_asm \"log2\";\n") +SLANG_RAW(" case spirv: return spirv_asm {\n") +SLANG_RAW(" OpExtInst $$vector result glsl450 Log2 $x\n") +SLANG_RAW(" };\n") +SLANG_RAW(" case wgsl: __intrinsic_asm \"log2\";\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" VECTOR_MAP_UNARY(T, N, log2, x);\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") 
+SLANG_RAW("__generic\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_metal_spirv_wgsl, sm_4_0_version)]\n") +SLANG_RAW("matrix log2(matrix x)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"log2\";\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" MATRIX_MAP_UNARY(T, N, M, log2, x);\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("/// Computes multiply-add.\n") +SLANG_RAW("/// @param mvalue The multiplier.\n") +SLANG_RAW("/// @param avalue The multiplicand.\n") +SLANG_RAW("/// @param bvalue The addend.\n") +SLANG_RAW("/// @return The result of `mvalue * avalue + bvalue`.\n") +SLANG_RAW("/// @category math\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_metal_spirv, shader5_sm_5_0)]\n") +SLANG_RAW("T mad(T mvalue, T avalue, T bvalue)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case cpp: __intrinsic_asm \"$P_fma($0, $1, $2)\";\n") +SLANG_RAW(" case cuda: __intrinsic_asm \"$P_fma($0, $1, $2)\";\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"fma\";\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"mad\";\n") +SLANG_RAW(" case metal: __intrinsic_asm \"fma\";\n") +SLANG_RAW(" case spirv: return spirv_asm {\n") +SLANG_RAW(" OpExtInst $$T result glsl450 Fma $mvalue $avalue $bvalue\n") +SLANG_RAW(" };\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_metal_spirv, shader5_sm_5_0)]\n") +SLANG_RAW("vector mad(vector mvalue, vector avalue, vector bvalue)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"fma\";\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"mad\";\n") +SLANG_RAW(" case metal: __intrinsic_asm \"fma\";\n") +SLANG_RAW(" case spirv: return spirv_asm {\n") +SLANG_RAW(" OpExtInst $$vector result glsl450 Fma $mvalue $avalue $bvalue\n") +SLANG_RAW(" };\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" VECTOR_MAP_TRINARY(T, N, mad, mvalue, avalue, bvalue);\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_metal_spirv, shader5_sm_5_0)]\n") +SLANG_RAW("matrix mad(matrix mvalue, matrix avalue, matrix bvalue)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"mad\";\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" MATRIX_MAP_TRINARY(T, N, M, mad, mvalue, avalue, bvalue);\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_spirv, shader5_sm_5_0)]\n") +SLANG_RAW("T mad(T mvalue, T avalue, T bvalue)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case cpp: __intrinsic_asm \"$P_fma($0, $1, $2)\";\n") +SLANG_RAW(" case cuda: __intrinsic_asm \"$P_fma($0, $1, $2)\";\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"fma\";\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"mad\";\n") +SLANG_RAW(" case spirv: return spirv_asm {\n") +SLANG_RAW(" OpExtInst $$T result glsl450 Fma $mvalue $avalue $bvalue\n") +SLANG_RAW(" };\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_spirv, shader5_sm_5_0)]\n") +SLANG_RAW("vector mad(vector mvalue, vector avalue, vector bvalue)\n") 
+SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"fma\";\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"mad\";\n") +SLANG_RAW(" case spirv: return spirv_asm {\n") +SLANG_RAW(" OpExtInst $$vector result glsl450 Fma $mvalue $avalue $bvalue\n") +SLANG_RAW(" };\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" VECTOR_MAP_TRINARY(T, N, mad, mvalue, avalue, bvalue);\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_spirv, shader5_sm_5_0)]\n") +SLANG_RAW("matrix mad(matrix mvalue, matrix avalue, matrix bvalue)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"mad\";\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" MATRIX_MAP_TRINARY(T, N, M, mad, mvalue, avalue, bvalue);\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("/// Maximum.\n") +SLANG_RAW("/// @param x The first value.\n") +SLANG_RAW("/// @param y The second value.\n") +SLANG_RAW("/// @return The maximum of `x` and `y`.\n") +SLANG_RAW("/// @category math\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_metal_spirv_wgsl, sm_4_0_version)]\n") +SLANG_RAW("T max(T x, T y)\n") +SLANG_RAW("{\n") +SLANG_RAW(" // Note: a core module implementation of `max` (or `min`) will require splitting\n") +SLANG_RAW(" // floating-point and integer cases apart, because the floating-point\n") +SLANG_RAW(" // version needs to correctly handle the case where one of the inputs\n") +SLANG_RAW(" // is not-a-number.\n") +SLANG_RAW("\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"max\";\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"max\";\n") +SLANG_RAW(" case metal: __intrinsic_asm \"max\";\n") +SLANG_RAW(" case cuda: __intrinsic_asm \"$P_max($0, $1)\";\n") +SLANG_RAW(" case cpp: __intrinsic_asm \"$P_max($0, $1)\";\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" {\n") +SLANG_RAW(" if (__isSignedInt())\n") +SLANG_RAW(" {\n") +SLANG_RAW(" return spirv_asm {\n") +SLANG_RAW(" result:$$T = OpExtInst glsl450 SMax $x $y\n") +SLANG_RAW(" };\n") +SLANG_RAW(" }\n") +SLANG_RAW(" else\n") +SLANG_RAW(" {\n") +SLANG_RAW(" return spirv_asm {\n") +SLANG_RAW(" result:$$T = OpExtInst glsl450 UMax $x $y\n") +SLANG_RAW(" };\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW(" case wgsl: __intrinsic_asm \"max\";\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("/// @category math\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_metal_spirv_wgsl, sm_4_0_version)]\n") +SLANG_RAW("vector max(vector x, vector y)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"max\";\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"max\";\n") +SLANG_RAW(" case metal: __intrinsic_asm \"max\";\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" {\n") +SLANG_RAW(" if (__isSignedInt())\n") +SLANG_RAW(" {\n") +SLANG_RAW(" return spirv_asm {\n") +SLANG_RAW(" result:$$vector = OpExtInst glsl450 SMax $x $y\n") +SLANG_RAW(" };\n") +SLANG_RAW(" }\n") +SLANG_RAW(" else\n") +SLANG_RAW(" {\n") +SLANG_RAW(" return spirv_asm {\n") +SLANG_RAW(" result:$$vector = OpExtInst glsl450 UMax $x $y\n") +SLANG_RAW(" };\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW(" case wgsl: __intrinsic_asm \"max\";\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" VECTOR_MAP_BINARY(T, N, 
max, x, y);\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("/// @category math\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_metal_spirv_wgsl, sm_4_0_version)]\n") +SLANG_RAW("matrix max(matrix x, matrix y)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"max\";\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" MATRIX_MAP_BINARY(T, N, M, max, x, y);\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_metal_spirv_wgsl, sm_4_0_version)]\n") +SLANG_RAW("T max(T x, T y)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"max\";\n") +SLANG_RAW(" case metal: __intrinsic_asm \"max\";\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"max\";\n") +SLANG_RAW(" case cuda: __intrinsic_asm \"$P_max($0, $1)\";\n") +SLANG_RAW(" case cpp: __intrinsic_asm \"$P_max($0, $1)\";\n") +SLANG_RAW(" case spirv: return spirv_asm {\n") +SLANG_RAW(" result:$$T = OpExtInst glsl450 FMax $x $y\n") +SLANG_RAW(" };\n") +SLANG_RAW(" case wgsl: __intrinsic_asm \"max\";\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_metal_spirv_wgsl, sm_4_0_version)]\n") +SLANG_RAW("vector max(vector x, vector y)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"max\";\n") +SLANG_RAW(" case metal: __intrinsic_asm \"max\";\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"max\";\n") +SLANG_RAW(" case spirv: return spirv_asm {\n") +SLANG_RAW(" result:$$vector = OpExtInst glsl450 FMax $x $y\n") +SLANG_RAW(" };\n") +SLANG_RAW(" case wgsl: __intrinsic_asm \"max\";\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" VECTOR_MAP_BINARY(T, N, max, x, y);\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_metal_spirv_wgsl, sm_4_0_version)]\n") +SLANG_RAW("matrix max(matrix x, matrix y)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"max\";\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" MATRIX_MAP_BINARY(T, N, M, max, x, y);\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("/// Maximum of 3 inputs.\n") +SLANG_RAW("/// @param x The first value to compare.\n") +SLANG_RAW("/// @param y The second value to compare.\n") +SLANG_RAW("/// @param z The third value to compare.\n") +SLANG_RAW("/// @return The largest of the three values, element-wise if vector typed.\n") +SLANG_RAW("/// @category math\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_metal_spirv, sm_4_0_version)]\n") +SLANG_RAW("T max3(T x, T y, T z)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case metal: __intrinsic_asm \"max3\";\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" return max(x, max(y, z));\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_metal_spirv, sm_4_0_version)]\n") +SLANG_RAW("vector max3(vector x, vector y, vector z)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case metal: __intrinsic_asm \"max3\";\n") +SLANG_RAW(" 
default:\n") +SLANG_RAW(" return max(x, max(y, z));\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("/// Floating-point maximum.\n") +SLANG_RAW("/// @param x The first value to compare.\n") +SLANG_RAW("/// @param y The second value to compare.\n") +SLANG_RAW("/// @return The larger of the two values, element-wise if vector typed.\n") +SLANG_RAW("/// @remarks Result is `y` if `x` < `y`, either `x` or `y` if both `x` and `y` are zeros, otherwise `x`. Which operand is the result is undefined if one of the operands is a NaN.\n") +SLANG_RAW("/// @category math\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_metal_spirv, sm_4_0_version)]\n") +SLANG_RAW("T fmax(T x, T y)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case metal: __intrinsic_asm \"fmax\";\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" return max(x, y);\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_metal_spirv, sm_4_0_version)]\n") +SLANG_RAW("vector fmax(vector x, vector y)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case metal: __intrinsic_asm \"fmax\";\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" VECTOR_MAP_BINARY(T, N, fmax, x, y);\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("/// Floating-point maximum of 3 inputs.\n") +SLANG_RAW("/// @param x The first value to compare.\n") +SLANG_RAW("/// @param y The second value to compare.\n") +SLANG_RAW("/// @param z The third value to compare.\n") +SLANG_RAW("/// @return The largest of the three values, element-wise if vector typed.\n") +SLANG_RAW("/// @remarks If any operand in the 3-way comparison is NaN, it is undefined which operand is returned.\n") +SLANG_RAW("/// @category math\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_metal_spirv, sm_4_0_version)]\n") +SLANG_RAW("T fmax3(T x, T y, T z)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case metal: __intrinsic_asm \"fmax3\";\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" {\n") +SLANG_RAW(" return max(y, max(x, z));\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_metal_spirv, sm_4_0_version)]\n") +SLANG_RAW("vector fmax3(vector x, vector y, vector z)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case metal: __intrinsic_asm \"fmax3\";\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" VECTOR_MAP_TRINARY(T, N, fmax3, x, y, z);\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("/// Minimum.\n") +SLANG_RAW("/// @param x The first value to compare.\n") +SLANG_RAW("/// @param y The second value to compare.\n") +SLANG_RAW("/// @return The smaller of the two values, element-wise if vector typed.\n") +SLANG_RAW("/// @remarks For HLSL, GLSL, and metal targets, this is implemented with the min() intrinsic.\n") +SLANG_RAW("/// For SPIR-V, it is implemented with the UMin or SMin instruction, depending on the signedness of the type.\n") +SLANG_RAW("/// @category math\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_metal_spirv_wgsl, sm_4_0_version)]\n") +SLANG_RAW("T min(T x, T y)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") 
+SLANG_RAW(" case hlsl:\n") +SLANG_RAW(" case glsl:\n") +SLANG_RAW(" case metal:\n") +SLANG_RAW(" __intrinsic_asm \"min\";\n") +SLANG_RAW(" case cuda:\n") +SLANG_RAW(" case cpp:\n") +SLANG_RAW(" __intrinsic_asm \"$P_min($0, $1)\";\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" {\n") +SLANG_RAW(" if (__isSignedInt())\n") +SLANG_RAW(" return spirv_asm {\n") +SLANG_RAW(" result:$$T = OpExtInst glsl450 SMin $x $y\n") +SLANG_RAW(" };\n") +SLANG_RAW(" else\n") +SLANG_RAW(" return spirv_asm {\n") +SLANG_RAW(" result:$$T = OpExtInst glsl450 UMin $x $y\n") +SLANG_RAW(" };\n") +SLANG_RAW(" }\n") +SLANG_RAW(" case wgsl: __intrinsic_asm \"min\";\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_metal_spirv_wgsl, sm_4_0_version)]\n") +SLANG_RAW("vector min(vector x, vector y)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"min\";\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"min\";\n") +SLANG_RAW(" case metal: __intrinsic_asm \"min\";\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" {\n") +SLANG_RAW(" if (__isSignedInt())\n") +SLANG_RAW(" return spirv_asm {\n") +SLANG_RAW(" result:$$vector = OpExtInst glsl450 SMin $x $y\n") +SLANG_RAW(" };\n") +SLANG_RAW(" else\n") +SLANG_RAW(" return spirv_asm {\n") +SLANG_RAW(" result:$$vector = OpExtInst glsl450 UMin $x $y\n") +SLANG_RAW(" };\n") +SLANG_RAW(" }\n") +SLANG_RAW(" case wgsl: __intrinsic_asm \"min\";\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" VECTOR_MAP_BINARY(T, N, min, x, y);\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_metal_spirv_wgsl, sm_4_0_version)]\n") +SLANG_RAW("matrix min(matrix x, matrix y)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"min\";\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" MATRIX_MAP_BINARY(T, N, M, min, x, y);\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_metal_spirv_wgsl, sm_4_0_version)]\n") +SLANG_RAW("T min(T x, T y)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"min\";\n") +SLANG_RAW(" case metal: __intrinsic_asm \"min\";\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"min\";\n") +SLANG_RAW(" case cuda: __intrinsic_asm \"$P_min($0, $1)\";\n") +SLANG_RAW(" case cpp: __intrinsic_asm \"$P_min($0, $1)\";\n") +SLANG_RAW(" case spirv: return spirv_asm {\n") +SLANG_RAW(" result:$$T = OpExtInst glsl450 FMin $x $y\n") +SLANG_RAW(" };\n") +SLANG_RAW(" case wgsl: __intrinsic_asm \"min\";\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_metal_spirv_wgsl, sm_4_0_version)]\n") +SLANG_RAW("vector min(vector x, vector y)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"min\";\n") +SLANG_RAW(" case metal: __intrinsic_asm \"min\";\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"min\";\n") +SLANG_RAW(" case spirv: return spirv_asm {\n") +SLANG_RAW(" result:$$vector = OpExtInst glsl450 FMin $x $y\n") +SLANG_RAW(" };\n") +SLANG_RAW(" case wgsl: __intrinsic_asm \"min\";\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" VECTOR_MAP_BINARY(T, N, min, x, y);\n") +SLANG_RAW(" }\n") 
+SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_metal_spirv_wgsl, sm_4_0_version)]\n") +SLANG_RAW("matrix min(matrix x, matrix y)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"min\";\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" MATRIX_MAP_BINARY(T, N, M, min, x, y);\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("/// Minimum of 3 inputs.\n") +SLANG_RAW("/// @param x The first value to compare.\n") +SLANG_RAW("/// @param y The second value to compare.\n") +SLANG_RAW("/// @param z The third value to compare.\n") +SLANG_RAW("/// @return The smallest of the three values, element-wise if vector typed.\n") +SLANG_RAW("/// @category math\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_metal_spirv, sm_4_0_version)]\n") +SLANG_RAW("T min3(T x, T y, T z)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case metal: __intrinsic_asm \"min3\";\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" return min(x, min(y, z));\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("/// @category math\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_metal_spirv, sm_4_0_version)]\n") +SLANG_RAW("vector min3(vector x, vector y, vector z)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case metal: __intrinsic_asm \"min3\";\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" return min(x, min(y, z));\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("/// Floating-point minimum.\n") +SLANG_RAW("/// @param x The first value to compare.\n") +SLANG_RAW("/// @param y The second value to compare.\n") +SLANG_RAW("/// @return The smaller of the two values, element-wise if vector typed.\n") +SLANG_RAW("/// @remarks Result is `x` if `x` < `y`, either `x` or `y` if both `x` and `y` are zeros, otherwise `y`. 
Which operand is the result is undefined if one of the operands is a NaN.\n") +SLANG_RAW("/// @category math\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_metal_spirv, sm_4_0_version)]\n") +SLANG_RAW("T fmin(T x, T y)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case metal: __intrinsic_asm \"fmin\";\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" return min(x, y);\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_metal_spirv, sm_4_0_version)]\n") +SLANG_RAW("vector fmin(vector x, vector y)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case metal: __intrinsic_asm \"fmin\";\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" VECTOR_MAP_BINARY(T, N, fmin, x, y);\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("/// Floating-point minimum of 3 inputs.\n") +SLANG_RAW("/// @param x The first value to compare.\n") +SLANG_RAW("/// @param y The second value to compare.\n") +SLANG_RAW("/// @param z The third value to compare.\n") +SLANG_RAW("/// @return The smallest of the three values, element-wise if vector typed.\n") +SLANG_RAW("/// @remarks If any operand in the 3-way comparison is NaN, it is undefined which operand is returned.\n") +SLANG_RAW("/// @category math\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_metal_spirv, sm_4_0_version)]\n") +SLANG_RAW("T fmin3(T x, T y, T z)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case metal: __intrinsic_asm \"fmin3\";\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" {\n") +SLANG_RAW(" return min(x, min(y, z));\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_metal_spirv, sm_4_0_version)]\n") +SLANG_RAW("vector fmin3(vector x, vector y, vector z)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case metal: __intrinsic_asm \"fmin3\";\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" VECTOR_MAP_TRINARY(T, N, fmin3, x, y, z);\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("/// Median of 3 values.\n") +SLANG_RAW("/// @param x The first value to compare.\n") +SLANG_RAW("/// @param y The second value to compare.\n") +SLANG_RAW("/// @param z The third value to compare.\n") +SLANG_RAW("/// @return The median of the three values, element-wise if vector typed.\n") +SLANG_RAW("/// @remarks For metal, this is implemented with the median3 intrinsic which has special handling for NaN.\n") +SLANG_RAW("/// @category math\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_metal_spirv, sm_4_0_version)]\n") +SLANG_RAW("T median3(T x, T y, T z)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case metal: __intrinsic_asm \"median3\";\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" {\n") +SLANG_RAW(" // | a | b | c | m |\n") +SLANG_RAW(" // ----------+---+---+---+---+\n") +SLANG_RAW(" // x > y > z | z | y | x | y |\n") +SLANG_RAW(" // x > z > y | y | z | x | z |\n") +SLANG_RAW(" // y > x > z | z | y | x | x |\n") +SLANG_RAW(" // y > z > x | z | y | z | z |\n") +SLANG_RAW(" // z > x > y | y | z | x | x |\n") +SLANG_RAW(" // z > y > x | y | z | y | y |\n") +SLANG_RAW("\n") +SLANG_RAW(" T a = min(y, 
z);\n") +SLANG_RAW(" T b = max(y, z);\n") +SLANG_RAW(" T c = max(x, a);\n") +SLANG_RAW(" T m = min(b, c);\n") +SLANG_RAW(" return m;\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("/// @category math\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_metal_spirv, sm_4_0_version)]\n") +SLANG_RAW("vector median3(vector x, vector y, vector z)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case metal: __intrinsic_asm \"median3\";\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" {\n") +SLANG_RAW(" vector a = min(y, z);\n") +SLANG_RAW(" vector b = max(y, z);\n") +SLANG_RAW(" vector c = max(x, a);\n") +SLANG_RAW(" vector m = min(b, c);\n") +SLANG_RAW(" return m;\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("/// Floating-point median.\n") +SLANG_RAW("/// @param x The first value to compare.\n") +SLANG_RAW("/// @param y The second value to compare.\n") +SLANG_RAW("/// @param z The third value to compare.\n") +SLANG_RAW("/// @return The median of the three values, element-wise if vector typed.\n") +SLANG_RAW("/// @remarks For metal, this is implemented with the fmedian3 intrinsic.\n") +SLANG_RAW("/// If any value is NaN, it is unspecified which operand is returned.\n") +SLANG_RAW("/// @category math\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_metal_spirv, sm_4_0_version)]\n") +SLANG_RAW("T fmedian3(T x, T y, T z)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case metal: __intrinsic_asm \"fmedian3\";\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" {\n") +SLANG_RAW(" return median3(x, y, z);\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_metal_spirv, sm_4_0_version)]\n") +SLANG_RAW("vector fmedian3(vector x, vector y, vector z)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case metal: __intrinsic_asm \"fmedian3\";\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" VECTOR_MAP_TRINARY(T, N, fmedian3, x, y, z);\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("/// Split into integer and fractional parts (both with same sign).\n") +SLANG_RAW("/// @param x The input value.\n") +SLANG_RAW("/// @param[out] ip The integer part of `x`.\n") +SLANG_RAW("/// @return The fractional part of `x`.\n") +SLANG_RAW("/// @category math\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[ForceInline]\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_metal_spirv_wgsl, sm_4_0_version)]\n") +SLANG_RAW("T modf(T x, out T ip)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case cpp: __intrinsic_asm \"$P_modf($0, $1)\";\n") +SLANG_RAW(" case cuda: __intrinsic_asm \"$P_modf($0, $1)\";\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"modf\";\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"modf\";\n") +SLANG_RAW(" case metal: __intrinsic_asm \"modf($0, *($1))\";\n") +SLANG_RAW(" case spirv: return spirv_asm {\n") +SLANG_RAW(" result:$$T = OpExtInst glsl450 Modf $x &ip\n") +SLANG_RAW(" };\n") +SLANG_RAW(" case wgsl:\n") +SLANG_RAW(" T fract;\n") +SLANG_RAW(" __wgsl_modf(x, fract, ip);\n") +SLANG_RAW(" return fract;\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[ForceInline]\n") 
+SLANG_RAW("[require(wgsl)]\n") +SLANG_RAW("void __wgsl_modf(T x, out T fract, out T whole)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __intrinsic_asm \"{ var s = modf($0); ($1) = s.fract; ($2) = s.whole; }\";\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[ForceInline]\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_metal_spirv_wgsl, sm_4_0_version)]\n") +SLANG_RAW("vector modf(vector x, out vector ip)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"modf\";\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"modf\";\n") +SLANG_RAW(" case metal: __intrinsic_asm \"modf($0, *($1))\";\n") +SLANG_RAW(" case spirv: return spirv_asm {\n") +SLANG_RAW(" result:$$vector = OpExtInst glsl450 Modf $x &ip\n") +SLANG_RAW(" };\n") +SLANG_RAW(" case wgsl:\n") +SLANG_RAW(" vector fract;\n") +SLANG_RAW(" __wgsl_modf(x, fract, ip);\n") +SLANG_RAW(" return fract;\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" VECTOR_MAP_BINARY(T, N, modf, x, ip);\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[ForceInline]\n") +SLANG_RAW("[require(wgsl)]\n") +SLANG_RAW("void __wgsl_modf(vector x, out vector fract, out vector whole)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __intrinsic_asm \"{ var s = modf($0); ($1) = s.fract; ($2) = s.whole; }\";\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[ForceInline]\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_metal_spirv_wgsl, sm_4_0_version)]\n") +SLANG_RAW("matrix modf(matrix x, out matrix ip)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"modf\";\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" MATRIX_MAP_BINARY(T, N, M, modf, x, ip);\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("/// Masked sum of absolute differences of byte alignments.\n") +SLANG_RAW("/// This function computes the absolute differences of the byte alignments of the reference and source values, and adds them to the accumulated differences.\n") +SLANG_RAW("/// @param reference The reference 4 bytes packed in a uint.\n") +SLANG_RAW("/// @param source The source 2 uints packed in a uint2.\n") +SLANG_RAW("/// @param accum The accumulated differences.\n") +SLANG_RAW("/// @return The updated accumulated differences.\n") +SLANG_RAW("/// @remarks In HLSL, this is implemented with the msad4 intrinsic.\n") +SLANG_RAW("/// @category math\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_spirv, sm_4_0_version)]\n") +SLANG_RAW("uint4 msad4(uint reference, uint2 source, uint4 accum)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"msad4\";\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" int4 bytesRef = (reference >> uint4(24, 16, 8, 0)) & 0xFF;\n") +SLANG_RAW(" int4 bytesX = (source.x >> uint4(24, 16, 8, 0)) & 0xFF;\n") +SLANG_RAW(" int4 bytesY = (source.y >> uint4(24, 16, 8, 0)) & 0xFF;\n") +SLANG_RAW(" \n") +SLANG_RAW(" uint4 mask = select(bytesRef == 0, 0, 0xFFFFFFFFu);\n") +SLANG_RAW(" \n") +SLANG_RAW(" uint4 result = accum;\n") +SLANG_RAW(" result += mask.x & abs(bytesRef - int4(bytesX.x, bytesY.y, bytesY.z, bytesY.w));\n") +SLANG_RAW(" result += mask.y & abs(bytesRef - int4(bytesX.x, bytesX.y, bytesY.z, bytesY.w));\n") +SLANG_RAW(" result += mask.z & abs(bytesRef - int4(bytesX.x, bytesX.y, bytesX.z, bytesY.w));\n") 
+SLANG_RAW(" result += mask.w & abs(bytesRef - int4(bytesX.x, bytesX.y, bytesX.z, bytesX.w));\n") +SLANG_RAW(" return result;\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("// General inner products\n") +SLANG_RAW("\n") +SLANG_RAW("// scalar-scalar\n") +SLANG_RAW("/// Multiply.\n") +SLANG_RAW("/// @param x The first value.\n") +SLANG_RAW("/// @param y The second value.\n") +SLANG_RAW("/// @return The inner product of `x` and `y`.\n") +SLANG_RAW("/// @category math\n") +SLANG_RAW("__generic\n") +SLANG_RAW("__intrinsic_op(") +SLANG_SPLICE(kIROp_Mul +) +SLANG_RAW(")\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_metal_spirv, sm_4_0_version)]\n") +SLANG_RAW("T mul(T x, T y);\n") +SLANG_RAW("\n") +SLANG_RAW("// scalar-vector and vector-scalar\n") +SLANG_RAW("__generic\n") +SLANG_RAW("__intrinsic_op(") +SLANG_SPLICE(kIROp_Mul +) +SLANG_RAW(")\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_metal_spirv, sm_4_0_version)]\n") +SLANG_RAW("vector mul(vector x, T y);\n") +SLANG_RAW("\n") +SLANG_RAW("__generic\n") +SLANG_RAW("__intrinsic_op(") +SLANG_SPLICE(kIROp_Mul +) +SLANG_RAW(")\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_metal_spirv, sm_4_0_version)]\n") +SLANG_RAW("vector mul(T x, vector y);\n") +SLANG_RAW("\n") +SLANG_RAW("// scalar-matrix and matrix-scalar\n") +SLANG_RAW("__generic\n") +SLANG_RAW("__intrinsic_op(") +SLANG_SPLICE(kIROp_Mul +) +SLANG_RAW(")\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_metal_spirv, sm_4_0_version)]\n") +SLANG_RAW("matrix mul(matrix x, T y);\n") +SLANG_RAW("\n") +SLANG_RAW("__generic\n") +SLANG_RAW("__intrinsic_op(") +SLANG_SPLICE(kIROp_Mul +) +SLANG_RAW(")\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_metal_spirv, sm_4_0_version)]\n") +SLANG_RAW("matrix mul(T x, matrix y);\n") +SLANG_RAW("\n") +SLANG_RAW("// vector-vector (dot product)\n") +SLANG_RAW("/// @category math\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_metal_spirv_wgsl, sm_4_0_version)]\n") +SLANG_RAW("T mul(vector x, vector y)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"dot\";\n") +SLANG_RAW(" case metal: __intrinsic_asm \"dot\";\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"mul\";\n") +SLANG_RAW(" case wgsl: __intrinsic_asm \"dot\";\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" return dot(x, y);\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_metal_spirv_wgsl, sm_4_0_version)]\n") +SLANG_RAW("T mul(vector x, vector y)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"mul\";\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" return dot(x, y);\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("// vector-matrix\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_metal_spirv_wgsl, sm_4_0_version)]\n") +SLANG_RAW("vector mul(vector left, matrix right)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"($1 * $0)\";\n") +SLANG_RAW(" case metal: __intrinsic_asm \"($1 * $0)\";\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"mul\";\n") +SLANG_RAW(" case spirv: return spirv_asm {\n") +SLANG_RAW(" OpMatrixTimesVector $$vector result $right $left\n") +SLANG_RAW(" 
};\n") +SLANG_RAW(" case wgsl: __intrinsic_asm \"($1 * $0)\";\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" vector result;\n") +SLANG_RAW(" for( int j = 0; j < M; ++j )\n") +SLANG_RAW(" {\n") +SLANG_RAW(" T sum = T(0);\n") +SLANG_RAW(" for( int i = 0; i < N; ++i )\n") +SLANG_RAW(" {\n") +SLANG_RAW(" sum += left[i] * right[i][j];\n") +SLANG_RAW(" }\n") +SLANG_RAW(" result[j] = sum;\n") +SLANG_RAW(" }\n") +SLANG_RAW(" return result;\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_metal_spirv_wgsl, sm_4_0_version)]\n") +SLANG_RAW("vector mul(vector left, matrix right)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"($1 * $0)\";\n") +SLANG_RAW(" case metal: __intrinsic_asm \"($1 * $0)\";\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"mul\";\n") +SLANG_RAW(" case wgsl: __intrinsic_asm \"($1 * $0)\";\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" vector result;\n") +SLANG_RAW(" for( int j = 0; j < M; ++j )\n") +SLANG_RAW(" {\n") +SLANG_RAW(" T sum = T(0);\n") +SLANG_RAW(" for( int i = 0; i < N; ++i )\n") +SLANG_RAW(" {\n") +SLANG_RAW(" sum += left[i] * right[i][j];\n") +SLANG_RAW(" }\n") +SLANG_RAW(" result[j] = sum;\n") +SLANG_RAW(" }\n") +SLANG_RAW(" return result;\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_metal_spirv_wgsl, sm_4_0_version)]\n") +SLANG_RAW("vector mul(vector left, matrix right)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"($1 * $0)\";\n") +SLANG_RAW(" case metal: __intrinsic_asm \"($1 * $0)\";\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"mul\";\n") +SLANG_RAW(" case wgsl: __intrinsic_asm \"($1 * $0)\";\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" vector result;\n") +SLANG_RAW(" for( int j = 0; j < M; ++j )\n") +SLANG_RAW(" {\n") +SLANG_RAW(" T sum = T(0);\n") +SLANG_RAW(" for( int i = 0; i < N; ++i )\n") +SLANG_RAW(" {\n") +SLANG_RAW(" sum |= left[i] & right[i][j];\n") +SLANG_RAW(" }\n") +SLANG_RAW(" result[j] = sum;\n") +SLANG_RAW(" }\n") +SLANG_RAW(" return result;\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("// matrix-vector\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_metal_spirv_wgsl, sm_4_0_version)]\n") +SLANG_RAW("vector mul(matrix left, vector right)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"($1 * $0)\";\n") +SLANG_RAW(" case metal: __intrinsic_asm \"($1 * $0)\";\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"mul\";\n") +SLANG_RAW(" case spirv: return spirv_asm {\n") +SLANG_RAW(" OpVectorTimesMatrix $$vector result $right $left\n") +SLANG_RAW(" };\n") +SLANG_RAW(" case wgsl: __intrinsic_asm \"($1 * $0)\";\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" vector result;\n") +SLANG_RAW(" for( int i = 0; i < N; ++i )\n") +SLANG_RAW(" {\n") +SLANG_RAW(" T sum = T(0);\n") +SLANG_RAW(" for( int j = 0; j < M; ++j )\n") +SLANG_RAW(" {\n") +SLANG_RAW(" sum += left[i][j] * right[j];\n") +SLANG_RAW(" }\n") +SLANG_RAW(" result[i] = sum;\n") +SLANG_RAW(" }\n") +SLANG_RAW(" return result;\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_metal_spirv_wgsl, sm_4_0_version)]\n") +SLANG_RAW("vector mul(matrix left, vector right)\n") +SLANG_RAW("{\n") 
+SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"($1 * $0)\";\n") +SLANG_RAW(" case metal: __intrinsic_asm \"($1 * $0)\";\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"mul\";\n") +SLANG_RAW(" case wgsl: __intrinsic_asm \"($1 * $0)\";\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" vector result;\n") +SLANG_RAW(" for( int i = 0; i < N; ++i )\n") +SLANG_RAW(" {\n") +SLANG_RAW(" T sum = T(0);\n") +SLANG_RAW(" for( int j = 0; j < M; ++j )\n") +SLANG_RAW(" {\n") +SLANG_RAW(" sum += left[i][j] * right[j];\n") +SLANG_RAW(" }\n") +SLANG_RAW(" result[i] = sum;\n") +SLANG_RAW(" }\n") +SLANG_RAW(" return result;\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[OverloadRank(-1)]\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_metal_spirv_wgsl, sm_4_0_version)]\n") +SLANG_RAW("vector mul(matrix left, vector right)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"($1 * $0)\";\n") +SLANG_RAW(" case metal: __intrinsic_asm \"($1 * $0)\";\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"mul\";\n") +SLANG_RAW(" case wgsl: __intrinsic_asm \"($1 * $0)\";\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" vector result;\n") +SLANG_RAW(" for( int i = 0; i < N; ++i )\n") +SLANG_RAW(" {\n") +SLANG_RAW(" T sum = T(0);\n") +SLANG_RAW(" for( int j = 0; j < M; ++j )\n") +SLANG_RAW(" {\n") +SLANG_RAW(" sum |= left[i][j] & right[j];\n") +SLANG_RAW(" }\n") +SLANG_RAW(" result[i] = sum;\n") +SLANG_RAW(" }\n") +SLANG_RAW(" return result;\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("// matrix-matrix\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_metal_spirv_wgsl, sm_4_0_version)]\n") +SLANG_RAW("matrix mul(matrix left, matrix right)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"($1 * $0)\";\n") +SLANG_RAW(" case metal: __intrinsic_asm \"($1 * $0)\";\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"mul\";\n") +SLANG_RAW(" case spirv: return spirv_asm {\n") +SLANG_RAW(" OpMatrixTimesMatrix $$matrix result $right $left\n") +SLANG_RAW(" };\n") +SLANG_RAW(" case wgsl: __intrinsic_asm \"($1 * $0)\";\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" matrix result;\n") +SLANG_RAW(" for( int r = 0; r < R; ++r)\n") +SLANG_RAW(" for( int c = 0; c < C; ++c)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" T sum = T(0);\n") +SLANG_RAW(" for( int i = 0; i < N; ++i )\n") +SLANG_RAW(" {\n") +SLANG_RAW(" sum += left[r][i] * right[i][c];\n") +SLANG_RAW(" }\n") +SLANG_RAW(" result[r][c] = sum;\n") +SLANG_RAW(" }\n") +SLANG_RAW(" return result;\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_metal_spirv_wgsl, sm_4_0_version)]\n") +SLANG_RAW("matrix mul(matrix left, matrix right)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"($1 * $0)\";\n") +SLANG_RAW(" case metal: __intrinsic_asm \"($1 * $0)\";\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"mul\";\n") +SLANG_RAW(" case wgsl: __intrinsic_asm \"($1 * $0)\";\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" matrix result;\n") +SLANG_RAW(" for( int r = 0; r < R; ++r)\n") +SLANG_RAW(" for( int c = 0; c < C; ++c)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" T sum = T(0);\n") +SLANG_RAW(" for( int i = 0; i < N; ++i )\n") +SLANG_RAW(" {\n") +SLANG_RAW(" sum += left[r][i] * right[i][c];\n") 
+SLANG_RAW(" }\n") +SLANG_RAW(" result[r][c] = sum;\n") +SLANG_RAW(" }\n") +SLANG_RAW(" return result;\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_metal_spirv_wgsl, sm_4_0_version)]\n") +SLANG_RAW("matrix mul(matrix left, matrix right)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"($1 * $0)\";\n") +SLANG_RAW(" case metal: __intrinsic_asm \"($1 * $0)\";\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"mul\";\n") +SLANG_RAW(" case wgsl: __intrinsic_asm \"($1 * $0)\";\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" matrix result;\n") +SLANG_RAW(" for( int r = 0; r < R; ++r)\n") +SLANG_RAW(" for( int c = 0; c < C; ++c)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" T sum = T(0);\n") +SLANG_RAW(" for( int i = 0; i < N; ++i )\n") +SLANG_RAW(" {\n") +SLANG_RAW(" sum |= left[r][i] & right[i][c];\n") +SLANG_RAW(" }\n") +SLANG_RAW(" result[r][c] = sum;\n") +SLANG_RAW(" }\n") +SLANG_RAW(" return result;\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("// next-after: next representable floating-point value\n") +SLANG_RAW("// after x in the direction of y\n") +SLANG_RAW("\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_metal_spirv, shader5_sm_4_0)]\n") +SLANG_RAW("T nextafter(T x, T y)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case metal: __intrinsic_asm \"nextafter\";\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" if (isnan(x)) return x;\n") +SLANG_RAW(" if (isnan(y)) return y;\n") +SLANG_RAW(" if (x == y) return y;\n") +SLANG_RAW(" if (T is half)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" T delta = __realCast(bit_cast(uint16_t(1)));\n") +SLANG_RAW(" return x + ((x < y) ? delta : -delta);\n") +SLANG_RAW(" }\n") +SLANG_RAW(" if (T is float)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" T delta = __realCast(bit_cast(uint32_t(1)));\n") +SLANG_RAW(" return x + ((x < y) ? delta : -delta);\n") +SLANG_RAW(" }\n") +SLANG_RAW(" T delta = __realCast(bit_cast(uint64_t(1)));\n") +SLANG_RAW(" return x + ((x < y) ? 
delta : -delta);\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_metal_spirv, shader5_sm_4_0)]\n") +SLANG_RAW("vector nextafter(vector x, vector y)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case metal: __intrinsic_asm \"nextafter\";\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" VECTOR_MAP_BINARY(T, N, nextafter, x, y);\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("/// Generate a random number (unsupported).\n") +SLANG_RAW("/// @param x The seed value.\n") +SLANG_RAW("/// @remarks This function is not supported; it always returns 0.\n") +SLANG_RAW("/// @deprecated\n") +SLANG_RAW("/// @category math\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[deprecated(\"Always returns 0\")]\n") +SLANG_RAW("float noise(float x)\n") +SLANG_RAW("{\n") +SLANG_RAW(" return 0;\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[deprecated(\"Always returns 0\")]\n") +SLANG_RAW("__generic float noise(vector x)\n") +SLANG_RAW("{\n") +SLANG_RAW(" return 0;\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("/// Indicate that an index may be non-uniform at execution time.\n") +SLANG_RAW("///\n") +SLANG_RAW("/// Shader Model 5.1 and 6.x introduce support for dynamic indexing\n") +SLANG_RAW("/// of arrays of resources, but place the restriction that *by default*\n") +SLANG_RAW("/// the implementation can assume that any value used as an index into\n") +SLANG_RAW("/// such arrays will be dynamically uniform across an entire `Draw` or `Dispatch`\n") +SLANG_RAW("/// (when using instancing, the value must be uniform across all instances;\n") +SLANG_RAW("/// it does not seem that the restriction extends to draws within a multi-draw).\n") +SLANG_RAW("///\n") +SLANG_RAW("/// In order to indicate to the implementation that it cannot make the\n") +SLANG_RAW("/// uniformity assumption, a shader programmer is required to pass the index\n") +SLANG_RAW("/// to the `NonUniformResourceIndex` function before using it as an index.\n") +SLANG_RAW("/// The function superficially acts like an identity function.\n") +SLANG_RAW("///\n") +SLANG_RAW("/// Note: a future version of Slang may take responsibility for inserting calls\n") +SLANG_RAW("/// to this function as necessary in output code, rather than make this\n") +SLANG_RAW("/// the user's responsibility, so that the default behavior of the language\n") +SLANG_RAW("/// is more semantically \"correct.\"\n") +SLANG_RAW("[ForceInline]\n") +SLANG_RAW("[require(spirv)]\n") +SLANG_RAW("T __copyObject(T v)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch {\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" return spirv_asm {\n") +SLANG_RAW(" result:$$T = OpCopyObject $v;\n") +SLANG_RAW(" };\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("/// The `NonUniformResourceIndex` function is used to indicate whether the resource index is\n") +SLANG_RAW("/// divergent, and to ensure that scalarization happens correctly for each divergent lane.\n") +SLANG_RAW("__generic\n") +SLANG_RAW("__intrinsic_op(") +SLANG_SPLICE(kIROp_NonUniformResourceIndex +) +SLANG_RAW(")\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_spirv, nonuniformqualifier)]\n") +SLANG_RAW("T NonUniformResourceIndex(T index);\n") +SLANG_RAW("\n") +SLANG_RAW("/// HLSL allows NonUniformResourceIndex around non int/uint types.\n") +SLANG_RAW("/// Its effect is presumably to ignore it, which is what the following implementation does.\n")
+SLANG_RAW("/// We should also look to add a warning for this scenario.\n") +SLANG_RAW("[__unsafeForceInlineEarly]\n") +SLANG_RAW("[deprecated(\"NonUniformResourceIndex on a type other than uint/int is deprecated and has no effect\")]\n") +SLANG_RAW("T NonUniformResourceIndex(T value) { return value; }\n") +SLANG_RAW("\n") +SLANG_RAW("/// Normalize a vector.\n") +SLANG_RAW("/// @param x The vector to normalize.\n") +SLANG_RAW("/// @return The normalized vector, `x`/`length(x)`.\n") +SLANG_RAW("/// @category math\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_metal_spirv_wgsl, sm_4_0_version)]\n") +SLANG_RAW("vector normalize(vector x)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"normalize\";\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"normalize\";\n") +SLANG_RAW(" case metal: __intrinsic_asm \"normalize\";\n") +SLANG_RAW(" case spirv: return spirv_asm {\n") +SLANG_RAW(" OpExtInst $$vector result glsl450 Normalize $x\n") +SLANG_RAW(" };\n") +SLANG_RAW(" case wgsl: __intrinsic_asm \"normalize\";\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" return x / length(x);\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_metal_spirv_wgsl, sm_4_0_version)]\n") +SLANG_RAW("T normalize(T x)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"normalize\";\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"normalize\";\n") +SLANG_RAW(" case metal: __intrinsic_asm \"normalize\";\n") +SLANG_RAW(" case spirv: return spirv_asm {\n") +SLANG_RAW(" OpExtInst $$T result glsl450 Normalize $x\n") +SLANG_RAW(" };\n") +SLANG_RAW(" case wgsl: __intrinsic_asm \"normalize\";\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" return x / length(x);\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("/// Raise to a power.\n") +SLANG_RAW("/// @param x The base value.\n") +SLANG_RAW("/// @param y The exponent value.\n") +SLANG_RAW("/// @return The value of `x` raised to the power of `y`.\n") +SLANG_RAW("/// @category math\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_metal_spirv_wgsl, sm_4_0_version)]\n") +SLANG_RAW("T pow(T x, T y)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case cpp: __intrinsic_asm \"$P_pow($0, $1)\";\n") +SLANG_RAW(" case cuda: __intrinsic_asm \"$P_pow($0, $1)\";\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"pow\";\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"pow\";\n") +SLANG_RAW(" case metal: __intrinsic_asm \"pow\";\n") +SLANG_RAW(" case spirv: return spirv_asm {\n") +SLANG_RAW(" OpExtInst $$T result glsl450 Pow $x $y\n") +SLANG_RAW(" };\n") +SLANG_RAW(" case wgsl: __intrinsic_asm \"pow\";\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_metal_spirv_wgsl, sm_4_0_version)]\n") +SLANG_RAW("vector pow(vector x, vector y)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"pow\";\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"pow\";\n") +SLANG_RAW(" case metal: __intrinsic_asm \"pow\";\n") +SLANG_RAW(" case spirv: return spirv_asm {\n") +SLANG_RAW(" OpExtInst $$vector result glsl450 Pow $x $y\n") +SLANG_RAW(" };\n") +SLANG_RAW(" case wgsl: __intrinsic_asm 
\"pow\";\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" VECTOR_MAP_BINARY(T, N, pow, x, y);\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_metal_spirv_wgsl, sm_4_0_version)]\n") +SLANG_RAW("matrix pow(matrix x, matrix y)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"pow\";\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" MATRIX_MAP_BINARY(T, N, M, pow, x, y);\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("/// Raise positive base value to a power.\n") +SLANG_RAW("/// @param x The base value, must be >= 0.\n") +SLANG_RAW("/// @param y The exponent value.\n") +SLANG_RAW("/// @return The value of `x` raised to the power of `y`.\n") +SLANG_RAW("/// @category math\n") +SLANG_RAW("/// @remarks Return value is undefined for non-positive values of `x`.\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_metal_spirv_wgsl, sm_4_0_version)]\n") +SLANG_RAW("T powr(T x, T y)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case metal: __intrinsic_asm \"powr\";\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" return pow(x, y);\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_metal_spirv_wgsl, sm_4_0_version)]\n") +SLANG_RAW("vector powr(vector x, vector y)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case metal: __intrinsic_asm \"powr\";\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" return pow(x, y);\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("// Output message\n") +SLANG_RAW("// TODO: add check to ensure format is const literal.\n") +SLANG_RAW("\n") +SLANG_RAW("/// Print a message to the debug output.\n") +SLANG_RAW("/// @param T The variadic type pack parameter for the arguments to be printed.\n") +SLANG_RAW("/// @param format The format string.\n") +SLANG_RAW("/// @param args (optional) The arguments to be printed.\n") +SLANG_RAW("/// @remarks The function maps to `printf` for HLSL, CPU and CUDA targets, and maps to `OpDebugPrintf` for SPIR-V target,\n") +SLANG_RAW("/// and maps to `debugPrintfEXT` for GLSL target. 
Depending on the target and execution environment, the function may have\n") +SLANG_RAW("/// no effect.\n") +SLANG_RAW("/// @example\n") +SLANG_RAW("/// ```cpp\n") +SLANG_RAW("/// void test(int x, float y)\n") +SLANG_RAW("/// {\n") +SLANG_RAW("/// printf(\"hello world!\\n\");\n") +SLANG_RAW("/// printf(R\"(x = \"%d\", y = \"%f\")\", x, y);\n") +SLANG_RAW("/// }\n") +SLANG_RAW("/// ```\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_spirv, printf)]\n") +SLANG_RAW("__intrinsic_op(") +SLANG_SPLICE(kIROp_Printf +) +SLANG_RAW(")\n") +SLANG_RAW("void printf(NativeString format, expand each T args);\n") +SLANG_RAW("\n") +SLANG_RAW("// Tessellation factor fixup routines\n") +SLANG_RAW("/// @category tessellation Tessellation functions\n") +SLANG_RAW("[require(hlsl, sm_5_0)]\n") +SLANG_RAW("void Process2DQuadTessFactorsAvg(\n") +SLANG_RAW(" in float4 RawEdgeFactors,\n") +SLANG_RAW(" in float2 InsideScale,\n") +SLANG_RAW(" out float4 RoundedEdgeTessFactors,\n") +SLANG_RAW(" out float2 RoundedInsideTessFactors,\n") +SLANG_RAW(" out float2 UnroundedInsideTessFactors);\n") +SLANG_RAW("\n") +SLANG_RAW("/// @category tessellation\n") +SLANG_RAW("[require(hlsl, sm_5_0)]\n") +SLANG_RAW("void Process2DQuadTessFactorsMax(\n") +SLANG_RAW(" in float4 RawEdgeFactors,\n") +SLANG_RAW(" in float2 InsideScale,\n") +SLANG_RAW(" out float4 RoundedEdgeTessFactors,\n") +SLANG_RAW(" out float2 RoundedInsideTessFactors,\n") +SLANG_RAW(" out float2 UnroundedInsideTessFactors);\n") +SLANG_RAW("\n") +SLANG_RAW("/// @category tessellation\n") +SLANG_RAW("[require(hlsl, sm_5_0)]\n") +SLANG_RAW("void Process2DQuadTessFactorsMin(\n") +SLANG_RAW(" in float4 RawEdgeFactors,\n") +SLANG_RAW(" in float2 InsideScale,\n") +SLANG_RAW(" out float4 RoundedEdgeTessFactors,\n") +SLANG_RAW(" out float2 RoundedInsideTessFactors,\n") +SLANG_RAW(" out float2 UnroundedInsideTessFactors);\n") +SLANG_RAW("\n") +SLANG_RAW("/// @category tessellation\n") +SLANG_RAW("[require(hlsl, sm_5_0)]\n") +SLANG_RAW("void ProcessIsolineTessFactors(\n") +SLANG_RAW(" in float RawDetailFactor,\n") +SLANG_RAW(" in float RawDensityFactor,\n") +SLANG_RAW(" out float RoundedDetailFactor,\n") +SLANG_RAW(" out float RoundedDensityFactor);\n") +SLANG_RAW("\n") +SLANG_RAW("/// @category tessellation\n") +SLANG_RAW("[require(hlsl, sm_5_0)]\n") +SLANG_RAW("void ProcessQuadTessFactorsAvg(\n") +SLANG_RAW(" in float4 RawEdgeFactors,\n") +SLANG_RAW(" in float InsideScale,\n") +SLANG_RAW(" out float4 RoundedEdgeTessFactors,\n") +SLANG_RAW(" out float2 RoundedInsideTessFactors,\n") +SLANG_RAW(" out float2 UnroundedInsideTessFactors);\n") +SLANG_RAW("\n") +SLANG_RAW("/// @category tessellation\n") +SLANG_RAW("[require(hlsl, sm_5_0)]\n") +SLANG_RAW("void ProcessQuadTessFactorsMax(\n") +SLANG_RAW(" in float4 RawEdgeFactors,\n") +SLANG_RAW(" in float InsideScale,\n") +SLANG_RAW(" out float4 RoundedEdgeTessFactors,\n") +SLANG_RAW(" out float2 RoundedInsideTessFactors,\n") +SLANG_RAW(" out float2 UnroundedInsideTessFactors);\n") +SLANG_RAW("\n") +SLANG_RAW("/// @category tessellation\n") +SLANG_RAW("[require(hlsl, sm_5_0)]\n") +SLANG_RAW("void ProcessQuadTessFactorsMin(\n") +SLANG_RAW(" in float4 RawEdgeFactors,\n") +SLANG_RAW(" in float InsideScale,\n") +SLANG_RAW(" out float4 RoundedEdgeTessFactors,\n") +SLANG_RAW(" out float2 RoundedInsideTessFactors,\n") +SLANG_RAW(" out float2 UnroundedInsideTessFactors);\n") +SLANG_RAW("\n") +SLANG_RAW("/// @category tessellation\n") +SLANG_RAW("[require(hlsl, sm_5_0)]\n") +SLANG_RAW("void ProcessTriTessFactorsAvg(\n") +SLANG_RAW(" in float3 
RawEdgeFactors,\n") +SLANG_RAW(" in float InsideScale,\n") +SLANG_RAW(" out float3 RoundedEdgeTessFactors,\n") +SLANG_RAW(" out float RoundedInsideTessFactor,\n") +SLANG_RAW(" out float UnroundedInsideTessFactor);\n") +SLANG_RAW("\n") +SLANG_RAW("/// @category tessellation\n") +SLANG_RAW("[require(hlsl, sm_5_0)]\n") +SLANG_RAW("void ProcessTriTessFactorsMax(\n") +SLANG_RAW(" in float3 RawEdgeFactors,\n") +SLANG_RAW(" in float InsideScale,\n") +SLANG_RAW(" out float3 RoundedEdgeTessFactors,\n") +SLANG_RAW(" out float RoundedInsideTessFactor,\n") +SLANG_RAW(" out float UnroundedInsideTessFactor);\n") +SLANG_RAW("\n") +SLANG_RAW("/// @category tessellation\n") +SLANG_RAW("[require(hlsl, sm_5_0)]\n") +SLANG_RAW("void ProcessTriTessFactorsMin(\n") +SLANG_RAW(" in float3 RawEdgeFactors,\n") +SLANG_RAW(" in float InsideScale,\n") +SLANG_RAW(" out float3 RoundedEdgeTessFactors,\n") +SLANG_RAW(" out float RoundedInsideTessFactors,\n") +SLANG_RAW(" out float UnroundedInsideTessFactors);\n") +SLANG_RAW("\n") +SLANG_RAW("/// Convert degrees to radians.\n") +SLANG_RAW("/// @param x The angle in degrees.\n") +SLANG_RAW("/// @return The angle in radians.\n") +SLANG_RAW("/// @category math\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_metal_spirv_wgsl, sm_4_0_version)]\n") +SLANG_RAW("T radians(T x)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"radians\";\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"radians\";\n") +SLANG_RAW(" case spirv: return spirv_asm {\n") +SLANG_RAW(" OpExtInst $$T result glsl450 Radians $x\n") +SLANG_RAW(" };\n") +SLANG_RAW(" case wgsl: __intrinsic_asm \"radians\";\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" return x * (T.getPi() / T(180.0f));\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_metal_spirv_wgsl, sm_4_0_version)]\n") +SLANG_RAW("vector radians(vector x)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"radians\";\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"radians\";\n") +SLANG_RAW(" case spirv: return spirv_asm {\n") +SLANG_RAW(" OpExtInst $$vector result glsl450 Radians $x\n") +SLANG_RAW(" };\n") +SLANG_RAW(" case wgsl: __intrinsic_asm \"radians\";\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" return x * (T.getPi() / T(180.0f));\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_metal_spirv_wgsl, sm_4_0_version)]\n") +SLANG_RAW("matrix radians(matrix x)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"radians\";\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" return x * (T.getPi() / T(180.0f));\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("/// Compute approximate reciprocal of `x`.\n") +SLANG_RAW("/// @param x The value to compute the reciprocal of.\n") +SLANG_RAW("/// @return The approximate reciprocal of `x`.\n") +SLANG_RAW("/// @category math\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_metal_spirv_wgsl, sm_4_0_version)]\n") +SLANG_RAW("T rcp(T x)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"rcp\";\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" return T(1.0) / x;\n") 
+SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_metal_spirv_wgsl, sm_4_0_version)]\n") +SLANG_RAW("vector rcp(vector x)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"rcp\";\n") +SLANG_RAW(" case glsl:\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" case wgsl:\n") +SLANG_RAW(" return T(1.0) / x;\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" VECTOR_MAP_UNARY(T, N, rcp, x);\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_metal_spirv_wgsl, sm_4_0_version)]\n") +SLANG_RAW("matrix rcp(matrix x)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"rcp\";\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" MATRIX_MAP_UNARY(T, N, M, rcp, x);\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("/// Reflect incident vector across plane with given normal.\n") +SLANG_RAW("/// @param i The incident vector.\n") +SLANG_RAW("/// @param n The normal vector.\n") +SLANG_RAW("/// @return The reflected vector.\n") +SLANG_RAW("/// @category math\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_metal_spirv_wgsl, sm_4_0_version)]\n") +SLANG_RAW("T reflect(T i, T n)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"reflect\";\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"reflect\";\n") +SLANG_RAW(" case metal: __intrinsic_asm \"reflect\";\n") +SLANG_RAW(" case spirv: return spirv_asm {\n") +SLANG_RAW(" OpExtInst $$T result glsl450 Reflect $i $n\n") +SLANG_RAW(" };\n") +SLANG_RAW(" case wgsl: __intrinsic_asm \"reflect\";\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" return i - T(2) * dot(n,i) * n;\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_metal_spirv_wgsl, sm_4_0_version)]\n") +SLANG_RAW("vector reflect(vector i, vector n)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"reflect\";\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"reflect\";\n") +SLANG_RAW(" case metal: __intrinsic_asm \"reflect\";\n") +SLANG_RAW(" case spirv: return spirv_asm {\n") +SLANG_RAW(" OpExtInst $$vector result glsl450 Reflect $i $n\n") +SLANG_RAW(" };\n") +SLANG_RAW(" case wgsl: __intrinsic_asm \"reflect\";\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" return i - T(2) * dot(n,i) * n;\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("/// Refract incident vector given surface normal and index of refraction.\n") +SLANG_RAW("/// @param i The incident vector.\n") +SLANG_RAW("/// @param n The normal vector.\n") +SLANG_RAW("/// @param eta The relative refractive index.\n") +SLANG_RAW("/// @category math\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_metal_spirv_wgsl, sm_4_0_version)]\n") +SLANG_RAW("vector refract(vector i, vector n, T eta)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"refract\";\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"refract\";\n") +SLANG_RAW(" case metal: __intrinsic_asm \"refract\";\n") +SLANG_RAW(" case spirv: return spirv_asm {\n") +SLANG_RAW(" OpExtInst $$vector 
result glsl450 Refract $i $n $eta\n") +SLANG_RAW(" };\n") +SLANG_RAW(" case wgsl: __intrinsic_asm \"refract\";\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" let dotNI = dot(n,i);\n") +SLANG_RAW(" let k = T(1) - eta*eta*(T(1) - dotNI * dotNI);\n") +SLANG_RAW(" if(k < T(0)) return vector(T(0));\n") +SLANG_RAW(" return eta * i - (eta * dotNI + sqrt(k)) * n;\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_metal_spirv_wgsl, sm_4_0_version)]\n") +SLANG_RAW("T refract(T i, T n, T eta)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"refract\";\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"refract\";\n") +SLANG_RAW(" case metal: __intrinsic_asm \"refract\";\n") +SLANG_RAW(" case spirv: return spirv_asm {\n") +SLANG_RAW(" OpExtInst $$T result glsl450 Refract $i $n $eta\n") +SLANG_RAW(" };\n") +SLANG_RAW(" case wgsl: __intrinsic_asm \"refract\";\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" let dotNI = dot(n,i);\n") +SLANG_RAW(" let k = T(1) - eta*eta*(T(1) - dotNI * dotNI);\n") +SLANG_RAW(" if(k < T(0)) return T(0);\n") +SLANG_RAW(" return eta * i - (eta * dotNI + sqrt(k)) * n;\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("/// Reverse order of bits.\n") +SLANG_RAW("/// @param value The value to reverse bits of.\n") +SLANG_RAW("/// @return The bits of `value`, reversed such that bit n of the result is equal to bit (width - 1 - n) of `value`.\n") +SLANG_RAW("/// @remarks For SPIR-V, this function maps to `OpBitReverse`.\n") +SLANG_RAW("/// @category bitops\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_metal_spirv_wgsl, shader5_sm_5_0)]\n") +SLANG_RAW("uint reversebits(uint value)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl:\n") +SLANG_RAW(" __intrinsic_asm \"reversebits\";\n") +SLANG_RAW(" case glsl:\n") +SLANG_RAW(" __intrinsic_asm \"bitfieldReverse\";\n") +SLANG_RAW(" case cuda:\n") +SLANG_RAW(" case cpp:\n") +SLANG_RAW(" __intrinsic_asm \"$P_reversebits($0)\";\n") +SLANG_RAW(" case metal:\n") +SLANG_RAW(" __intrinsic_asm \"reverse_bits\";\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" return spirv_asm {OpBitReverse $$uint result $value};\n") +SLANG_RAW(" case wgsl: __intrinsic_asm \"reverseBits\";\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_metal_spirv_wgsl, shader5_sm_5_0)]\n") +SLANG_RAW("vector reversebits(vector value)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" VECTOR_MAP_UNARY(uint, N, reversebits, value);\n") +SLANG_RAW(" case glsl:\n") +SLANG_RAW(" __intrinsic_asm \"bitfieldReverse\";\n") +SLANG_RAW(" case metal:\n") +SLANG_RAW(" __intrinsic_asm \"reverse_bits\";\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" return spirv_asm {OpBitReverse $$vector result $value};\n") +SLANG_RAW(" case wgsl: __intrinsic_asm \"reverseBits\";\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("/// Round even.\n") +SLANG_RAW("/// @param x The value to round.\n") +SLANG_RAW("/// @return The value rounded to the nearest integer, with ties rounded to the nearest even integer.\n") +SLANG_RAW("/// @category math\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[ForceInline]\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_metal_spirv, 
sm_4_0_version)]\n") +SLANG_RAW("T rint(T x)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"roundEven\";\n") +SLANG_RAW(" case metal: __intrinsic_asm \"rint\";\n") +SLANG_RAW(" case spirv: return spirv_asm {\n") +SLANG_RAW(" OpExtInst $$T result glsl450 RoundEven $x\n") +SLANG_RAW(" };\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" T nearest = round(x);\n") +SLANG_RAW("\n") +SLANG_RAW(" // Check if the value is exactly halfway between two integers\n") +SLANG_RAW(" if (abs(x - nearest) == T(0.5))\n") +SLANG_RAW(" {\n") +SLANG_RAW(" // If halfway, choose the even number\n") +SLANG_RAW(" if ((nearest / T(2)) * T(2) != nearest)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" // If the nearest number is odd,\n") +SLANG_RAW(" // move to the closest even number\n") +SLANG_RAW(" nearest -= ((x < nearest) ? T(1) : T(-1));\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW(" return nearest;\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[ForceInline]\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_metal_spirv, sm_4_0_version)]\n") +SLANG_RAW("vector rint(vector x)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"roundEven\";\n") +SLANG_RAW(" case metal: __intrinsic_asm \"rint\";\n") +SLANG_RAW(" case spirv: return spirv_asm {\n") +SLANG_RAW(" OpExtInst $$vector result glsl450 RoundEven $x\n") +SLANG_RAW(" };\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" VECTOR_MAP_UNARY(T, N, rint, x);\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("/// Round-to-nearest.\n") +SLANG_RAW("/// @param x The value to round.\n") +SLANG_RAW("/// @return The value rounded to the nearest integer.\n") +SLANG_RAW("/// @remarks Rounding behavior of .5 is determined by target intrinsic.\n") +SLANG_RAW("/// @category math\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_metal_spirv_wgsl, sm_4_0_version)]\n") +SLANG_RAW("T round(T x)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case cpp: __intrinsic_asm \"$P_round($0)\";\n") +SLANG_RAW(" case cuda: __intrinsic_asm \"$P_round($0)\";\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"round\";\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"round\";\n") +SLANG_RAW(" case metal: __intrinsic_asm \"round\";\n") +SLANG_RAW(" case spirv: return spirv_asm {\n") +SLANG_RAW(" OpExtInst $$T result glsl450 Round $x\n") +SLANG_RAW(" };\n") +SLANG_RAW(" case wgsl: __intrinsic_asm \"round\";\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_metal_spirv_wgsl, sm_4_0_version)]\n") +SLANG_RAW("vector round(vector x)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"round\";\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"round\";\n") +SLANG_RAW(" case metal: __intrinsic_asm \"round\";\n") +SLANG_RAW(" case spirv: return spirv_asm {\n") +SLANG_RAW(" OpExtInst $$vector result glsl450 Round $x\n") +SLANG_RAW(" };\n") +SLANG_RAW(" case wgsl: __intrinsic_asm \"round\";\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" VECTOR_MAP_UNARY(T, N, round, x);\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_metal_spirv_wgsl, sm_4_0_version)]\n") 
+SLANG_RAW("matrix round(matrix x)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"round\";\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" MATRIX_MAP_UNARY(T, N, M, round, x);\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("/// Reciprocal of square root.\n") +SLANG_RAW("/// @param x The value to compute the reciprocal square root of.\n") +SLANG_RAW("/// @return The reciprocal square root of `x`.\n") +SLANG_RAW("/// @category math\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_metal_spirv_wgsl, sm_4_0_version)]\n") +SLANG_RAW("T rsqrt(T x)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case cpp: __intrinsic_asm \"$P_rsqrt($0)\";\n") +SLANG_RAW(" case cuda: __intrinsic_asm \"$P_rsqrt($0)\";\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"inversesqrt($0)\";\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"rsqrt\";\n") +SLANG_RAW(" case metal: __intrinsic_asm \"rsqrt\";\n") +SLANG_RAW(" case spirv: return spirv_asm {\n") +SLANG_RAW(" OpExtInst $$T result glsl450 InverseSqrt $x\n") +SLANG_RAW(" };\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" return T(1.0) / sqrt(x);\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_metal_spirv_wgsl, sm_4_0_version)]\n") +SLANG_RAW("vector rsqrt(vector x)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"inversesqrt($0)\";\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"rsqrt\";\n") +SLANG_RAW(" case metal: __intrinsic_asm \"rsqrt\";\n") +SLANG_RAW(" case spirv: return spirv_asm {\n") +SLANG_RAW(" OpExtInst $$vector result glsl450 InverseSqrt $x\n") +SLANG_RAW(" };\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" VECTOR_MAP_UNARY(T, N, rsqrt, x);\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_metal_spirv_wgsl, sm_4_0_version)]\n") +SLANG_RAW("matrix rsqrt(matrix x)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"rsqrt\";\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" MATRIX_MAP_UNARY(T, N, M, rsqrt, x);\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("/// Clamp value to [0,1] range.\n") +SLANG_RAW("/// @param x The value to clamp.\n") +SLANG_RAW("/// @return The clamped value.\n") +SLANG_RAW("/// @category math\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_metal_spirv_wgsl, sm_4_0_version)]\n") +SLANG_RAW("T saturate(T x)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"saturate\";\n") +SLANG_RAW(" case metal: __intrinsic_asm \"saturate\";\n") +SLANG_RAW(" case wgsl: __intrinsic_asm \"saturate\";\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" return clamp(x, T(0), T(1));\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_metal_spirv_wgsl, sm_4_0_version)]\n") +SLANG_RAW("vector saturate(vector x)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"saturate\";\n") +SLANG_RAW(" case metal: __intrinsic_asm \"saturate\";\n") +SLANG_RAW(" case wgsl: __intrinsic_asm 
\"saturate\";\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" return clamp(x,\n") +SLANG_RAW(" vector(T(0)),\n") +SLANG_RAW(" vector(T(1)));\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_metal_spirv_wgsl, sm_4_0_version)]\n") +SLANG_RAW("matrix saturate(matrix x)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"saturate\";\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" MATRIX_MAP_UNARY(T, N, M, saturate, x);\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("__generic\n") +SLANG_RAW("__intrinsic_op(") +SLANG_SPLICE(kIROp_IntCast +) +SLANG_RAW(")\n") +SLANG_RAW("T __int_cast(U val);\n") +SLANG_RAW("\n") +SLANG_RAW("__generic\n") +SLANG_RAW("__intrinsic_op(") +SLANG_SPLICE(kIROp_IntCast +) +SLANG_RAW(")\n") +SLANG_RAW("vector __int_cast(vector val);\n") +SLANG_RAW("\n") +SLANG_RAW("/// Extract sign of value.\n") +SLANG_RAW("/// @param x The value to extract the sign of.\n") +SLANG_RAW("/// @return -1 if `x` is negative, 0 if `x` is zero, and 1 if `x` is positive.\n") +SLANG_RAW("/// @category math Math functions\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("int sign(T x)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"sign\";\n") +SLANG_RAW(" case metal: __intrinsic_asm \"int(sign($0))\";\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"int(sign($0))\";\n") +SLANG_RAW(" case cuda:\n") +SLANG_RAW(" case cpp:\n") +SLANG_RAW(" __intrinsic_asm \"$P_sign($0)\";\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" if (__isFloat())\n") +SLANG_RAW(" return spirv_asm\n") +SLANG_RAW(" {\n") +SLANG_RAW(" %fsign:$$T = OpExtInst glsl450 FSign $x;\n") +SLANG_RAW(" result:$$int = OpConvertFToS %fsign\n") +SLANG_RAW(" };\n") +SLANG_RAW(" else\n") +SLANG_RAW(" return __int_cast(spirv_asm {OpExtInst $$T result glsl450 SSign $x});\n") +SLANG_RAW(" case wgsl: __intrinsic_asm \"sign\";\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("vector sign(vector x)\n") +SLANG_RAW("{\n") +SLANG_RAW(" if(N == 1)\n") +SLANG_RAW(" return vector(sign(x[0]));\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"sign\";\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"ivec$N0(sign($0))\";\n") +SLANG_RAW(" case metal: __intrinsic_asm \"vec(sign($0))\";\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" if (__isFloat())\n") +SLANG_RAW(" return spirv_asm\n") +SLANG_RAW(" {\n") +SLANG_RAW(" %fsign:$$vector = OpExtInst glsl450 FSign $x;\n") +SLANG_RAW(" result:$$vector = OpConvertFToS %fsign\n") +SLANG_RAW(" };\n") +SLANG_RAW(" else\n") +SLANG_RAW(" return __int_cast(spirv_asm {OpExtInst $$vector result glsl450 SSign $x});\n") +SLANG_RAW(" case wgsl: __intrinsic_asm \"sign\";\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" VECTOR_MAP_UNARY(int, N, sign, x);\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_spirv_wgsl, sm_4_0_version)]\n") +SLANG_RAW("matrix sign(matrix x)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"sign\";\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" MATRIX_MAP_UNARY(int, N, M, sign, x);\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("/// Sine.\n") 
+SLANG_RAW("/// @param x The angle in radians.\n") +SLANG_RAW("/// @return The sine of `x`.\n") +SLANG_RAW("/// @category math\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_metal_spirv_wgsl, sm_4_0_version)]\n") +SLANG_RAW("T sin(T x)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case cpp: __intrinsic_asm \"$P_sin($0)\";\n") +SLANG_RAW(" case cuda: __intrinsic_asm \"$P_sin($0)\";\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"sin\";\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"sin\";\n") +SLANG_RAW(" case metal: __intrinsic_asm \"sin\";\n") +SLANG_RAW(" case spirv: return spirv_asm {\n") +SLANG_RAW(" OpExtInst $$T result glsl450 Sin $x\n") +SLANG_RAW(" };\n") +SLANG_RAW(" case wgsl: __intrinsic_asm \"sin\";\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_metal_spirv_wgsl, sm_4_0_version)]\n") +SLANG_RAW("vector sin(vector x)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"sin\";\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"sin\";\n") +SLANG_RAW(" case metal: __intrinsic_asm \"sin\";\n") +SLANG_RAW(" case spirv: return spirv_asm {\n") +SLANG_RAW(" OpExtInst $$vector result glsl450 Sin $x\n") +SLANG_RAW(" };\n") +SLANG_RAW(" case wgsl: __intrinsic_asm \"sin\";\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" VECTOR_MAP_UNARY(T, N, sin, x);\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_metal_spirv_wgsl, sm_4_0_version)]\n") +SLANG_RAW("matrix sin(matrix x)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"sin\";\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" MATRIX_MAP_UNARY(T, N, M, sin, x);\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[require(metal)]\n") +SLANG_RAW("T __sincos_metal(T x, out T c)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case metal: __intrinsic_asm \"sincos($0, *$1)\";\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[require(metal)]\n") +SLANG_RAW("vector __sincos_metal(vector x, out vector c)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case metal: __intrinsic_asm \"sincos($0, *$1)\";\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("/// Sine and cosine.\n") +SLANG_RAW("/// Calculate both the sine and cosine of `x`.\n") +SLANG_RAW("/// @param x The angle in radians.\n") +SLANG_RAW("/// @param[out] s The sine of `x`.\n") +SLANG_RAW("/// @param[out] c The cosine of `x`.\n") +SLANG_RAW("/// @return void\n") +SLANG_RAW("/// @category math\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[ForceInline]\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_metal_spirv_wgsl, sm_4_0_version)]\n") +SLANG_RAW("void sincos(T x, out T s, out T c)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case cuda: __intrinsic_asm \"$P_sincos($0, $1, $2)\";\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"sincos\";\n") +SLANG_RAW(" case metal:\n") +SLANG_RAW(" //__intrinsic_asm \"*($1) = sincos($0, *($2))\";\n") +SLANG_RAW(" s = __sincos_metal(x, c);\n") +SLANG_RAW(" 
return;\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" s = sin(x);\n") +SLANG_RAW(" c = cos(x);\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[ForceInline]\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_metal_spirv_wgsl, sm_4_0_version)]\n") +SLANG_RAW("void sincos(vector x, out vector s, out vector c)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"sincos\";\n") +SLANG_RAW(" case metal:\n") +SLANG_RAW(" //__intrinsic_asm \"*($1) = sincos($0, *($2))\";\n") +SLANG_RAW(" s = __sincos_metal(x, c);\n") +SLANG_RAW(" return;\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" s = sin(x);\n") +SLANG_RAW(" c = cos(x);\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[ForceInline]\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_metal_spirv_wgsl, sm_4_0_version)]\n") +SLANG_RAW("void sincos(matrix x, out matrix s, out matrix c)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"sincos\";\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" s = sin(x);\n") +SLANG_RAW(" c = cos(x);\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("/// Hyperbolic sine.\n") +SLANG_RAW("/// @param x The value to compute the hyperbolic sine of.\n") +SLANG_RAW("/// @return The hyperbolic sine of `x`.\n") +SLANG_RAW("/// @category math\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_metal_spirv_wgsl, sm_4_0_version)]\n") +SLANG_RAW("T sinh(T x)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case cpp: __intrinsic_asm \"$P_sinh($0)\";\n") +SLANG_RAW(" case cuda: __intrinsic_asm \"$P_sinh($0)\";\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"sinh\";\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"sinh\";\n") +SLANG_RAW(" case metal: __intrinsic_asm \"sinh\";\n") +SLANG_RAW(" case spirv: return spirv_asm {\n") +SLANG_RAW(" OpExtInst $$T result glsl450 Sinh $x\n") +SLANG_RAW(" };\n") +SLANG_RAW(" case wgsl: __intrinsic_asm \"sinh\";\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_metal_spirv_wgsl, sm_4_0_version)]\n") +SLANG_RAW("vector sinh(vector x)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"sinh\";\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"sinh\";\n") +SLANG_RAW(" case metal: __intrinsic_asm \"sinh\";\n") +SLANG_RAW(" case spirv: return spirv_asm {\n") +SLANG_RAW(" OpExtInst $$vector result glsl450 Sinh $x\n") +SLANG_RAW(" };\n") +SLANG_RAW(" case wgsl: __intrinsic_asm \"sinh\";\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" VECTOR_MAP_UNARY(T, N, sinh, x);\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_metal_spirv_wgsl, sm_4_0_version)]\n") +SLANG_RAW("matrix sinh(matrix x)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"sinh\";\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" MATRIX_MAP_UNARY(T, N, M, sinh, x);\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("/// Compute the sine of `x * pi`.\n") +SLANG_RAW("/// @param x The value to compute the sine of.\n") +SLANG_RAW("/// @return The sine of `x * 
pi`.\n") +SLANG_RAW("/// @category math\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_metal_spirv_wgsl, sm_4_0_version)]\n") +SLANG_RAW("T sinpi(T x)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case metal: __intrinsic_asm \"sinpi\";\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" return sin(T.getPi() * x);\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_metal_spirv_wgsl, sm_4_0_version)]\n") +SLANG_RAW("vector sinpi(vector x)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case metal: __intrinsic_asm \"sinpi\";\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" return sin(T.getPi() * x);\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("\n") +SLANG_RAW("/// Smooth step (Hermite interpolation).\n") +SLANG_RAW("/// @param min The lower edge of the interpolation range.\n") +SLANG_RAW("/// @param max The upper edge of the interpolation range.\n") +SLANG_RAW("/// @param x The value to interpolate.\n") +SLANG_RAW("/// @return 0 if `x` is less than `min`, 1 if `x` is greater than `max`, and a smooth interpolation between 0 and 1 otherwise.\n") +SLANG_RAW("/// @category math\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_metal_spirv_wgsl, sm_4_0_version)]\n") +SLANG_RAW("T smoothstep(T min, T max, T x)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"smoothstep\";\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"smoothstep\";\n") +SLANG_RAW(" case metal: __intrinsic_asm \"smoothstep\";\n") +SLANG_RAW(" case spirv: return spirv_asm {\n") +SLANG_RAW(" OpExtInst $$T result glsl450 SmoothStep $min $max $x\n") +SLANG_RAW(" };\n") +SLANG_RAW(" case wgsl: __intrinsic_asm \"smoothstep\";\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" let t = saturate((x - min) / (max - min));\n") +SLANG_RAW(" return t * t * (T(3.0f) - (t + t));\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_metal_spirv_wgsl, sm_4_0_version)]\n") +SLANG_RAW("vector smoothstep(vector min, vector max, vector x)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"smoothstep\";\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"smoothstep\";\n") +SLANG_RAW(" case metal: __intrinsic_asm \"smoothstep\";\n") +SLANG_RAW(" case spirv: return spirv_asm {\n") +SLANG_RAW(" OpExtInst $$vector result glsl450 SmoothStep $min $max $x\n") +SLANG_RAW(" };\n") +SLANG_RAW(" case wgsl: __intrinsic_asm \"smoothstep\";\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" VECTOR_MAP_TRINARY(T, N, smoothstep, min, max, x);\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_metal_spirv_wgsl, sm_4_0_version)]\n") +SLANG_RAW("matrix smoothstep(matrix min, matrix max, matrix x)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"smoothstep\";\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" MATRIX_MAP_TRINARY(T, N, M, smoothstep, min, max, x);\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("/// Compute the square root of `x`.\n") +SLANG_RAW("/// @param x The value to compute the square 
root of.\n") +SLANG_RAW("/// @return The square root of `x`.\n") +SLANG_RAW("/// @category math\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_metal_spirv_wgsl, sm_4_0_version)]\n") +SLANG_RAW("T sqrt(T x)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case cpp: __intrinsic_asm \"$P_sqrt($0)\";\n") +SLANG_RAW(" case cuda: __intrinsic_asm \"$P_sqrt($0)\";\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"sqrt\";\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"sqrt\";\n") +SLANG_RAW(" case metal: __intrinsic_asm \"sqrt\";\n") +SLANG_RAW(" case spirv: return spirv_asm {\n") +SLANG_RAW(" OpExtInst $$T result glsl450 Sqrt $x\n") +SLANG_RAW(" };\n") +SLANG_RAW(" case wgsl: __intrinsic_asm \"sqrt\";\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_metal_spirv_wgsl, sm_4_0_version)]\n") +SLANG_RAW("vector sqrt(vector x)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"sqrt\";\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"sqrt\";\n") +SLANG_RAW(" case metal: __intrinsic_asm \"sqrt\";\n") +SLANG_RAW(" case spirv: return spirv_asm {\n") +SLANG_RAW(" OpExtInst $$vector result glsl450 Sqrt $x\n") +SLANG_RAW(" };\n") +SLANG_RAW(" case wgsl: __intrinsic_asm \"sqrt\";\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" VECTOR_MAP_UNARY(T, N, sqrt, x);\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_metal_spirv_wgsl, sm_4_0_version)]\n") +SLANG_RAW("matrix sqrt(matrix x)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"sqrt\";\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" MATRIX_MAP_UNARY(T, N, M, sqrt, x);\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("/// Step function.\n") +SLANG_RAW("/// @param y The threshold value.\n") +SLANG_RAW("/// @param x The value to compare against the threshold.\n") +SLANG_RAW("/// @return 0 if `x` is less than `y`, and 1 otherwise.\n") +SLANG_RAW("/// @category math\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_metal_spirv_wgsl, sm_4_0_version)]\n") +SLANG_RAW("T step(T y, T x)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"step\";\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"step\";\n") +SLANG_RAW(" case metal: __intrinsic_asm \"step\";\n") +SLANG_RAW(" case spirv: return spirv_asm {\n") +SLANG_RAW(" OpExtInst $$T result glsl450 Step $y $x\n") +SLANG_RAW(" };\n") +SLANG_RAW(" case wgsl: __intrinsic_asm \"step\";\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" return x < y ? 
T(0.0f) : T(1.0f);\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_metal_spirv_wgsl, sm_4_0_version)]\n") +SLANG_RAW("vector step(vector y, vector x)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"step\";\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"step\";\n") +SLANG_RAW(" case metal: __intrinsic_asm \"step\";\n") +SLANG_RAW(" case spirv: return spirv_asm {\n") +SLANG_RAW(" OpExtInst $$vector result glsl450 Step $y $x\n") +SLANG_RAW(" };\n") +SLANG_RAW(" case wgsl: __intrinsic_asm \"step\";\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" VECTOR_MAP_BINARY(T, N, step, y, x);\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_metal_spirv_wgsl, sm_4_0_version)]\n") +SLANG_RAW("matrix step(matrix y, matrix x)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"step\";\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" MATRIX_MAP_BINARY(T, N, M, step, y, x);\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("/// Compute the tangent of `x`.\n") +SLANG_RAW("/// @param x The angle in radians.\n") +SLANG_RAW("/// @return The tangent of `x`.\n") +SLANG_RAW("/// @category math\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_metal_spirv_wgsl, sm_4_0_version)]\n") +SLANG_RAW("T tan(T x)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case cpp: __intrinsic_asm \"$P_tan($0)\";\n") +SLANG_RAW(" case cuda: __intrinsic_asm \"$P_tan($0)\";\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"tan\";\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"tan\";\n") +SLANG_RAW(" case metal: __intrinsic_asm \"tan\";\n") +SLANG_RAW(" case spirv: return spirv_asm {\n") +SLANG_RAW(" OpExtInst $$T result glsl450 Tan $x\n") +SLANG_RAW(" };\n") +SLANG_RAW(" case wgsl: __intrinsic_asm \"tan\";\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_metal_spirv_wgsl, sm_4_0_version)]\n") +SLANG_RAW("vector tan(vector x)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"tan\";\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"tan\";\n") +SLANG_RAW(" case metal: __intrinsic_asm \"tan\";\n") +SLANG_RAW(" case spirv: return spirv_asm {\n") +SLANG_RAW(" OpExtInst $$vector result glsl450 Tan $x\n") +SLANG_RAW(" };\n") +SLANG_RAW(" case wgsl: __intrinsic_asm \"tan\";\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" VECTOR_MAP_UNARY(T, N, tan, x);\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_metal_spirv_wgsl, sm_4_0_version)]\n") +SLANG_RAW("matrix tan(matrix x)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"tan\";\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" MATRIX_MAP_UNARY(T, N, M, tan, x);\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("/// Compute the hyperbolic tangent of `x`.\n") +SLANG_RAW("/// @param x The value to compute the hyperbolic tangent of, in radians.\n") +SLANG_RAW("/// @return The hyperbolic tangent of `x`.\n") +SLANG_RAW("/// @category math\n") 
+SLANG_RAW("__generic\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_metal_spirv_wgsl, sm_4_0_version)]\n") +SLANG_RAW("T tanh(T x)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case cpp: __intrinsic_asm \"$P_tanh($0)\";\n") +SLANG_RAW(" case cuda: __intrinsic_asm \"$P_tanh($0)\";\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"tanh\";\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"tanh\";\n") +SLANG_RAW(" case metal: __intrinsic_asm \"tanh\";\n") +SLANG_RAW(" case spirv: return spirv_asm {\n") +SLANG_RAW(" OpExtInst $$T result glsl450 Tanh $x\n") +SLANG_RAW(" };\n") +SLANG_RAW(" case wgsl: __intrinsic_asm \"tanh\";\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_metal_spirv_wgsl, sm_4_0_version)]\n") +SLANG_RAW("vector tanh(vector x)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"tanh\";\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"tanh\";\n") +SLANG_RAW(" case metal: __intrinsic_asm \"tanh\";\n") +SLANG_RAW(" case spirv: return spirv_asm {\n") +SLANG_RAW(" OpExtInst $$vector result glsl450 Tanh $x\n") +SLANG_RAW(" };\n") +SLANG_RAW(" case wgsl: __intrinsic_asm \"tanh\";\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" VECTOR_MAP_UNARY(T, N, tanh, x);\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_metal_spirv_wgsl, sm_4_0_version)]\n") +SLANG_RAW("matrix tanh(matrix x)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"tanh\";\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" MATRIX_MAP_UNARY(T, N, M, tanh, x);\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("/// Compute the tangent of `x * pi`.\n") +SLANG_RAW("/// @param x The value to compute the tangent of.\n") +SLANG_RAW("/// @return The tangent of `x * pi`.\n") +SLANG_RAW("/// @category math\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_metal_spirv_wgsl, sm_4_0_version)]\n") +SLANG_RAW("T tanpi(T x)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case metal: __intrinsic_asm \"tanpi\";\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" return tan(T.getPi() * x);\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_metal_spirv_wgsl, sm_4_0_version)]\n") +SLANG_RAW("vector tanpi(vector x)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case metal: __intrinsic_asm \"tanpi\";\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" return tan(T.getPi() * x);\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("\n") +SLANG_RAW("/// Matrix transpose.\n") +SLANG_RAW("/// @param x The matrix to transpose.\n") +SLANG_RAW("/// @return The transposed matrix.\n") +SLANG_RAW("/// @category math\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_spirv_wgsl, sm_4_0_version)]\n") +SLANG_RAW("[PreferRecompute]\n") +SLANG_RAW("matrix transpose(matrix x)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"transpose\";\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"transpose\";\n") +SLANG_RAW(" case spirv: return 
spirv_asm {\n") +SLANG_RAW(" OpTranspose $$matrix result $x\n") +SLANG_RAW(" };\n") +SLANG_RAW(" case wgsl: __intrinsic_asm \"transpose\";\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" matrix result;\n") +SLANG_RAW(" for(int r = 0; r < M; ++r)\n") +SLANG_RAW(" for(int c = 0; c < N; ++c)\n") +SLANG_RAW(" result[r][c] = x[c][r];\n") +SLANG_RAW(" return result;\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_spirv_wgsl, sm_4_0_version)]\n") +SLANG_RAW("[PreferRecompute]\n") +SLANG_RAW("matrix transpose(matrix x)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"transpose\";\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"transpose\";\n") +SLANG_RAW(" case spirv: return spirv_asm {\n") +SLANG_RAW(" OpTranspose $$matrix result $x\n") +SLANG_RAW(" };\n") +SLANG_RAW(" case wgsl: __intrinsic_asm \"transpose\";\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" matrix result;\n") +SLANG_RAW(" for (int r = 0; r < M; ++r)\n") +SLANG_RAW(" for (int c = 0; c < N; ++c)\n") +SLANG_RAW(" result[r][c] = x[c][r];\n") +SLANG_RAW(" return result;\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_spirv_wgsl, sm_4_0_version)]\n") +SLANG_RAW("[PreferRecompute]\n") +SLANG_RAW("[OverloadRank(-1)]\n") +SLANG_RAW("matrix transpose(matrix x)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"transpose\";\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"transpose\";\n") +SLANG_RAW(" case spirv: return spirv_asm {\n") +SLANG_RAW(" OpTranspose $$matrix result $x\n") +SLANG_RAW(" };\n") +SLANG_RAW(" case wgsl: __intrinsic_asm \"transpose\";\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" matrix result;\n") +SLANG_RAW(" for (int r = 0; r < M; ++r)\n") +SLANG_RAW(" for (int c = 0; c < N; ++c)\n") +SLANG_RAW(" result[r][c] = x[c][r];\n") +SLANG_RAW(" return result;\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("/// Truncate to integer.\n") +SLANG_RAW("/// @category math\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_metal_spirv_wgsl, sm_4_0_version)]\n") +SLANG_RAW("T trunc(T x)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case cpp: __intrinsic_asm \"$P_trunc($0)\";\n") +SLANG_RAW(" case cuda: __intrinsic_asm \"$P_trunc($0)\";\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"trunc\";\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"trunc\";\n") +SLANG_RAW(" case metal: __intrinsic_asm \"trunc\";\n") +SLANG_RAW(" case spirv: return spirv_asm {\n") +SLANG_RAW(" OpExtInst $$T result glsl450 Trunc $x\n") +SLANG_RAW(" };\n") +SLANG_RAW(" case wgsl: __intrinsic_asm \"trunc\";\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_metal_spirv_wgsl, sm_4_0_version)]\n") +SLANG_RAW("vector trunc(vector x)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"trunc\";\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"trunc\";\n") +SLANG_RAW(" case metal: __intrinsic_asm \"trunc\";\n") +SLANG_RAW(" case spirv: return spirv_asm {\n") +SLANG_RAW(" OpExtInst $$vector result glsl450 Trunc $x\n") +SLANG_RAW(" };\n") +SLANG_RAW(" case wgsl: __intrinsic_asm \"trunc\";\n") +SLANG_RAW(" default:\n") 
+SLANG_RAW(" VECTOR_MAP_UNARY(T, N, trunc, x);\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_metal_spirv_wgsl, sm_4_0_version)]\n") +SLANG_RAW("matrix trunc(matrix x)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"trunc\";\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" MATRIX_MAP_UNARY(T, N, M, trunc, x);\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("// Slang Specific 'Mask' Wave Intrinsics\n") +SLANG_RAW("\n") +SLANG_RAW("//@hidden:\n") +SLANG_RAW("typedef uint WaveMask;\n") +SLANG_RAW("\n") +SLANG_RAW("__glsl_extension(GL_KHR_shader_subgroup_ballot)\n") +SLANG_RAW("__spirv_version(1.3)\n") +SLANG_RAW("[require(cuda_glsl_hlsl_spirv, subgroup_ballot)]\n") +SLANG_RAW("WaveMask WaveGetConvergedMask()\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case glsl:\n") +SLANG_RAW(" __intrinsic_asm \"subgroupBallot(true).x\";\n") +SLANG_RAW(" case hlsl:\n") +SLANG_RAW(" __intrinsic_asm \"WaveActiveBallot(true).x\";\n") +SLANG_RAW(" case cuda:\n") +SLANG_RAW(" __intrinsic_asm \"__activemask()\";\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" let _true = true;\n") +SLANG_RAW(" return (spirv_asm\n") +SLANG_RAW(" {\n") +SLANG_RAW(" OpCapability GroupNonUniformBallot;\n") +SLANG_RAW(" OpGroupNonUniformBallot $$uint4 result Subgroup $_true\n") +SLANG_RAW(" }).x;\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("__intrinsic_op(") +SLANG_SPLICE(kIROp_WaveGetActiveMask +) +SLANG_RAW(")\n") +SLANG_RAW("WaveMask __WaveGetActiveMask();\n") +SLANG_RAW("\n") +SLANG_RAW("__glsl_extension(GL_KHR_shader_subgroup_ballot)\n") +SLANG_RAW("__spirv_version(1.3)\n") +SLANG_RAW("[require(cuda_glsl_hlsl_spirv, subgroup_ballot_activemask)]\n") +SLANG_RAW("WaveMask WaveGetActiveMask()\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case glsl:\n") +SLANG_RAW(" __intrinsic_asm \"subgroupBallot(true).x\";\n") +SLANG_RAW(" case hlsl:\n") +SLANG_RAW(" __intrinsic_asm \"WaveActiveBallot(true).x\";\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" let _true = true;\n") +SLANG_RAW(" return (spirv_asm\n") +SLANG_RAW(" {\n") +SLANG_RAW(" OpCapability GroupNonUniformBallot;\n") +SLANG_RAW(" OpGroupNonUniformBallot $$uint4 result Subgroup $_true\n") +SLANG_RAW(" }).x;\n") +SLANG_RAW(" case cuda:\n") +SLANG_RAW(" return __WaveGetActiveMask();\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("__glsl_extension(GL_KHR_shader_subgroup_basic)\n") +SLANG_RAW("__spirv_version(1.3)\n") +SLANG_RAW("[require(cuda_glsl_hlsl_spirv, subgroup_basic)]\n") +SLANG_RAW("bool WaveMaskIsFirstLane(WaveMask mask)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case glsl:\n") +SLANG_RAW(" __intrinsic_asm \"subgroupElect()\";\n") +SLANG_RAW(" case cuda:\n") +SLANG_RAW(" __intrinsic_asm \"(($0 & -$0) == (WarpMask(1) << _getLaneId()))\";\n") +SLANG_RAW(" case hlsl:\n") +SLANG_RAW(" __intrinsic_asm \"WaveIsFirstLane()\";\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" return spirv_asm\n") +SLANG_RAW(" {\n") +SLANG_RAW(" OpCapability GroupNonUniformBallot;\n") +SLANG_RAW(" OpGroupNonUniformElect $$bool result Subgroup\n") +SLANG_RAW(" };\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("__glsl_extension(GL_KHR_shader_subgroup_vote)\n") +SLANG_RAW("__spirv_version(1.3)\n") 
+SLANG_RAW("[require(cuda_glsl_hlsl_spirv, subgroup_vote)]\n") +SLANG_RAW("bool WaveMaskAllTrue(WaveMask mask, bool condition)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case glsl:\n") +SLANG_RAW(" __intrinsic_asm \"subgroupAll($1)\";\n") +SLANG_RAW(" case cuda:\n") +SLANG_RAW(" __intrinsic_asm \"(__all_sync($0, $1) != 0)\";\n") +SLANG_RAW(" case hlsl:\n") +SLANG_RAW(" __intrinsic_asm \"WaveActiveAllTrue($1)\";\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" return spirv_asm\n") +SLANG_RAW(" {\n") +SLANG_RAW(" OpCapability GroupNonUniformBallot;\n") +SLANG_RAW(" OpGroupNonUniformAll $$bool result Subgroup $condition\n") +SLANG_RAW(" };\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("__glsl_extension(GL_KHR_shader_subgroup_vote)\n") +SLANG_RAW("__spirv_version(1.3)\n") +SLANG_RAW("[require(cuda_glsl_hlsl_spirv, subgroup_vote)]\n") +SLANG_RAW("bool WaveMaskAnyTrue(WaveMask mask, bool condition)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case glsl:\n") +SLANG_RAW(" __intrinsic_asm \"subgroupAny($1)\";\n") +SLANG_RAW(" case cuda:\n") +SLANG_RAW(" __intrinsic_asm \"(__any_sync($0, $1) != 0)\";\n") +SLANG_RAW(" case hlsl:\n") +SLANG_RAW(" __intrinsic_asm \"WaveActiveAnyTrue($1)\";\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" return spirv_asm\n") +SLANG_RAW(" {\n") +SLANG_RAW(" OpCapability GroupNonUniformBallot;\n") +SLANG_RAW(" OpGroupNonUniformAny $$bool result Subgroup $condition\n") +SLANG_RAW(" };\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("__glsl_extension(GL_KHR_shader_subgroup_ballot)\n") +SLANG_RAW("__spirv_version(1.3)\n") +SLANG_RAW("[require(cuda_glsl_hlsl_spirv, subgroup_ballot)]\n") +SLANG_RAW("WaveMask WaveMaskBallot(WaveMask mask, bool condition)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case glsl:\n") +SLANG_RAW(" __intrinsic_asm \"subgroupBallot($1).x\";\n") +SLANG_RAW(" case cuda:\n") +SLANG_RAW(" __intrinsic_asm \"__ballot_sync($0, $1)\";\n") +SLANG_RAW(" case hlsl:\n") +SLANG_RAW(" __intrinsic_asm \"WaveActiveBallot($1)\";\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" return (spirv_asm\n") +SLANG_RAW(" {\n") +SLANG_RAW(" OpCapability GroupNonUniformBallot;\n") +SLANG_RAW(" OpGroupNonUniformBallot $$uint4 result Subgroup $condition\n") +SLANG_RAW(" }).x;\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("[require(cuda_glsl_hlsl_spirv, subgroup_basic_ballot)]\n") +SLANG_RAW("uint WaveMaskCountBits(WaveMask mask, bool value)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case cuda:\n") +SLANG_RAW(" __intrinsic_asm \"__popc(__ballot_sync($0, $1))\";\n") +SLANG_RAW(" case hlsl:\n") +SLANG_RAW(" __intrinsic_asm \"WaveActiveCountBits($1)\";\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" return _WaveCountBits(WaveActiveBallot(value));\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("// Waits until all warp lanes named in mask have executed a WaveMaskSharedSync (with the same mask)\n") +SLANG_RAW("// before resuming execution. Guarantees memory ordering in shared memory among threads participating\n") +SLANG_RAW("// in the barrier.\n") +SLANG_RAW("//\n") +SLANG_RAW("// The CUDA intrinsic says it orders *all* memory accesses, which appears to match most closely subgroupBarrier.\n") +SLANG_RAW("//\n") +SLANG_RAW("// TODO(JS):\n") +SLANG_RAW("// For HLSL it's not clear what to do. There is no explicit mechanism to 'reconverge' threads. 
In the docs it describes\n") +SLANG_RAW("// behavior as\n") +SLANG_RAW("// \"These intrinsics are dependent on active lanes and therefore flow control. In the model of this document, implementations\n") +SLANG_RAW("// must enforce that the number of active lanes exactly corresponds to the programmer's view of flow control.\"\n") +SLANG_RAW("//\n") +SLANG_RAW("// It seems this can only mean the active threads are the \"threads the program flow would lead to\". This implies a lockstep\n") +SLANG_RAW("// \"straight SIMD\" style interpretation. That being the case this op on HLSL is just a memory barrier without any Sync.\n") +SLANG_RAW("\n") +SLANG_RAW("[require(cuda_glsl_hlsl_spirv, memorybarrier)]\n") +SLANG_RAW("void AllMemoryBarrierWithWaveMaskSync(WaveMask mask)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case cuda:\n") +SLANG_RAW(" __intrinsic_asm \"__syncwarp($0)\";\n") +SLANG_RAW(" case hlsl:\n") +SLANG_RAW(" __intrinsic_asm \"AllMemoryBarrier()\";\n") +SLANG_RAW(" case glsl:\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" __subgroupBarrier();\n") +SLANG_RAW(" return;\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("// On GLSL, it appears we can't use subgroupMemoryBarrierShared, because it only implies a memory ordering, it does not\n") +SLANG_RAW("// imply convergence. For subgroupBarrier we have from the docs..\n") +SLANG_RAW("// \"The function subgroupBarrier() enforces that all active invocations within a subgroup must execute this function before any\n") +SLANG_RAW("// are allowed to continue their execution\"\n") +SLANG_RAW("// TODO(JS):\n") +SLANG_RAW("// It's not entirely clear what to do here on HLSL.\n") +SLANG_RAW("// Reading the dxc wiki (https://github.com/Microsoft/DirectXShaderCompiler/wiki/Wave-Intrinsics), we have statements like:\n") +SLANG_RAW("// ... these intrinsics enable the elimination of barrier constructs when the scope of synchronization is within the width of the SIMD processor.\n") +SLANG_RAW("// Wave: A set of lanes executed simultaneously in the processor. No explicit barriers are required to guarantee that they execute in parallel.\n") +SLANG_RAW("// Which seems to imply at least some memory barriers like Shared might not be needed.\n") +SLANG_RAW("//\n") +SLANG_RAW("// The barrier is left here though, because not only does the barrier make writes before the barrier across the wave appear to others afterwards, it's\n") +SLANG_RAW("// also there to inform the compiler on what order reads and writes can take place. 
This might seem silly because the 'Active' lanes\n") +SLANG_RAW("// aspect of HLSL seems to make everything run in lock step - but that's not quite so; it only has to appear that way as far as the programmer's\n") +SLANG_RAW("// model is concerned - divergence could potentially still happen.\n") +SLANG_RAW("\n") +SLANG_RAW("[require(cuda_glsl_hlsl_spirv, memorybarrier)]\n") +SLANG_RAW("void GroupMemoryBarrierWithWaveMaskSync(WaveMask mask)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case cuda:\n") +SLANG_RAW(" __intrinsic_asm \"__syncwarp($0)\";\n") +SLANG_RAW(" case hlsl:\n") +SLANG_RAW(" __intrinsic_asm \"GroupMemoryBarrier()\";\n") +SLANG_RAW(" case glsl:\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" __subgroupBarrier();\n") +SLANG_RAW(" return;\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("[require(cuda_glsl_hlsl_spirv, memorybarrier)]\n") +SLANG_RAW("void AllMemoryBarrierWithWaveSync()\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case cuda:\n") +SLANG_RAW(" __intrinsic_asm \"__syncwarp()\";\n") +SLANG_RAW(" case hlsl:\n") +SLANG_RAW(" __intrinsic_asm \"AllMemoryBarrier()\";\n") +SLANG_RAW(" case glsl:\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" __subgroupBarrier();\n") +SLANG_RAW(" return;\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("[require(cuda_glsl_hlsl_spirv, memorybarrier)]\n") +SLANG_RAW("void GroupMemoryBarrierWithWaveSync()\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case cuda:\n") +SLANG_RAW(" __intrinsic_asm \"__syncwarp()\";\n") +SLANG_RAW(" case hlsl:\n") +SLANG_RAW(" __intrinsic_asm \"GroupMemoryBarrier()\";\n") +SLANG_RAW(" case glsl:\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" __subgroupBarrier();\n") +SLANG_RAW(" return;\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("// NOTE! 
WaveMaskBroadcastLaneAt is *NOT* standard HLSL\n") +SLANG_RAW("// It is provided as access to subgroupBroadcast which can only take a\n") +SLANG_RAW("// constexpr laneId.\n") +SLANG_RAW("// https://github.com/KhronosGroup/GLSL/blob/master/extensions/khr/GL_KHR_shader_subgroup.txt\n") +SLANG_RAW("// Versions of SPIR-V greater than 1.4 loosen this restriction, and allow a 'dynamically uniform' index\n") +SLANG_RAW("// If that's the behavior required then client code should use WaveReadLaneAt which works this way.\n") +SLANG_RAW("\n") +SLANG_RAW("__generic\n") +SLANG_RAW("__glsl_extension(GL_KHR_shader_subgroup_ballot)\n") +SLANG_RAW("__spirv_version(1.3)\n") +SLANG_RAW("[require(cuda_glsl_hlsl_spirv, subgroup_ballot)]\n") +SLANG_RAW("T WaveMaskBroadcastLaneAt(WaveMask mask, T value, constexpr int lane)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"subgroupBroadcast($1, $2)\";\n") +SLANG_RAW(" case cuda: __intrinsic_asm \"__shfl_sync($0, $1, $2)\";\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"WaveReadLaneAt($1, $2)\";\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" let ulane = uint(lane);\n") +SLANG_RAW(" return spirv_asm {\n") +SLANG_RAW(" OpCapability GroupNonUniformBallot;\n") +SLANG_RAW(" OpGroupNonUniformBroadcast $$T result Subgroup $value $ulane;\n") +SLANG_RAW(" };\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("__generic\n") +SLANG_RAW("__glsl_extension(GL_KHR_shader_subgroup_ballot)\n") +SLANG_RAW("__spirv_version(1.3)\n") +SLANG_RAW("[require(cuda_glsl_hlsl_spirv, subgroup_ballot)]\n") +SLANG_RAW("vector WaveMaskBroadcastLaneAt(WaveMask mask, vector value, constexpr int lane)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"subgroupBroadcast($1, $2)\";\n") +SLANG_RAW(" case cuda: __intrinsic_asm \"_waveShuffleMultiple($0, $1, $2)\";\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"WaveReadLaneAt($1, $2)\";\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" let ulane = uint(lane);\n") +SLANG_RAW(" return spirv_asm {\n") +SLANG_RAW(" OpCapability GroupNonUniformBallot;\n") +SLANG_RAW(" OpGroupNonUniformBroadcast $$vector result Subgroup $value $ulane;\n") +SLANG_RAW(" };\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[require(cuda_hlsl, subgroup_ballot)]\n") +SLANG_RAW("matrix WaveMaskBroadcastLaneAt(WaveMask mask, matrix value, constexpr int lane)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case cuda: __intrinsic_asm \"_waveShuffleMultiple($0, $1, $2)\";\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"WaveReadLaneAt($1, $2)\";\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("// TODO(JS): If it can be determined that the `laneId` is constExpr, then subgroupBroadcast\n") +SLANG_RAW("// could be used on GLSL. 
For now we just use subgroupShuffle\n") +SLANG_RAW("__generic\n") +SLANG_RAW("__glsl_extension(GL_KHR_shader_subgroup_shuffle)\n") +SLANG_RAW("__spirv_version(1.3)\n") +SLANG_RAW("[require(cuda_glsl_hlsl_spirv, subgroup_shuffle)]\n") +SLANG_RAW("T WaveMaskReadLaneAt(WaveMask mask, T value, int lane)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"subgroupShuffle($1, $2)\";\n") +SLANG_RAW(" case cuda: __intrinsic_asm \"__shfl_sync($0, $1, $2)\";\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"WaveReadLaneAt($1, $2)\";\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" let ulane = uint(lane);\n") +SLANG_RAW(" return spirv_asm {\n") +SLANG_RAW(" OpCapability GroupNonUniformShuffle;\n") +SLANG_RAW(" OpGroupNonUniformShuffle $$T result Subgroup $value $ulane;\n") +SLANG_RAW(" };\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("__generic\n") +SLANG_RAW("__glsl_extension(GL_KHR_shader_subgroup_shuffle)\n") +SLANG_RAW("__spirv_version(1.3)\n") +SLANG_RAW("[require(cuda_glsl_hlsl_spirv, subgroup_shuffle)]\n") +SLANG_RAW("vector WaveMaskReadLaneAt(WaveMask mask, vector value, int lane)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"subgroupShuffle($1, $2)\";\n") +SLANG_RAW(" case cuda: __intrinsic_asm \"_waveShuffleMultiple($0, $1, $2)\";\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"WaveReadLaneAt($1, $2)\";\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" let ulane = uint(lane);\n") +SLANG_RAW(" return spirv_asm {\n") +SLANG_RAW(" OpCapability GroupNonUniformShuffle;\n") +SLANG_RAW(" OpGroupNonUniformShuffle $$vector result Subgroup $value $ulane;\n") +SLANG_RAW(" };\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[require(cuda_hlsl, subgroup_shuffle)]\n") +SLANG_RAW("matrix WaveMaskReadLaneAt(WaveMask mask, matrix value, int lane)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case cuda: __intrinsic_asm \"_waveShuffleMultiple($0, $1, $2)\";\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"WaveReadLaneAt($1, $2)\";\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("// NOTE! WaveMaskShuffle is a NON STANDARD HLSL intrinsic! 
It will map to WaveReadLaneAt on HLSL\n") +SLANG_RAW("// which means it will only work on hardware which allows arbitrary laneIds which is not true\n") +SLANG_RAW("// in general because it breaks the HLSL standard, which requires it's 'dynamically uniform' across the Wave.\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__unsafeForceInlineEarly]\n") +SLANG_RAW("T WaveMaskShuffle(WaveMask mask, T value, int lane)\n") +SLANG_RAW("{\n") +SLANG_RAW(" return WaveMaskReadLaneAt(mask, value, lane);\n") +SLANG_RAW("}\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__unsafeForceInlineEarly]\n") +SLANG_RAW("vector WaveMaskShuffle(WaveMask mask, vector value, int lane)\n") +SLANG_RAW("{\n") +SLANG_RAW(" return WaveMaskReadLaneAt(mask, value, lane);\n") +SLANG_RAW("}\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__unsafeForceInlineEarly]\n") +SLANG_RAW("matrix WaveMaskShuffle(WaveMask mask, matrix value, int lane)\n") +SLANG_RAW("{\n") +SLANG_RAW(" return WaveMaskReadLaneAt(mask, value, lane);\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("__glsl_extension(GL_KHR_shader_subgroup_ballot)\n") +SLANG_RAW("__spirv_version(1.3)\n") +SLANG_RAW("[require(cuda_glsl_hlsl_spirv, subgroup_ballot)]\n") +SLANG_RAW("uint WaveMaskPrefixCountBits(WaveMask mask, bool value)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"subgroupBallotExclusiveBitCount(subgroupBallot($1))\";\n") +SLANG_RAW(" case cuda: __intrinsic_asm \"__popc(__ballot_sync($0, $1) & _getLaneLtMask())\";\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"WavePrefixCountBits($1)\";\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" return spirv_asm\n") +SLANG_RAW(" {\n") +SLANG_RAW(" OpCapability GroupNonUniformBallot;\n") +SLANG_RAW(" %mask:$$uint4 = OpGroupNonUniformBallot Subgroup $value;\n") +SLANG_RAW(" OpGroupNonUniformBallotBitCount $$uint result Subgroup 2 %mask\n") +SLANG_RAW(" };\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("// Across lane ops\n") +SLANG_RAW("\n") +SLANG_RAW("__generic\n") +SLANG_RAW("__glsl_extension(GL_KHR_shader_subgroup_arithmetic)\n") +SLANG_RAW("__spirv_version(1.3)\n") +SLANG_RAW("[require(cuda_glsl_hlsl_spirv, subgroup_arithmetic)]\n") +SLANG_RAW("T WaveMaskBitAnd(WaveMask mask, T expr)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"subgroupAnd($1)\";\n") +SLANG_RAW(" case cuda: __intrinsic_asm \"_waveAnd($0, $1)\";\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"WaveActiveBitAnd($1)\";\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" return spirv_asm {\n") +SLANG_RAW(" OpCapability GroupNonUniformArithmetic;\n") +SLANG_RAW(" OpGroupNonUniformBitwiseAnd $$T result Subgroup 0 $expr\n") +SLANG_RAW(" };\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("__generic\n") +SLANG_RAW("__glsl_extension(GL_KHR_shader_subgroup_arithmetic)\n") +SLANG_RAW("__spirv_version(1.3)\n") +SLANG_RAW("[require(cuda_glsl_hlsl_spirv, subgroup_arithmetic)]\n") +SLANG_RAW("vector WaveMaskBitAnd(WaveMask mask, vector expr)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"subgroupAnd($1)\";\n") +SLANG_RAW(" case cuda: __intrinsic_asm \"_waveAndMultiple($0, $1)\";\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"WaveActiveBitAnd($1)\";\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" return spirv_asm {\n") +SLANG_RAW(" OpCapability GroupNonUniformArithmetic;\n") +SLANG_RAW(" OpGroupNonUniformBitwiseAnd $$vector result Subgroup 0 
$expr\n") +SLANG_RAW(" };\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[require(cuda_hlsl, subgroup_arithmetic)]\n") +SLANG_RAW("matrix WaveMaskBitAnd(WaveMask mask, matrix expr)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case cuda: __intrinsic_asm \"_waveAndMultiple($0, $1)\";\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"WaveActiveBitAnd($1)\";\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("__generic\n") +SLANG_RAW("__glsl_extension(GL_KHR_shader_subgroup_arithmetic)\n") +SLANG_RAW("__spirv_version(1.3)\n") +SLANG_RAW("[require(cuda_glsl_hlsl_spirv, subgroup_arithmetic)]\n") +SLANG_RAW("T WaveMaskBitOr(WaveMask mask, T expr)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"subgroupOr($1)\";\n") +SLANG_RAW(" case cuda: __intrinsic_asm \"_waveOr($0, $1)\";\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"WaveActiveBitOr($1)\";\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" return spirv_asm {\n") +SLANG_RAW(" OpCapability GroupNonUniformArithmetic;\n") +SLANG_RAW(" OpGroupNonUniformBitwiseOr $$T result Subgroup 0 $expr\n") +SLANG_RAW(" };\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("__generic\n") +SLANG_RAW("__glsl_extension(GL_KHR_shader_subgroup_arithmetic)\n") +SLANG_RAW("__spirv_version(1.3)\n") +SLANG_RAW("[require(cuda_glsl_hlsl_spirv, subgroup_arithmetic)]\n") +SLANG_RAW("vector WaveMaskBitOr(WaveMask mask, vector expr)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"subgroupOr($1)\";\n") +SLANG_RAW(" case cuda: __intrinsic_asm \"_waveOrMultiple($0, $1)\";\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"WaveActiveBitOr($1)\";\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" return spirv_asm {\n") +SLANG_RAW(" OpCapability GroupNonUniformArithmetic;\n") +SLANG_RAW(" OpGroupNonUniformBitwiseOr $$vector result Subgroup 0 $expr\n") +SLANG_RAW(" };\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[require(cuda_hlsl, subgroup_arithmetic)]\n") +SLANG_RAW("matrix WaveMaskBitOr(WaveMask mask, matrix expr)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case cuda: __intrinsic_asm \"_waveOrMultiple($0, $1)\";\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"WaveActiveBitOr($1)\";\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("__generic\n") +SLANG_RAW("__glsl_extension(GL_KHR_shader_subgroup_arithmetic)\n") +SLANG_RAW("__spirv_version(1.3)\n") +SLANG_RAW("[require(cuda_glsl_hlsl_spirv, subgroup_arithmetic)]\n") +SLANG_RAW("T WaveMaskBitXor(WaveMask mask, T expr)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"subgroupXor($1)\";\n") +SLANG_RAW(" case cuda: __intrinsic_asm \"_waveXor($0, $1)\";\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"WaveActiveBitXor($1)\";\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" return spirv_asm {\n") +SLANG_RAW(" OpCapability GroupNonUniformArithmetic;\n") +SLANG_RAW(" OpGroupNonUniformBitwiseXor $$T result Subgroup 0 $expr\n") +SLANG_RAW(" };\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("__generic\n") +SLANG_RAW("__glsl_extension(GL_KHR_shader_subgroup_arithmetic)\n") +SLANG_RAW("__spirv_version(1.3)\n") +SLANG_RAW("[require(cuda_glsl_hlsl_spirv, subgroup_arithmetic)]\n") +SLANG_RAW("vector WaveMaskBitXor(WaveMask mask, vector expr)\n") +SLANG_RAW("{\n") +SLANG_RAW(" 
__target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"subgroupXor($1)\";\n") +SLANG_RAW(" case cuda: __intrinsic_asm \"_waveXorMultiple($0, $1)\";\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"WaveActiveBitXor($1)\";\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" return spirv_asm {\n") +SLANG_RAW(" OpCapability GroupNonUniformArithmetic;\n") +SLANG_RAW(" OpGroupNonUniformBitwiseXor $$vector result Subgroup 0 $expr\n") +SLANG_RAW(" };\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[require(cuda_hlsl, subgroup_arithmetic)]\n") +SLANG_RAW("matrix WaveMaskBitXor(WaveMask mask, matrix expr)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case cuda: __intrinsic_asm \"_waveXorMultiple($0, $1)\";\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"WaveActiveBitXor($1)\";\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("__generic\n") +SLANG_RAW("__glsl_extension(GL_KHR_shader_subgroup_arithmetic)\n") +SLANG_RAW("__spirv_version(1.3)\n") +SLANG_RAW("[require(cuda_glsl_hlsl_spirv, subgroup_arithmetic)]\n") +SLANG_RAW("T WaveMaskMax(WaveMask mask, T expr)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"subgroupMax($1)\";\n") +SLANG_RAW(" case cuda: __intrinsic_asm \"_waveMax($0, $1)\";\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"WaveActiveMax($1)\";\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" if (__isFloat())\n") +SLANG_RAW(" return spirv_asm {OpCapability GroupNonUniformArithmetic; OpGroupNonUniformFMax $$T result Subgroup 0 $expr};\n") +SLANG_RAW(" else if (__isSignedInt())\n") +SLANG_RAW(" return spirv_asm {OpCapability GroupNonUniformArithmetic; OpGroupNonUniformSMax $$T result Subgroup 0 $expr};\n") +SLANG_RAW(" else if (__isUnsignedInt())\n") +SLANG_RAW(" return spirv_asm {OpCapability GroupNonUniformArithmetic; OpGroupNonUniformUMax $$T result Subgroup 0 $expr};\n") +SLANG_RAW(" else return expr;\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("__generic\n") +SLANG_RAW("__glsl_extension(GL_KHR_shader_subgroup_arithmetic)\n") +SLANG_RAW("__spirv_version(1.3)\n") +SLANG_RAW("[require(cuda_glsl_hlsl_spirv, subgroup_arithmetic)]\n") +SLANG_RAW("vector WaveMaskMax(WaveMask mask, vector expr)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"subgroupMax($1)\";\n") +SLANG_RAW(" case cuda: __intrinsic_asm \"_waveMaxMultiple($0, $1)\";\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"WaveActiveMax($1)\";\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" if (__isFloat())\n") +SLANG_RAW(" return spirv_asm {OpCapability GroupNonUniformArithmetic; OpGroupNonUniformFMax $$vector result Subgroup 0 $expr};\n") +SLANG_RAW(" else if (__isSignedInt())\n") +SLANG_RAW(" return spirv_asm {OpCapability GroupNonUniformArithmetic; OpGroupNonUniformSMax $$vector result Subgroup 0 $expr};\n") +SLANG_RAW(" else if (__isUnsignedInt())\n") +SLANG_RAW(" return spirv_asm {OpCapability GroupNonUniformArithmetic; OpGroupNonUniformUMax $$vector result Subgroup 0 $expr};\n") +SLANG_RAW(" else return expr;\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[require(cuda_hlsl, subgroup_arithmetic)]\n") +SLANG_RAW("matrix WaveMaskMax(WaveMask mask, matrix expr)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case cuda: __intrinsic_asm \"_waveMaxMultiple($0, $1)\";\n") +SLANG_RAW(" case hlsl: __intrinsic_asm 
\"WaveActiveMax($1)\";\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("__generic\n") +SLANG_RAW("__glsl_extension(GL_KHR_shader_subgroup_arithmetic)\n") +SLANG_RAW("__spirv_version(1.3)\n") +SLANG_RAW("[require(cuda_glsl_hlsl_spirv, subgroup_arithmetic)]\n") +SLANG_RAW("T WaveMaskMin(WaveMask mask, T expr)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"subgroupMin($1)\";\n") +SLANG_RAW(" case cuda: __intrinsic_asm \"_waveMin($0, $1)\";\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"WaveActiveMin($1)\";\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" if (__isFloat())\n") +SLANG_RAW(" return spirv_asm {OpCapability GroupNonUniformArithmetic; OpGroupNonUniformFMin $$T result Subgroup 0 $expr};\n") +SLANG_RAW(" else if (__isSignedInt())\n") +SLANG_RAW(" return spirv_asm {OpCapability GroupNonUniformArithmetic; OpGroupNonUniformSMin $$T result Subgroup 0 $expr};\n") +SLANG_RAW(" else if (__isUnsignedInt())\n") +SLANG_RAW(" return spirv_asm {OpCapability GroupNonUniformArithmetic; OpGroupNonUniformUMin $$T result Subgroup 0 $expr};\n") +SLANG_RAW(" else return expr;\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("__generic\n") +SLANG_RAW("__glsl_extension(GL_KHR_shader_subgroup_arithmetic)\n") +SLANG_RAW("__spirv_version(1.3)\n") +SLANG_RAW("[require(cuda_glsl_hlsl_spirv, subgroup_arithmetic)]\n") +SLANG_RAW("vector WaveMaskMin(WaveMask mask, vector expr)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"subgroupMin($1)\";\n") +SLANG_RAW(" case cuda: __intrinsic_asm \"_waveMinMultiple($0, $1)\";\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"WaveActiveMin($1)\";\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" if (__isFloat())\n") +SLANG_RAW(" return spirv_asm {OpCapability GroupNonUniformArithmetic; OpGroupNonUniformFMin $$vector result Subgroup 0 $expr};\n") +SLANG_RAW(" else if (__isSignedInt())\n") +SLANG_RAW(" return spirv_asm {OpCapability GroupNonUniformArithmetic; OpGroupNonUniformSMin $$vector result Subgroup 0 $expr};\n") +SLANG_RAW(" else if (__isUnsignedInt())\n") +SLANG_RAW(" return spirv_asm {OpCapability GroupNonUniformArithmetic; OpGroupNonUniformUMin $$vector result Subgroup 0 $expr};\n") +SLANG_RAW(" else return expr;\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[require(cuda_hlsl, subgroup_arithmetic)]\n") +SLANG_RAW("matrix WaveMaskMin(WaveMask mask, matrix expr)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case cuda: __intrinsic_asm \"_waveMinMultiple($0, $1)\";\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"WaveActiveMin($1)\";\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("__generic\n") +SLANG_RAW("__glsl_extension(GL_KHR_shader_subgroup_arithmetic)\n") +SLANG_RAW("__spirv_version(1.3)\n") +SLANG_RAW("[require(cuda_glsl_hlsl_spirv, subgroup_arithmetic)]\n") +SLANG_RAW("T WaveMaskProduct(WaveMask mask, T expr)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"subgroupMul($1)\";\n") +SLANG_RAW(" case cuda: __intrinsic_asm \"_waveProduct($0, $1)\";\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"WaveActiveProduct($1)\";\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" if (__isFloat())\n") +SLANG_RAW(" return spirv_asm {OpCapability GroupNonUniformArithmetic; OpGroupNonUniformFMul $$T result Subgroup 0 $expr};\n") +SLANG_RAW(" else if 
(__isInt())\n") +SLANG_RAW(" {\n") +SLANG_RAW(" return spirv_asm\n") +SLANG_RAW(" {\n") +SLANG_RAW(" OpCapability GroupNonUniformArithmetic;\n") +SLANG_RAW(" OpGroupNonUniformIMul $$T result Subgroup 0 $expr;\n") +SLANG_RAW(" };\n") +SLANG_RAW(" }\n") +SLANG_RAW(" else return expr;\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("__generic\n") +SLANG_RAW("__glsl_extension(GL_KHR_shader_subgroup_arithmetic)\n") +SLANG_RAW("__spirv_version(1.3)\n") +SLANG_RAW("[require(cuda_glsl_hlsl_spirv, subgroup_arithmetic)]\n") +SLANG_RAW("vector WaveMaskProduct(WaveMask mask, vector expr)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"subgroupMul($1)\";\n") +SLANG_RAW(" case cuda: __intrinsic_asm \"_waveProductMultiple($0, $1)\";\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"WaveActiveProduct($1)\";\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" if (__isFloat())\n") +SLANG_RAW(" return spirv_asm {OpCapability GroupNonUniformArithmetic; OpGroupNonUniformFMul $$vector result Subgroup 0 $expr};\n") +SLANG_RAW(" else if (__isInt())\n") +SLANG_RAW(" {\n") +SLANG_RAW(" return spirv_asm\n") +SLANG_RAW(" {\n") +SLANG_RAW(" OpCapability GroupNonUniformArithmetic;\n") +SLANG_RAW(" OpGroupNonUniformIMul $$vector result Subgroup 0 $expr;\n") +SLANG_RAW(" };\n") +SLANG_RAW(" }\n") +SLANG_RAW(" else return expr;\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[require(cuda_hlsl, subgroup_arithmetic)]\n") +SLANG_RAW("matrix WaveMaskProduct(WaveMask mask, matrix expr)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case cuda: __intrinsic_asm \"_waveProductMultiple($0, $1)\";\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"WaveActiveProduct($1)\";\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("__generic\n") +SLANG_RAW("__glsl_extension(GL_KHR_shader_subgroup_arithmetic)\n") +SLANG_RAW("__spirv_version(1.3)\n") +SLANG_RAW("[require(cuda_glsl_hlsl_spirv, subgroup_arithmetic)]\n") +SLANG_RAW("T WaveMaskSum(WaveMask mask, T expr)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case glsl:\n") +SLANG_RAW(" if (__isHalf()) __requireGLSLExtension(\"GL_EXT_shader_subgroup_extended_types_float16\");\n") +SLANG_RAW(" __intrinsic_asm \"subgroupAdd($1)\";\n") +SLANG_RAW(" case cuda: __intrinsic_asm \"_waveSum($0, $1)\";\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"WaveActiveSum($1)\";\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" if (__isFloat())\n") +SLANG_RAW(" return spirv_asm {OpCapability GroupNonUniformArithmetic; OpGroupNonUniformFAdd $$T result Subgroup 0 $expr};\n") +SLANG_RAW(" else if (__isInt())\n") +SLANG_RAW(" {\n") +SLANG_RAW(" return spirv_asm\n") +SLANG_RAW(" {\n") +SLANG_RAW(" OpCapability GroupNonUniformArithmetic;\n") +SLANG_RAW(" OpGroupNonUniformIAdd $$T result Subgroup 0 $expr;\n") +SLANG_RAW(" };\n") +SLANG_RAW(" }\n") +SLANG_RAW(" else return expr;\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("__generic\n") +SLANG_RAW("__glsl_extension(GL_KHR_shader_subgroup_arithmetic)\n") +SLANG_RAW("__spirv_version(1.3)\n") +SLANG_RAW("[require(cuda_glsl_hlsl_spirv, subgroup_arithmetic)]\n") +SLANG_RAW("vector WaveMaskSum(WaveMask mask, vector expr)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case glsl:\n") +SLANG_RAW(" if (__isHalf()) __requireGLSLExtension(\"GL_EXT_shader_subgroup_extended_types_float16\");\n") +SLANG_RAW(" 
__intrinsic_asm \"subgroupAdd($1)\";\n") +SLANG_RAW(" case cuda: __intrinsic_asm \"_waveSumMultiple($0, $1)\";\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"WaveActiveSum($1)\";\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" if (__isFloat())\n") +SLANG_RAW(" return spirv_asm {OpCapability GroupNonUniformArithmetic; OpGroupNonUniformFAdd $$vector result Subgroup 0 $expr};\n") +SLANG_RAW(" else if (__isInt())\n") +SLANG_RAW(" {\n") +SLANG_RAW(" return spirv_asm\n") +SLANG_RAW(" {\n") +SLANG_RAW(" OpCapability GroupNonUniformArithmetic; \n") +SLANG_RAW(" OpGroupNonUniformIAdd $$vector result Subgroup 0 $expr;\n") +SLANG_RAW(" };\n") +SLANG_RAW(" }\n") +SLANG_RAW(" else return expr;\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[require(cuda_hlsl, subgroup_arithmetic)]\n") +SLANG_RAW("matrix WaveMaskSum(WaveMask mask, matrix expr)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case cuda: __intrinsic_asm \"_waveSumMultiple($0, $1)\";\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"WaveActiveSum($1)\";\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("__generic\n") +SLANG_RAW("__glsl_extension(GL_KHR_shader_subgroup_vote)\n") +SLANG_RAW("__spirv_version(1.3)\n") +SLANG_RAW("__cuda_sm_version(7.0)\n") +SLANG_RAW("[require(cuda_glsl_hlsl_spirv, subgroup_vote)]\n") +SLANG_RAW("bool WaveMaskAllEqual(WaveMask mask, T value)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case glsl:\n") +SLANG_RAW(" if (__isHalf()) __requireGLSLExtension(\"GL_EXT_shader_subgroup_extended_types_float16\");\n") +SLANG_RAW(" __intrinsic_asm \"subgroupAllEqual($1)\";\n") +SLANG_RAW(" case hlsl:\n") +SLANG_RAW(" __intrinsic_asm \"WaveActiveAllEqual($1)\";\n") +SLANG_RAW(" case cuda:\n") +SLANG_RAW(" __intrinsic_asm \"_waveAllEqual($0, $1)\";\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" return spirv_asm\n") +SLANG_RAW(" {\n") +SLANG_RAW(" OpCapability GroupNonUniformVote;\n") +SLANG_RAW(" OpGroupNonUniformAllEqual $$bool result Subgroup $value\n") +SLANG_RAW(" };\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("__generic\n") +SLANG_RAW("__glsl_extension(GL_KHR_shader_subgroup_vote)\n") +SLANG_RAW("__spirv_version(1.3)\n") +SLANG_RAW("__cuda_sm_version(7.0)\n") +SLANG_RAW("[require(cuda_glsl_hlsl_spirv, subgroup_vote)]\n") +SLANG_RAW("bool WaveMaskAllEqual(WaveMask mask, vector value)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case glsl:\n") +SLANG_RAW(" if (__isHalf()) __requireGLSLExtension(\"GL_EXT_shader_subgroup_extended_types_float16\");\n") +SLANG_RAW(" __intrinsic_asm \"subgroupAllEqual($1)\";\n") +SLANG_RAW(" case hlsl:\n") +SLANG_RAW(" __intrinsic_asm \"WaveActiveAllEqual($1)\";\n") +SLANG_RAW(" case cuda:\n") +SLANG_RAW(" __intrinsic_asm \"_waveAllEqualMultiple($0, $1)\";\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" return spirv_asm\n") +SLANG_RAW(" {\n") +SLANG_RAW(" OpCapability GroupNonUniformVote;\n") +SLANG_RAW(" OpGroupNonUniformAllEqual $$bool result Subgroup $value\n") +SLANG_RAW(" };\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("__generic\n") +SLANG_RAW("__cuda_sm_version(7.0)\n") +SLANG_RAW("[require(cuda_hlsl, subgroup_vote)]\n") +SLANG_RAW("bool WaveMaskAllEqual(WaveMask mask, matrix value)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case cuda: __intrinsic_asm \"_waveAllEqualMultiple($0, $1)\";\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"WaveActiveAllEqual($1)\";\n") 
+SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("// Prefix\n") +SLANG_RAW("\n") +SLANG_RAW("__generic\n") +SLANG_RAW("__glsl_extension(GL_KHR_shader_subgroup_arithmetic)\n") +SLANG_RAW("__spirv_version(1.3)\n") +SLANG_RAW("[require(cuda_glsl_hlsl_spirv, subgroup_arithmetic)]\n") +SLANG_RAW("T WaveMaskPrefixProduct(WaveMask mask, T expr)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case glsl:\n") +SLANG_RAW(" if (__isHalf()) __requireGLSLExtension(\"GL_EXT_shader_subgroup_extended_types_float16\");\n") +SLANG_RAW(" __intrinsic_asm \"subgroupExclusiveMul($1)\";\n") +SLANG_RAW(" case cuda: __intrinsic_asm \"_wavePrefixProduct($0, $1)\";\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"WavePrefixProduct($1)\";\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" if (__isFloat())\n") +SLANG_RAW(" return spirv_asm {OpCapability GroupNonUniformArithmetic; OpGroupNonUniformFMul $$T result Subgroup ExclusiveScan $expr};\n") +SLANG_RAW(" else if (__isInt())\n") +SLANG_RAW(" {\n") +SLANG_RAW(" return spirv_asm\n") +SLANG_RAW(" {\n") +SLANG_RAW(" OpCapability GroupNonUniformArithmetic;\n") +SLANG_RAW(" OpGroupNonUniformIMul $$T result Subgroup ExclusiveScan $expr;\n") +SLANG_RAW(" };\n") +SLANG_RAW(" }\n") +SLANG_RAW(" else return expr;\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("__generic\n") +SLANG_RAW("__glsl_extension(GL_KHR_shader_subgroup_arithmetic)\n") +SLANG_RAW("__spirv_version(1.3)\n") +SLANG_RAW("[require(cuda_glsl_hlsl_spirv, subgroup_arithmetic)]\n") +SLANG_RAW("vector WaveMaskPrefixProduct(WaveMask mask, vector expr)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case glsl:\n") +SLANG_RAW(" if (__isHalf()) __requireGLSLExtension(\"GL_EXT_shader_subgroup_extended_types_float16\");\n") +SLANG_RAW(" __intrinsic_asm \"subgroupExclusiveMul($1)\";\n") +SLANG_RAW(" case cuda: __intrinsic_asm \"_wavePrefixProductMultiple($0, $1)\";\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"WavePrefixProduct($1)\";\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" if (__isFloat())\n") +SLANG_RAW(" return spirv_asm {OpCapability GroupNonUniformArithmetic; OpGroupNonUniformFMul $$vector result Subgroup ExclusiveScan $expr};\n") +SLANG_RAW(" else if (__isInt())\n") +SLANG_RAW(" {\n") +SLANG_RAW(" return spirv_asm\n") +SLANG_RAW(" {\n") +SLANG_RAW(" OpCapability GroupNonUniformArithmetic;\n") +SLANG_RAW(" OpGroupNonUniformIMul $$vector result Subgroup ExclusiveScan $expr;\n") +SLANG_RAW(" };\n") +SLANG_RAW(" }\n") +SLANG_RAW(" else return expr;\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[require(cuda_hlsl, subgroup_arithmetic)]\n") +SLANG_RAW("matrix WaveMaskPrefixProduct(WaveMask mask, matrix expr)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case cuda: __intrinsic_asm \"_wavePrefixProductMultiple($0, $1)\";\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"WavePrefixProduct($1)\";\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("__generic\n") +SLANG_RAW("__glsl_extension(GL_KHR_shader_subgroup_arithmetic)\n") +SLANG_RAW("__spirv_version(1.3)\n") +SLANG_RAW("[require(cuda_glsl_hlsl_spirv, subgroup_arithmetic)]\n") +SLANG_RAW("T WaveMaskPrefixSum(WaveMask mask, T expr)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case glsl:\n") +SLANG_RAW(" if (__isHalf()) __requireGLSLExtension(\"GL_EXT_shader_subgroup_extended_types_float16\");\n") +SLANG_RAW(" __intrinsic_asm 
\"subgroupExclusiveAdd($1)\";\n") +SLANG_RAW(" case cuda: __intrinsic_asm \"_wavePrefixSum($0, $1)\";\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"WavePrefixSum($1)\";\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" if (__isFloat())\n") +SLANG_RAW(" return spirv_asm {OpCapability GroupNonUniformArithmetic; OpGroupNonUniformFAdd $$T result Subgroup ExclusiveScan $expr};\n") +SLANG_RAW(" else if (__isInt())\n") +SLANG_RAW(" {\n") +SLANG_RAW(" return spirv_asm\n") +SLANG_RAW(" {\n") +SLANG_RAW(" OpCapability GroupNonUniformArithmetic;\n") +SLANG_RAW(" result:$$T = OpGroupNonUniformIAdd Subgroup ExclusiveScan $expr;\n") +SLANG_RAW(" };\n") +SLANG_RAW(" }\n") +SLANG_RAW(" else return expr;\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("__generic\n") +SLANG_RAW("__glsl_extension(GL_KHR_shader_subgroup_arithmetic)\n") +SLANG_RAW("__spirv_version(1.3)\n") +SLANG_RAW("[require(cuda_glsl_hlsl_spirv, subgroup_arithmetic)]\n") +SLANG_RAW("vector WaveMaskPrefixSum(WaveMask mask, vector expr)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case glsl:\n") +SLANG_RAW(" if (__isHalf()) __requireGLSLExtension(\"GL_EXT_shader_subgroup_extended_types_float16\");\n") +SLANG_RAW(" __intrinsic_asm \"subgroupExclusiveAdd($1)\";\n") +SLANG_RAW(" case cuda: __intrinsic_asm \"_wavePrefixSumMultiple($0, $1)\";\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"WavePrefixSum($1)\";\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" if (__isFloat())\n") +SLANG_RAW(" return spirv_asm {OpCapability GroupNonUniformArithmetic; OpGroupNonUniformFAdd $$vector result Subgroup ExclusiveScan $expr};\n") +SLANG_RAW(" else if (__isInt())\n") +SLANG_RAW(" {\n") +SLANG_RAW(" return spirv_asm\n") +SLANG_RAW(" {\n") +SLANG_RAW(" OpCapability GroupNonUniformArithmetic;\n") +SLANG_RAW(" result:$$vector = OpGroupNonUniformIAdd Subgroup ExclusiveScan $expr;\n") +SLANG_RAW(" };\n") +SLANG_RAW(" }\n") +SLANG_RAW(" else return expr;\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[require(cuda_hlsl, subgroup_arithmetic)]\n") +SLANG_RAW("matrix WaveMaskPrefixSum(WaveMask mask, matrix expr)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case cuda: __intrinsic_asm \"_wavePrefixSumMultiple($0, $1)\";\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"WavePrefixSum($1)\";\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("__generic\n") +SLANG_RAW("__glsl_extension(GL_KHR_shader_subgroup_ballot)\n") +SLANG_RAW("__spirv_version(1.3)\n") +SLANG_RAW("[require(cuda_glsl_hlsl_spirv, subgroup_ballot)]\n") +SLANG_RAW("T WaveMaskReadLaneFirst(WaveMask mask, T expr)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"subgroupBroadcastFirst($1)\";\n") +SLANG_RAW(" case cuda: __intrinsic_asm \"_waveReadFirst($0, $1)\";\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"WaveReadLaneFirst($1)\";\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" return spirv_asm {OpCapability GroupNonUniformBallot; OpGroupNonUniformBroadcastFirst $$T result Subgroup $expr};\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("__generic\n") +SLANG_RAW("__glsl_extension(GL_KHR_shader_subgroup_ballot)\n") +SLANG_RAW("__spirv_version(1.3)\n") +SLANG_RAW("[require(cuda_glsl_hlsl_spirv, subgroup_ballot)]\n") +SLANG_RAW("vector WaveMaskReadLaneFirst(WaveMask mask, vector expr)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case glsl: __intrinsic_asm 
\"subgroupBroadcastFirst($1)\";\n") +SLANG_RAW(" case cuda: __intrinsic_asm \"_waveReadFirstMultiple($0, $1)\";\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"WaveReadLaneFirst($1)\";\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" return spirv_asm {OpCapability GroupNonUniformBallot; OpGroupNonUniformBroadcastFirst $$vector result Subgroup $expr};\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[require(cuda, subgroup_ballot)]\n") +SLANG_RAW("matrix WaveMaskReadLaneFirst(WaveMask mask, matrix expr)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case cuda: __intrinsic_asm \"_waveReadFirstMultiple($0, $1)\";\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("// WaveMask SM6.5 like intrinsics\n") +SLANG_RAW("\n") +SLANG_RAW("// TODO(JS): On HLSL it only works for 32 bits or less\n") +SLANG_RAW("\n") +SLANG_RAW("__generic\n") +SLANG_RAW("__glsl_extension(GL_NV_shader_subgroup_partitioned)\n") +SLANG_RAW("__spirv_version(1.1)\n") +SLANG_RAW("__cuda_sm_version(7.0)\n") +SLANG_RAW("[require(cuda_glsl_hlsl_spirv, subgroup_partitioned)]\n") +SLANG_RAW("WaveMask WaveMaskMatch(WaveMask mask, T value)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"subgroupPartitionNV($1).x\";\n") +SLANG_RAW(" case cuda: __intrinsic_asm \"_waveMatchScalar($0, $1).x\";\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"WaveMatch($1).x\";\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" return (spirv_asm\n") +SLANG_RAW(" {\n") +SLANG_RAW(" OpCapability GroupNonUniformPartitionedNV;\n") +SLANG_RAW(" OpExtension \"SPV_NV_shader_subgroup_partitioned\";\n") +SLANG_RAW(" OpGroupNonUniformPartitionNV $$uint4 result $value\n") +SLANG_RAW(" }).x;\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("__generic\n") +SLANG_RAW("__glsl_extension(GL_NV_shader_subgroup_partitioned)\n") +SLANG_RAW("__spirv_version(1.1)\n") +SLANG_RAW("__cuda_sm_version(7.0)\n") +SLANG_RAW("[require(cuda_glsl_hlsl_spirv, subgroup_partitioned)]\n") +SLANG_RAW("WaveMask WaveMaskMatch(WaveMask mask, vector value)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"subgroupPartitionNV($1).x\";\n") +SLANG_RAW(" case cuda: __intrinsic_asm \"_waveMatchMultiple($0, $1).x\";\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"WaveMatch($1).x\";\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" return (spirv_asm\n") +SLANG_RAW(" {\n") +SLANG_RAW(" OpCapability GroupNonUniformPartitionedNV;\n") +SLANG_RAW(" OpExtension \"SPV_NV_shader_subgroup_partitioned\";\n") +SLANG_RAW(" OpGroupNonUniformPartitionNV $$uint4 result $value\n") +SLANG_RAW(" }).x;\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("__generic\n") +SLANG_RAW("__glsl_extension(GL_NV_shader_subgroup_partitioned)\n") +SLANG_RAW("__spirv_version(1.3)\n") +SLANG_RAW("__cuda_sm_version(7.0)\n") +SLANG_RAW("[require(cuda_glsl_hlsl, subgroup_partitioned)]\n") +SLANG_RAW("WaveMask WaveMaskMatch(WaveMask mask, matrix value)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case cuda: __intrinsic_asm \"_waveMatchMultiple($0, $1)\";\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"subgroupPartitionNV($1).x\";\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"WaveMatch($1).x\";\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("__generic\n") +SLANG_RAW("__glsl_extension(GL_KHR_shader_subgroup_arithmetic)\n") 
+SLANG_RAW("__spirv_version(1.3)\n") +SLANG_RAW("[require(cuda_glsl_hlsl_spirv, subgroup_arithmetic)]\n") +SLANG_RAW("T WaveMaskPrefixBitAnd(WaveMask mask, T expr)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"subgroupExclusiveAnd($1)\";\n") +SLANG_RAW(" case cuda: __intrinsic_asm \"_wavePrefixAnd($0, $1)\";\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"WaveMultiPrefixBitAnd($1, uint4($0, 0, 0, 0))\";\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" return spirv_asm {OpCapability GroupNonUniformArithmetic; OpGroupNonUniformBitwiseAnd $$T result Subgroup ExclusiveScan $expr};\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("__generic\n") +SLANG_RAW("__glsl_extension(GL_KHR_shader_subgroup_arithmetic)\n") +SLANG_RAW("__spirv_version(1.3)\n") +SLANG_RAW("[require(cuda_glsl_hlsl_spirv, subgroup_arithmetic)]\n") +SLANG_RAW("vector WaveMaskPrefixBitAnd(WaveMask mask, vector expr)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"subgroupExclusiveAnd($1)\";\n") +SLANG_RAW(" case cuda: __intrinsic_asm \"_wavePrefixAndMultiple($0, $1)\";\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"WaveMultiPrefixBitAnd($1, uint4($0, 0, 0, 0))\";\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" return spirv_asm {OpCapability GroupNonUniformArithmetic; OpGroupNonUniformBitwiseAnd $$vector result Subgroup ExclusiveScan $expr};\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[require(cuda_hlsl, subgroup_arithmetic)]\n") +SLANG_RAW("matrix WaveMaskPrefixBitAnd(WaveMask mask, matrix expr)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case cuda: __intrinsic_asm \"_wavePrefixAndMultiple(_getMultiPrefixMask($0, $1)\";\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"WaveMultiPrefixBitAnd($1, uint4($0, 0, 0, 0))\";\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("__generic\n") +SLANG_RAW("__glsl_extension(GL_KHR_shader_subgroup_arithmetic)\n") +SLANG_RAW("__spirv_version(1.3)\n") +SLANG_RAW("[require(cuda_glsl_hlsl_spirv, subgroup_arithmetic)]\n") +SLANG_RAW("T WaveMaskPrefixBitOr(WaveMask mask, T expr)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"subgroupExclusiveOr($1)\";\n") +SLANG_RAW(" case cuda: __intrinsic_asm \"_wavePrefixOr($0, $1)\";\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"WaveMultiPrefixBitOr($1, uint4($0, 0, 0, 0))\";\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" return spirv_asm {OpCapability GroupNonUniformArithmetic; OpGroupNonUniformBitwiseAnd $$T result Subgroup ExclusiveScan $expr};\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("__generic\n") +SLANG_RAW("__glsl_extension(GL_KHR_shader_subgroup_arithmetic)\n") +SLANG_RAW("__spirv_version(1.3)\n") +SLANG_RAW("[require(cuda_glsl_hlsl_spirv, subgroup_arithmetic)]\n") +SLANG_RAW("vector WaveMaskPrefixBitOr(WaveMask mask, vector expr)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"subgroupExclusiveOr($1)\";\n") +SLANG_RAW(" case cuda: __intrinsic_asm \"_wavePrefixOrMultiple($0, $1)\";\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"WaveMultiPrefixBitOr($1, uint4($0, 0, 0, 0))\";\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" return spirv_asm {OpCapability GroupNonUniformArithmetic; OpGroupNonUniformBitwiseOr $$vector result Subgroup 
ExclusiveScan $expr};\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[require(cuda_hlsl, subgroup_arithmetic)]\n") +SLANG_RAW("matrix WaveMaskPrefixBitOr(WaveMask mask, matrix expr)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case cuda: __intrinsic_asm \"_wavePrefixOrMultiple($0, $1)\";\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"WaveMultiPrefixBitOr($1, uint4($0, 0, 0, 0))\";\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("__generic\n") +SLANG_RAW("__glsl_extension(GL_KHR_shader_subgroup_arithmetic)\n") +SLANG_RAW("__spirv_version(1.3)\n") +SLANG_RAW("[require(cuda_glsl_hlsl_spirv, subgroup_arithmetic)]\n") +SLANG_RAW("T WaveMaskPrefixBitXor(WaveMask mask, T expr)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"subgroupExclusiveXor($1)\";\n") +SLANG_RAW(" case cuda: __intrinsic_asm \"_wavePrefixXor($0, $1)\";\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"WaveMultiPrefixBitXor($1, uint4($0, 0, 0, 0))\";\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" return spirv_asm {OpCapability GroupNonUniformArithmetic; OpGroupNonUniformBitwiseXor $$T result Subgroup ExclusiveScan $expr};\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("__generic\n") +SLANG_RAW("__glsl_extension(GL_KHR_shader_subgroup_arithmetic)\n") +SLANG_RAW("__spirv_version(1.3)\n") +SLANG_RAW("[require(cuda_glsl_hlsl_spirv, subgroup_arithmetic)]\n") +SLANG_RAW("vector WaveMaskPrefixBitXor(WaveMask mask, vector expr)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"subgroupExclusiveXor($1)\";\n") +SLANG_RAW(" case cuda: __intrinsic_asm \"_wavePrefixXorMultiple($0, $1)\";\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"WaveMultiPrefixBitXor($1, uint4($0, 0, 0, 0))\";\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" return spirv_asm {OpCapability GroupNonUniformArithmetic; OpGroupNonUniformBitwiseXor $$vector result Subgroup ExclusiveScan $expr};\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[require(cuda_hlsl, subgroup_arithmetic)]\n") +SLANG_RAW("matrix WaveMaskPrefixBitXor(WaveMask mask, matrix expr)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case cuda: __intrinsic_asm \"_wavePrefixXorMultiple($0, $1)\";\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"WaveMultiPrefixBitXor($1, uint4($0, 0, 0, 0))\";\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("//@public:\n") +SLANG_RAW("\n") +SLANG_RAW("// Shader model 6.0 stuff\n") +SLANG_RAW("\n") +SLANG_RAW("// Information for GLSL wave/subgroup support\n") +SLANG_RAW("// https://github.com/KhronosGroup/GLSL/blob/master/extensions/khr/GL_KHR_shader_subgroup.txt\n") +SLANG_RAW("\n") +SLANG_RAW("/// @category wave\n") +SLANG_RAW("__generic\n") +SLANG_RAW("__glsl_extension(GL_KHR_shader_subgroup_quad)\n") +SLANG_RAW("__spirv_version(1.3)\n") +SLANG_RAW("[require(glsl_hlsl_spirv, subgroup_quad)]\n") +SLANG_RAW("T QuadReadLaneAt(T sourceValue, uint quadLaneID)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl:\n") +SLANG_RAW(" __intrinsic_asm \"QuadReadLaneAt\";\n") +SLANG_RAW(" case glsl:\n") +SLANG_RAW(" __intrinsic_asm \"subgroupQuadBroadcast\";\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" return spirv_asm {\n") +SLANG_RAW(" OpCapability GroupNonUniformQuad;\n") +SLANG_RAW(" 
result:$$T = OpGroupNonUniformQuadBroadcast Subgroup $sourceValue $quadLaneID;\n") +SLANG_RAW(" };\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("__generic\n") +SLANG_RAW("__glsl_extension(GL_KHR_shader_subgroup_quad)\n") +SLANG_RAW("__spirv_version(1.3)\n") +SLANG_RAW("[require(glsl_hlsl_spirv, subgroup_quad)]\n") +SLANG_RAW("vector QuadReadLaneAt(vector sourceValue, uint quadLaneID)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl:\n") +SLANG_RAW(" __intrinsic_asm \"QuadReadLaneAt\";\n") +SLANG_RAW(" case glsl:\n") +SLANG_RAW(" __intrinsic_asm \"subgroupQuadBroadcast\";\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" return spirv_asm {\n") +SLANG_RAW(" OpCapability GroupNonUniformQuad;\n") +SLANG_RAW(" result:$$vector = OpGroupNonUniformQuadBroadcast Subgroup $sourceValue $quadLaneID;\n") +SLANG_RAW(" };\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("__generic matrix QuadReadLaneAt(matrix sourceValue, uint quadLaneID);\n") +SLANG_RAW("\n") +SLANG_RAW("/// @category wave\n") +SLANG_RAW("__generic\n") +SLANG_RAW("__glsl_extension(GL_KHR_shader_subgroup_quad)\n") +SLANG_RAW("__spirv_version(1.3)\n") +SLANG_RAW("[require(glsl_hlsl_spirv, subgroup_quad)]\n") +SLANG_RAW("T QuadReadAcrossX(T localValue)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl:\n") +SLANG_RAW(" __intrinsic_asm \"QuadReadAcrossX\";\n") +SLANG_RAW(" case glsl:\n") +SLANG_RAW(" __intrinsic_asm \"subgroupQuadSwapHorizontal($0)\";\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" uint direction = 0u;\n") +SLANG_RAW(" return spirv_asm {\n") +SLANG_RAW(" OpCapability GroupNonUniformQuad;\n") +SLANG_RAW(" result:$$T = OpGroupNonUniformQuadSwap Subgroup $localValue $direction;\n") +SLANG_RAW(" };\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("__generic\n") +SLANG_RAW("__glsl_extension(GL_KHR_shader_subgroup_quad)\n") +SLANG_RAW("__spirv_version(1.3)\n") +SLANG_RAW("[require(glsl_hlsl_spirv, subgroup_quad)]\n") +SLANG_RAW("vector QuadReadAcrossX(vector localValue)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl:\n") +SLANG_RAW(" __intrinsic_asm \"QuadReadAcrossX\";\n") +SLANG_RAW(" case glsl:\n") +SLANG_RAW(" __intrinsic_asm \"subgroupQuadSwapHorizontal($0)\";\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" uint direction = 0u;\n") +SLANG_RAW(" return spirv_asm {\n") +SLANG_RAW(" OpCapability GroupNonUniformQuad;\n") +SLANG_RAW(" result:$$vector = OpGroupNonUniformQuadSwap Subgroup $localValue $direction;\n") +SLANG_RAW(" };\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("__generic matrix QuadReadAcrossX(matrix localValue);\n") +SLANG_RAW("\n") +SLANG_RAW("/// @category wave\n") +SLANG_RAW("__generic\n") +SLANG_RAW("__glsl_extension(GL_KHR_shader_subgroup_quad)\n") +SLANG_RAW("__spirv_version(1.3)\n") +SLANG_RAW("[require(glsl_hlsl_spirv, subgroup_quad)]\n") +SLANG_RAW("T QuadReadAcrossY(T localValue)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl:\n") +SLANG_RAW(" __intrinsic_asm \"QuadReadAcrossY\";\n") +SLANG_RAW(" case glsl:\n") +SLANG_RAW(" __intrinsic_asm \"subgroupQuadSwapVertical($0)\";\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" uint direction = 1u;\n") +SLANG_RAW(" return spirv_asm {\n") +SLANG_RAW(" OpCapability GroupNonUniformQuad;\n") +SLANG_RAW(" result:$$T = OpGroupNonUniformQuadSwap Subgroup $localValue $direction;\n") +SLANG_RAW(" };\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") 
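The quad intrinsics above lower to OpGroupNonUniformQuadSwap with a literal direction operand: 0 exchanges with the horizontal neighbour (QuadReadAcrossX), 1 with the vertical neighbour (QuadReadAcrossY), and 2 with the diagonal neighbour (QuadReadAcrossDiagonal, further below). A minimal Slang usage sketch; the helper name and caveat comments are illustrative assumptions, not part of the generated library:

    // Difference between this lane's value and its horizontal / vertical quad neighbours.
    // Unlike ddx_fine/ddy_fine, the sign of each difference depends on which corner of
    // the 2x2 quad the current lane occupies.
    float2 quadNeighbourDelta(float v)
    {
        float dx = QuadReadAcrossX(v) - v; // value held by the lane across the quad in X
        float dy = QuadReadAcrossY(v) - v; // value held by the lane across the quad in Y
        return float2(dx, dy);
    }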
+SLANG_RAW("__generic\n") +SLANG_RAW("__glsl_extension(GL_KHR_shader_subgroup_quad)\n") +SLANG_RAW("__spirv_version(1.3)\n") +SLANG_RAW("[require(glsl_hlsl_spirv, subgroup_quad)]\n") +SLANG_RAW("vector QuadReadAcrossY(vector localValue)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl:\n") +SLANG_RAW(" __intrinsic_asm \"QuadReadAcrossY\";\n") +SLANG_RAW(" case glsl:\n") +SLANG_RAW(" __intrinsic_asm \"subgroupQuadSwapVertical($0)\";\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" uint direction = 1u;\n") +SLANG_RAW(" return spirv_asm {\n") +SLANG_RAW(" OpCapability GroupNonUniformQuad;\n") +SLANG_RAW(" result:$$vector = OpGroupNonUniformQuadSwap Subgroup $localValue $direction;\n") +SLANG_RAW(" };\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("__generic matrix QuadReadAcrossY(matrix localValue);\n") +SLANG_RAW("\n") +SLANG_RAW("/// @category wave\n") +SLANG_RAW("__generic\n") +SLANG_RAW("__glsl_extension(GL_KHR_shader_subgroup_quad)\n") +SLANG_RAW("__spirv_version(1.3)\n") +SLANG_RAW("[require(glsl_hlsl_spirv, subgroup_quad)]\n") +SLANG_RAW("T QuadReadAcrossDiagonal(T localValue)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl:\n") +SLANG_RAW(" __intrinsic_asm \"QuadReadAcrossDiagonal\";\n") +SLANG_RAW(" case glsl:\n") +SLANG_RAW(" __intrinsic_asm \"subgroupQuadSwapDiagonal($0)\";\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" uint direction = 2u;\n") +SLANG_RAW(" return spirv_asm {\n") +SLANG_RAW(" OpCapability GroupNonUniformQuad;\n") +SLANG_RAW(" result:$$T = OpGroupNonUniformQuadSwap Subgroup $localValue $direction;\n") +SLANG_RAW(" };\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("__generic\n") +SLANG_RAW("__glsl_extension(GL_KHR_shader_subgroup_quad)\n") +SLANG_RAW("__spirv_version(1.3)\n") +SLANG_RAW("[require(glsl_hlsl_spirv, subgroup_quad)]\n") +SLANG_RAW("vector QuadReadAcrossDiagonal(vector localValue)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl:\n") +SLANG_RAW(" __intrinsic_asm \"QuadReadAcrossDiagonal\";\n") +SLANG_RAW(" case glsl:\n") +SLANG_RAW(" __intrinsic_asm \"subgroupQuadSwapDiagonal($0)\";\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" uint direction = 2u;\n") +SLANG_RAW(" return spirv_asm {\n") +SLANG_RAW(" OpCapability GroupNonUniformQuad;\n") +SLANG_RAW(" result:$$vector = OpGroupNonUniformQuadSwap Subgroup $localValue $direction;\n") +SLANG_RAW(" };\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("__generic matrix QuadReadAcrossDiagonal(matrix localValue);\n") +SLANG_RAW("\n") +SLANG_RAW("// WaveActiveBitAnd, WaveActiveBitOr, WaveActiveBitXor\n") + +struct WaveActiveBitOpEntry { const char* hlslName; const char* glslName; const char* spirvName; }; +const WaveActiveBitOpEntry kWaveActiveBitOpEntries[] = {{"BitAnd", "And", "BitwiseAnd"}, {"BitOr", "Or", "BitwiseOr"}, {"BitXor", "Xor", "BitwiseXor"}}; +for (auto opName : kWaveActiveBitOpEntries) { +SLANG_RAW("#line 14009 \"hlsl.meta.slang\"") +SLANG_RAW("\n") +SLANG_RAW("/// @category wave Wave and quad functions\n") +SLANG_RAW("__generic\n") +SLANG_RAW("__glsl_extension(GL_KHR_shader_subgroup_arithmetic)\n") +SLANG_RAW("__spirv_version(1.3)\n") +SLANG_RAW("[require(cuda_glsl_hlsl_spirv, subgroup_arithmetic)]\n") +SLANG_RAW("T WaveActive") +SLANG_SPLICE(opName.hlslName +) +SLANG_RAW("(T expr)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"subgroup") 
+SLANG_SPLICE(opName.glslName +) +SLANG_RAW("($0)\";\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"WaveActive") +SLANG_SPLICE(opName.hlslName +) +SLANG_RAW("\";\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" return spirv_asm {OpCapability GroupNonUniformArithmetic; OpGroupNonUniform") +SLANG_SPLICE(opName.spirvName +) +SLANG_RAW(" $$T result Subgroup Reduce $expr};\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" return WaveMask") +SLANG_SPLICE(opName.hlslName +) +SLANG_RAW("(WaveGetActiveMask(), expr);\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("__generic\n") +SLANG_RAW("__glsl_extension(GL_KHR_shader_subgroup_arithmetic)\n") +SLANG_RAW("__spirv_version(1.3)\n") +SLANG_RAW("[require(cuda_glsl_hlsl_spirv, subgroup_arithmetic)]\n") +SLANG_RAW("vector WaveActive") +SLANG_SPLICE(opName.hlslName +) +SLANG_RAW("(vector expr)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"subgroup") +SLANG_SPLICE(opName.glslName +) +SLANG_RAW("($0)\";\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"WaveActive") +SLANG_SPLICE(opName.hlslName +) +SLANG_RAW("\";\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" return spirv_asm {OpCapability GroupNonUniformArithmetic; OpGroupNonUniform") +SLANG_SPLICE(opName.spirvName +) +SLANG_RAW(" $$vector result Subgroup Reduce $expr};\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" return WaveMask") +SLANG_SPLICE(opName.hlslName +) +SLANG_RAW("(WaveGetActiveMask(), expr);\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[require(cuda_glsl_hlsl_spirv, subgroup_arithmetic)]\n") +SLANG_RAW("matrix WaveActive") +SLANG_SPLICE(opName.hlslName +) +SLANG_RAW("(matrix expr)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"WaveActive") +SLANG_SPLICE(opName.hlslName +) +SLANG_RAW("\";\n") +SLANG_RAW(" case glsl:\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" matrix result;\n") +SLANG_RAW(" [ForceUnroll]\n") +SLANG_RAW(" for (int i = 0; i < N; ++i)\n") +SLANG_RAW(" result[i] = WaveActive") +SLANG_SPLICE(opName.hlslName +) +SLANG_RAW("(expr[i]);\n") +SLANG_RAW(" return result;\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" return WaveMask") +SLANG_SPLICE(opName.hlslName +) +SLANG_RAW("(WaveGetActiveMask(), expr);\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") + +} // WaveActiveBitAnd, WaveActiveBitOr, WaveActiveBitXor +SLANG_RAW("#line 14065 \"hlsl.meta.slang\"") +SLANG_RAW("\n") +SLANG_RAW("\n") +SLANG_RAW("// WaveActiveMin/Max\n") + +const char* kWaveActiveMinMaxNames[] = {"Min", "Max"}; +for (const char* opName : kWaveActiveMinMaxNames) { +SLANG_RAW("#line 14071 \"hlsl.meta.slang\"") +SLANG_RAW("\n") +SLANG_RAW("/// @category wave\n") +SLANG_RAW("__generic\n") +SLANG_RAW("__glsl_extension(GL_KHR_shader_subgroup_arithmetic)\n") +SLANG_RAW("__spirv_version(1.3)\n") +SLANG_RAW("[require(cuda_glsl_hlsl_spirv, subgroup_arithmetic)]\n") +SLANG_RAW("T WaveActive") +SLANG_SPLICE(opName +) +SLANG_RAW("(T expr)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"subgroup") +SLANG_SPLICE(opName +) +SLANG_RAW("($0)\";\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"WaveActive") +SLANG_SPLICE(opName +) +SLANG_RAW("\";\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" if (__isFloat())\n") +SLANG_RAW(" return spirv_asm {OpCapability GroupNonUniformArithmetic; OpGroupNonUniformF") +SLANG_SPLICE(opName +) +SLANG_RAW(" $$T result Subgroup Reduce $expr};\n") +SLANG_RAW(" 
else if (__isUnsignedInt())\n") +SLANG_RAW(" return spirv_asm {OpCapability GroupNonUniformArithmetic; OpGroupNonUniformU") +SLANG_SPLICE(opName +) +SLANG_RAW(" $$T result Subgroup Reduce $expr};\n") +SLANG_RAW(" else\n") +SLANG_RAW(" return spirv_asm {OpCapability GroupNonUniformArithmetic; OpGroupNonUniformS") +SLANG_SPLICE(opName +) +SLANG_RAW(" $$T result Subgroup Reduce $expr};\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" return WaveMask") +SLANG_SPLICE(opName +) +SLANG_RAW("(WaveGetActiveMask(), expr);\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("__generic\n") +SLANG_RAW("__glsl_extension(GL_KHR_shader_subgroup_arithmetic)\n") +SLANG_RAW("__spirv_version(1.3)\n") +SLANG_RAW("[require(cuda_glsl_hlsl_spirv, subgroup_arithmetic)]\n") +SLANG_RAW("vector WaveActive") +SLANG_SPLICE(opName +) +SLANG_RAW("(vector expr)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"subgroup") +SLANG_SPLICE(opName +) +SLANG_RAW("($0)\";\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"WaveActive") +SLANG_SPLICE(opName +) +SLANG_RAW("\";\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" if (__isFloat())\n") +SLANG_RAW(" return spirv_asm {OpCapability GroupNonUniformArithmetic; OpGroupNonUniformF") +SLANG_SPLICE(opName +) +SLANG_RAW(" $$vector result Subgroup Reduce $expr};\n") +SLANG_RAW(" else if (__isUnsignedInt())\n") +SLANG_RAW(" return spirv_asm {OpCapability GroupNonUniformArithmetic; OpGroupNonUniformU") +SLANG_SPLICE(opName +) +SLANG_RAW(" $$vector result Subgroup Reduce $expr};\n") +SLANG_RAW(" else\n") +SLANG_RAW(" return spirv_asm {OpCapability GroupNonUniformArithmetic; OpGroupNonUniformS") +SLANG_SPLICE(opName +) +SLANG_RAW(" $$vector result Subgroup Reduce $expr};\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" return WaveMask") +SLANG_SPLICE(opName +) +SLANG_RAW("(WaveGetActiveMask(), expr);\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[require(cuda_glsl_hlsl_spirv, subgroup_arithmetic)]\n") +SLANG_RAW("matrix WaveActive") +SLANG_SPLICE(opName +) +SLANG_RAW("(matrix expr)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"WaveActive") +SLANG_SPLICE(opName +) +SLANG_RAW("\";\n") +SLANG_RAW(" case glsl:\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" matrix result;\n") +SLANG_RAW(" [ForceUnroll]\n") +SLANG_RAW(" for (int i = 0; i < N; ++i)\n") +SLANG_RAW(" result[i] = WaveActive") +SLANG_SPLICE(opName +) +SLANG_RAW("(expr[i]);\n") +SLANG_RAW(" return result;\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" return WaveMask") +SLANG_SPLICE(opName +) +SLANG_RAW("(WaveGetActiveMask(), expr);\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") + +} // WaveActiveMinMax. 
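The WaveActiveMin/WaveActiveMax overloads emitted by the loop above reduce a value across all active lanes, selecting the float/unsigned/signed SPIR-V opcode from the element type and falling back to the WaveMask* variants (via WaveGetActiveMask()) on targets without a direct mapping. A short Slang usage sketch; the entry point, output buffer, and per-wave indexing are illustrative assumptions rather than part of this library:

    RWStructuredBuffer<float> gPerWaveMax; // hypothetical output, one element per wave

    [shader("compute")]
    [numthreads(64, 1, 1)]
    void reduceMaxPerWave(uint3 tid: SV_DispatchThreadID)
    {
        float v = float(tid.x);           // stand-in for real per-lane data
        float waveMax = WaveActiveMax(v); // every active lane receives the wave-wide maximum
        if (WaveIsFirstLane())            // publish the result once per wave
        {
            // Assumes a 1D dispatch where waves pack consecutive threads (typical, not guaranteed).
            gPerWaveMax[tid.x / WaveGetLaneCount()] = waveMax;
        }
    }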
+SLANG_RAW("#line 14138 \"hlsl.meta.slang\"") +SLANG_RAW("\n") +SLANG_RAW("\n") +SLANG_RAW("// WaveActiveProduct/Sum\n") + +struct WaveActiveProductSumEntry { const char* hlslName; const char* glslName; }; +const WaveActiveProductSumEntry kWaveActivProductSumNames[] = {{"Product", "Mul"}, {"Sum", "Add"}}; +for (auto opName : kWaveActivProductSumNames) { +SLANG_RAW("#line 14145 \"hlsl.meta.slang\"") +SLANG_RAW("\n") +SLANG_RAW("/// @category wave\n") +SLANG_RAW("__generic\n") +SLANG_RAW("__glsl_extension(GL_KHR_shader_subgroup_arithmetic)\n") +SLANG_RAW("__spirv_version(1.3)\n") +SLANG_RAW("[require(cuda_glsl_hlsl_spirv, subgroup_arithmetic)]\n") +SLANG_RAW("T WaveActive") +SLANG_SPLICE(opName.hlslName +) +SLANG_RAW("(T expr)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case glsl:\n") +SLANG_RAW(" if (__isHalf()) __requireGLSLExtension(\"GL_EXT_shader_subgroup_extended_types_float16\");\n") +SLANG_RAW(" __intrinsic_asm \"subgroup") +SLANG_SPLICE(opName.glslName +) +SLANG_RAW("($0)\";\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"WaveActive") +SLANG_SPLICE(opName.hlslName +) +SLANG_RAW("\";\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" if (__isFloat())\n") +SLANG_RAW(" return spirv_asm {\n") +SLANG_RAW(" OpCapability GroupNonUniformArithmetic;\n") +SLANG_RAW(" OpGroupNonUniformF") +SLANG_SPLICE(opName.glslName +) +SLANG_RAW(" $$T result Subgroup 0 $expr\n") +SLANG_RAW(" };\n") +SLANG_RAW(" else if (__isInt())\n") +SLANG_RAW(" {\n") +SLANG_RAW(" return spirv_asm\n") +SLANG_RAW(" {\n") +SLANG_RAW(" OpCapability GroupNonUniformArithmetic;\n") +SLANG_RAW(" OpGroupNonUniformI") +SLANG_SPLICE(opName.glslName +) +SLANG_RAW(" $$T result Subgroup 0 $expr;\n") +SLANG_RAW(" };\n") +SLANG_RAW(" }\n") +SLANG_RAW(" else return expr;\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" return WaveMask") +SLANG_SPLICE(opName.hlslName +) +SLANG_RAW("(WaveGetActiveMask(), expr);\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("__generic\n") +SLANG_RAW("__glsl_extension(GL_KHR_shader_subgroup_arithmetic)\n") +SLANG_RAW("__spirv_version(1.3)\n") +SLANG_RAW("[require(cuda_glsl_hlsl_spirv, subgroup_arithmetic)]\n") +SLANG_RAW("vector WaveActive") +SLANG_SPLICE(opName.hlslName +) +SLANG_RAW("(vector expr)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case glsl:\n") +SLANG_RAW(" if (__isHalf()) __requireGLSLExtension(\"GL_EXT_shader_subgroup_extended_types_float16\");\n") +SLANG_RAW(" __intrinsic_asm \"subgroup") +SLANG_SPLICE(opName.glslName +) +SLANG_RAW("($0)\";\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"WaveActive") +SLANG_SPLICE(opName.hlslName +) +SLANG_RAW("\";\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" if (__isFloat())\n") +SLANG_RAW(" return spirv_asm {\n") +SLANG_RAW(" OpCapability GroupNonUniformArithmetic;\n") +SLANG_RAW(" OpGroupNonUniformF") +SLANG_SPLICE(opName.glslName +) +SLANG_RAW(" $$vector result Subgroup 0 $expr\n") +SLANG_RAW(" };\n") +SLANG_RAW(" else if (__isInt())\n") +SLANG_RAW(" {\n") +SLANG_RAW(" return spirv_asm\n") +SLANG_RAW(" {\n") +SLANG_RAW(" OpCapability GroupNonUniformArithmetic;\n") +SLANG_RAW(" OpGroupNonUniformI") +SLANG_SPLICE(opName.glslName +) +SLANG_RAW(" $$vector result Subgroup 0 $expr;\n") +SLANG_RAW(" };\n") +SLANG_RAW(" }\n") +SLANG_RAW(" else return expr;\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" return WaveMask") +SLANG_SPLICE(opName.hlslName +) +SLANG_RAW("(WaveGetActiveMask(), expr);\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") 
+SLANG_RAW("__generic\n") +SLANG_RAW("[require(cuda_glsl_hlsl_spirv, subgroup_arithmetic)]\n") +SLANG_RAW("matrix WaveActive") +SLANG_SPLICE(opName.hlslName +) +SLANG_RAW("(matrix expr)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"WaveActive") +SLANG_SPLICE(opName.hlslName +) +SLANG_RAW("\";\n") +SLANG_RAW(" case glsl:\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" matrix result;\n") +SLANG_RAW(" [ForceUnroll]\n") +SLANG_RAW(" for (int i = 0; i < N; ++i)\n") +SLANG_RAW(" result[i] = WaveActive") +SLANG_SPLICE(opName.hlslName +) +SLANG_RAW("(expr[i]);\n") +SLANG_RAW(" return result;\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" return WaveMask") +SLANG_SPLICE(opName.hlslName +) +SLANG_RAW("(WaveGetActiveMask(), expr);\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") + +} // WaveActiveProduct/WaveActiveProductSum. +SLANG_RAW("#line 14231 \"hlsl.meta.slang\"") +SLANG_RAW("\n") +SLANG_RAW("/// @category wave\n") +SLANG_RAW("__generic\n") +SLANG_RAW("__glsl_extension(GL_KHR_shader_subgroup_vote)\n") +SLANG_RAW("__spirv_version(1.3)\n") +SLANG_RAW("[require(cuda_glsl_hlsl_spirv, subgroup_vote)]\n") +SLANG_RAW("bool WaveActiveAllEqual(T value)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case glsl:\n") +SLANG_RAW(" __intrinsic_asm \"subgroupAllEqual($0)\";\n") +SLANG_RAW(" case hlsl:\n") +SLANG_RAW(" __intrinsic_asm \"WaveActiveAllEqual\";\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" return spirv_asm\n") +SLANG_RAW(" {\n") +SLANG_RAW(" OpCapability GroupNonUniformVote;\n") +SLANG_RAW(" OpGroupNonUniformAllEqual $$bool result Subgroup $value\n") +SLANG_RAW(" };\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" return WaveMaskAllEqual(WaveGetActiveMask(), value);\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("__generic\n") +SLANG_RAW("__glsl_extension(GL_KHR_shader_subgroup_vote)\n") +SLANG_RAW("__spirv_version(1.3)\n") +SLANG_RAW("[require(cuda_glsl_hlsl_spirv, subgroup_vote)]\n") +SLANG_RAW("bool WaveActiveAllEqual(vector value)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case glsl:\n") +SLANG_RAW(" __intrinsic_asm \"subgroupAllEqual($0)\";\n") +SLANG_RAW(" case hlsl:\n") +SLANG_RAW(" __intrinsic_asm \"WaveActiveAllEqual\";\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" return spirv_asm\n") +SLANG_RAW(" {\n") +SLANG_RAW(" OpCapability GroupNonUniformVote;\n") +SLANG_RAW(" OpGroupNonUniformAllEqual $$bool result Subgroup $value\n") +SLANG_RAW(" };\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" return WaveMaskAllEqual(WaveGetActiveMask(), value);\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[require(cuda_hlsl, subgroup_vote)]\n") +SLANG_RAW("bool WaveActiveAllEqual(matrix value)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"WaveActiveAllEqual\";\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" return WaveMaskAllEqual(WaveGetActiveMask(), value);\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("/// @category wave\n") +SLANG_RAW("__glsl_extension(GL_KHR_shader_subgroup_vote)\n") +SLANG_RAW("__spirv_version(1.3)\n") +SLANG_RAW("[require(cuda_glsl_hlsl_spirv, subgroup_vote)]\n") +SLANG_RAW("bool WaveActiveAllTrue(bool condition)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case glsl:\n") +SLANG_RAW(" __intrinsic_asm \"subgroupAll($0)\";\n") +SLANG_RAW(" case hlsl:\n") 
+SLANG_RAW(" __intrinsic_asm \"WaveActiveAllTrue($0)\";\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" return spirv_asm\n") +SLANG_RAW(" {\n") +SLANG_RAW(" OpCapability GroupNonUniformVote;\n") +SLANG_RAW(" OpGroupNonUniformAll $$bool result Subgroup $condition\n") +SLANG_RAW(" };\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" return WaveMaskAllTrue(WaveGetActiveMask(), condition);\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("/// @category wave\n") +SLANG_RAW("__glsl_extension(GL_KHR_shader_subgroup_vote)\n") +SLANG_RAW("__spirv_version(1.3)\n") +SLANG_RAW("[require(cuda_glsl_hlsl_spirv, subgroup_vote)]\n") +SLANG_RAW("bool WaveActiveAnyTrue(bool condition)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case glsl:\n") +SLANG_RAW(" __intrinsic_asm \"subgroupAny($0)\";\n") +SLANG_RAW(" case hlsl:\n") +SLANG_RAW(" __intrinsic_asm \"WaveActiveAnyTrue($0)\";\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" return spirv_asm\n") +SLANG_RAW(" {\n") +SLANG_RAW(" OpCapability GroupNonUniformVote;\n") +SLANG_RAW(" OpGroupNonUniformAny $$bool result Subgroup $condition\n") +SLANG_RAW(" };\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" return WaveMaskAnyTrue(WaveGetActiveMask(), condition);\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("/// @category wave\n") +SLANG_RAW("__glsl_extension(GL_KHR_shader_subgroup_ballot)\n") +SLANG_RAW("__spirv_version(1.3)\n") +SLANG_RAW("[NonUniformReturn]\n") +SLANG_RAW("[require(cuda_glsl_hlsl_spirv, subgroup_ballot)]\n") +SLANG_RAW("uint4 WaveActiveBallot(bool condition)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case glsl:\n") +SLANG_RAW(" __intrinsic_asm \"subgroupBallot($0)\";\n") +SLANG_RAW(" case hlsl:\n") +SLANG_RAW(" __intrinsic_asm \"WaveActiveBallot\";\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" return spirv_asm\n") +SLANG_RAW(" {\n") +SLANG_RAW(" OpCapability GroupNonUniformBallot;\n") +SLANG_RAW(" OpGroupNonUniformBallot $$uint4 result Subgroup $condition\n") +SLANG_RAW(" };\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" return WaveMaskBallot(WaveGetActiveMask(), condition);\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("/// @category wave\n") +SLANG_RAW("[require(cuda_glsl_hlsl_spirv, subgroup_basic_ballot)]\n") +SLANG_RAW("uint WaveActiveCountBits(bool value)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"WaveActiveCountBits\";\n") +SLANG_RAW(" case glsl:\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" return _WaveCountBits(WaveActiveBallot(value));\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" return WaveMaskCountBits(WaveGetActiveMask(), value);\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("/// @category wave\n") +SLANG_RAW("__glsl_extension(GL_KHR_shader_subgroup_basic)\n") +SLANG_RAW("__spirv_version(1.3)\n") +SLANG_RAW("[NonUniformReturn]\n") +SLANG_RAW("[require(cuda_glsl_hlsl_spirv, subgroup_basic)]\n") +SLANG_RAW("uint WaveGetLaneCount()\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"(gl_SubgroupSize)\";\n") +SLANG_RAW(" case cuda: __intrinsic_asm \"(warpSize)\";\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"WaveGetLaneCount()\";\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" return spirv_asm\n") +SLANG_RAW(" {\n") +SLANG_RAW(" OpCapability GroupNonUniform;\n") +SLANG_RAW(" result:$$uint = OpLoad builtin(SubgroupSize:uint)\n") +SLANG_RAW(" };\n") 
+SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("/// @category wave\n") +SLANG_RAW("__glsl_extension(GL_KHR_shader_subgroup_basic)\n") +SLANG_RAW("__spirv_version(1.3)\n") +SLANG_RAW("[NonUniformReturn]\n") +SLANG_RAW("[require(cuda_glsl_hlsl_spirv, subgroup_basic)]\n") +SLANG_RAW("uint WaveGetLaneIndex()\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"(gl_SubgroupInvocationID)\";\n") +SLANG_RAW(" case cuda: __intrinsic_asm \"_getLaneId()\";\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"WaveGetLaneIndex()\";\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" return spirv_asm\n") +SLANG_RAW(" {\n") +SLANG_RAW(" OpCapability GroupNonUniform;\n") +SLANG_RAW(" result:$$uint = OpLoad builtin(SubgroupLocalInvocationId:uint)\n") +SLANG_RAW(" };\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("/// @category wave\n") +SLANG_RAW("__glsl_extension(GL_KHR_shader_subgroup_basic)\n") +SLANG_RAW("__spirv_version(1.3)\n") +SLANG_RAW("[NonUniformReturn]\n") +SLANG_RAW("[require(cuda_glsl_hlsl_spirv, subgroup_basic)]\n") +SLANG_RAW("bool WaveIsFirstLane()\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case glsl:\n") +SLANG_RAW(" __intrinsic_asm \"subgroupElect()\";\n") +SLANG_RAW(" case hlsl:\n") +SLANG_RAW(" __intrinsic_asm \"WaveIsFirstLane()\";\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" return spirv_asm\n") +SLANG_RAW(" {\n") +SLANG_RAW(" OpCapability GroupNonUniformBallot;\n") +SLANG_RAW(" OpGroupNonUniformElect $$bool result Subgroup\n") +SLANG_RAW(" };\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" return WaveMaskIsFirstLane(WaveGetActiveMask());\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("// It's useful to have a wave uint4 version of countbits, because some wave functions return uint4.\n") +SLANG_RAW("// This implementation tries to limit the amount of work required by the actual lane count.\n") +SLANG_RAW("/// @category wave\n") +SLANG_RAW("__spirv_version(1.3)\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_spirv, subgroup_basic_ballot)]\n") +SLANG_RAW("uint _WaveCountBits(uint4 value)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" return spirv_asm\n") +SLANG_RAW(" {\n") +SLANG_RAW(" OpCapability GroupNonUniformBallot;\n") +SLANG_RAW(" OpGroupNonUniformBallotBitCount $$uint result Subgroup Reduce $value\n") +SLANG_RAW(" };\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" // Assume since WaveGetLaneCount should be known at compile time, the branches will hopefully boil away\n") +SLANG_RAW(" const uint waveLaneCount = WaveGetLaneCount();\n") +SLANG_RAW(" switch ((waveLaneCount - 1) / 32)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" case 0: return countbits(value.x);\n") +SLANG_RAW(" case 1: return countbits(value.x) + countbits(value.y);\n") +SLANG_RAW(" case 2: return countbits(value.x) + countbits(value.y) + countbits(value.z);\n") +SLANG_RAW(" case 3: return countbits(value.x) + countbits(value.y) + countbits(value.z) + countbits(value.w);\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("// Prefix\n") +SLANG_RAW("/// @category wave\n") +SLANG_RAW("__generic\n") +SLANG_RAW("__glsl_extension(GL_KHR_shader_subgroup_arithmetic)\n") +SLANG_RAW("__spirv_version(1.3)\n") +SLANG_RAW("[require(cuda_glsl_hlsl_spirv, subgroup_arithmetic)]\n") +SLANG_RAW("T WavePrefixProduct(T expr)\n") +SLANG_RAW("{\n") +SLANG_RAW(" 
__target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case glsl:\n") +SLANG_RAW(" if (__isHalf()) __requireGLSLExtension(\"GL_EXT_shader_subgroup_extended_types_float16\");\n") +SLANG_RAW(" __intrinsic_asm \"subgroupExclusiveMul($0)\";\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"WavePrefixProduct\";\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" if (__isFloat())\n") +SLANG_RAW(" return spirv_asm {\n") +SLANG_RAW(" OpCapability GroupNonUniformArithmetic;\n") +SLANG_RAW(" OpGroupNonUniformFMul $$T result Subgroup ExclusiveScan $expr\n") +SLANG_RAW(" };\n") +SLANG_RAW(" else if (__isInt())\n") +SLANG_RAW(" {\n") +SLANG_RAW(" return spirv_asm\n") +SLANG_RAW(" {\n") +SLANG_RAW(" OpCapability GroupNonUniformArithmetic;\n") +SLANG_RAW(" OpGroupNonUniformIMul $$T result Subgroup ExclusiveScan $expr;\n") +SLANG_RAW(" };\n") +SLANG_RAW(" }\n") +SLANG_RAW(" else return expr;\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" return WaveMaskPrefixProduct(WaveGetActiveMask(), expr);\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("/// @category wave\n") +SLANG_RAW("__generic\n") +SLANG_RAW("__glsl_extension(GL_KHR_shader_subgroup_arithmetic)\n") +SLANG_RAW("__spirv_version(1.3)\n") +SLANG_RAW("[require(cuda_glsl_hlsl_spirv, subgroup_arithmetic)]\n") +SLANG_RAW("vector WavePrefixProduct(vector expr)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case glsl:\n") +SLANG_RAW(" if (__isHalf()) __requireGLSLExtension(\"GL_EXT_shader_subgroup_extended_types_float16\");\n") +SLANG_RAW(" __intrinsic_asm \"subgroupExclusiveMul($0)\";\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"WavePrefixProduct\";\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" if (__isFloat())\n") +SLANG_RAW(" return spirv_asm {OpCapability GroupNonUniformArithmetic; OpGroupNonUniformFMul $$vector result Subgroup ExclusiveScan $expr};\n") +SLANG_RAW(" else if (__isInt())\n") +SLANG_RAW(" {\n") +SLANG_RAW(" return spirv_asm\n") +SLANG_RAW(" {\n") +SLANG_RAW(" OpCapability GroupNonUniformArithmetic;\n") +SLANG_RAW(" OpGroupNonUniformIMul $$vector result Subgroup ExclusiveScan $expr;\n") +SLANG_RAW(" };\n") +SLANG_RAW(" }\n") +SLANG_RAW(" else return expr;\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" return WaveMaskPrefixProduct(WaveGetActiveMask(), expr);\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("/// @category wave\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[require(cuda_glsl_hlsl_spirv, subgroup_arithmetic)]\n") +SLANG_RAW("matrix WavePrefixProduct(matrix expr)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"WavePrefixProduct\";\n") +SLANG_RAW(" case glsl:\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" matrix result;\n") +SLANG_RAW(" for (int i = 0; i < N; ++i)\n") +SLANG_RAW(" result[i] = WavePrefixProduct(expr[i]);\n") +SLANG_RAW(" return result;\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" return WaveMaskPrefixProduct(WaveGetActiveMask(), expr);\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("/// @category wave\n") +SLANG_RAW("__generic\n") +SLANG_RAW("__glsl_extension(GL_KHR_shader_subgroup_arithmetic)\n") +SLANG_RAW("__spirv_version(1.3)\n") +SLANG_RAW("[require(cuda_glsl_hlsl_spirv, subgroup_arithmetic)]\n") +SLANG_RAW("T WavePrefixSum(T expr)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case glsl:\n") +SLANG_RAW(" if (__isHalf()) __requireGLSLExtension(\"GL_EXT_shader_subgroup_extended_types_float16\");\n") +SLANG_RAW(" __intrinsic_asm 
\"subgroupExclusiveAdd($0)\";\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"WavePrefixSum\";\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" if (__isFloat())\n") +SLANG_RAW(" return spirv_asm {OpCapability GroupNonUniformArithmetic; OpGroupNonUniformFAdd $$T result Subgroup ExclusiveScan $expr};\n") +SLANG_RAW(" else if (__isInt())\n") +SLANG_RAW(" {\n") +SLANG_RAW(" return spirv_asm\n") +SLANG_RAW(" {\n") +SLANG_RAW(" OpCapability GroupNonUniformArithmetic;\n") +SLANG_RAW(" result:$$T = OpGroupNonUniformIAdd Subgroup ExclusiveScan $expr;\n") +SLANG_RAW(" };\n") +SLANG_RAW(" }\n") +SLANG_RAW(" else return expr;\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" return WaveMaskPrefixSum(WaveGetActiveMask(), expr);\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("__generic\n") +SLANG_RAW("__glsl_extension(GL_KHR_shader_subgroup_arithmetic)\n") +SLANG_RAW("__spirv_version(1.3)\n") +SLANG_RAW("[require(cuda_glsl_hlsl_spirv, subgroup_arithmetic)]\n") +SLANG_RAW("vector WavePrefixSum(vector expr)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case glsl:\n") +SLANG_RAW(" if (__isHalf()) __requireGLSLExtension(\"GL_EXT_shader_subgroup_extended_types_float16\");\n") +SLANG_RAW(" __intrinsic_asm \"subgroupExclusiveAdd($0)\";\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"WavePrefixSum\";\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" if (__isFloat())\n") +SLANG_RAW(" return spirv_asm {OpCapability GroupNonUniformArithmetic; OpGroupNonUniformFAdd $$vector result Subgroup ExclusiveScan $expr};\n") +SLANG_RAW(" else if (__isInt())\n") +SLANG_RAW(" {\n") +SLANG_RAW(" return spirv_asm\n") +SLANG_RAW(" {\n") +SLANG_RAW(" OpCapability GroupNonUniformArithmetic;\n") +SLANG_RAW(" result:$$vector = OpGroupNonUniformIAdd Subgroup ExclusiveScan $expr;\n") +SLANG_RAW(" };\n") +SLANG_RAW(" }\n") +SLANG_RAW(" else return expr;\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" return WaveMaskPrefixSum(WaveGetActiveMask(), expr);\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[require(cuda_glsl_hlsl_spirv, subgroup_arithmetic)]\n") +SLANG_RAW("matrix WavePrefixSum(matrix expr)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"WavePrefixSum\";\n") +SLANG_RAW(" case glsl:\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" matrix result;\n") +SLANG_RAW(" for (int i = 0; i < N; ++i)\n") +SLANG_RAW(" result[i] = WavePrefixSum(expr[i]);\n") +SLANG_RAW(" return result;\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" return WaveMaskPrefixSum(WaveGetActiveMask(), expr);\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("/// @category wave\n") +SLANG_RAW("__generic\n") +SLANG_RAW("__glsl_extension(GL_KHR_shader_subgroup_ballot)\n") +SLANG_RAW("__spirv_version(1.3)\n") +SLANG_RAW("[require(cuda_glsl_hlsl_spirv, subgroup_ballot)]\n") +SLANG_RAW("T WaveReadLaneFirst(T expr)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case glsl:\n") +SLANG_RAW(" if (__isHalf()) __requireGLSLExtension(\"GL_EXT_shader_subgroup_extended_types_float16\");\n") +SLANG_RAW(" __intrinsic_asm \"subgroupBroadcastFirst($0)\";\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"WaveReadLaneFirst\";\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" return spirv_asm {OpCapability GroupNonUniformBallot; OpGroupNonUniformBroadcastFirst $$T result Subgroup $expr};\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" return WaveMaskReadLaneFirst(WaveGetActiveMask(), expr);\n") 
+SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("__generic\n") +SLANG_RAW("__glsl_extension(GL_KHR_shader_subgroup_ballot)\n") +SLANG_RAW("__spirv_version(1.3)\n") +SLANG_RAW("[require(cuda_glsl_hlsl_spirv, subgroup_ballot)]\n") +SLANG_RAW("vector WaveReadLaneFirst(vector expr)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case glsl:\n") +SLANG_RAW(" if (__isHalf()) __requireGLSLExtension(\"GL_EXT_shader_subgroup_extended_types_float16\");\n") +SLANG_RAW(" __intrinsic_asm \"subgroupBroadcastFirst($0)\";\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"WaveReadLaneFirst\";\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" return spirv_asm {OpCapability GroupNonUniformBallot; OpGroupNonUniformBroadcastFirst $$vector result Subgroup $expr};\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" return WaveMaskReadLaneFirst(WaveGetActiveMask(), expr);\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[require(cuda_glsl_hlsl_spirv, subgroup_ballot)]\n") +SLANG_RAW("matrix WaveReadLaneFirst(matrix expr)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"WaveReadLaneFirst\";\n") +SLANG_RAW(" case glsl:\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" matrix result;\n") +SLANG_RAW(" for (int i = 0; i < N; ++i)\n") +SLANG_RAW(" result[i] = WaveReadLaneFirst(expr[i]);\n") +SLANG_RAW(" return result;\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" return WaveMaskReadLaneFirst(WaveGetActiveMask(), expr);\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("// NOTE! WaveBroadcastLaneAt is *NOT* standard HLSL\n") +SLANG_RAW("// It is provided as access to subgroupBroadcast which can only take a\n") +SLANG_RAW("// constexpr laneId.\n") +SLANG_RAW("// https://github.com/KhronosGroup/GLSL/blob/master/extensions/khr/GL_KHR_shader_subgroup.txt\n") +SLANG_RAW("// Versions SPIR-V greater than 1.4 loosen this restriction, and allow 'dynamic uniform' index\n") +SLANG_RAW("// If that's the behavior required then client code should use WaveReadLaneAt which works this way.\n") +SLANG_RAW("/// @category wave\n") +SLANG_RAW("__generic\n") +SLANG_RAW("__glsl_extension(GL_KHR_shader_subgroup_ballot)\n") +SLANG_RAW("__spirv_version(1.3)\n") +SLANG_RAW("[require(cuda_glsl_hlsl_spirv, subgroup_ballot)]\n") +SLANG_RAW("T WaveBroadcastLaneAt(T value, constexpr int lane)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case glsl:\n") +SLANG_RAW(" if (__isHalf()) __requireGLSLExtension(\"GL_EXT_shader_subgroup_extended_types_float16\");\n") +SLANG_RAW(" __intrinsic_asm \"subgroupBroadcast($0, $1)\";\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"WaveReadLaneAt\";\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" let ulane = uint(lane);\n") +SLANG_RAW(" return spirv_asm {OpCapability GroupNonUniformBallot; OpGroupNonUniformBroadcast $$T result Subgroup $value $ulane};\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" return WaveMaskBroadcastLaneAt(WaveGetActiveMask(), value, lane);\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("/// @category wave\n") +SLANG_RAW("__generic\n") +SLANG_RAW("__glsl_extension(GL_KHR_shader_subgroup_ballot)\n") +SLANG_RAW("__spirv_version(1.3)\n") +SLANG_RAW("[require(cuda_glsl_hlsl_spirv, subgroup_ballot)]\n") +SLANG_RAW("vector WaveBroadcastLaneAt(vector value, constexpr int lane)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case glsl:\n") +SLANG_RAW(" 
if (__isHalf()) __requireGLSLExtension(\"GL_EXT_shader_subgroup_extended_types_float16\");\n") +SLANG_RAW(" __intrinsic_asm \"subgroupBroadcast($0, $1)\";\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"WaveReadLaneAt\";\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" let ulane = uint(lane);\n") +SLANG_RAW(" return spirv_asm {OpCapability GroupNonUniformBallot; OpGroupNonUniformBroadcast $$vector result Subgroup $value $ulane};\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" return WaveMaskBroadcastLaneAt(WaveGetActiveMask(), value, lane);\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[require(cuda_glsl_hlsl_spirv, subgroup_ballot)]\n") +SLANG_RAW("matrix WaveBroadcastLaneAt(matrix value, constexpr int lane)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case cuda: __intrinsic_asm \"_waveShuffleMultiple(_getActiveMask(), $0, $1)\";\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"WaveReadLaneAt\";\n") +SLANG_RAW(" case glsl:\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" matrix result;\n") +SLANG_RAW(" for (int i = 0; i < N; ++i)\n") +SLANG_RAW(" result[i] = WaveBroadcastLaneAt(value[i], lane);\n") +SLANG_RAW(" return result;\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" return WaveMaskBroadcastLaneAt(WaveGetActiveMask(), value, lane);\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("// TODO(JS): If it can be determines that the `laneId` is constExpr, then subgroupBroadcast\n") +SLANG_RAW("// could be used on GLSL. For now we just use subgroupShuffle\n") +SLANG_RAW("/// @category wave\n") +SLANG_RAW("__generic\n") +SLANG_RAW("__glsl_extension(GL_KHR_shader_subgroup_shuffle)\n") +SLANG_RAW("__spirv_version(1.3)\n") +SLANG_RAW("[require(cuda_glsl_hlsl_spirv, subgroup_shuffle)]\n") +SLANG_RAW("T WaveReadLaneAt(T value, int lane)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case glsl:\n") +SLANG_RAW(" if (__isHalf()) __requireGLSLExtension(\"GL_EXT_shader_subgroup_extended_types_float16\");\n") +SLANG_RAW(" __intrinsic_asm \"subgroupShuffle($0, $1)\";\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"WaveReadLaneAt\";\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" let ulane = uint(lane);\n") +SLANG_RAW(" return spirv_asm {OpCapability GroupNonUniformShuffle; OpGroupNonUniformShuffle $$T result Subgroup $value $ulane};\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" return WaveMaskReadLaneAt(WaveGetActiveMask(), value, lane);\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("__generic\n") +SLANG_RAW("__spirv_version(1.3)\n") +SLANG_RAW("__glsl_extension(GL_KHR_shader_subgroup_shuffle)\n") +SLANG_RAW("[require(cuda_glsl_hlsl_spirv, subgroup_shuffle)]\n") +SLANG_RAW("vector WaveReadLaneAt(vector value, int lane)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case glsl:\n") +SLANG_RAW(" if (__isHalf()) __requireGLSLExtension(\"GL_EXT_shader_subgroup_extended_types_float16\");\n") +SLANG_RAW(" __intrinsic_asm \"subgroupShuffle($0, $1)\";\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"WaveReadLaneAt\";\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" let ulane = uint(lane);\n") +SLANG_RAW(" return spirv_asm {OpCapability GroupNonUniformShuffle; OpGroupNonUniformShuffle $$vector result Subgroup $value $ulane};\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" return WaveMaskReadLaneAt(WaveGetActiveMask(), value, lane);\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("__generic\n") 
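+SLANG_RAW("// Illustrative usage sketch (an assumption for documentation purposes, not part of\n") +SLANG_RAW("// the API surface): reading the value held by a specific lane from a compute shader.\n") +SLANG_RAW("// The variable names are hypothetical; per the HLSL spec the lane index passed to\n") +SLANG_RAW("// WaveReadLaneAt is expected to be uniform across the wave.\n") +SLANG_RAW("//\n") +SLANG_RAW("//   float perLaneValue = ComputePerLaneValue();           // hypothetical per-lane value\n") +SLANG_RAW("//   float fromLaneZero = WaveReadLaneAt(perLaneValue, 0); // every active lane receives lane 0's value\n") +SLANG_RAW("//\n")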
+SLANG_RAW("[require(cuda_glsl_hlsl_spirv, subgroup_shuffle)]\n") +SLANG_RAW("matrix WaveReadLaneAt(matrix value, int lane)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case cuda: __intrinsic_asm \"_waveShuffleMultiple(_getActiveMask(), $0, $1)\";\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"WaveReadLaneAt\";\n") +SLANG_RAW(" case glsl:\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" matrix result;\n") +SLANG_RAW(" for (int i = 0; i < N; ++i)\n") +SLANG_RAW(" result[i] = WaveReadLaneAt(value[i], lane);\n") +SLANG_RAW(" return result;\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" return WaveMaskReadLaneAt(WaveGetActiveMask(), value, lane);\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("// NOTE! WaveShuffle is a NON STANDARD HLSL intrinsic! It will map to WaveReadLaneAt on HLSL\n") +SLANG_RAW("// which means it will only work on hardware which allows arbitrary laneIds which is not true\n") +SLANG_RAW("// in general because it breaks the HLSL standard, which requires it's 'dynamically uniform' across the Wave.\n") +SLANG_RAW("/// @category wave\n") +SLANG_RAW("__generic\n") +SLANG_RAW("__glsl_extension(GL_KHR_shader_subgroup_shuffle)\n") +SLANG_RAW("__spirv_version(1.3)\n") +SLANG_RAW("[require(cuda_glsl_hlsl_spirv, subgroup_shuffle)]\n") +SLANG_RAW("T WaveShuffle(T value, int lane)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case glsl:\n") +SLANG_RAW(" if (__isHalf()) __requireGLSLExtension(\"GL_EXT_shader_subgroup_extended_types_float16\");\n") +SLANG_RAW(" __intrinsic_asm \"subgroupShuffle($0, $1)\";\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"WaveReadLaneAt\";\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" let ulane = uint(lane);\n") +SLANG_RAW(" return spirv_asm {OpCapability GroupNonUniformShuffle; OpGroupNonUniformShuffle $$T result Subgroup $value $ulane};\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" return WaveMaskShuffle(WaveGetActiveMask(), value, lane);\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("/// @category wave\n") +SLANG_RAW("__generic\n") +SLANG_RAW("__glsl_extension(GL_KHR_shader_subgroup_shuffle)\n") +SLANG_RAW("__spirv_version(1.3)\n") +SLANG_RAW("[require(cuda_glsl_hlsl_spirv, subgroup_shuffle)]\n") +SLANG_RAW("vector WaveShuffle(vector value, int lane)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case glsl:\n") +SLANG_RAW(" if (__isHalf()) __requireGLSLExtension(\"GL_EXT_shader_subgroup_extended_types_float16\");\n") +SLANG_RAW(" __intrinsic_asm \"subgroupShuffle($0, $1)\";\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"WaveReadLaneAt\";\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" let ulane = uint(lane);\n") +SLANG_RAW(" return spirv_asm {OpCapability GroupNonUniformShuffle; OpGroupNonUniformShuffle $$vector result Subgroup $value $ulane};\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" return WaveMaskShuffle(WaveGetActiveMask(), value, lane);\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[require(cuda_hlsl, subgroup_shuffle)]\n") +SLANG_RAW("matrix WaveShuffle(matrix value, int lane)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"WaveReadLaneAt\";\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" return WaveMaskShuffle(WaveGetActiveMask(), value, lane);\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("/// @category wave\n") 
+SLANG_RAW("__glsl_extension(GL_KHR_shader_subgroup_ballot)\n") +SLANG_RAW("__spirv_version(1.3)\n") +SLANG_RAW("[require(cuda_glsl_hlsl_spirv, subgroup_ballot)]\n") +SLANG_RAW("uint WavePrefixCountBits(bool value)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case glsl:\n") +SLANG_RAW(" __intrinsic_asm \"subgroupBallotExclusiveBitCount(subgroupBallot($0))\";\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"WavePrefixCountBits($0)\";\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" return spirv_asm\n") +SLANG_RAW(" {\n") +SLANG_RAW(" OpCapability GroupNonUniformBallot;\n") +SLANG_RAW(" %mask:$$uint4 = OpGroupNonUniformBallot Subgroup $value;\n") +SLANG_RAW(" OpGroupNonUniformBallotBitCount $$uint result Subgroup 2 %mask\n") +SLANG_RAW(" };\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" return WaveMaskPrefixCountBits(WaveGetActiveMask(), value);\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("/// @category wave\n") +SLANG_RAW("__glsl_extension(GL_KHR_shader_subgroup_ballot)\n") +SLANG_RAW("__spirv_version(1.3)\n") +SLANG_RAW("[require(cuda_glsl_hlsl_spirv, subgroup_ballot)]\n") +SLANG_RAW("uint4 WaveGetConvergedMulti()\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case glsl:\n") +SLANG_RAW(" __intrinsic_asm \"subgroupBallot(true)\";\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"WaveActiveBallot(true)\";\n") +SLANG_RAW(" case cuda: __intrinsic_asm \"make_uint4(__activemask(), 0, 0, 0)\";\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" let _true = true;\n") +SLANG_RAW(" return spirv_asm\n") +SLANG_RAW(" {\n") +SLANG_RAW(" OpCapability GroupNonUniformBallot;\n") +SLANG_RAW(" OpGroupNonUniformBallot $$uint4 result Subgroup $_true\n") +SLANG_RAW(" };\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("/// @category wave\n") +SLANG_RAW("[ForceInline]\n") +SLANG_RAW("uint4 WaveGetActiveMulti()\n") +SLANG_RAW("{\n") +SLANG_RAW(" return WaveGetConvergedMulti();\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("// Shader model 6.5 stuff\n") +SLANG_RAW("// https://github.com/microsoft/DirectX-Specs/blob/master/d3d/HLSL_ShaderModel6_5.md\n") +SLANG_RAW("\n") +SLANG_RAW("/// @category wave\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[require(cuda_glsl_hlsl_spirv, subgroup_partitioned)]\n") +SLANG_RAW("uint4 WaveMatch(T value)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"WaveMatch\";\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"subgroupPartitionNV($0)\";\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" return spirv_asm\n") +SLANG_RAW(" {\n") +SLANG_RAW(" OpCapability GroupNonUniformPartitionedNV;\n") +SLANG_RAW(" OpExtension \"SPV_NV_shader_subgroup_partitioned\";\n") +SLANG_RAW(" OpGroupNonUniformPartitionNV $$uint4 result $value\n") +SLANG_RAW(" };\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" return WaveMaskMatch(WaveGetActiveMask(), value);\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[require(cuda_glsl_hlsl_spirv, subgroup_partitioned)]\n") +SLANG_RAW("uint4 WaveMatch(vector value)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"WaveMatch\";\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"subgroupPartitionNV($0)\";\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" return spirv_asm\n") +SLANG_RAW(" {\n") +SLANG_RAW(" OpCapability GroupNonUniformPartitionedNV;\n") +SLANG_RAW(" OpExtension 
\"SPV_NV_shader_subgroup_partitioned\";\n") +SLANG_RAW(" OpGroupNonUniformPartitionNV $$uint4 result $value\n") +SLANG_RAW(" };\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" return WaveMaskMatch(WaveGetActiveMask(), value);\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[require(cuda_glsl_hlsl_spirv, subgroup_partitioned)]\n") +SLANG_RAW("uint4 WaveMatch(matrix value)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"WaveMatch\";\n") +SLANG_RAW(" case glsl:\n") +SLANG_RAW(" case cuda:\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" uint4 result = uint4(0xFFFFFFFF);\n") +SLANG_RAW(" [ForceUnroll]\n") +SLANG_RAW(" for (int i = 0; i < N; i++)\n") +SLANG_RAW(" result &= WaveMatch(value[i]);\n") +SLANG_RAW(" return result;\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" return WaveMaskMatch(WaveGetActiveMask(), value);\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("/// @category wave\n") +SLANG_RAW("[require(cuda_hlsl, waveprefix)]\n") +SLANG_RAW("uint WaveMultiPrefixCountBits(bool value, uint4 mask)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case cuda: __intrinsic_asm \"_popc(__ballot_sync(($1).x, $0) & _getLaneLtMask())\";\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"WaveMultiPrefixCountBits\";\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("/// @category wave\n") +SLANG_RAW("__generic\n") +SLANG_RAW("__glsl_extension(GL_KHR_shader_subgroup_arithmetic)\n") +SLANG_RAW("__spirv_version(1.3)\n") +SLANG_RAW("[require(cuda_glsl_hlsl, waveprefix)]\n") +SLANG_RAW("T WaveMultiPrefixBitAnd(T expr, uint4 mask)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case cuda: __intrinsic_asm \"_wavePrefixAnd(_getMultiPrefixMask(($1).x), $0)\";\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"subgroupExclusiveAnd($0)\";\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"WaveMultiPrefixBitAnd\";\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("__glsl_extension(GL_KHR_shader_subgroup_arithmetic)\n") +SLANG_RAW("__spirv_version(1.3)\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[require(cuda_glsl_hlsl, waveprefix)]\n") +SLANG_RAW("vector WaveMultiPrefixBitAnd(vector expr, uint4 mask)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case cuda: __intrinsic_asm \"_wavePrefixAndMultiple(_getMultiPrefixMask(($1).x), $0)\";\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"subgroupExclusiveAnd($0)\";\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"WaveMultiPrefixBitAnd\";\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[require(cuda_hlsl, waveprefix)]\n") +SLANG_RAW("matrix WaveMultiPrefixBitAnd(matrix expr, uint4 mask)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case cuda: __intrinsic_asm \"_wavePrefixAndMultiple(_getMultiPrefixMask(($1).x), $0)\";\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"WaveMultiPrefixBitAnd\";\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("/// @category wave\n") +SLANG_RAW("__generic\n") +SLANG_RAW("__glsl_extension(GL_KHR_shader_subgroup_arithmetic)\n") +SLANG_RAW("__spirv_version(1.3)\n") +SLANG_RAW("[require(cuda_glsl_hlsl, waveprefix)]\n") +SLANG_RAW("T WaveMultiPrefixBitOr(T expr, uint4 mask)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case cuda: __intrinsic_asm 
\"_wavePrefixOr(, _getMultiPrefixMask(($1).x), $0)\";\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"subgroupExclusiveOr($0)\";\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"WaveMultiPrefixBitOr\";\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("__generic\n") +SLANG_RAW("__glsl_extension(GL_KHR_shader_subgroup_arithmetic)\n") +SLANG_RAW("__spirv_version(1.3)\n") +SLANG_RAW("[require(cuda_glsl_hlsl, waveprefix)]\n") +SLANG_RAW("vector WaveMultiPrefixBitOr(vector expr, uint4 mask)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case cuda: __intrinsic_asm \"_wavePrefixOrMultiple(_getMultiPrefixMask(($1).x), $0)\";\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"subgroupExclusiveOr($0)\";\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"WaveMultiPrefixBitOr\";\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[require(cuda_hlsl, waveprefix)]\n") +SLANG_RAW("matrix WaveMultiPrefixBitOr(matrix expr, uint4 mask)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case cuda: __intrinsic_asm \"_wavePrefixOrMultiple(_getMultiPrefixMask(($1).x), $0)\";\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"WaveMultiPrefixBitOr\";\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("/// @category wave\n") +SLANG_RAW("__generic\n") +SLANG_RAW("__glsl_extension(GL_KHR_shader_subgroup_arithmetic)\n") +SLANG_RAW("__spirv_version(1.3)\n") +SLANG_RAW("[require(cuda_glsl_hlsl, waveprefix)]\n") +SLANG_RAW("T WaveMultiPrefixBitXor(T expr, uint4 mask)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case cuda: __intrinsic_asm \"_wavePrefixXor(_getMultiPrefixMask(($1).x), $0)\";\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"subgroupExclusiveXor($0)\";\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"WaveMultiPrefixBitXor\";\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("__generic\n") +SLANG_RAW("__glsl_extension(GL_KHR_shader_subgroup_arithmetic)\n") +SLANG_RAW("__spirv_version(1.3)\n") +SLANG_RAW("[require(cuda_glsl_hlsl, waveprefix)]\n") +SLANG_RAW("vector WaveMultiPrefixBitXor(vector expr, uint4 mask)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case cuda: __intrinsic_asm \"_wavePrefixXorMultiple(_getMultiPrefixMask(($1).x), $0)\";\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"subgroupExclusiveXor($0)\";\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"WaveMultiPrefixBitXor\";\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[require(cuda_hlsl, waveprefix)]\n") +SLANG_RAW("matrix WaveMultiPrefixBitXor(matrix expr, uint4 mask)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case cuda: __intrinsic_asm \"_wavePrefixXorMultiple(_getMultiPrefixMask(($1).x), $0)\";\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"WaveMultiPrefixBitXor\";\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("/// @category wave\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[require(cuda_hlsl, waveprefix)]\n") +SLANG_RAW("T WaveMultiPrefixProduct(T value, uint4 mask)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case cuda: __intrinsic_asm \"_wavePrefixProduct(_getMultiPrefixMask(($1).x), $0)\";\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"WaveMultiPrefixProduct\";\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("__generic\n") 
+SLANG_RAW("[require(cuda_hlsl, waveprefix)]\n") +SLANG_RAW("vector WaveMultiPrefixProduct(vector value, uint4 mask)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case cuda: __intrinsic_asm \"_wavePrefixProductMultiple(_getMultiPrefixMask(($1).x), $0)\";\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"WaveMultiPrefixProduct\";\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[require(cuda_hlsl, waveprefix)]\n") +SLANG_RAW("matrix WaveMultiPrefixProduct(matrix value, uint4 mask)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case cuda: __intrinsic_asm \"_wavePrefixProductMultiple(_getMultiPrefixMask(($1).x), $0)\";\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"WaveMultiPrefixProduct\";\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("/// @category wave\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[require(cuda_hlsl, waveprefix)]\n") +SLANG_RAW("T WaveMultiPrefixSum(T value, uint4 mask)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case cuda: __intrinsic_asm \"_wavePrefixSum(_getMultiPrefixMask(($1).x), $0)\";\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"WaveMultiPrefixSum\";\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[require(cuda_hlsl, waveprefix)]\n") +SLANG_RAW("vector WaveMultiPrefixSum(vector value, uint4 mask)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case cuda: __intrinsic_asm \"_wavePrefixSumMultiple(_getMultiPrefixMask(($1).x), $0 )\";\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"WaveMultiPrefixSum\";\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[require(cuda_hlsl, waveprefix)]\n") +SLANG_RAW("matrix WaveMultiPrefixSum(matrix value, uint4 mask)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case cuda: __intrinsic_asm \"_wavePrefixSumMultiple(_getMultiPrefixMask(($1).x), $0)\";\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"WaveMultiPrefixSum\";\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("__glsl_extension(GL_EXT_demote_to_helper_invocation)\n") +SLANG_RAW("[ForceInline]\n") +SLANG_RAW("[require(glsl_hlsl_metal_spirv, helper_lane)]\n") +SLANG_RAW("bool IsHelperLane()\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch {\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"IsHelperLane()\";\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"gl_HelperInvocation\";\n") +SLANG_RAW(" case metal: __intrinsic_asm \"simd_is_helper_thread()\";\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" return spirv_asm {\n") +SLANG_RAW(" OpExtension \"SPV_EXT_demote_to_helper_invocation\";\n") +SLANG_RAW(" OpCapability DemoteToHelperInvocationEXT;\n") +SLANG_RAW(" result:$$bool = OpIsHelperInvocationEXT\n") +SLANG_RAW(" };\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("// `typedef`s to help with the fact that HLSL has been sorta-kinda case insensitive at various points\n") +SLANG_RAW("//@hidden:\n") +SLANG_RAW("typedef Texture2D texture2D;\n") +SLANG_RAW("//@public:\n") +SLANG_RAW("\n") + + +// Buffer types + +static const struct { + char const* name; + SlangResourceAccess access; +} kBaseBufferAccessLevels[] = { + { "", SLANG_RESOURCE_ACCESS_READ }, + { "RW", SLANG_RESOURCE_ACCESS_READ_WRITE }, + { "RasterizerOrdered", SLANG_RESOURCE_ACCESS_RASTER_ORDERED }, +}; +static const int 
kBaseBufferAccessLevelCount = sizeof(kBaseBufferAccessLevels) / sizeof(kBaseBufferAccessLevels[0]); + +for (int aa = 0; aa < kBaseBufferAccessLevelCount; ++aa) +{ + auto access = kBaseBufferAccessLevels[aa].access; + sb << "/// @category texture_types\n"; + sb << "__generic\n"; + sb << "typealias "; + sb << kBaseBufferAccessLevels[aa].name; + sb << "Buffer = _Texture;\n"; + + bool isReadOnly = aa == 0; + + char const* glslTextureSizeFunc = (isReadOnly) ? "textureSize" : "imageSize"; + char const* glslLoadFuncName = (isReadOnly) ? "texelFetch" : "imageLoad"; + char const* spvLoadInstName = (isReadOnly) ? "OpImageFetch" : "OpImageRead"; + char const* requireToSetQuery = (isReadOnly) ? "[require(glsl_hlsl_metal_spirv, texture_size)]" : "[require(glsl_hlsl_metal_spirv, image_size)]"; + char const* requireToSet = (isReadOnly) ? "[require(glsl_hlsl_metal_spirv, texture_sm_4_1)]" : "[require(glsl_hlsl_metal_spirv, texture_sm_4_1_compute_fragment)]"; + char const* requireToSet_onlyHLSL = (isReadOnly) ? "[require(hlsl, texture_sm_4_1)]" : "[require(hlsl, texture_sm_4_1_compute_fragment)]"; +SLANG_RAW("#line 15242 \"hlsl.meta.slang\"") +SLANG_RAW("\n") +SLANG_RAW("\n") +SLANG_RAW("__generic\n") +SLANG_RAW("extension _Texture\n") +SLANG_RAW("{\n") +SLANG_RAW(" [__readNone]\n") +SLANG_RAW(" ") +SLANG_SPLICE(requireToSetQuery +) +SLANG_RAW("\n") +SLANG_RAW(" void GetDimensions(out uint dim)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \".GetDimensions\";\n") +SLANG_RAW(" case glsl:\n") +SLANG_RAW(" __requireGLSLExtension(\"GL_EXT_samplerless_texture_functions\");\n") +SLANG_RAW(" __intrinsic_asm \"($1 = ") +SLANG_SPLICE(glslTextureSizeFunc +) +SLANG_RAW("($0))\";\n") +SLANG_RAW(" case metal: __intrinsic_asm \"(*($1) = $0.get_width())\";\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" dim = spirv_asm {\n") +SLANG_RAW(" OpCapability ImageQuery;\n") +SLANG_RAW(" result:$$uint = OpImageQuerySize $this;\n") +SLANG_RAW(" };\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW("\n") +SLANG_RAW(" ") +SLANG_SPLICE(isReadOnly?"[__readNone] ":"" +) +SLANG_RAW("\n") +SLANG_RAW(" ") +SLANG_SPLICE(requireToSet +) +SLANG_RAW("\n") +SLANG_RAW(" T Load(int location)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \".Load\";\n") +SLANG_RAW(" case metal: __intrinsic_asm \"$c$0.read(uint($1))$z\";\n") +SLANG_RAW(" case glsl:\n") +SLANG_RAW(" __requireGLSLExtension(\"GL_EXT_samplerless_texture_functions\");\n") +SLANG_RAW(" __intrinsic_asm \"") +SLANG_SPLICE(glslLoadFuncName +) +SLANG_RAW("($0, $1)$z\";\n") +SLANG_RAW(" case spirv: return spirv_asm {\n") +SLANG_RAW(" %sampled:__sampledType(T) = ") +SLANG_SPLICE(spvLoadInstName +) +SLANG_RAW(" $this $location;\n") +SLANG_RAW(" __truncate $$T result __sampledType(T) %sampled;\n") +SLANG_RAW(" };\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW("\n") +SLANG_RAW(" ") +SLANG_SPLICE(isReadOnly?"[__readNone] ":"" +) +SLANG_RAW("\n") +SLANG_RAW(" ") +SLANG_SPLICE(requireToSet_onlyHLSL +) +SLANG_RAW("\n") +SLANG_RAW(" T Load(int location, out uint status)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \".Load\";\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW("\n") +SLANG_RAW(" __subscript(uint index) -> T {\n") +SLANG_RAW("\n") +SLANG_RAW(" ") +SLANG_SPLICE(isReadOnly?"[__readNone] ":"" +) +SLANG_RAW("\n") +SLANG_RAW(" [ForceInline]\n") +SLANG_RAW(" ") 
+SLANG_SPLICE(requireToSet +) +SLANG_RAW("\n") +SLANG_RAW(" get { return Load((int)index); }\n") + + if (access != SLANG_RESOURCE_ACCESS_READ) { +SLANG_RAW("#line 15302 \"hlsl.meta.slang\"") +SLANG_RAW("\n") +SLANG_RAW(" [nonmutating] \n") +SLANG_RAW(" ") +SLANG_SPLICE(requireToSet +) +SLANG_RAW("\n") +SLANG_RAW(" set\n") +SLANG_RAW(" {\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"($0)[$1] = $2\";\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"imageStore($0, int($1), $V2)\";\n") +SLANG_RAW(" case metal: __intrinsic_asm \"$0.write($2, $1)\";\n") +SLANG_RAW(" case spirv: spirv_asm {\n") +SLANG_RAW(" OpImageWrite $this $index __convertTexel(newValue);\n") +SLANG_RAW(" };\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW("\n") +SLANG_RAW(" // If a 'Texture[index]' is referred to by a '__ref', call 'kIROp_ImageSubscript(index)'.\n") +SLANG_RAW(" // This allows call's to stay aware that the input is from a 'Texture'.\n") +SLANG_RAW(" __intrinsic_op(") +SLANG_SPLICE(kIROp_ImageSubscript +) +SLANG_RAW(")\n") +SLANG_RAW(" [nonmutating]\n") +SLANG_RAW(" ref;\n") + + } // access != SLANG_RESOURCE_ACCESS_READ +SLANG_RAW("#line 15325 \"hlsl.meta.slang\"") +SLANG_RAW("\n") +SLANG_RAW("\n") +SLANG_RAW(" }\n") +SLANG_RAW(" \n") +SLANG_RAW("\n") +SLANG_RAW(" }; // end extension\n") + +} +SLANG_RAW("#line 15333 \"hlsl.meta.slang\"") +SLANG_RAW("\n") +SLANG_RAW("\n") +SLANG_RAW("\n") +SLANG_RAW("// DirectX Raytracing (DXR) Support\n") +SLANG_RAW("//\n") +SLANG_RAW("// The following is based on the experimental DXR SDK v0.09.01.\n") +SLANG_RAW("//\n") +SLANG_RAW("// Numbering follows the sections in the \"D3D12 Raytracing Functional Spec\" v0.09 (2018-03-12)\n") +SLANG_RAW("//\n") +SLANG_RAW("\n") +SLANG_RAW("// 10.1.1 - Ray Flags\n") +SLANG_RAW("\n") +SLANG_RAW("/// Flags that control ray traversal behavior and shader execution.\n") +SLANG_RAW("/// @category raytracing\n") +SLANG_RAW("typedef uint RAY_FLAG;\n") +SLANG_RAW("\n") +SLANG_RAW("/// No special ray flags.\n") +SLANG_RAW("/// @category raytracing\n") +SLANG_RAW("static const RAY_FLAG RAY_FLAG_NONE = 0x00;\n") +SLANG_RAW("\n") +SLANG_RAW("/// Forces all geometries to be treated as opaque, disabling any-hit shader execution.\n") +SLANG_RAW("/// @category raytracing\n") +SLANG_RAW("static const RAY_FLAG RAY_FLAG_FORCE_OPAQUE = 0x01;\n") +SLANG_RAW("\n") +SLANG_RAW("/// Forces all geometries to be treated as non-opaque, enabling any-hit shader execution.\n") +SLANG_RAW("/// @category raytracing\n") +SLANG_RAW("static const RAY_FLAG RAY_FLAG_FORCE_NON_OPAQUE = 0x02;\n") +SLANG_RAW("\n") +SLANG_RAW("/// Accepts the first intersection found and skips searching for closer hits.\n") +SLANG_RAW("/// @category raytracing\n") +SLANG_RAW("static const RAY_FLAG RAY_FLAG_ACCEPT_FIRST_HIT_AND_END_SEARCH = 0x04;\n") +SLANG_RAW("\n") +SLANG_RAW("/// Skips execution of closest hit shaders, useful for shadow rays.\n") +SLANG_RAW("/// @category raytracing\n") +SLANG_RAW("static const RAY_FLAG RAY_FLAG_SKIP_CLOSEST_HIT_SHADER = 0x08;\n") +SLANG_RAW("\n") +SLANG_RAW("/// Culls triangles facing away from the ray origin.\n") +SLANG_RAW("/// @category raytracing\n") +SLANG_RAW("static const RAY_FLAG RAY_FLAG_CULL_BACK_FACING_TRIANGLES = 0x10;\n") +SLANG_RAW("\n") +SLANG_RAW("/// Culls triangles facing toward the ray origin.\n") +SLANG_RAW("/// @category raytracing\n") +SLANG_RAW("static const RAY_FLAG RAY_FLAG_CULL_FRONT_FACING_TRIANGLES = 0x20;\n") +SLANG_RAW("\n") +SLANG_RAW("/// Skips intersections with opaque 
geometry.\n") +SLANG_RAW("/// @category raytracing\n") +SLANG_RAW("static const RAY_FLAG RAY_FLAG_CULL_OPAQUE = 0x40;\n") +SLANG_RAW("\n") +SLANG_RAW("/// Skips intersections with non-opaque geometry.\n") +SLANG_RAW("/// @category raytracing\n") +SLANG_RAW("static const RAY_FLAG RAY_FLAG_CULL_NON_OPAQUE = 0x80;\n") +SLANG_RAW("\n") +SLANG_RAW("/// Skips all triangle intersections.\n") +SLANG_RAW("/// @category raytracing\n") +SLANG_RAW("static const RAY_FLAG RAY_FLAG_SKIP_TRIANGLES = 0x100;\n") +SLANG_RAW("\n") +SLANG_RAW("/// Skips all procedural primitive intersections.\n") +SLANG_RAW("/// @category raytracing\n") +SLANG_RAW("static const RAY_FLAG RAY_FLAG_SKIP_PROCEDURAL_PRIMITIVES = 0x200;\n") +SLANG_RAW("\n") +SLANG_RAW("// 10.1.2 - Ray Description Structure\n") +SLANG_RAW("/// Describes a ray for traversal through an acceleration structure.\n") +SLANG_RAW("/// @category raytracing\n") +SLANG_RAW("__target_intrinsic(hlsl, RayDesc)\n") +SLANG_RAW("__target_intrinsic(cuda, RayDesc)\n") +SLANG_RAW("struct RayDesc\n") +SLANG_RAW("{\n") +SLANG_RAW(" /// Starting point of the ray in world space.\n") +SLANG_RAW(" __target_intrinsic(hlsl, Origin)\n") +SLANG_RAW(" __target_intrinsic(cuda, Origin)\n") +SLANG_RAW(" float3 Origin;\n") +SLANG_RAW("\n") +SLANG_RAW(" /// Minimum distance along the ray to consider intersections.\n") +SLANG_RAW(" __target_intrinsic(hlsl, TMin)\n") +SLANG_RAW(" __target_intrinsic(cuda, TMin)\n") +SLANG_RAW(" float TMin;\n") +SLANG_RAW("\n") +SLANG_RAW(" /// Normalized direction vector of the ray in world space.\n") +SLANG_RAW(" __target_intrinsic(hlsl, Direction)\n") +SLANG_RAW(" __target_intrinsic(cuda, Direction)\n") +SLANG_RAW(" float3 Direction;\n") +SLANG_RAW("\n") +SLANG_RAW(" /// Maximum distance along the ray to consider intersections.\n") +SLANG_RAW(" __target_intrinsic(hlsl, TMax)\n") +SLANG_RAW(" __target_intrinsic(cuda, TMax)\n") +SLANG_RAW(" float TMax;\n") +SLANG_RAW("};\n") +SLANG_RAW("\n") +SLANG_RAW("// 10.1.3 - Ray Acceleration Structure\n") +SLANG_RAW("/// Opaque type representing a ray-tracing acceleration structure.\n") +SLANG_RAW("/// @category raytracing\n") +SLANG_RAW("__builtin\n") +SLANG_RAW("__magic_type(RaytracingAccelerationStructureType)\n") +SLANG_RAW("__intrinsic_type(") +SLANG_SPLICE(kIROp_RaytracingAccelerationStructureType +) +SLANG_RAW(")\n") +SLANG_RAW("struct RaytracingAccelerationStructure {};\n") +SLANG_RAW("\n") +SLANG_RAW("// 10.1.4 - Subobject Definitions\n") +SLANG_RAW("\n") +SLANG_RAW("// TODO: We may decide to support these, but their reliance on C++ implicit\n") +SLANG_RAW("// constructor call syntax (`SomeType someVar(arg0, arg1);`) makes them\n") +SLANG_RAW("// annoying for the current Slang parsing strategy, and using global variables\n") +SLANG_RAW("// for this stuff comes across as a kludge rather than the best possible design.\n") +SLANG_RAW("\n") +SLANG_RAW("// 10.1.5 - Intersection Attributes Structure\n") +SLANG_RAW("/// Built-in structure containing intersection attributes for triangle primitives.\n") +SLANG_RAW("/// @category raytracing\n") +SLANG_RAW("__target_intrinsic(hlsl, BuiltInTriangleIntersectionAttributes)\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_spirv, raytracing)]\n") +SLANG_RAW("struct BuiltInTriangleIntersectionAttributes\n") +SLANG_RAW("{\n") +SLANG_RAW(" /// Barycentric coordinates of the intersection point on the triangle.\n") +SLANG_RAW(" __target_intrinsic(hlsl, barycentrics)\n") +SLANG_RAW(" float2 barycentrics;\n") +SLANG_RAW("};\n") +SLANG_RAW("\n") +SLANG_RAW("// 10.2 Shaders\n") 
+SLANG_RAW("\n") +SLANG_RAW("// Right now new shader stages need to be added directly to the compiler\n") +SLANG_RAW("// implementation, rather than being something that can be declared in the core module.\n") +SLANG_RAW("\n") +SLANG_RAW("// 10.3 - Intrinsics\n") +SLANG_RAW("\n") +SLANG_RAW("// 10.3.1\n") +SLANG_RAW("\n") +SLANG_RAW("// `executeCallableNV` is the GLSL intrinsic that will be used to implement\n") +SLANG_RAW("// `CallShader()` for GLSL-based targets.\n") +SLANG_RAW("//\n") +SLANG_RAW("[require(glsl, raytracing_raygen_closesthit_miss_callable)]\n") +SLANG_RAW("void __executeCallable(uint shaderIndex, int payloadLocation)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"executeCallableEXT\";\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("// Next is the custom intrinsic that will compute the payload location\n") +SLANG_RAW("// for a type being used in a `CallShader()` call for GLSL-based targets.\n") +SLANG_RAW("//\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("__intrinsic_op(") +SLANG_SPLICE(kIROp_GetVulkanRayTracingPayloadLocation +) +SLANG_RAW(")\n") +SLANG_RAW("int __callablePayloadLocation(__ref Payload payload);\n") +SLANG_RAW("\n") +SLANG_RAW("// Now we provide a hard-coded definition of `CallShader()` for GLSL-based\n") +SLANG_RAW("// targets, which maps the generic HLSL operation into the non-generic\n") +SLANG_RAW("// GLSL equivalent.\n") +SLANG_RAW("//\n") +SLANG_RAW("/// Executes a callable shader with the specified payload.\n") +SLANG_RAW("/// @param shaderIndex Index of the callable shader to execute\n") +SLANG_RAW("/// @param payload Data structure to pass to and receive from the callable shader\n") +SLANG_RAW("/// @remarks Used to implement dynamic shader calls during ray tracing\n") +SLANG_RAW("/// @category raytracing\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[require(glsl_hlsl_spirv, raytracing_raygen_closesthit_miss_callable)]\n") +SLANG_RAW("void CallShader(uint shaderIndex, inout Payload payload)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"CallShader\";\n") +SLANG_RAW(" case glsl:\n") +SLANG_RAW(" {\n") +SLANG_RAW(" [__vulkanCallablePayload]\n") +SLANG_RAW(" static Payload p;\n") +SLANG_RAW("\n") +SLANG_RAW(" p = payload;\n") +SLANG_RAW(" __executeCallable(shaderIndex, __callablePayloadLocation(p));\n") +SLANG_RAW(" payload = p;\n") +SLANG_RAW(" }\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" {\n") +SLANG_RAW(" [__vulkanCallablePayload]\n") +SLANG_RAW(" static Payload p;\n") +SLANG_RAW("\n") +SLANG_RAW(" p = payload;\n") +SLANG_RAW(" spirv_asm \n") +SLANG_RAW(" {\n") +SLANG_RAW(" OpExecuteCallableKHR $shaderIndex &p\n") +SLANG_RAW(" };\n") +SLANG_RAW(" payload = p;\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("// 10.3.2\n") +SLANG_RAW("\n") +SLANG_RAW("// Some functions only accept a \"struct type\" parameter. The \n") +SLANG_RAW("// following function addresses this issue by transforming non-struct\n") +SLANG_RAW("// parameters into a struct. \n") +SLANG_RAW("// side effect typed use locations (`inout`,`out`, etc.) 
are managed.\n") +SLANG_RAW("__generic\n") +SLANG_RAW("__intrinsic_op(") +SLANG_SPLICE(kIROp_ForceVarIntoStructTemporarily +) +SLANG_RAW(")\n") +SLANG_RAW("Ref __forceVarIntoStructTemporarily(inout T maybeStruct);\n") +SLANG_RAW("\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[require(hlsl, raytracing)]\n") +SLANG_RAW("void __traceRayHLSL(\n") +SLANG_RAW(" RaytracingAccelerationStructure AccelerationStructure,\n") +SLANG_RAW(" uint RayFlags,\n") +SLANG_RAW(" uint InstanceInclusionMask,\n") +SLANG_RAW(" uint RayContributionToHitGroupIndex,\n") +SLANG_RAW(" uint MultiplierForGeometryContributionToHitGroupIndex,\n") +SLANG_RAW(" uint MissShaderIndex,\n") +SLANG_RAW(" RayDesc Ray,\n") +SLANG_RAW(" inout payload_t Payload)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"TraceRay\";\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("[require(glsl, raytracing_raygen_closesthit_miss)]\n") +SLANG_RAW("void __traceRay(\n") +SLANG_RAW(" RaytracingAccelerationStructure AccelerationStructure,\n") +SLANG_RAW(" uint RayFlags,\n") +SLANG_RAW(" uint InstanceInclusionMask,\n") +SLANG_RAW(" uint RayContributionToHitGroupIndex,\n") +SLANG_RAW(" uint MultiplierForGeometryContributionToHitGroupIndex,\n") +SLANG_RAW(" uint MissShaderIndex,\n") +SLANG_RAW(" float3 Origin,\n") +SLANG_RAW(" float TMin,\n") +SLANG_RAW(" float3 Direction,\n") +SLANG_RAW(" float TMax,\n") +SLANG_RAW(" int PayloadLocation)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"traceRayEXT\";\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("// TODO: Slang's parsing logic currently puts modifiers on\n") +SLANG_RAW("// the `GenericDecl` rather than the inner decl when\n") +SLANG_RAW("// using our default syntax, which seems wrong. 
We need\n") +SLANG_RAW("// to fix this, but for now using the expanded `__generic`\n") +SLANG_RAW("// syntax works in a pinch.\n") +SLANG_RAW("//\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("__intrinsic_op(") +SLANG_SPLICE(kIROp_GetVulkanRayTracingPayloadLocation +) +SLANG_RAW(")\n") +SLANG_RAW("int __rayPayloadLocation(__ref Payload payload);\n") +SLANG_RAW("\n") +SLANG_RAW("/// Traces a ray through the acceleration structure.\n") +SLANG_RAW("/// @param AccelerationStructure The acceleration structure to traverse\n") +SLANG_RAW("/// @param RayFlags Flags controlling ray behavior\n") +SLANG_RAW("/// @param InstanceInclusionMask Mask for filtering instance visibility\n") +SLANG_RAW("/// @param RayContributionToHitGroupIndex Offset for hit group indexing\n") +SLANG_RAW("/// @param MultiplierForGeometryContributionToHitGroupIndex Multiplier for geometry-based hit group indexing\n") +SLANG_RAW("/// @param MissShaderIndex Index of the miss shader to execute if no hit is found\n") +SLANG_RAW("/// @param Ray Description of the ray to trace\n") +SLANG_RAW("/// @param Payload Structure for passing data between shaders\n") +SLANG_RAW("/// @remarks Core ray tracing function for initiating traversal\n") +SLANG_RAW("/// @category raytracing\n") +SLANG_RAW("[ForceInline]\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[require(cuda_glsl_hlsl_spirv, raytracing_raygen_closesthit_miss)]\n") +SLANG_RAW("void TraceRay(\n") +SLANG_RAW(" RaytracingAccelerationStructure AccelerationStructure,\n") +SLANG_RAW(" uint RayFlags,\n") +SLANG_RAW(" uint InstanceInclusionMask,\n") +SLANG_RAW(" uint RayContributionToHitGroupIndex,\n") +SLANG_RAW(" uint MultiplierForGeometryContributionToHitGroupIndex,\n") +SLANG_RAW(" uint MissShaderIndex,\n") +SLANG_RAW(" RayDesc Ray,\n") +SLANG_RAW(" inout payload_t Payload)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl:\n") +SLANG_RAW(" __traceRayHLSL(\n") +SLANG_RAW(" AccelerationStructure,\n") +SLANG_RAW(" RayFlags,\n") +SLANG_RAW(" InstanceInclusionMask,\n") +SLANG_RAW(" RayContributionToHitGroupIndex,\n") +SLANG_RAW(" MultiplierForGeometryContributionToHitGroupIndex,\n") +SLANG_RAW(" MissShaderIndex,\n") +SLANG_RAW(" Ray,\n") +SLANG_RAW(" __forceVarIntoStructTemporarily(Payload));\n") +SLANG_RAW(" return;\n") +SLANG_RAW(" case cuda: __intrinsic_asm \"traceOptiXRay\";\n") +SLANG_RAW(" case glsl:\n") +SLANG_RAW(" {\n") +SLANG_RAW(" [__vulkanRayPayload]\n") +SLANG_RAW(" static payload_t p;\n") +SLANG_RAW("\n") +SLANG_RAW(" p = Payload;\n") +SLANG_RAW(" __traceRay(\n") +SLANG_RAW(" AccelerationStructure,\n") +SLANG_RAW(" RayFlags,\n") +SLANG_RAW(" InstanceInclusionMask,\n") +SLANG_RAW(" RayContributionToHitGroupIndex,\n") +SLANG_RAW(" MultiplierForGeometryContributionToHitGroupIndex,\n") +SLANG_RAW(" MissShaderIndex,\n") +SLANG_RAW(" Ray.Origin,\n") +SLANG_RAW(" Ray.TMin,\n") +SLANG_RAW(" Ray.Direction,\n") +SLANG_RAW(" Ray.TMax,\n") +SLANG_RAW(" __rayPayloadLocation(p));\n") +SLANG_RAW(" Payload = p;\n") +SLANG_RAW(" }\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" {\n") +SLANG_RAW(" [__vulkanRayPayload]\n") +SLANG_RAW(" static payload_t p;\n") +SLANG_RAW("\n") +SLANG_RAW(" p = Payload;\n") +SLANG_RAW(" let origin = Ray.Origin;\n") +SLANG_RAW(" let direction = Ray.Direction;\n") +SLANG_RAW(" let tmin = Ray.TMin;\n") +SLANG_RAW(" let tmax = Ray.TMax;\n") +SLANG_RAW(" spirv_asm \n") +SLANG_RAW(" {\n") +SLANG_RAW(" OpTraceRayKHR \n") +SLANG_RAW(" /**/ $AccelerationStructure\n") +SLANG_RAW(" /**/ $RayFlags\n") 
+SLANG_RAW(" /**/ $InstanceInclusionMask\n") +SLANG_RAW(" /**/ $RayContributionToHitGroupIndex\n") +SLANG_RAW(" /**/ $MultiplierForGeometryContributionToHitGroupIndex\n") +SLANG_RAW(" /**/ $MissShaderIndex\n") +SLANG_RAW(" /**/ $origin\n") +SLANG_RAW(" /**/ $tmin\n") +SLANG_RAW(" /**/ $direction\n") +SLANG_RAW(" /**/ $tmax\n") +SLANG_RAW(" /**/ &p;\n") +SLANG_RAW(" };\n") +SLANG_RAW(" Payload = p;\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("// NOTE!\n") +SLANG_RAW("// The name of the following functions may change when DXR supports\n") +SLANG_RAW("// a feature similar to the `GL_NV_ray_tracing_motion_blur` extension\n") +SLANG_RAW("//\n") +SLANG_RAW("// https://github.com/KhronosGroup/GLSL/blob/master/extensions/nv/GLSL_NV_ray_tracing_motion_blur.txt\n") +SLANG_RAW("\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[require(hlsl, raytracing_motionblur)]\n") +SLANG_RAW("void __traceMotionRayHLSL(\n") +SLANG_RAW(" RaytracingAccelerationStructure AccelerationStructure,\n") +SLANG_RAW(" uint RayFlags,\n") +SLANG_RAW(" uint InstanceInclusionMask,\n") +SLANG_RAW(" uint RayContributionToHitGroupIndex,\n") +SLANG_RAW(" uint MultiplierForGeometryContributionToHitGroupIndex,\n") +SLANG_RAW(" uint MissShaderIndex,\n") +SLANG_RAW(" RayDesc Ray,\n") +SLANG_RAW(" float CurrentTime,\n") +SLANG_RAW(" inout payload_t Payload)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"TraceMotionRay\";\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("__glsl_extension(GL_NV_ray_tracing_motion_blur)\n") +SLANG_RAW("[require(glsl, raytracing_motionblur_raygen_closesthit_miss)]\n") +SLANG_RAW("void __traceMotionRay(\n") +SLANG_RAW(" RaytracingAccelerationStructure AccelerationStructure,\n") +SLANG_RAW(" uint RayFlags,\n") +SLANG_RAW(" uint InstanceInclusionMask,\n") +SLANG_RAW(" uint RayContributionToHitGroupIndex,\n") +SLANG_RAW(" uint MultiplierForGeometryContributionToHitGroupIndex,\n") +SLANG_RAW(" uint MissShaderIndex,\n") +SLANG_RAW(" float3 Origin,\n") +SLANG_RAW(" float TMin,\n") +SLANG_RAW(" float3 Direction,\n") +SLANG_RAW(" float TMax,\n") +SLANG_RAW(" float CurrentTime,\n") +SLANG_RAW(" int PayloadLocation)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"traceRayMotionNV\";\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("/// Traces a ray with motion blur support through the acceleration structure.\n") +SLANG_RAW("/// @param AccelerationStructure The acceleration structure to traverse\n") +SLANG_RAW("/// @param RayFlags Flags controlling ray behavior\n") +SLANG_RAW("/// @param InstanceInclusionMask Mask for filtering instance visibility\n") +SLANG_RAW("/// @param RayContributionToHitGroupIndex Offset for hit group indexing\n") +SLANG_RAW("/// @param MultiplierForGeometryContributionToHitGroupIndex Multiplier for geometry-based hit group indexing\n") +SLANG_RAW("/// @param MissShaderIndex Index of the miss shader to execute if no hit is found\n") +SLANG_RAW("/// @param Ray Description of the ray to trace\n") +SLANG_RAW("/// @param CurrentTime Time value for motion blur interpolation\n") +SLANG_RAW("/// @param Payload Structure for passing data between shaders\n") +SLANG_RAW("/// @remarks Extended version of TraceRay with motion blur support\n") +SLANG_RAW("/// @category raytracing\n") +SLANG_RAW("[ForceInline]\n") +SLANG_RAW("[require(glsl_hlsl_spirv, 
raytracing_motionblur_raygen_closesthit_miss)]\n") +SLANG_RAW("__generic\n") +SLANG_RAW("void TraceMotionRay(\n") +SLANG_RAW(" RaytracingAccelerationStructure AccelerationStructure,\n") +SLANG_RAW(" uint RayFlags,\n") +SLANG_RAW(" uint InstanceInclusionMask,\n") +SLANG_RAW(" uint RayContributionToHitGroupIndex,\n") +SLANG_RAW(" uint MultiplierForGeometryContributionToHitGroupIndex,\n") +SLANG_RAW(" uint MissShaderIndex,\n") +SLANG_RAW(" RayDesc Ray,\n") +SLANG_RAW(" float CurrentTime,\n") +SLANG_RAW(" inout payload_t Payload)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl:\n") +SLANG_RAW(" __traceMotionRayHLSL(\n") +SLANG_RAW(" AccelerationStructure,\n") +SLANG_RAW(" RayFlags,\n") +SLANG_RAW(" InstanceInclusionMask,\n") +SLANG_RAW(" RayContributionToHitGroupIndex,\n") +SLANG_RAW(" MultiplierForGeometryContributionToHitGroupIndex,\n") +SLANG_RAW(" MissShaderIndex,\n") +SLANG_RAW(" Ray,\n") +SLANG_RAW(" CurrentTime,\n") +SLANG_RAW(" __forceVarIntoStructTemporarily(Payload));\n") +SLANG_RAW(" return;\n") +SLANG_RAW(" case glsl:\n") +SLANG_RAW(" {\n") +SLANG_RAW(" [__vulkanRayPayload]\n") +SLANG_RAW(" static payload_t p;\n") +SLANG_RAW("\n") +SLANG_RAW(" p = Payload;\n") +SLANG_RAW(" __traceMotionRay(\n") +SLANG_RAW(" AccelerationStructure,\n") +SLANG_RAW(" RayFlags,\n") +SLANG_RAW(" InstanceInclusionMask,\n") +SLANG_RAW(" RayContributionToHitGroupIndex,\n") +SLANG_RAW(" MultiplierForGeometryContributionToHitGroupIndex,\n") +SLANG_RAW(" MissShaderIndex,\n") +SLANG_RAW(" Ray.Origin,\n") +SLANG_RAW(" Ray.TMin,\n") +SLANG_RAW(" Ray.Direction,\n") +SLANG_RAW(" Ray.TMax,\n") +SLANG_RAW(" CurrentTime,\n") +SLANG_RAW(" __rayPayloadLocation(p));\n") +SLANG_RAW(" Payload = p;\n") +SLANG_RAW(" }\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" {\n") +SLANG_RAW(" [__vulkanRayPayload]\n") +SLANG_RAW(" static payload_t p;\n") +SLANG_RAW(" \n") +SLANG_RAW(" let origin = Ray.Origin;\n") +SLANG_RAW(" let direction = Ray.Direction;\n") +SLANG_RAW(" let tmin = Ray.TMin;\n") +SLANG_RAW(" let tmax = Ray.TMax;\n") +SLANG_RAW("\n") +SLANG_RAW(" p = Payload;\n") +SLANG_RAW(" spirv_asm \n") +SLANG_RAW(" {\n") +SLANG_RAW(" OpCapability RayTracingMotionBlurNV;\n") +SLANG_RAW(" OpExtension \"SPV_NV_ray_tracing_motion_blur\";\n") +SLANG_RAW("\n") +SLANG_RAW(" OpTraceRayMotionNV\n") +SLANG_RAW(" /**/ $AccelerationStructure\n") +SLANG_RAW(" /**/ $RayFlags\n") +SLANG_RAW(" /**/ $InstanceInclusionMask\n") +SLANG_RAW(" /**/ $RayContributionToHitGroupIndex\n") +SLANG_RAW(" /**/ $MultiplierForGeometryContributionToHitGroupIndex\n") +SLANG_RAW(" /**/ $MissShaderIndex\n") +SLANG_RAW(" /**/ $origin\n") +SLANG_RAW(" /**/ $tmin\n") +SLANG_RAW(" /**/ $direction\n") +SLANG_RAW(" /**/ $tmax\n") +SLANG_RAW(" /**/ $CurrentTime\n") +SLANG_RAW(" /**/ &p;\n") +SLANG_RAW(" };\n") +SLANG_RAW(" Payload = p;\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("// 10.3.3\n") +SLANG_RAW("\n") +SLANG_RAW("[require(glsl_spirv, raytracing_intersection)]\n") +SLANG_RAW("bool __reportIntersection(float tHit, uint hitKind)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"reportIntersectionEXT\";\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" return spirv_asm \n") +SLANG_RAW(" {\n") +SLANG_RAW(" result:$$bool = OpReportIntersectionKHR $tHit $hitKind;\n") +SLANG_RAW(" };\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("/// Reports a hit from an intersection shader.\n") +SLANG_RAW("/// 
@param tHit Distance along the ray where the intersection occurred\n") +SLANG_RAW("/// @param hitKind User-defined value identifying the type of hit\n") +SLANG_RAW("/// @param attributes Custom attributes for the intersection\n") +SLANG_RAW("/// @return true if the hit was accepted, false if rejected\n") +SLANG_RAW("/// @remarks Used in custom intersection shaders to report primitive intersections\n") +SLANG_RAW("/// @category raytracing\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[ForceInline]\n") +SLANG_RAW("[require(glsl_hlsl_spirv, raytracing_intersection)]\n") +SLANG_RAW("bool ReportHit(float tHit, uint hitKind, A attributes)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl:\n") +SLANG_RAW(" __intrinsic_asm \"ReportHit($0, $1, $2)\";\n") +SLANG_RAW(" case glsl:\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" [__vulkanHitAttributes]\n") +SLANG_RAW(" static A a;\n") +SLANG_RAW(" a = attributes;\n") +SLANG_RAW(" return __reportIntersection(tHit, hitKind);\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("/// Reports a hit optimized for OptiX.\n") +SLANG_RAW("/// @param tHit Distance along the ray where the intersection occurred\n") +SLANG_RAW("/// @param hitKind User-defined value identifying the type of hit\n") +SLANG_RAW("/// @param attribs Attribute values for the intersection\n") +SLANG_RAW("/// @return true if the hit was accepted, false if rejected\n") +SLANG_RAW("/// @remarks OptiX-specific version of ReportHit with optimized attribute handling\n") +SLANG_RAW("/// @category raytracing\n") +SLANG_RAW("__generic\n") +SLANG_RAW("[ForceInline]\n") +SLANG_RAW("[require(cuda_glsl_hlsl_spirv, raytracing_intersection)]\n") +SLANG_RAW("bool ReportHitOptix(float tHit, uint hitKind, expand each T attribs)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case cuda:\n") +SLANG_RAW(" __intrinsic_asm \"optixReportIntersection\";\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" return ReportHit(tHit, hitKind, makeTuple(expand each attribs));\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("// 10.3.4\n") +SLANG_RAW("/// Ignores the current intersection and continues traversal.\n") +SLANG_RAW("/// @remarks Used in any-hit shaders to reject potential intersections\n") +SLANG_RAW("/// @category raytracing\n") +SLANG_RAW("[require(cuda_glsl_hlsl_spirv, raytracing_anyhit)]\n") +SLANG_RAW("void IgnoreHit()\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"IgnoreHit\";\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"ignoreIntersectionEXT;\";\n") +SLANG_RAW(" case cuda: __intrinsic_asm \"optixIgnoreIntersection\";\n") +SLANG_RAW(" case spirv: \n") +SLANG_RAW(" spirv_asm \n") +SLANG_RAW(" { \n") +SLANG_RAW(" OpIgnoreIntersectionKHR; %_ = OpLabel \n") +SLANG_RAW(" };\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("// 10.3.5\n") +SLANG_RAW("/// Accepts the current intersection and terminates further traversal.\n") +SLANG_RAW("/// @remarks Used in any-hit shaders to immediately accept an intersection\n") +SLANG_RAW("/// @category raytracing\n") +SLANG_RAW("[require(cuda_glsl_hlsl_spirv, raytracing_anyhit)]\n") +SLANG_RAW("void AcceptHitAndEndSearch()\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"AcceptHitAndEndSearch\";\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"terminateRayEXT;\";\n") +SLANG_RAW(" case cuda: __intrinsic_asm 
\"optixTerminateRay\";\n") +SLANG_RAW(" case spirv: \n") +SLANG_RAW(" spirv_asm \n") +SLANG_RAW(" { \n") +SLANG_RAW(" OpTerminateRayKHR; %_ = OpLabel \n") +SLANG_RAW(" };\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("// 10.4 - System Values and Special Semantics\n") +SLANG_RAW("\n") +SLANG_RAW("// TODO: Many of these functions need to be restricted so that\n") +SLANG_RAW("// they can only be accessed from specific stages.\n") +SLANG_RAW("\n") +SLANG_RAW("// 10.4.1 - Ray Dispatch System Values\n") +SLANG_RAW("\n") +SLANG_RAW("/// Returns the current ray dispatch coordinates.\n") +SLANG_RAW("/// @return 3D index of the current ray being processed\n") +SLANG_RAW("/// @remarks Available in all ray tracing shader stages\n") +SLANG_RAW("/// @category raytracing\n") +SLANG_RAW("[NonUniformReturn]\n") +SLANG_RAW("[require(cuda_glsl_hlsl_spirv, raytracing_allstages)]\n") +SLANG_RAW("uint3 DispatchRaysIndex()\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"DispatchRaysIndex\";\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"(gl_LaunchIDEXT)\";\n") +SLANG_RAW(" case cuda: __intrinsic_asm \"optixGetLaunchIndex\";\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" return spirv_asm \n") +SLANG_RAW(" {\n") +SLANG_RAW(" result:$$uint3 = OpLoad builtin(LaunchIdKHR:uint3);\n") +SLANG_RAW(" };\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("/// Returns the dimensions of the ray dispatch.\n") +SLANG_RAW("/// @return 3D dimensions of the ray dispatch grid\n") +SLANG_RAW("/// @remarks Available in all ray tracing shader stages\n") +SLANG_RAW("/// @category raytracing\n") +SLANG_RAW("[require(cuda_glsl_hlsl_spirv, raytracing_allstages)]\n") +SLANG_RAW("uint3 DispatchRaysDimensions()\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"DispatchRaysDimensions\";\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"(gl_LaunchSizeEXT)\";\n") +SLANG_RAW(" case cuda: __intrinsic_asm \"optixGetLaunchDimensions\";\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" return spirv_asm \n") +SLANG_RAW(" {\n") +SLANG_RAW(" result:$$uint3 = OpLoad builtin(LaunchSizeKHR:uint3);\n") +SLANG_RAW(" };\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("// 10.4.2 - Ray System Values\n") +SLANG_RAW("\n") +SLANG_RAW("/// Returns the origin of the current ray in world space.\n") +SLANG_RAW("/// @return World-space position where the ray originated\n") +SLANG_RAW("/// @remarks Available in any-hit, closest-hit, intersection, and miss shaders\n") +SLANG_RAW("/// @category raytracing\n") +SLANG_RAW("[NonUniformReturn]\n") +SLANG_RAW("[require(cuda_glsl_hlsl_spirv, raytracing_anyhit_closesthit_intersection_miss)]\n") +SLANG_RAW("float3 WorldRayOrigin()\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"WorldRayOrigin\";\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"(gl_WorldRayOriginEXT)\";\n") +SLANG_RAW(" case cuda: __intrinsic_asm \"optixGetWorldRayOrigin\";\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" return spirv_asm \n") +SLANG_RAW(" {\n") +SLANG_RAW(" result:$$float3 = OpLoad builtin(WorldRayOriginKHR:float3);\n") +SLANG_RAW(" };\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("/// Returns the direction of the current ray in world space.\n") +SLANG_RAW("/// @return Normalized world-space direction vector of the ray\n") +SLANG_RAW("/// @remarks Available in any-hit, 
closest-hit, intersection, and miss shaders\n") +SLANG_RAW("/// @category raytracing\n") +SLANG_RAW("[NonUniformReturn]\n") +SLANG_RAW("[require(cuda_glsl_hlsl_spirv, raytracing_anyhit_closesthit_intersection_miss)]\n") +SLANG_RAW("float3 WorldRayDirection()\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"WorldRayDirection\";\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"(gl_WorldRayDirectionEXT)\";\n") +SLANG_RAW(" case cuda: __intrinsic_asm \"optixGetWorldRayDirection\";\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" return spirv_asm \n") +SLANG_RAW(" {\n") +SLANG_RAW(" result:$$float3 = OpLoad builtin(WorldRayDirectionKHR:float3);\n") +SLANG_RAW(" };\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("/// Returns the minimum valid intersection distance for the current ray.\n") +SLANG_RAW("/// @return Minimum distance along the ray where intersections are considered\n") +SLANG_RAW("/// @remarks Used to prevent self-intersections and near-plane clipping\n") +SLANG_RAW("/// @category raytracing\n") +SLANG_RAW("[NonUniformReturn]\n") +SLANG_RAW("[require(cuda_glsl_hlsl_spirv, raytracing_anyhit_closesthit_intersection_miss)]\n") +SLANG_RAW("float RayTMin()\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"RayTMin\";\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"(gl_RayTminEXT)\";\n") +SLANG_RAW(" case cuda: __intrinsic_asm \"optixGetRayTmin\";\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" return spirv_asm \n") +SLANG_RAW(" {\n") +SLANG_RAW(" result:$$float = OpLoad builtin(RayTminKHR:float);\n") +SLANG_RAW(" };\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("// Note: The `RayTCurrent()` intrinsic should translate to\n") +SLANG_RAW("// either `gl_HitTNV` (for hit shaders) or `gl_RayTmaxNV`\n") +SLANG_RAW("// (for intersection shaders). Right now we are handling this\n") +SLANG_RAW("// during code emission, for simplicity.\n") +SLANG_RAW("//\n") +SLANG_RAW("// TODO: Once the compiler supports a more refined concept\n") +SLANG_RAW("// of profiles/capabilities and overloading based on them,\n") +SLANG_RAW("// we should simply provide two overloads here, specialized\n") +SLANG_RAW("// to the appropriate Vulkan stages.\n") +SLANG_RAW("//\n") +SLANG_RAW("\n") +SLANG_RAW("/// Returns the current intersection distance or maximum ray distance.\n") +SLANG_RAW("/// @return Current t-value for hit shaders or maximum distance for intersection shaders\n") +SLANG_RAW("/// @remarks Interpretation depends on shader stage (hit vs. 
intersection)\n") +SLANG_RAW("/// @category raytracing\n") +SLANG_RAW("[NonUniformReturn]\n") +SLANG_RAW("[require(cuda_glsl_hlsl_spirv, raytracing_anyhit_closesthit_intersection_miss)]\n") +SLANG_RAW("float RayTCurrent()\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"RayTCurrent\";\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"(gl_RayTmaxEXT)\";\n") +SLANG_RAW(" case cuda: __intrinsic_asm \"optixGetRayTmax\";\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" return spirv_asm \n") +SLANG_RAW(" {\n") +SLANG_RAW(" result:$$float = OpLoad builtin(RayTmaxKHR:float);\n") +SLANG_RAW(" };\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("/// Returns the flags used when tracing the current ray.\n") +SLANG_RAW("/// @return Combination of RAY_FLAG values used for this ray\n") +SLANG_RAW("/// @remarks Allows shaders to modify behavior based on ray trace flags\n") +SLANG_RAW("/// @category raytracing\n") +SLANG_RAW("[require(cuda_glsl_hlsl_spirv, raytracing_anyhit_closesthit_intersection_miss)]\n") +SLANG_RAW("uint RayFlags()\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"RayFlags\";\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"(gl_IncomingRayFlagsEXT)\";\n") +SLANG_RAW(" case cuda: __intrinsic_asm \"optixGetRayFlags\";\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" return spirv_asm \n") +SLANG_RAW(" {\n") +SLANG_RAW(" result:$$uint = OpLoad builtin(IncomingRayFlagsKHR:uint);\n") +SLANG_RAW(" };\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("// 10.4.3 - Primitive/Object Space System Values\n") +SLANG_RAW("\n") +SLANG_RAW("/// Returns the index of the current instance in the acceleration structure.\n") +SLANG_RAW("/// @return Zero-based index of the current instance\n") +SLANG_RAW("/// @remarks Available in any-hit, closest-hit, and intersection shaders\n") +SLANG_RAW("/// @category raytracing\n") +SLANG_RAW("[NonUniformReturn]\n") +SLANG_RAW("[require(cuda_glsl_hlsl_spirv, raytracing_anyhit_closesthit_intersection)]\n") +SLANG_RAW("uint InstanceIndex()\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"InstanceIndex\";\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"(gl_InstanceID)\";\n") +SLANG_RAW(" case cuda: __intrinsic_asm \"optixGetInstanceIndex\";\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" return spirv_asm \n") +SLANG_RAW(" {\n") +SLANG_RAW(" result:$$uint = OpLoad builtin(InstanceId:uint);\n") +SLANG_RAW(" };\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("/// Returns the user-provided ID of the current instance.\n") +SLANG_RAW("/// @return Custom instance identifier set during acceleration structure build\n") +SLANG_RAW("/// @remarks Used for instance-specific shader behavior\n") +SLANG_RAW("/// @category raytracing\n") +SLANG_RAW("[NonUniformReturn]\n") +SLANG_RAW("[require(cuda_glsl_hlsl_spirv, raytracing_anyhit_closesthit_intersection)]\n") +SLANG_RAW("uint InstanceID()\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"InstanceID\";\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"(gl_InstanceCustomIndexEXT)\";\n") +SLANG_RAW(" case cuda: __intrinsic_asm \"optixGetInstanceId\";\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" return spirv_asm \n") +SLANG_RAW(" {\n") +SLANG_RAW(" result:$$uint = OpLoad builtin(InstanceCustomIndexKHR:uint);\n") +SLANG_RAW(" };\n") 
+SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("/// Returns the index of the current primitive within its geometry.\n") +SLANG_RAW("/// @return Zero-based index of the intersected primitive\n") +SLANG_RAW("/// @remarks Available in any-hit, closest-hit, and intersection shaders\n") +SLANG_RAW("/// @category raytracing\n") +SLANG_RAW("[NonUniformReturn]\n") +SLANG_RAW("[require(cuda_glsl_hlsl_spirv, raytracing_anyhit_closesthit_intersection)]\n") +SLANG_RAW("uint PrimitiveIndex()\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"PrimitiveIndex\";\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"(gl_PrimitiveID)\";\n") +SLANG_RAW(" case cuda: __intrinsic_asm \"optixGetPrimitiveIndex\";\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" return spirv_asm \n") +SLANG_RAW(" {\n") +SLANG_RAW(" result:$$uint = OpLoad builtin(PrimitiveId:uint);\n") +SLANG_RAW(" };\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("/// Returns the ray origin in object space of the current instance.\n") +SLANG_RAW("/// @return Object-space position where the ray originated\n") +SLANG_RAW("/// @remarks Transformed by the inverse of the instance transform\n") +SLANG_RAW("/// @category raytracing\n") +SLANG_RAW("[NonUniformReturn]\n") +SLANG_RAW("[require(cuda_glsl_hlsl_spirv, raytracing_anyhit_closesthit_intersection)]\n") +SLANG_RAW("float3 ObjectRayOrigin()\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"ObjectRayOrigin\";\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"(gl_ObjectRayOriginEXT)\";\n") +SLANG_RAW(" case cuda: __intrinsic_asm \"optixGetObjectRayOrigin\";\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" return spirv_asm \n") +SLANG_RAW(" {\n") +SLANG_RAW(" result:$$float3 = OpLoad builtin(ObjectRayOriginKHR:float3);\n") +SLANG_RAW(" };\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("/// Returns the ray direction in object space of the current instance.\n") +SLANG_RAW("/// @return Object-space direction vector of the ray\n") +SLANG_RAW("/// @remarks Transformed by the inverse of the instance transform\n") +SLANG_RAW("/// @category raytracing\n") +SLANG_RAW("[NonUniformReturn]\n") +SLANG_RAW("[require(cuda_glsl_hlsl_spirv, raytracing_anyhit_closesthit_intersection)]\n") +SLANG_RAW("float3 ObjectRayDirection()\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"ObjectRayDirection\";\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"(gl_ObjectRayDirectionEXT)\";\n") +SLANG_RAW(" case cuda: __intrinsic_asm \"optixGetObjectRayDirection\";\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" return spirv_asm \n") +SLANG_RAW(" {\n") +SLANG_RAW(" result:$$float3 = OpLoad builtin(ObjectRayDirectionKHR:float3);\n") +SLANG_RAW(" };\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("// TODO: optix has an optixGetObjectToWorldTransformMatrix function that returns 12\n") +SLANG_RAW("// floats by reference.\n") +SLANG_RAW("/// Returns the object-to-world transformation matrix (3x4).\n") +SLANG_RAW("/// @return 3x4 matrix transforming from object to world space\n") +SLANG_RAW("/// @remarks Includes position and orientation of the current instance\n") +SLANG_RAW("/// @category raytracing\n") +SLANG_RAW("[NonUniformReturn]\n") +SLANG_RAW("[require(glsl_hlsl_spirv, raytracing_anyhit_closesthit_intersection)]\n") +SLANG_RAW("float3x4 ObjectToWorld3x4()\n") +SLANG_RAW("{\n") 
+SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"ObjectToWorld3x4\";\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"transpose(gl_ObjectToWorldEXT)\";\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" return spirv_asm \n") +SLANG_RAW(" {\n") +SLANG_RAW(" %mat:$$float4x3 = OpLoad builtin(ObjectToWorldKHR:float4x3);\n") +SLANG_RAW(" result:$$float3x4 = OpTranspose %mat;\n") +SLANG_RAW(" };\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("/// Returns the world-to-object transformation matrix (3x4).\n") +SLANG_RAW("/// @return 3x4 matrix transforming from world to object space\n") +SLANG_RAW("/// @remarks Inverse of the object-to-world transform\n") +SLANG_RAW("/// @category raytracing\n") +SLANG_RAW("[NonUniformReturn]\n") +SLANG_RAW("[require(glsl_hlsl_spirv, raytracing_anyhit_closesthit_intersection)]\n") +SLANG_RAW("float3x4 WorldToObject3x4()\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"WorldToObject3x4\";\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"transpose(gl_WorldToObjectEXT)\";\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" return spirv_asm \n") +SLANG_RAW(" {\n") +SLANG_RAW(" %mat:$$float4x3 = OpLoad builtin(WorldToObjectKHR:float4x3);\n") +SLANG_RAW(" result:$$float3x4 = OpTranspose %mat;\n") +SLANG_RAW(" };\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("/// Returns the object-to-world transformation matrix (4x3).\n") +SLANG_RAW("/// @return 4x3 matrix transforming from object to world space\n") +SLANG_RAW("/// @remarks Transposed version of ObjectToWorld3x4\n") +SLANG_RAW("/// @category raytracing\n") +SLANG_RAW("[NonUniformReturn]\n") +SLANG_RAW("[require(glsl_hlsl_spirv, raytracing_anyhit_closesthit_intersection)]\n") +SLANG_RAW("float4x3 ObjectToWorld4x3()\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"ObjectToWorld4x3\";\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"(gl_ObjectToWorldEXT)\";\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" return spirv_asm \n") +SLANG_RAW(" {\n") +SLANG_RAW(" result:$$float4x3 = OpLoad builtin(ObjectToWorldKHR:float4x3);\n") +SLANG_RAW(" };\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("/// Returns the world-to-object transformation matrix (4x3).\n") +SLANG_RAW("/// @return 4x3 matrix transforming from world to object space\n") +SLANG_RAW("/// @remarks Transposed version of WorldToObject3x4\n") +SLANG_RAW("/// @category raytracing\n") +SLANG_RAW("[NonUniformReturn]\n") +SLANG_RAW("[require(glsl_hlsl_spirv, raytracing_anyhit_closesthit_intersection)]\n") +SLANG_RAW("float4x3 WorldToObject4x3()\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"WorldToObject4x3\";\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"(gl_WorldToObjectEXT)\";\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" return spirv_asm \n") +SLANG_RAW(" {\n") +SLANG_RAW(" result:$$float4x3 = OpLoad builtin(WorldToObjectKHR:float4x3);\n") +SLANG_RAW(" };\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("// NOTE!\n") +SLANG_RAW("// The name of the following functions may change when DXR supports\n") +SLANG_RAW("// a feature similar to the `GL_NV_ray_tracing_motion_blur` extension\n") +SLANG_RAW("\n") +SLANG_RAW("/// Returns the current time value for motion blur.\n") +SLANG_RAW("/// @return Time value between 0 and 1 for motion blur interpolation\n") +SLANG_RAW("/// 
@remarks Available when motion blur extension is enabled\n") +SLANG_RAW("/// @category raytracing\n") +SLANG_RAW("__glsl_extension(GL_NV_ray_tracing_motion_blur)\n") +SLANG_RAW("__glsl_extension(GL_EXT_ray_tracing)\n") +SLANG_RAW("[NonUniformReturn]\n") +SLANG_RAW("[require(glsl_hlsl_spirv, raytracing_motionblur_anyhit_closesthit_intersection_miss)]\n") +SLANG_RAW("float RayCurrentTime()\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"RayCurrentTime\";\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"(gl_CurrentRayTimeNV)\";\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" return spirv_asm \n") +SLANG_RAW(" {\n") +SLANG_RAW(" result:$$float = OpLoad builtin(CurrentRayTimeNV:float);\n") +SLANG_RAW(" };\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("// Note: The provisional DXR spec included these unadorned\n") +SLANG_RAW("// `ObjectToWorld()` and `WorldToObject()` functions, so\n") +SLANG_RAW("// we will forward them to the new names as a convenience\n") +SLANG_RAW("// for users who are porting their code.\n") +SLANG_RAW("//\n") +SLANG_RAW("// TODO: Should we provide a deprecation warning on these\n") +SLANG_RAW("// declarations, so that users can know they aren't coding\n") +SLANG_RAW("// against the final spec?\n") +SLANG_RAW("//\n") +SLANG_RAW("/// Alias for ObjectToWorld3x4.\n") +SLANG_RAW("/// @category raytracing\n") +SLANG_RAW("[NonUniformReturn] float3x4 ObjectToWorld() { return ObjectToWorld3x4(); }\n") +SLANG_RAW("/// Alias for WorldToObject3x4.\n") +SLANG_RAW("/// @category raytracing\n") +SLANG_RAW("[NonUniformReturn] float3x4 WorldToObject() { return WorldToObject3x4(); }\n") +SLANG_RAW("\n") +SLANG_RAW("// 10.4.4 - Hit Specific System values\n") +SLANG_RAW("/// Returns the type of intersection that was found.\n") +SLANG_RAW("/// @return Hit kind value (HIT_KIND_TRIANGLE_FRONT_FACE, HIT_KIND_TRIANGLE_BACK_FACE, or custom value)\n") +SLANG_RAW("/// @remarks Available in any-hit and closest-hit shaders\n") +SLANG_RAW("/// @category raytracing\n") +SLANG_RAW("[NonUniformReturn]\n") +SLANG_RAW("[require(cuda_glsl_hlsl_spirv, raytracing_anyhit_closesthit)]\n") +SLANG_RAW("uint HitKind()\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"HitKind\";\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"(gl_HitKindEXT)\";\n") +SLANG_RAW(" case cuda: __intrinsic_asm \"optixGetHitKind\";\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" return spirv_asm \n") +SLANG_RAW(" {\n") +SLANG_RAW(" result:$$uint = OpLoad builtin(HitKindKHR:uint);\n") +SLANG_RAW(" };\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("// Pre-defined hit kinds (not documented explicitly)\n") +SLANG_RAW("/// Predefined hit kind value for front-facing triangle intersections.\n") +SLANG_RAW("/// @category raytracing\n") +SLANG_RAW("static const uint HIT_KIND_TRIANGLE_FRONT_FACE = 254;\n") +SLANG_RAW("\n") +SLANG_RAW("/// Predefined hit kind value for back-facing triangle intersections.\n") +SLANG_RAW("/// @category raytracing\n") +SLANG_RAW("static const uint HIT_KIND_TRIANGLE_BACK_FACE = 255;\n") +SLANG_RAW("\n") +SLANG_RAW("//\n") +SLANG_RAW("// Shader Model 6.4\n") +SLANG_RAW("//\n") +SLANG_RAW("\n") +SLANG_RAW("/// Treats `left` and `right` as 4-component vectors of `UInt8` and computes `dot(left, right) + acc`\n") +SLANG_RAW("/// @category math\n") +SLANG_RAW("uint dot4add_u8packed(uint left, uint right, uint acc);\n") +SLANG_RAW("\n") +SLANG_RAW("/// Treats 
`left` and `right` as 4-component vectors of `Int8` and computes `dot(left, right) + acc`\n") +SLANG_RAW("/// @category math\n") +SLANG_RAW("int dot4add_i8packed(uint left, uint right, int acc);\n") +SLANG_RAW("\n") +SLANG_RAW("/// Computes `dot(left, right) + acc`.\n") +SLANG_RAW("/// May not produce infinities or NaNs for intermediate results that overflow the range of `half`\n") +SLANG_RAW("/// @category math\n") +SLANG_RAW("float dot2add(float2 left, float2 right, float acc);\n") +SLANG_RAW("\n") +SLANG_RAW("//\n") +SLANG_RAW("// Shader Model 6.5\n") +SLANG_RAW("//\n") +SLANG_RAW("\n") +SLANG_RAW("//\n") +SLANG_RAW("// Mesh Shaders\n") +SLANG_RAW("//\n") +SLANG_RAW("\n") +SLANG_RAW("/// Set the number of output vertices and primitives for a mesh shader invocation.\n") +SLANG_RAW("/// @category meshshading Mesh shading\n") +SLANG_RAW("__glsl_extension(GL_EXT_mesh_shader)\n") +SLANG_RAW("__glsl_version(450)\n") +SLANG_RAW("[require(glsl_hlsl_metal_spirv, meshshading)]\n") +SLANG_RAW("[noRefInline]\n") +SLANG_RAW("void SetMeshOutputCounts(uint vertexCount, uint primitiveCount)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl:\n") +SLANG_RAW(" __intrinsic_asm \"SetMeshOutputCounts\";\n") +SLANG_RAW(" case glsl:\n") +SLANG_RAW(" __intrinsic_asm \"SetMeshOutputsEXT\";\n") +SLANG_RAW(" case metal:\n") +SLANG_RAW(" __intrinsic_asm \"_slang_mesh.set_primitive_count($1)\";\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" return spirv_asm\n") +SLANG_RAW(" {\n") +SLANG_RAW(" OpCapability MeshShadingEXT;\n") +SLANG_RAW(" OpExtension \"SPV_EXT_mesh_shader\";\n") +SLANG_RAW(" OpSetMeshOutputsEXT $vertexCount $primitiveCount;\n") +SLANG_RAW(" };\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("/// Specify the number of downstream mesh shader thread groups to invoke from an amplification shader,\n") +SLANG_RAW("/// and provide the values for per-mesh payload parameters.\n") +SLANG_RAW("/// @return This function doesn't return.\n") +SLANG_RAW("/// @category meshshading\n") +SLANG_RAW("[KnownBuiltin(\"DispatchMesh\")]\n") +SLANG_RAW("[require(glsl_hlsl_metal_spirv, meshshading)]\n") +SLANG_RAW("[noRefInline]\n") +SLANG_RAW("void DispatchMesh

(uint threadGroupCountX, uint threadGroupCountY, uint threadGroupCountZ, __ref P meshPayload)\n") +SLANG_RAW("{\n") +SLANG_RAW(" // This function cannot be inlined due to a legalization pass happening mid-way through processing\n") +SLANG_RAW(" // and later more processing happening to the function which requires eventual inlining.\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl:\n") +SLANG_RAW(" __intrinsic_asm \"DispatchMesh\";\n") +SLANG_RAW(" case glsl:\n") +SLANG_RAW(" // This intrinsic doesn't take into account writing meshPayload. That\n") +SLANG_RAW(" // is dealt with separately by 'legalizeDispatchMeshPayloadForGLSL'.\n") +SLANG_RAW(" __intrinsic_asm \"EmitMeshTasksEXT($0, $1, $2)\";\n") +SLANG_RAW(" case metal:\n") +SLANG_RAW(" __intrinsic_asm \"*_slang_mesh_payload = *$3; _slang_mgp.set_threadgroups_per_grid(uint3($0, $1, $2)); return;\";\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" return spirv_asm\n") +SLANG_RAW(" {\n") +SLANG_RAW(" OpCapability MeshShadingEXT;\n") +SLANG_RAW(" OpExtension \"SPV_EXT_mesh_shader\";\n") +SLANG_RAW(" OpEmitMeshTasksEXT $threadGroupCountX $threadGroupCountY $threadGroupCountZ &meshPayload;\n") +SLANG_RAW(" // OpEmitMeshTasksExt is a terminator, so we need to start a new\n") +SLANG_RAW(" // block to hold whatever comes after this intrinsic\n") +SLANG_RAW(" %_ = OpLabel\n") +SLANG_RAW(" };\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("//\n") +SLANG_RAW("// \"Sampler feedback\" types `FeedbackTexture2D` and `FeedbackTexture2DArray`.\n") +SLANG_RAW("//\n") +SLANG_RAW("\n") +SLANG_RAW("// https://microsoft.github.io/DirectX-Specs/d3d/SamplerFeedback.html\n") +SLANG_RAW("\n") +SLANG_RAW("// The docs describe these as 'types' but their syntax makes them seem enum like, and enum is a simpler way to implement them\n") +SLANG_RAW("// But slang enums are always 'enum class like', so I use an empty struct type here\n") +SLANG_RAW("\n") +SLANG_RAW("[sealed]\n") +SLANG_RAW("[builtin]\n") +SLANG_RAW("interface __BuiltinSamplerFeedbackType {};\n") +SLANG_RAW("\n") +SLANG_RAW("/// @category texture_types\n") +SLANG_RAW("[sealed]\n") +SLANG_RAW("__magic_type(FeedbackType, ") +SLANG_SPLICE(int(FeedbackType::Kind::MinMip) +) +SLANG_RAW(")\n") +SLANG_RAW("__target_intrinsic(hlsl, SAMPLER_FEEDBACK_MIN_MIP)\n") +SLANG_RAW("struct SAMPLER_FEEDBACK_MIN_MIP : __BuiltinSamplerFeedbackType {};\n") +SLANG_RAW("\n") +SLANG_RAW("/// @category texture_types\n") +SLANG_RAW("[sealed]\n") +SLANG_RAW("__magic_type(FeedbackType, ") +SLANG_SPLICE(int(FeedbackType::Kind::MipRegionUsed) +) +SLANG_RAW(")\n") +SLANG_RAW("__target_intrinsic(hlsl, SAMPLER_FEEDBACK_MIP_REGION_USED)\n") +SLANG_RAW("struct SAMPLER_FEEDBACK_MIP_REGION_USED : __BuiltinSamplerFeedbackType {};\n") +SLANG_RAW("\n") +SLANG_RAW("// All of these objects are write-only resources that point to a special kind of unordered access view meant for sampler feedback.\n") +SLANG_RAW("\n") +SLANG_RAW("__generic\n") +SLANG_RAW("extension _Texture\n") +SLANG_RAW("{\n") +SLANG_RAW(" // With Clamp\n") +SLANG_RAW("\n") +SLANG_RAW(" [require(cpp_hlsl)]\n") +SLANG_RAW(" void WriteSamplerFeedback(Texture2D tex, SamplerState samp, float2 location, float clamp)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case cpp: __intrinsic_asm \"($0).WriteSamplerFeedback($1, $2, $3, $4)\";\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"($0).WriteSamplerFeedback($1, $2, $3, $4)\";\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW("\n") +SLANG_RAW(" 
[require(cpp_hlsl)]\n") +SLANG_RAW(" void WriteSamplerFeedbackBias(Texture2D tex, SamplerState samp, float2 location, float bias, float clamp)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case cpp: __intrinsic_asm \"($0).WriteSamplerFeedbackBias($1, $2, $3, $4, $5)\";\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"($0).WriteSamplerFeedbackBias($1, $2, $3, $4, $5)\";\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW("\n") +SLANG_RAW(" [require(cpp_hlsl)]\n") +SLANG_RAW(" void WriteSamplerFeedbackGrad(Texture2D tex, SamplerState samp, float2 location, float2 ddx, float2 ddy, float clamp)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case cpp: __intrinsic_asm \"($0).WriteSamplerFeedbackGrad($1, $2, $3, $4, $5, $6)\";\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"($0).WriteSamplerFeedbackGrad($1, $2, $3, $4, $5, $6)\";\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW("\n") +SLANG_RAW(" // Level\n") +SLANG_RAW("\n") +SLANG_RAW(" [require(cpp_hlsl)]\n") +SLANG_RAW(" void WriteSamplerFeedbackLevel(Texture2D tex, SamplerState samp, float2 location, float lod)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case cpp: __intrinsic_asm \"($0).WriteSamplerFeedbackLevel($1, $2, $3, $4)\";\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"($0).WriteSamplerFeedbackLevel($1, $2, $3, $4)\";\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW("\n") +SLANG_RAW(" // Without Clamp\n") +SLANG_RAW("\n") +SLANG_RAW(" [require(cpp_hlsl)]\n") +SLANG_RAW(" void WriteSamplerFeedback(Texture2D tex, SamplerState samp, float2 location)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case cpp: __intrinsic_asm \"($0).WriteSamplerFeedback($1, $2, $3)\";\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"($0).WriteSamplerFeedback($1, $2, $3)\";\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW("\n") +SLANG_RAW(" [require(cpp_hlsl)]\n") +SLANG_RAW(" void WriteSamplerFeedbackBias(Texture2D tex, SamplerState samp, float2 location, float bias)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case cpp: __intrinsic_asm \"($0).WriteSamplerFeedbackBias($1, $2, $3, $4)\";\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"($0).WriteSamplerFeedbackBias($1, $2, $3, $4)\";\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW("\n") +SLANG_RAW(" [require(cpp_hlsl)]\n") +SLANG_RAW(" void WriteSamplerFeedbackGrad(Texture2D tex, SamplerState samp, float2 location, float2 ddx, float2 ddy)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case cpp: __intrinsic_asm \"($0).WriteSamplerFeedbackGrad($1, $2, $3, $4, $5)\";\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"($0).WriteSamplerFeedbackGrad($1, $2, $3, $4, $5)\";\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW("};\n") +SLANG_RAW("\n") +SLANG_RAW("__generic\n") +SLANG_RAW("extension _Texture\n") +SLANG_RAW("{\n") +SLANG_RAW(" // With Clamp\n") +SLANG_RAW("\n") +SLANG_RAW(" [require(cpp_hlsl)]\n") +SLANG_RAW(" void WriteSamplerFeedback(Texture2DArray texArray, SamplerState samp, float3 location, float clamp)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case cpp: __intrinsic_asm \"($0).WriteSamplerFeedback($1, $2, $3, $4)\";\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"($0).WriteSamplerFeedback($1, $2, $3, $4)\";\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW("\n") +SLANG_RAW(" [require(cpp_hlsl)]\n") 
+SLANG_RAW(" void WriteSamplerFeedbackBias(Texture2DArray texArray, SamplerState samp, float3 location, float bias, float clamp)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case cpp: __intrinsic_asm \"($0).WriteSamplerFeedbackBias($1, $2, $3, $4, $5)\";\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"($0).WriteSamplerFeedbackBias($1, $2, $3, $4, $5)\";\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW("\n") +SLANG_RAW(" [require(cpp_hlsl)]\n") +SLANG_RAW(" void WriteSamplerFeedbackGrad(Texture2DArray texArray, SamplerState samp, float3 location, float3 ddx, float3 ddy, float clamp)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case cpp: __intrinsic_asm \"($0).WriteSamplerFeedbackGrad($1, $2, $3, $4, $5, $6)\";\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"($0).WriteSamplerFeedbackGrad($1, $2, $3, $4, $5, $6)\";\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW("\n") +SLANG_RAW(" // Level\n") +SLANG_RAW("\n") +SLANG_RAW(" [require(cpp_hlsl)]\n") +SLANG_RAW(" void WriteSamplerFeedbackLevel(Texture2DArray texArray, SamplerState samp, float3 location, float lod)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case cpp: __intrinsic_asm \"($0).WriteSamplerFeedbackLevel($1, $2, $3, $4)\";\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"($0).WriteSamplerFeedbackLevel($1, $2, $3, $4)\";\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW("\n") +SLANG_RAW(" // Without Clamp\n") +SLANG_RAW("\n") +SLANG_RAW(" [require(cpp_hlsl)]\n") +SLANG_RAW(" void WriteSamplerFeedback(Texture2DArray texArray, SamplerState samp, float3 location)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case cpp: __intrinsic_asm \"($0).WriteSamplerFeedback($1, $2, $3)\";\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"($0).WriteSamplerFeedback($1, $2, $3)\";\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW("\n") +SLANG_RAW(" [require(cpp_hlsl)]\n") +SLANG_RAW(" void WriteSamplerFeedbackBias(Texture2DArray texArray, SamplerState samp, float3 location, float bias)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case cpp: __intrinsic_asm \"($0).WriteSamplerFeedbackBias($1, $2, $3, $4)\";\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"($0).WriteSamplerFeedbackBias($1, $2, $3, $4)\";\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW("\n") +SLANG_RAW(" [require(cpp_hlsl)]\n") +SLANG_RAW(" void WriteSamplerFeedbackGrad(Texture2DArray texArray, SamplerState samp, float3 location, float3 ddx, float3 ddy)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case cpp: __intrinsic_asm \"($0).WriteSamplerFeedbackGrad($1, $2, $3, $4, $5)\";\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"($0).WriteSamplerFeedbackGrad($1, $2, $3, $4, $5)\";\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW("};\n") +SLANG_RAW("\n") +SLANG_RAW("//\n") +SLANG_RAW("// DXR 1.1 and `TraceRayInline` support\n") +SLANG_RAW("//\n") +SLANG_RAW("\n") +SLANG_RAW("/// Returns the index of the geometry that was hit in an intersection, any-hit, or closest-hit shader.\n") +SLANG_RAW("/// @return Zero-based index of the geometry in the current instance\n") +SLANG_RAW("/// @remarks Available in intersection, any-hit, and closest-hit shaders\n") +SLANG_RAW("/// @category raytracing\n") +SLANG_RAW("__glsl_extension(GL_EXT_ray_tracing)\n") +SLANG_RAW("[NonUniformReturn]\n") +SLANG_RAW("[require(glsl_hlsl_spirv, 
raytracing_anyhit_closesthit_intersection)]\n") +SLANG_RAW("uint GeometryIndex()\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"GeometryIndex\";\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"(gl_GeometryIndexEXT)\";\n") +SLANG_RAW(" case spirv: return spirv_asm {\n") +SLANG_RAW(" result:$$uint = OpLoad builtin(RayGeometryIndexKHR:uint);\n") +SLANG_RAW(" };\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("/// Get the vertex positions of the currently hit triangle in any-hit or closest-hit shader.\n") +SLANG_RAW("/// https://github.com/KhronosGroup/GLSL/blob/master/extensions/ext/GLSL_EXT_ray_tracing_position_fetch.txt\n") +SLANG_RAW("/// @param index Index of the vertex (0-2)\n") +SLANG_RAW("/// @return World-space position of the specified vertex\n") +SLANG_RAW("/// @remarks Requires ray tracing position fetch extension\n") +SLANG_RAW("/// @see GL_EXT_ray_tracing_position_fetch\n") +SLANG_RAW("/// @category raytracing\n") +SLANG_RAW("__glsl_extension(GL_EXT_ray_tracing)\n") +SLANG_RAW("__glsl_extension(GL_EXT_ray_tracing_position_fetch)\n") +SLANG_RAW("[ForceInline]\n") +SLANG_RAW("[require(glsl_spirv, raytracing_position)]\n") +SLANG_RAW("float3 HitTriangleVertexPosition(uint index)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case glsl:\n") +SLANG_RAW(" __intrinsic_asm \"gl_HitTriangleVertexPositionsEXT[$0]\";\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" return spirv_asm {\n") +SLANG_RAW(" OpCapability RayTracingKHR;\n") +SLANG_RAW(" OpCapability RayTracingPositionFetchKHR;\n") +SLANG_RAW(" OpExtension \"SPV_KHR_ray_tracing\";\n") +SLANG_RAW(" OpExtension \"SPV_KHR_ray_tracing_position_fetch\";\n") +SLANG_RAW(" %_ptr_Input_v3float = OpTypePointer Input $$float3;\n") +SLANG_RAW(" %addr : %_ptr_Input_v3float = OpAccessChain builtin(HitTriangleVertexPositionsKHR:float3[3]) $index;\n") +SLANG_RAW(" result:$$float3 = OpLoad %addr;\n") +SLANG_RAW(" };\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("/// Status indicating whether and what type of hit has been committed in a RayQuery.\n") +SLANG_RAW("/// @category raytracing\n") +SLANG_RAW("typedef uint COMMITTED_STATUS;\n") +SLANG_RAW("\n") +SLANG_RAW("/// Indicates no hit has been committed yet.\n") +SLANG_RAW("/// @category raytracing\n") +SLANG_RAW("static const COMMITTED_STATUS COMMITTED_NOTHING = 0;\n") +SLANG_RAW("\n") +SLANG_RAW("/// Closest hit is a triangle.\n") +SLANG_RAW("/// This could be an opaque triangle hit found by the fixed-function\n") +SLANG_RAW("/// traversal and intersection implementation, or a non-opaque\n") +SLANG_RAW("/// triangle hit committed by user code with `RayQuery.CommitNonOpaqueTriangleHit`.\n") +SLANG_RAW("/// @category raytracing\n") +SLANG_RAW("static const COMMITTED_STATUS COMMITTED_TRIANGLE_HIT = 1;\n") +SLANG_RAW("\n") +SLANG_RAW("/// Closest hit is a procedural primitive.\n") +SLANG_RAW("/// A procedural hit primitive is committed using `RayQuery.CommitProceduralPrimitiveHit`.\n") +SLANG_RAW("/// @category raytracing\n") +SLANG_RAW("static const COMMITTED_STATUS COMMITTED_PROCEDURAL_PRIMITIVE_HIT = 2;\n") +SLANG_RAW("\n") +SLANG_RAW("/// Type of candidate hit that a `RayQuery` is pausing at.\n") +SLANG_RAW("/// A `RayQuery` can automatically commit hits with opaque triangles,\n") +SLANG_RAW("/// but yields to user code for other hits to allow them to be\n") +SLANG_RAW("/// dismissed or committed.\n") +SLANG_RAW("/// @category raytracing\n") 
+SLANG_RAW("typedef uint CANDIDATE_TYPE;\n") +SLANG_RAW("\n") +SLANG_RAW("/// Candidate hit is a non-opaque triangle.\n") +SLANG_RAW("/// @category raytracing\n") +SLANG_RAW("static const CANDIDATE_TYPE CANDIDATE_NON_OPAQUE_TRIANGLE = 0;\n") +SLANG_RAW("\n") +SLANG_RAW("/// Candidate hit is a procedural primitive.\n") +SLANG_RAW("/// @category raytracing\n") +SLANG_RAW("static const CANDIDATE_TYPE CANDIDATE_PROCEDURAL_PRIMITIVE = 1;\n") +SLANG_RAW("\n") +SLANG_RAW("/// Handle to state of an in-progress ray-tracing query.\n") +SLANG_RAW("/// The ray query is effectively a coroutine that user shader\n") +SLANG_RAW("/// code can resume to continue tracing the ray, and which yields\n") +SLANG_RAW("/// back to the user code at interesting events along the ray.\n") +SLANG_RAW("//\n") +SLANG_RAW("/// Note: The treatment of the `RayQuery` type in Slang does not\n") +SLANG_RAW("/// perfectly match its semantics in vanilla HLSL in some corner\n") +SLANG_RAW("/// cases. Specifically, a `RayQuery` in vanilla HLSL is an\n") +SLANG_RAW("/// opaque handle to mutable storage, and assigning a `RayQuery`\n") +SLANG_RAW("/// or passing one as a parameter will only copy the *handle*,\n") +SLANG_RAW("/// potentially resulting in aliasing of the underlying mutable\n") +SLANG_RAW("/// storage.\n") +SLANG_RAW("///\n") +SLANG_RAW("/// In contrast, Slang considers a `RayQuery` to own its mutable\n") +SLANG_RAW("/// state, and (because the API does not support cloning of queries),\n") +SLANG_RAW("/// `RayQuery` values are non-copyable (aka \"move-only\").\n") +SLANG_RAW("///\n") +SLANG_RAW("/// The main place where this arises as a consideration is when\n") +SLANG_RAW("/// passing a `RayQuery` down into a function that will perform\n") +SLANG_RAW("/// mutating operations on it (e.g., `TraceRay` or `Proceed`):\n") +SLANG_RAW("/// ```\n") +SLANG_RAW("/// void myFunc( inout RayQuery q )\n") +SLANG_RAW("/// {\n") +SLANG_RAW("/// q.Proceed();\n") +SLANG_RAW("/// }\n") +SLANG_RAW("/// ```\n") +SLANG_RAW("/// In Slang, a parameter like `q` above should be declared `inout`.\n") +SLANG_RAW("/// HLSL does not care about whether `q` is declared `inout` or not.\n") +SLANG_RAW("///\n") +SLANG_RAW("///cannot use a cap for struct with unequal target support\n") +SLANG_RAW("///since it will propegate rules to children.\n") +SLANG_RAW("/// @category raytracing Ray-tracing\n") +SLANG_RAW("__glsl_extension(GL_EXT_ray_query)\n") +SLANG_RAW("[__NonCopyableType]\n") +SLANG_RAW("__intrinsic_type(") +SLANG_SPLICE(kIROp_RayQueryType +) +SLANG_RAW(")\n") +SLANG_RAW("struct RayQuery \n") +SLANG_RAW("{\n") +SLANG_RAW(" /// Initialize a new ray query in its default state.\n") +SLANG_RAW(" __intrinsic_op(") +SLANG_SPLICE(kIROp_AllocateOpaqueHandle +) +SLANG_RAW(")\n") +SLANG_RAW(" __init();\n") +SLANG_RAW("\n") +SLANG_RAW("\n") +SLANG_RAW(" __glsl_extension(GL_EXT_ray_query)\n") +SLANG_RAW(" [require(glsl_spirv, rayquery)]\n") +SLANG_RAW(" [mutating]\n") +SLANG_RAW(" void __rayQueryInitializeEXT(\n") +SLANG_RAW(" RaytracingAccelerationStructure accelerationStructure,\n") +SLANG_RAW(" RAY_FLAG rayFlags,\n") +SLANG_RAW(" uint instanceInclusionMask,\n") +SLANG_RAW(" float3 origin,\n") +SLANG_RAW(" float tMin,\n") +SLANG_RAW(" float3 direction,\n") +SLANG_RAW(" float tMax)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"rayQueryInitializeEXT($0, $1, $2, $3, $4, $5, $6, $7)\";\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" spirv_asm {\n") +SLANG_RAW(" OpRayQueryInitializeKHR &this 
$accelerationStructure $rayFlags $instanceInclusionMask $origin $tMin $direction $tMax;\n") +SLANG_RAW(" };\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW("\n") +SLANG_RAW(" /// Initialize a ray-tracing query.\n") +SLANG_RAW(" ///\n") +SLANG_RAW(" /// This method may be called on a \"fresh\" ray query, or\n") +SLANG_RAW(" /// on one that is already tracing a ray. In the latter\n") +SLANG_RAW(" /// case any state related to the ray previously being\n") +SLANG_RAW(" /// traced is overwritten.\n") +SLANG_RAW(" ///\n") +SLANG_RAW(" /// The `rayFlags` here will be bitwise ORed with\n") +SLANG_RAW(" /// the `rayFlags` passed as a generic argument to\n") +SLANG_RAW(" /// `RayQuery` to get the effective ray flags, which\n") +SLANG_RAW(" /// must obey any API-imposed restrictions.\n") +SLANG_RAW(" ///\n") +SLANG_RAW(" /// @param accelerationStructure Acceleration structure to traverse\n") +SLANG_RAW(" /// @param rayFlags Additional flags for this trace (combined with rayFlagsGeneric)\n") +SLANG_RAW(" /// @param instanceInclusionMask Mask for filtering instance visibility\n") +SLANG_RAW(" /// @param ray Description of ray parameters (origin, direction, tMin, tMax)\n") +SLANG_RAW(" [__unsafeForceInlineEarly]\n") +SLANG_RAW(" [mutating]\n") +SLANG_RAW(" [require(glsl_hlsl_spirv, rayquery)]\n") +SLANG_RAW(" void TraceRayInline(\n") +SLANG_RAW(" RaytracingAccelerationStructure accelerationStructure,\n") +SLANG_RAW(" RAY_FLAG rayFlags,\n") +SLANG_RAW(" uint instanceInclusionMask,\n") +SLANG_RAW(" RayDesc ray)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \".TraceRayInline\";\n") +SLANG_RAW(" case glsl:\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" __rayQueryInitializeEXT(\n") +SLANG_RAW(" accelerationStructure,\n") +SLANG_RAW(" rayFlags | rayFlagsGeneric,\n") +SLANG_RAW(" instanceInclusionMask,\n") +SLANG_RAW(" ray.Origin,\n") +SLANG_RAW(" ray.TMin,\n") +SLANG_RAW(" ray.Direction,\n") +SLANG_RAW(" ray.TMax);\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW(" \n") +SLANG_RAW(" /// Resume the ray query coroutine.\n") +SLANG_RAW(" ///\n") +SLANG_RAW(" /// If the coroutine suspends because of encountering\n") +SLANG_RAW(" /// a candidate hit that cannot be resolved with fixed-function\n") +SLANG_RAW(" /// logic, this function returns `true`, and the `Candidate*()`\n") +SLANG_RAW(" /// functions should be used by application code to resolve\n") +SLANG_RAW(" /// the candidate hit (by either committing or ignoring it).\n") +SLANG_RAW(" ///\n") +SLANG_RAW(" /// If the coroutine terminates because traversal is\n") +SLANG_RAW(" /// complete (or has been aborted), this function returns\n") +SLANG_RAW(" /// `false`, and application code should use the `Committed*()`\n") +SLANG_RAW(" /// functions to appropriately handle the closest hit (if any)\n") +SLANG_RAW(" /// that was found.\n") +SLANG_RAW(" ///\n") +SLANG_RAW(" /// @return true if a candidate hit needs evaluation, false if traversal is complete\n") +SLANG_RAW(" /// @remarks When true is returned, use Candidate* methods to evaluate the hit\n") +SLANG_RAW(" __glsl_extension(GL_EXT_ray_query)\n") +SLANG_RAW(" [mutating]\n") +SLANG_RAW(" [require(glsl_hlsl_spirv, rayquery)]\n") +SLANG_RAW(" bool Proceed()\n") +SLANG_RAW(" {\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \".Proceed\";\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"rayQueryProceedEXT\";\n") +SLANG_RAW(" case spirv: return spirv_asm\n") +SLANG_RAW(" {\n") +SLANG_RAW(" 
result:$$bool = OpRayQueryProceedKHR &this\n") +SLANG_RAW(" };\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW("\n") +SLANG_RAW(" /// Terminate ray traversal immediately.\n") +SLANG_RAW(" /// @remarks Causes subsequent Proceed() calls to return false\n") +SLANG_RAW(" __glsl_extension(GL_EXT_ray_query)\n") +SLANG_RAW(" [mutating]\n") +SLANG_RAW(" [require(glsl_hlsl_spirv, rayquery)]\n") +SLANG_RAW(" void Abort()\n") +SLANG_RAW(" {\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \".Abort\";\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"rayQueryTerminateEXT\";\n") +SLANG_RAW(" case spirv: spirv_asm { OpRayQueryTerminateKHR &this };\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW("\n") +SLANG_RAW(" /// Commit the current non-opaque triangle hit as the closest hit.\n") +SLANG_RAW(" __glsl_extension(GL_EXT_ray_query)\n") +SLANG_RAW(" [__NoSideEffect]\n") +SLANG_RAW(" [mutating]\n") +SLANG_RAW(" [require(glsl_hlsl_spirv, rayquery)]\n") +SLANG_RAW(" void CommitNonOpaqueTriangleHit()\n") +SLANG_RAW(" {\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \".CommitNonOpaqueTriangleHit\";\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"rayQueryConfirmIntersectionEXT\";\n") +SLANG_RAW(" case spirv: spirv_asm { OpRayQueryConfirmIntersectionKHR &this };\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW("\n") +SLANG_RAW(" /// Commit a procedural primitive hit at the specified distance.\n") +SLANG_RAW(" /// @param t Distance along the ray where the hit occurred\n") +SLANG_RAW(" __glsl_extension(GL_EXT_ray_query)\n") +SLANG_RAW(" [__NoSideEffect]\n") +SLANG_RAW(" [mutating]\n") +SLANG_RAW(" [require(glsl_hlsl_spirv, rayquery)]\n") +SLANG_RAW(" void CommitProceduralPrimitiveHit(float t)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \".CommitProceduralPrimitiveHit\";\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"rayQueryGenerateIntersectionEXT\";\n") +SLANG_RAW(" case spirv: spirv_asm { OpRayQueryGenerateIntersectionKHR &this $t };\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW("\n") +SLANG_RAW(" /// Get the type of candidate hit being considered.\n") +SLANG_RAW(" ///\n") +SLANG_RAW(" /// The ray query coroutine will suspend when it encounters\n") +SLANG_RAW(" /// a hit that cannot be resolved with fixed-function logic\n") +SLANG_RAW(" /// (either a non-opaque triangle or a procedural primitive).\n") +SLANG_RAW(" /// In either of those cases, `CandidateType()` will return\n") +SLANG_RAW(" /// the kind of candidate hit that must be resolved by\n") +SLANG_RAW(" /// user code.\n") +SLANG_RAW(" ///\n") +SLANG_RAW(" __glsl_extension(GL_EXT_ray_query)\n") +SLANG_RAW(" [__NoSideEffect]\n") +SLANG_RAW(" [NonUniformReturn]\n") +SLANG_RAW(" [require(glsl_hlsl_spirv, rayquery)]\n") +SLANG_RAW(" CANDIDATE_TYPE CandidateType()\n") +SLANG_RAW(" {\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \".CandidateType\";\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"rayQueryGetIntersectionTypeEXT($0, false)\";\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" uint RayQueryCandidateIntersectionKHR = 0;\n") +SLANG_RAW(" return spirv_asm {\n") +SLANG_RAW(" result:$$CANDIDATE_TYPE = OpRayQueryGetIntersectionTypeKHR &this $RayQueryCandidateIntersectionKHR;\n") +SLANG_RAW(" };\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW("\n") +SLANG_RAW(" /// Returns the status of the committed (closest) hit.\n") +SLANG_RAW(" 
/// @return COMMITTED_STATUS indicating type of committed hit, if any\n") +SLANG_RAW(" /// @remarks Valid after traversal is complete (Proceed() returns false)\n") +SLANG_RAW(" __glsl_extension(GL_EXT_ray_query)\n") +SLANG_RAW(" [__NoSideEffect]\n") +SLANG_RAW(" [NonUniformReturn]\n") +SLANG_RAW(" [require(glsl_hlsl_spirv, rayquery)]\n") +SLANG_RAW(" COMMITTED_STATUS CommittedStatus()\n") +SLANG_RAW(" {\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \".CommittedStatus\";\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"rayQueryGetIntersectionTypeEXT($0, true)\";\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" uint RayQueryCommittedIntersectionKHR = 1;\n") +SLANG_RAW(" return spirv_asm\n") +SLANG_RAW(" {\n") +SLANG_RAW(" result:$$COMMITTED_STATUS = OpRayQueryGetIntersectionTypeKHR &this $RayQueryCommittedIntersectionKHR;\n") +SLANG_RAW(" };\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW("\n") +SLANG_RAW(" /// Checks if the candidate procedural primitive is non-opaque.\n") +SLANG_RAW(" /// @return true if the primitive is non-opaque and requires shader evaluation\n") +SLANG_RAW(" __glsl_extension(GL_EXT_ray_query)\n") +SLANG_RAW(" [__NoSideEffect]\n") +SLANG_RAW(" [NonUniformReturn]\n") +SLANG_RAW(" [require(glsl_hlsl_spirv, rayquery)]\n") +SLANG_RAW(" bool CandidateProceduralPrimitiveNonOpaque()\n") +SLANG_RAW(" {\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \".CandidateProceduralPrimitiveNonOpaque\";\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"(!rayQueryGetIntersectionCandidateAABBOpaqueEXT($0))\";\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" uint iCandidateOrCommitted = 0;\n") +SLANG_RAW(" return spirv_asm\n") +SLANG_RAW(" {\n") +SLANG_RAW(" %rr:$$bool = OpRayQueryGetIntersectionCandidateAABBOpaqueKHR &this;\n") +SLANG_RAW(" result:$$bool = OpLogicalNot %rr;\n") +SLANG_RAW(" };\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW("\n") +SLANG_RAW(" /// Gets the distance to the candidate triangle hit.\n") +SLANG_RAW(" /// @return t-value along the ray where intersection occurred\n") +SLANG_RAW(" __glsl_extension(GL_EXT_ray_query)\n") +SLANG_RAW(" [__NoSideEffect]\n") +SLANG_RAW(" [NonUniformReturn]\n") +SLANG_RAW(" [require(glsl_hlsl_spirv, rayquery)]\n") +SLANG_RAW(" float CandidateTriangleRayT()\n") +SLANG_RAW(" {\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \".CandidateTriangleRayT\";\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"rayQueryGetIntersectionTEXT($0, false)\";\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" uint iCandidateOrCommitted = 0;\n") +SLANG_RAW(" return spirv_asm\n") +SLANG_RAW(" {\n") +SLANG_RAW(" result:$$float = OpRayQueryGetIntersectionTKHR &this $iCandidateOrCommitted;\n") +SLANG_RAW(" };\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW("\n") +SLANG_RAW(" /// Gets the distance to the committed (closest) hit.\n") +SLANG_RAW(" /// @return t-value along the ray where the closest hit occurred\n") +SLANG_RAW(" __glsl_extension(GL_EXT_ray_query)\n") +SLANG_RAW(" [__NoSideEffect]\n") +SLANG_RAW(" [NonUniformReturn]\n") +SLANG_RAW(" [require(glsl_hlsl_spirv, rayquery)]\n") +SLANG_RAW(" float CommittedRayT()\n") +SLANG_RAW(" {\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \".CommittedRayT\";\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"rayQueryGetIntersectionTEXT($0, true)\";\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" uint iCandidateOrCommitted = 1;\n") 
+SLANG_RAW(" return spirv_asm\n") +SLANG_RAW(" {\n") +SLANG_RAW(" result:$$float = OpRayQueryGetIntersectionTKHR &this $iCandidateOrCommitted;\n") +SLANG_RAW(" };\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW("\n") +SLANG_RAW(" /// Gets the custom index of the instance containing the candidate hit.\n") +SLANG_RAW(" /// @return User-provided instance identifier\n") +SLANG_RAW(" /// @remarks GLSL/SPIRV only\n") +SLANG_RAW(" __glsl_extension(GL_EXT_ray_query)\n") +SLANG_RAW(" [__NoSideEffect]\n") +SLANG_RAW(" [require(glsl_spirv, rayquery)]\n") +SLANG_RAW(" int CandidateRayInstanceCustomIndex()\n") +SLANG_RAW(" {\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"rayQueryGetIntersectionInstanceCustomIndexEXT($0, false)\";\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" uint iCandidateOrCommitted = 0;\n") +SLANG_RAW(" return spirv_asm\n") +SLANG_RAW(" {\n") +SLANG_RAW(" result:$$int = OpRayQueryGetIntersectionInstanceCustomIndexKHR &this $iCandidateOrCommitted;\n") +SLANG_RAW(" };\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW("\n") +SLANG_RAW(" /// Gets the custom index of the instance containing the committed hit.\n") +SLANG_RAW(" /// @return User-provided instance identifier\n") +SLANG_RAW(" /// @remarks GLSL/SPIRV only\n") +SLANG_RAW(" __glsl_extension(GL_EXT_ray_query)\n") +SLANG_RAW(" [__NoSideEffect]\n") +SLANG_RAW(" [require(glsl_spirv, rayquery)]\n") +SLANG_RAW(" int CommittedRayInstanceCustomIndex()\n") +SLANG_RAW(" {\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"rayQueryGetIntersectionInstanceCustomIndexEXT($0, true)\";\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" uint iCandidateOrCommitted = 1;\n") +SLANG_RAW(" return spirv_asm\n") +SLANG_RAW(" {\n") +SLANG_RAW(" result:$$int = OpRayQueryGetIntersectionInstanceCustomIndexKHR &this $iCandidateOrCommitted;\n") +SLANG_RAW(" };\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW("\n") +SLANG_RAW(" /// Gets the instance ID of the candidate hit.\n") +SLANG_RAW(" /// @return System-assigned instance identifier\n") +SLANG_RAW(" /// @remarks GLSL/SPIRV only\n") +SLANG_RAW(" __glsl_extension(GL_EXT_ray_query)\n") +SLANG_RAW(" [__NoSideEffect]\n") +SLANG_RAW(" [require(glsl_spirv, rayquery)]\n") +SLANG_RAW(" int CandidateRayInstanceId()\n") +SLANG_RAW(" {\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"rayQueryGetIntersectionInstanceIdEXT($0, false)\";\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" uint iCandidateOrCommitted = 0;\n") +SLANG_RAW(" return spirv_asm\n") +SLANG_RAW(" {\n") +SLANG_RAW(" result:$$int = OpRayQueryGetIntersectionInstanceIdKHR &this $iCandidateOrCommitted;\n") +SLANG_RAW(" };\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW("\n") +SLANG_RAW(" /// Gets the instance ID of the committed hit.\n") +SLANG_RAW(" /// @return System-assigned instance identifier\n") +SLANG_RAW(" /// @remarks GLSL/SPIRV only\n") +SLANG_RAW(" __glsl_extension(GL_EXT_ray_query)\n") +SLANG_RAW(" [__NoSideEffect]\n") +SLANG_RAW(" [require(glsl_spirv, rayquery)]\n") +SLANG_RAW(" int CommittedRayInstanceId()\n") +SLANG_RAW(" {\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"rayQueryGetIntersectionInstanceIdEXT($0, true)\";\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" uint iCandidateOrCommitted = 1;\n") +SLANG_RAW(" return spirv_asm\n") +SLANG_RAW(" {\n") +SLANG_RAW(" result:$$int = OpRayQueryGetIntersectionInstanceIdKHR &this 
$iCandidateOrCommitted;\n") +SLANG_RAW(" };\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW("\n") +SLANG_RAW(" /// Gets the shader binding table offset for the instance containing the candidate hit.\n") +SLANG_RAW(" /// @return Offset into the shader binding table for hit group selection\n") +SLANG_RAW(" /// @remarks GLSL/SPIRV only\n") +SLANG_RAW(" __glsl_extension(GL_EXT_ray_query)\n") +SLANG_RAW(" [__NoSideEffect]\n") +SLANG_RAW(" [require(glsl_spirv, rayquery)]\n") +SLANG_RAW(" uint CandidateRayInstanceShaderBindingTableRecordOffset()\n") +SLANG_RAW(" {\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"rayQueryGetIntersectionInstanceShaderBindingTableRecordOffsetEXT($0, false)\";\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" uint iCandidateOrCommitted = 0;\n") +SLANG_RAW(" return spirv_asm\n") +SLANG_RAW(" {\n") +SLANG_RAW(" result:$$uint = OpRayQueryGetIntersectionInstanceShaderBindingTableRecordOffsetKHR &this $iCandidateOrCommitted;\n") +SLANG_RAW(" };\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW("\n") +SLANG_RAW(" /// Gets the shader binding table offset for the instance containing the committed hit.\n") +SLANG_RAW(" /// @return Offset into the shader binding table for hit group selection\n") +SLANG_RAW(" /// @remarks GLSL/SPIRV only\n") +SLANG_RAW(" __glsl_extension(GL_EXT_ray_query)\n") +SLANG_RAW(" [__NoSideEffect]\n") +SLANG_RAW(" [require(glsl_spirv, rayquery)]\n") +SLANG_RAW(" uint CommittedRayInstanceShaderBindingTableRecordOffset()\n") +SLANG_RAW(" {\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"rayQueryGetIntersectionInstanceShaderBindingTableRecordOffsetEXT($0, true)\";\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" uint iCandidateOrCommitted = 1;\n") +SLANG_RAW(" return spirv_asm\n") +SLANG_RAW(" {\n") +SLANG_RAW(" result:$$uint = OpRayQueryGetIntersectionInstanceShaderBindingTableRecordOffsetKHR &this $iCandidateOrCommitted;\n") +SLANG_RAW(" };\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW("\n") +SLANG_RAW(" /// Gets the geometry index for the candidate hit.\n") +SLANG_RAW(" /// @return Zero-based index of the geometry in the instance\n") +SLANG_RAW(" /// @remarks GLSL/SPIRV only\n") +SLANG_RAW(" __glsl_extension(GL_EXT_ray_query)\n") +SLANG_RAW(" [__NoSideEffect]\n") +SLANG_RAW(" [require(glsl_spirv, rayquery)]\n") +SLANG_RAW(" int CandidateRayGeometryIndex()\n") +SLANG_RAW(" {\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"rayQueryGetIntersectionGeometryIndexEXT($0, false)\";\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" uint iCandidateOrCommitted = 0;\n") +SLANG_RAW(" return spirv_asm\n") +SLANG_RAW(" {\n") +SLANG_RAW(" result:$$int = OpRayQueryGetIntersectionGeometryIndexKHR &this $iCandidateOrCommitted;\n") +SLANG_RAW(" };\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW("\n") +SLANG_RAW(" /// Gets the geometry index for the committed hit.\n") +SLANG_RAW(" /// @return Zero-based index of the geometry in the instance\n") +SLANG_RAW(" /// @remarks GLSL/SPIRV only\n") +SLANG_RAW(" __glsl_extension(GL_EXT_ray_query)\n") +SLANG_RAW(" [__NoSideEffect]\n") +SLANG_RAW(" [require(glsl_spirv, rayquery)]\n") +SLANG_RAW(" int CommittedRayGeometryIndex()\n") +SLANG_RAW(" {\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"rayQueryGetIntersectionGeometryIndexEXT($0, true)\";\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" uint iCandidateOrCommitted = 1;\n") 
+SLANG_RAW(" return spirv_asm\n") +SLANG_RAW(" {\n") +SLANG_RAW(" result:$$int = OpRayQueryGetIntersectionGeometryIndexKHR &this $iCandidateOrCommitted;\n") +SLANG_RAW(" };\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW("\n") +SLANG_RAW(" /// Gets the primitive index for the candidate hit.\n") +SLANG_RAW(" /// @return Zero-based index of the primitive in the geometry\n") +SLANG_RAW(" /// @remarks GLSL/SPIRV only\n") +SLANG_RAW(" __glsl_extension(GL_EXT_ray_query)\n") +SLANG_RAW(" [__NoSideEffect]\n") +SLANG_RAW(" [require(glsl_spirv, rayquery)]\n") +SLANG_RAW(" int CandidateRayPrimitiveIndex()\n") +SLANG_RAW(" {\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"rayQueryGetIntersectionPrimitiveIndexEXT($0, false)\";\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" uint iCandidateOrCommitted = 0;\n") +SLANG_RAW(" return spirv_asm\n") +SLANG_RAW(" {\n") +SLANG_RAW(" result:$$int = OpRayQueryGetIntersectionPrimitiveIndexKHR &this $iCandidateOrCommitted;\n") +SLANG_RAW(" };\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW("\n") +SLANG_RAW(" /// Gets the primitive index for the committed hit.\n") +SLANG_RAW(" /// @return Zero-based index of the primitive in the geometry\n") +SLANG_RAW(" /// @remarks GLSL/SPIRV only\n") +SLANG_RAW(" __glsl_extension(GL_EXT_ray_query)\n") +SLANG_RAW(" [__NoSideEffect]\n") +SLANG_RAW(" [require(glsl_spirv, rayquery)]\n") +SLANG_RAW(" int CommittedRayPrimitiveIndex()\n") +SLANG_RAW(" {\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"rayQueryGetIntersectionPrimitiveIndexEXT($0, true)\";\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" uint iCandidateOrCommitted = 1;\n") +SLANG_RAW(" return spirv_asm\n") +SLANG_RAW(" {\n") +SLANG_RAW(" result:$$int = OpRayQueryGetIntersectionPrimitiveIndexKHR &this $iCandidateOrCommitted;\n") +SLANG_RAW(" };\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW("\n") +SLANG_RAW(" /// Gets the barycentric coordinates of the candidate hit point.\n") +SLANG_RAW(" /// @return UV barycentric coordinates on the triangle\n") +SLANG_RAW(" /// @remarks GLSL/SPIRV only\n") +SLANG_RAW(" __glsl_extension(GL_EXT_ray_query)\n") +SLANG_RAW(" [__NoSideEffect]\n") +SLANG_RAW(" [require(glsl_spirv, rayquery)]\n") +SLANG_RAW(" float2 CandidateRayBarycentrics()\n") +SLANG_RAW(" {\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"rayQueryGetIntersectionBarycentricsEXT($0, false)\";\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" uint iCandidateOrCommitted = 0;\n") +SLANG_RAW(" return spirv_asm\n") +SLANG_RAW(" {\n") +SLANG_RAW(" result:$$float2 = OpRayQueryGetIntersectionBarycentricsKHR &this $iCandidateOrCommitted;\n") +SLANG_RAW(" };\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW("\n") +SLANG_RAW(" /// Gets the barycentric coordinates of the committed hit point.\n") +SLANG_RAW(" /// @return UV barycentric coordinates on the triangle\n") +SLANG_RAW(" /// @remarks GLSL/SPIRV only\n") +SLANG_RAW(" __glsl_extension(GL_EXT_ray_query)\n") +SLANG_RAW(" [__NoSideEffect]\n") +SLANG_RAW(" [require(glsl_spirv, rayquery)]\n") +SLANG_RAW(" float2 CommittedRayBarycentrics()\n") +SLANG_RAW(" {\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"rayQueryGetIntersectionBarycentricsEXT($0, true)\";\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" uint iCandidateOrCommitted = 1;\n") +SLANG_RAW(" return spirv_asm\n") +SLANG_RAW(" {\n") +SLANG_RAW(" result:$$float2 = 
OpRayQueryGetIntersectionBarycentricsKHR &this $iCandidateOrCommitted;\n") +SLANG_RAW(" };\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW("\n") +SLANG_RAW(" /// Checks if the candidate hit is on the front face of a triangle.\n") +SLANG_RAW(" /// @return true if hit is on triangle front face\n") +SLANG_RAW(" /// @remarks GLSL/SPIRV only\n") +SLANG_RAW(" __glsl_extension(GL_EXT_ray_query)\n") +SLANG_RAW(" [__NoSideEffect]\n") +SLANG_RAW(" [require(glsl_spirv, rayquery)]\n") +SLANG_RAW(" bool CandidateRayFrontFace()\n") +SLANG_RAW(" {\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"rayQueryGetIntersectionFrontFaceEXT($0, false)\";\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" uint iCandidateOrCommitted = 0;\n") +SLANG_RAW(" return spirv_asm\n") +SLANG_RAW(" {\n") +SLANG_RAW(" result:$$bool = OpRayQueryGetIntersectionFrontFaceKHR &this $iCandidateOrCommitted;\n") +SLANG_RAW(" };\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW("\n") +SLANG_RAW(" /// Checks if the committed hit is on the front face of a triangle.\n") +SLANG_RAW(" /// @return true if hit is on triangle front face\n") +SLANG_RAW(" /// @remarks GLSL/SPIRV only\n") +SLANG_RAW(" __glsl_extension(GL_EXT_ray_query)\n") +SLANG_RAW(" [__NoSideEffect]\n") +SLANG_RAW(" [require(glsl_spirv, rayquery)]\n") +SLANG_RAW(" bool CommittedRayFrontFace()\n") +SLANG_RAW(" {\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"rayQueryGetIntersectionFrontFaceEXT($0, true)\";\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" uint iCandidateOrCommitted = 1;\n") +SLANG_RAW(" return spirv_asm\n") +SLANG_RAW(" {\n") +SLANG_RAW(" result:$$bool = OpRayQueryGetIntersectionFrontFaceKHR &this $iCandidateOrCommitted;\n") +SLANG_RAW(" };\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW("\n") +SLANG_RAW(" /// Gets the ray direction in object space for the candidate hit.\n") +SLANG_RAW(" /// @return Direction vector transformed into instance's object space\n") +SLANG_RAW(" /// @remarks GLSL/SPIRV only\n") +SLANG_RAW(" __glsl_extension(GL_EXT_ray_query)\n") +SLANG_RAW(" [__NoSideEffect]\n") +SLANG_RAW(" [require(glsl_spirv, rayquery)]\n") +SLANG_RAW(" float3 CandidateRayObjectRayDirection()\n") +SLANG_RAW(" {\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"rayQueryGetIntersectionObjectRayDirectionEXT($0, false)\";\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" uint iCandidateOrCommitted = 0;\n") +SLANG_RAW(" return spirv_asm\n") +SLANG_RAW(" {\n") +SLANG_RAW(" result:$$float3 = OpRayQueryGetIntersectionObjectRayDirectionKHR &this $iCandidateOrCommitted;\n") +SLANG_RAW(" };\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW("\n") +SLANG_RAW(" /// Gets the ray direction in object space for the committed hit.\n") +SLANG_RAW(" /// @return Direction vector transformed into instance's object space\n") +SLANG_RAW(" /// @remarks GLSL/SPIRV only\n") +SLANG_RAW(" __glsl_extension(GL_EXT_ray_query)\n") +SLANG_RAW(" [__NoSideEffect]\n") +SLANG_RAW(" [require(glsl_spirv, rayquery)]\n") +SLANG_RAW(" float3 CommittedRayObjectRayDirection()\n") +SLANG_RAW(" {\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"rayQueryGetIntersectionObjectRayDirectionEXT($0, true)\";\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" uint iCandidateOrCommitted = 1;\n") +SLANG_RAW(" return spirv_asm\n") +SLANG_RAW(" {\n") +SLANG_RAW(" result:$$float3 = OpRayQueryGetIntersectionObjectRayDirectionKHR &this 
$iCandidateOrCommitted;\n") +SLANG_RAW(" };\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW("\n") +SLANG_RAW(" /// Gets the ray origin in object space for the candidate hit.\n") +SLANG_RAW(" /// @return Origin point transformed into instance's object space\n") +SLANG_RAW(" /// @remarks GLSL/SPIRV only\n") +SLANG_RAW(" __glsl_extension(GL_EXT_ray_query)\n") +SLANG_RAW(" [__NoSideEffect]\n") +SLANG_RAW(" [require(glsl_spirv, rayquery)]\n") +SLANG_RAW(" float3 CandidateRayObjectRayOrigin()\n") +SLANG_RAW(" {\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"rayQueryGetIntersectionObjectRayOriginEXT($0, false)\";\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" uint iCandidateOrCommitted = 0;\n") +SLANG_RAW(" return spirv_asm\n") +SLANG_RAW(" {\n") +SLANG_RAW(" result:$$float3 = OpRayQueryGetIntersectionObjectRayOriginKHR &this $iCandidateOrCommitted;\n") +SLANG_RAW(" };\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW("\n") +SLANG_RAW(" /// Gets the ray origin in object space for the committed hit.\n") +SLANG_RAW(" /// @return Origin point transformed into instance's object space\n") +SLANG_RAW(" /// @remarks GLSL/SPIRV only\n") +SLANG_RAW(" __glsl_extension(GL_EXT_ray_query)\n") +SLANG_RAW(" [__NoSideEffect]\n") +SLANG_RAW(" [require(glsl_spirv, rayquery)]\n") +SLANG_RAW(" float3 CommittedRayObjectRayOrigin()\n") +SLANG_RAW(" {\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"rayQueryGetIntersectionObjectRayOriginEXT($0, true)\";\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" uint iCandidateOrCommitted = 1;\n") +SLANG_RAW(" return spirv_asm\n") +SLANG_RAW(" {\n") +SLANG_RAW(" result:$$float3 = OpRayQueryGetIntersectionObjectRayOriginKHR &this $iCandidateOrCommitted;\n") +SLANG_RAW(" };\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW("\n") +SLANG_RAW(" /// Gets the object-to-world transform matrix for the candidate hit instance.\n") +SLANG_RAW(" /// @return 4x3 matrix transforming from object to world space\n") +SLANG_RAW(" /// @remarks GLSL/SPIRV only\n") +SLANG_RAW(" __glsl_extension(GL_EXT_ray_query)\n") +SLANG_RAW(" [__NoSideEffect]\n") +SLANG_RAW(" [require(glsl_spirv, rayquery)]\n") +SLANG_RAW(" float4x3 CandidateRayObjectToWorld()\n") +SLANG_RAW(" {\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"rayQueryGetIntersectionObjectToWorldEXT($0, false)\";\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" uint iCandidateOrCommitted = 0;\n") +SLANG_RAW(" return spirv_asm\n") +SLANG_RAW(" {\n") +SLANG_RAW(" result:$$float4x3 = OpRayQueryGetIntersectionObjectToWorldKHR &this $iCandidateOrCommitted;\n") +SLANG_RAW(" };\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW("\n") +SLANG_RAW(" /// Gets the object-to-world transform matrix for the committed hit instance.\n") +SLANG_RAW(" /// @return 4x3 matrix transforming from object to world space\n") +SLANG_RAW(" /// @remarks GLSL/SPIRV only\n") +SLANG_RAW(" __glsl_extension(GL_EXT_ray_query)\n") +SLANG_RAW(" [__NoSideEffect]\n") +SLANG_RAW(" [require(glsl_spirv, rayquery)]\n") +SLANG_RAW(" float4x3 CommittedRayObjectToWorld()\n") +SLANG_RAW(" {\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"rayQueryGetIntersectionObjectToWorldEXT($0, true)\";\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" uint iCandidateOrCommitted = 1;\n") +SLANG_RAW(" return spirv_asm\n") +SLANG_RAW(" {\n") +SLANG_RAW(" result:$$float4x3 = 
OpRayQueryGetIntersectionObjectToWorldKHR &this $iCandidateOrCommitted;\n") +SLANG_RAW(" };\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW("\n") +SLANG_RAW(" /// Gets the world-to-object transform matrix for the candidate hit instance.\n") +SLANG_RAW(" /// @return 4x3 matrix transforming from world to object space\n") +SLANG_RAW(" /// @remarks GLSL/SPIRV only\n") +SLANG_RAW(" __glsl_extension(GL_EXT_ray_query)\n") +SLANG_RAW(" [__NoSideEffect]\n") +SLANG_RAW(" [require(glsl_spirv, rayquery)]\n") +SLANG_RAW(" float4x3 CandidateRayWorldToObject()\n") +SLANG_RAW(" {\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"rayQueryGetIntersectionWorldToObjectEXT($0, false)\";\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" uint iCandidateOrCommitted = 0;\n") +SLANG_RAW(" return spirv_asm\n") +SLANG_RAW(" {\n") +SLANG_RAW(" result:$$float4x3 = OpRayQueryGetIntersectionWorldToObjectKHR &this $iCandidateOrCommitted;\n") +SLANG_RAW(" };\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW("\n") +SLANG_RAW(" /// Gets the world-to-object transform matrix for the committed hit instance.\n") +SLANG_RAW(" /// @return 4x3 matrix transforming from world to object space\n") +SLANG_RAW(" /// @remarks GLSL/SPIRV only\n") +SLANG_RAW(" __glsl_extension(GL_EXT_ray_query)\n") +SLANG_RAW(" [__NoSideEffect]\n") +SLANG_RAW(" [require(glsl_spirv, rayquery)]\n") +SLANG_RAW(" float4x3 CommittedRayWorldToObject()\n") +SLANG_RAW(" {\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"rayQueryGetIntersectionWorldToObjectEXT($0, true)\";\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" uint iCandidateOrCommitted = 1;\n") +SLANG_RAW(" return spirv_asm\n") +SLANG_RAW(" {\n") +SLANG_RAW(" OpRayQueryGetIntersectionWorldToObjectKHR $$float4x3 result &this $iCandidateOrCommitted;\n") +SLANG_RAW(" };\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW("///~\n") +SLANG_RAW("\n") + + const char* kCandidateCommitted[] = {"Candidate", "Committed"}; + + // Access Candidate and Committed Matrices. + for (uint32_t candidateOrCommitted = 0; candidateOrCommitted < 2; candidateOrCommitted++) + { + auto ccName = kCandidateCommitted[candidateOrCommitted]; + auto ccTF = candidateOrCommitted == 0 ? 
"false" : "true"; +SLANG_RAW("#line 17443 \"hlsl.meta.slang\"") +SLANG_RAW("\n") +SLANG_RAW("\n") +SLANG_RAW(" __glsl_extension(GL_EXT_ray_query)\n") +SLANG_RAW(" __glsl_extension(GL_EXT_ray_tracing_position_fetch)\n") +SLANG_RAW(" [require(glsl, rayquery_position)]\n") +SLANG_RAW(" [__NoSideEffect]\n") +SLANG_RAW(" void __glslGetIntersectionTriangleVertexPositions") +SLANG_SPLICE(ccName +) +SLANG_RAW("(out float3 arr[3])\n") +SLANG_RAW(" {\n") +SLANG_RAW(" __intrinsic_asm \"rayQueryGetIntersectionTriangleVertexPositionsEXT($0, ") +SLANG_SPLICE(ccTF +) +SLANG_RAW(", $1)\";\n") +SLANG_RAW(" };\n") +SLANG_RAW("\n") +SLANG_RAW(" /// Gets the triangle vertex positions for an intersection.\n") +SLANG_RAW(" /// @return Array of three vertex positions in world space\n") +SLANG_RAW(" /// @remarks Requires ray query position fetch extension\n") +SLANG_RAW(" __glsl_extension(GL_EXT_ray_query)\n") +SLANG_RAW(" [require(glsl, rayquery_position)]\n") +SLANG_RAW(" [require(spirv, rayquery_position)]\n") +SLANG_RAW(" [__NoSideEffect]\n") +SLANG_RAW(" float3[3] ") +SLANG_SPLICE(ccName +) +SLANG_RAW("GetIntersectionTriangleVertexPositions()\n") +SLANG_RAW(" {\n") +SLANG_RAW(" typedef float3[3] float3Arr3;\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case glsl:\n") +SLANG_RAW(" float3 output[3];\n") +SLANG_RAW(" __glslGetIntersectionTriangleVertexPositions") +SLANG_SPLICE(ccName +) +SLANG_RAW("(output);\n") +SLANG_RAW(" return output;\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" uint iCandidateOrCommitted = ") +SLANG_SPLICE(candidateOrCommitted +) +SLANG_RAW(";\n") +SLANG_RAW(" return spirv_asm\n") +SLANG_RAW(" {\n") +SLANG_RAW(" OpCapability RayQueryKHR;\n") +SLANG_RAW(" OpCapability RayQueryPositionFetchKHR;\n") +SLANG_RAW(" OpExtension \"SPV_KHR_ray_query\";\n") +SLANG_RAW(" OpExtension \"SPV_KHR_ray_tracing_position_fetch\";\n") +SLANG_RAW(" result: $$float3Arr3 = OpRayQueryGetIntersectionTriangleVertexPositionsKHR &this $iCandidateOrCommitted;\n") +SLANG_RAW(" };\n") +SLANG_RAW(" }\n") +SLANG_RAW(" };\n") +SLANG_RAW("\n") +SLANG_RAW(" // CandidateObjectToWorld3x4, CandidateWorldToObject4x3\n") +SLANG_RAW(" // CommittedObjectToWorld3x4, CommittedObjectToWorld4x3\n") + + const char* kRayQueryMatrixNames[] = {"ObjectToWorld", "WorldToObject"}; + for (auto matName : kRayQueryMatrixNames) { + +SLANG_RAW("#line 17488 \"hlsl.meta.slang\"") +SLANG_RAW("\n") +SLANG_RAW("\n") +SLANG_RAW(" /// Gets the object-to-world transform as a 3x4 matrix.\n") +SLANG_RAW(" /// @return 3x4 matrix transforming from object to world space\n") +SLANG_RAW(" /// @remarks Available for both candidate and committed hits\n") +SLANG_RAW(" __glsl_extension(GL_EXT_ray_query)\n") +SLANG_RAW(" [__NoSideEffect]\n") +SLANG_RAW(" [NonUniformReturn]\n") +SLANG_RAW(" [require(glsl_hlsl_spirv, rayquery)]\n") +SLANG_RAW(" float3x4 ") +SLANG_SPLICE(ccName +) +SLANG_SPLICE(matName +) +SLANG_RAW("3x4()\n") +SLANG_RAW(" {\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"transpose(rayQueryGetIntersection") +SLANG_SPLICE(matName +) +SLANG_RAW("EXT($0, ") +SLANG_SPLICE(ccTF +) +SLANG_RAW("))\";\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \".") +SLANG_SPLICE(ccName +) +SLANG_SPLICE(matName +) +SLANG_RAW("3x4\";\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" uint iCandidateOrCommitted = ") +SLANG_SPLICE(candidateOrCommitted +) +SLANG_RAW(";\n") +SLANG_RAW(" return spirv_asm \n") +SLANG_RAW(" {\n") +SLANG_RAW(" %m:$$float4x3 = OpRayQueryGetIntersection") +SLANG_SPLICE(matName +) 
+SLANG_RAW("KHR &this $iCandidateOrCommitted;\n") +SLANG_RAW(" result:$$float3x4 = OpTranspose %m;\n") +SLANG_RAW(" };\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW("\n") +SLANG_RAW(" /// Gets the world-to-object transform as a 4x3 matrix.\n") +SLANG_RAW(" /// @return 4x3 matrix transforming from world to object space\n") +SLANG_RAW(" /// @remarks Available for both candidate and committed hits\n") +SLANG_RAW(" __glsl_extension(GL_EXT_ray_query)\n") +SLANG_RAW(" [__readNone]\n") +SLANG_RAW(" [NonUniformReturn]\n") +SLANG_RAW(" [require(glsl_hlsl_spirv, rayquery)]\n") +SLANG_RAW(" float4x3 ") +SLANG_SPLICE(ccName +) +SLANG_SPLICE(matName +) +SLANG_RAW("4x3()\n") +SLANG_RAW(" {\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"rayQueryGetIntersection") +SLANG_SPLICE(matName +) +SLANG_RAW("EXT($0, ") +SLANG_SPLICE(ccTF +) +SLANG_RAW(")\";\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \".") +SLANG_SPLICE(ccName +) +SLANG_SPLICE(matName +) +SLANG_RAW("4x3\";\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" uint iCandidateOrCommitted = ") +SLANG_SPLICE(candidateOrCommitted +) +SLANG_RAW(";\n") +SLANG_RAW(" return spirv_asm \n") +SLANG_RAW(" {\n") +SLANG_RAW(" result:$$float4x3 = OpRayQueryGetIntersection") +SLANG_SPLICE(matName +) +SLANG_RAW("KHR &this $iCandidateOrCommitted;\n") +SLANG_RAW(" };\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW("\n") + + } // ObjectToWorld/WorldToObject. + + // Access Candidate and Committed properties. + struct RayQueryMethodEntry + { + const char* type; + const char* hlslName; + const char* glslName; + }; + const RayQueryMethodEntry rayQueryMethods[] = { + {"uint", "InstanceIndex", "InstanceId"}, + {"uint", "InstanceID", "InstanceCustomIndex"}, + {"uint", "PrimitiveIndex", "PrimitiveIndex"}, + {"uint", "GeometryIndex", "GeometryIndex"}, + {"uint", "InstanceContributionToHitGroupIndex", "InstanceShaderBindingTableRecordOffset"}, + {"float3", "ObjectRayOrigin", "ObjectRayOrigin"}, + {"float3", "ObjectRayDirection", "ObjectRayDirection"}, + {"bool", "TriangleFrontFace", "FrontFace"}, + {"float2", "TriangleBarycentrics", "Barycentrics"}, + }; + for (auto method : rayQueryMethods) { +SLANG_RAW("#line 17557 \"hlsl.meta.slang\"") +SLANG_RAW("\n") +SLANG_RAW("\n") +SLANG_RAW(" __glsl_extension(GL_EXT_ray_query)\n") +SLANG_RAW(" [__NoSideEffect]\n") +SLANG_RAW(" [NonUniformReturn]\n") +SLANG_RAW(" [require(glsl_hlsl_spirv, rayquery)]\n") +SLANG_RAW(" ") +SLANG_SPLICE(method.type +) +SLANG_RAW(" ") +SLANG_SPLICE(ccName +) +SLANG_SPLICE(method.hlslName +) +SLANG_RAW("()\n") +SLANG_RAW(" {\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \".") +SLANG_SPLICE(ccName +) +SLANG_SPLICE(method.hlslName +) +SLANG_RAW("\";\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"rayQueryGetIntersection") +SLANG_SPLICE(method.glslName +) +SLANG_RAW("EXT($0, ") +SLANG_SPLICE(ccTF +) +SLANG_RAW(")\";\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" uint iCandidateOrCommitted = ") +SLANG_SPLICE(candidateOrCommitted +) +SLANG_RAW(";\n") +SLANG_RAW(" return spirv_asm \n") +SLANG_RAW(" {\n") +SLANG_RAW(" result:$$") +SLANG_SPLICE(method.type +) +SLANG_RAW(" = OpRayQueryGetIntersection") +SLANG_SPLICE(method.glslName +) +SLANG_RAW("KHR &this $iCandidateOrCommitted;\n") +SLANG_RAW(" };\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") + + } // Candidate/Committed properties. 
+ } // for ("Candidate", "Committed") +SLANG_RAW("#line 17580 \"hlsl.meta.slang\"") +SLANG_RAW("\n") +SLANG_RAW("\n") +SLANG_RAW(" // Access properties of the ray being traced.\n") +SLANG_RAW("\n") +SLANG_RAW(" __glsl_extension(GL_EXT_ray_query)\n") +SLANG_RAW(" [__NoSideEffect]\n") +SLANG_RAW(" [NonUniformReturn]\n") +SLANG_RAW(" [require(glsl_hlsl_spirv, rayquery)]\n") +SLANG_RAW(" uint RayFlags()\n") +SLANG_RAW(" {\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \".RayFlags\";\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"rayQueryGetRayFlagsEXT\";\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" return spirv_asm \n") +SLANG_RAW(" {\n") +SLANG_RAW(" result:$$uint = OpRayQueryGetRayFlagsKHR &this;\n") +SLANG_RAW(" };\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW("\n") +SLANG_RAW(" /// Gets the world-space origin of the ray.\n") +SLANG_RAW(" /// @return Starting point of the ray in world space\n") +SLANG_RAW(" __glsl_extension(GL_EXT_ray_query)\n") +SLANG_RAW(" [__NoSideEffect]\n") +SLANG_RAW(" [NonUniformReturn]\n") +SLANG_RAW(" [require(glsl_hlsl_spirv, rayquery)]\n") +SLANG_RAW(" float3 WorldRayOrigin()\n") +SLANG_RAW(" {\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \".WorldRayOrigin\";\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"rayQueryGetWorldRayOriginEXT\";\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" return spirv_asm \n") +SLANG_RAW(" {\n") +SLANG_RAW(" result:$$float3 = OpRayQueryGetWorldRayOriginKHR &this;\n") +SLANG_RAW(" };\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW("\n") +SLANG_RAW(" /// Gets the world-space direction of the ray.\n") +SLANG_RAW(" /// @return Normalized direction vector in world space\n") +SLANG_RAW(" __glsl_extension(GL_EXT_ray_query)\n") +SLANG_RAW(" [__NoSideEffect]\n") +SLANG_RAW(" [NonUniformReturn]\n") +SLANG_RAW(" [require(glsl_hlsl_spirv, rayquery)]\n") +SLANG_RAW(" float3 WorldRayDirection()\n") +SLANG_RAW(" {\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \".WorldRayDirection\";\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"rayQueryGetWorldRayDirectionEXT\";\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" return spirv_asm \n") +SLANG_RAW(" {\n") +SLANG_RAW(" result:$$float3 = OpRayQueryGetWorldRayDirectionKHR &this;\n") +SLANG_RAW(" };\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW("\n") +SLANG_RAW(" /// Gets the minimum valid distance along the ray.\n") +SLANG_RAW(" /// @return Minimum t-value for considering intersections\n") +SLANG_RAW(" /// @remarks Used to prevent self-intersections\n") +SLANG_RAW(" __glsl_extension(GL_EXT_ray_query)\n") +SLANG_RAW(" [__NoSideEffect]\n") +SLANG_RAW(" [NonUniformReturn]\n") +SLANG_RAW(" [require(glsl_hlsl_spirv, rayquery)]\n") +SLANG_RAW(" float RayTMin()\n") +SLANG_RAW(" {\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \".RayTMin\";\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"rayQueryGetRayTMinEXT\";\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" return spirv_asm \n") +SLANG_RAW(" {\n") +SLANG_RAW(" result:$$float = OpRayQueryGetRayTMinKHR &this;\n") +SLANG_RAW(" };\n") +SLANG_RAW(" }\n") +SLANG_RAW(" };\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("//\n") +SLANG_RAW("// SubpassInput\n") +SLANG_RAW("//\n") +SLANG_RAW("\n") +SLANG_RAW("/// @category stage_io\n") +SLANG_RAW("__magic_type(SubpassInputType)\n") +SLANG_RAW("__intrinsic_type(") +SLANG_SPLICE(kIROp_SubpassInputType +) 
+SLANG_RAW(")\n") +SLANG_RAW("[require(glsl_hlsl_spirv, subpass)]\n") +SLANG_RAW("struct __SubpassImpl\n") +SLANG_RAW("{\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("/// @category stage_io\n") +SLANG_RAW("__generic\n") +SLANG_RAW("typealias SubpassInput = __SubpassImpl;\n") +SLANG_RAW("\n") +SLANG_RAW("__generic\n") +SLANG_RAW("extension __SubpassImpl\n") +SLANG_RAW("{\n") +SLANG_RAW(" [ForceInline]\n") +SLANG_RAW(" [require(glsl_hlsl_spirv, subpass)]\n") +SLANG_RAW(" T SubpassLoad()\n") +SLANG_RAW(" {\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"$0.SubpassLoad()\";\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"subpassLoad($0)\";\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" {\n") +SLANG_RAW(" let zeroVec = int2(0);\n") +SLANG_RAW(" return spirv_asm\n") +SLANG_RAW(" {\n") +SLANG_RAW(" OpCapability StorageImageReadWithoutFormat;\n") +SLANG_RAW(" result:$$T = OpImageRead $this $zeroVec\n") +SLANG_RAW(" };\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("/// @category stage_io\n") +SLANG_RAW("__generic\n") +SLANG_RAW("typealias SubpassInputMS = __SubpassImpl;\n") +SLANG_RAW("\n") +SLANG_RAW("__generic\n") +SLANG_RAW("extension __SubpassImpl\n") +SLANG_RAW("{\n") +SLANG_RAW(" [ForceInline]\n") +SLANG_RAW(" [require(glsl_hlsl_spirv, subpass)]\n") +SLANG_RAW(" T SubpassLoad(int sample)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"$0.SubpassLoad($1)\";\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"subpassLoad($0, $1)\";\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" {\n") +SLANG_RAW(" let zeroVec = int2(0);\n") +SLANG_RAW(" return spirv_asm\n") +SLANG_RAW(" {\n") +SLANG_RAW(" OpCapability StorageImageReadWithoutFormat;\n") +SLANG_RAW(" result:$$T = OpImageRead $this $zeroVec Sample $sample\n") +SLANG_RAW(" };\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("///\n") +SLANG_RAW("/// Shader Execution Reordering (SER)\n") +SLANG_RAW("///\n") +SLANG_RAW("/// NOTE! 
This API is currently experimental and may change in the future as SER is made available\n") +SLANG_RAW("/// in different APIs and downstream compilers.\n") +SLANG_RAW("///\n") +SLANG_RAW("/// Based on the NVAPI on D3D12 only currently.\n") +SLANG_RAW("///\n") +SLANG_RAW("/// White paper on SER on NVAPI https://developer.nvidia.com/sites/default/files/akamai/gameworks/ser-whitepaper.pdf\n") +SLANG_RAW("///\n") +SLANG_RAW("/// The NVAPI headers (R520) required for this functionality to work can be found here...\n") +SLANG_RAW("///\n") +SLANG_RAW("/// https://developer.nvidia.com/rtx/path-tracing/nvapi/get-started\n") +SLANG_RAW("///\n") +SLANG_RAW("/// For VK the specification is currently in this PR\n") +SLANG_RAW("///\n") +SLANG_RAW("/// https://github.com/KhronosGroup/GLSL/pull/196/files\n") +SLANG_RAW("\n") +SLANG_RAW("/// Internal helper functions\n") +SLANG_RAW("\n") +SLANG_RAW("// This is a bit of a hack for GLSL HitObjectAttributes\n") +SLANG_RAW("// It relies on [ForceInline] removing the surrounding function and just inserting the *contained* `t` as a global\n") +SLANG_RAW("// The __ref should indicate the desire for the returned value to not be a copy of t, but *t*.\n") +SLANG_RAW("// In practice, __ref doesn't have this effect.\n") +SLANG_RAW("// \n") +SLANG_RAW("// We need this to be able to access the payload outside of a function (which is all that TraceRay for example needs)\n") +SLANG_RAW("// We access the HitObjectAttributes via this function for the desired type, and it acts *as if* it's just an access\n") +SLANG_RAW("// to the global t.\n") +SLANG_RAW("[ForceInline]\n") +SLANG_RAW("Ref __hitObjectAttributes()\n") +SLANG_RAW("{\n") +SLANG_RAW(" [__vulkanHitObjectAttributes] \n") +SLANG_RAW(" static T t;\n") +SLANG_RAW(" return t;\n") +SLANG_RAW("}\n") +SLANG_RAW("[ForceInline]\n") +SLANG_RAW("__Addr __allocHitObjectAttributes()\n") +SLANG_RAW("{\n") +SLANG_RAW(" [__vulkanHitObjectAttributes]\n") +SLANG_RAW(" static T t;\n") +SLANG_RAW(" return __get_addr(t);\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("// Next is the custom intrinsic that will compute the hitObjectAttributes location\n") +SLANG_RAW("// for GLSL-based targets.\n") +SLANG_RAW("//\n") +SLANG_RAW("__generic\n") +SLANG_RAW("__intrinsic_op(") +SLANG_SPLICE(kIROp_GetVulkanRayTracingPayloadLocation +) +SLANG_RAW(")\n") +SLANG_RAW("int __hitObjectAttributesLocation(__ref Attributes attributes);\n") +SLANG_RAW("\n") +SLANG_RAW("/// Immutable data type representing a ray hit or a miss. Can be used to invoke hit or miss shading,\n") +SLANG_RAW("/// or as a key in ReorderThread. Created by one of several methods described below. 
HitObject\n") +SLANG_RAW("/// and its related functions are available in raytracing shader types only.\n") +SLANG_RAW("/// @category raytracing Ray-tracing\n") +SLANG_RAW("__glsl_extension(GL_NV_shader_invocation_reorder)\n") +SLANG_RAW("__glsl_extension(GL_EXT_ray_tracing)\n") +SLANG_RAW("[__NonCopyableType]\n") +SLANG_RAW("__intrinsic_type(") +SLANG_SPLICE(kIROp_HitObjectType +) +SLANG_RAW(")\n") +SLANG_RAW("struct HitObject\n") +SLANG_RAW("{\n") +SLANG_RAW(" __intrinsic_op(") +SLANG_SPLICE(kIROp_AllocateOpaqueHandle +) +SLANG_RAW(")\n") +SLANG_RAW(" __init();\n") +SLANG_RAW("\n") +SLANG_RAW(" /// Executes ray traversal (including anyhit and intersection shaders) like TraceRay, but returns the\n") +SLANG_RAW(" /// resulting hit information as a HitObject and does not trigger closesthit or miss shaders.\n") +SLANG_RAW(" [ForceInline]\n") +SLANG_RAW(" [require(glsl_hlsl_spirv, ser_raygen_closesthit_miss)]\n") +SLANG_RAW(" static HitObject TraceRay(\n") +SLANG_RAW(" RaytracingAccelerationStructure AccelerationStructure,\n") +SLANG_RAW(" uint RayFlags,\n") +SLANG_RAW(" uint InstanceInclusionMask,\n") +SLANG_RAW(" uint RayContributionToHitGroupIndex,\n") +SLANG_RAW(" uint MultiplierForGeometryContributionToHitGroupIndex,\n") +SLANG_RAW(" uint MissShaderIndex,\n") +SLANG_RAW(" RayDesc Ray,\n") +SLANG_RAW(" inout payload_t Payload)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl:\n") +SLANG_RAW(" {\n") +SLANG_RAW(" HitObject hitObj;\n") +SLANG_RAW(" __hlslTraceRay(\n") +SLANG_RAW(" AccelerationStructure, \n") +SLANG_RAW(" RayFlags, \n") +SLANG_RAW(" InstanceInclusionMask, \n") +SLANG_RAW(" RayContributionToHitGroupIndex, \n") +SLANG_RAW(" MultiplierForGeometryContributionToHitGroupIndex, \n") +SLANG_RAW(" MissShaderIndex, \n") +SLANG_RAW(" Ray, \n") +SLANG_RAW(" __forceVarIntoStructTemporarily(Payload),\n") +SLANG_RAW(" hitObj);\n") +SLANG_RAW(" return hitObj;\n") +SLANG_RAW(" }\n") +SLANG_RAW(" case glsl:\n") +SLANG_RAW(" {\n") +SLANG_RAW(" [__vulkanRayPayload]\n") +SLANG_RAW(" static payload_t p;\n") +SLANG_RAW("\n") +SLANG_RAW(" // Save the payload\n") +SLANG_RAW(" p = Payload;\n") +SLANG_RAW("\n") +SLANG_RAW(" __glslTraceRay(\n") +SLANG_RAW(" __return_val,\n") +SLANG_RAW(" AccelerationStructure,\n") +SLANG_RAW(" RayFlags, // Assumes D3D/VK have some RayFlags values\n") +SLANG_RAW(" InstanceInclusionMask, // cullMask\n") +SLANG_RAW(" RayContributionToHitGroupIndex, // sbtRecordOffset\n") +SLANG_RAW(" MultiplierForGeometryContributionToHitGroupIndex, // sbtRecordStride\n") +SLANG_RAW(" MissShaderIndex,\n") +SLANG_RAW(" Ray.Origin,\n") +SLANG_RAW(" Ray.TMin,\n") +SLANG_RAW(" Ray.Direction, \n") +SLANG_RAW(" Ray.TMax,\n") +SLANG_RAW(" __rayPayloadLocation(p));\n") +SLANG_RAW(" \n") +SLANG_RAW(" // Write the payload out\n") +SLANG_RAW(" Payload = p;\n") +SLANG_RAW(" }\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" {\n") +SLANG_RAW(" [__vulkanRayPayload]\n") +SLANG_RAW(" static payload_t p;\n") +SLANG_RAW("\n") +SLANG_RAW(" // Save the payload\n") +SLANG_RAW(" p = Payload;\n") +SLANG_RAW("\n") +SLANG_RAW(" let origin = Ray.Origin;\n") +SLANG_RAW(" let direction = Ray.Direction;\n") +SLANG_RAW(" let tmin = Ray.TMin;\n") +SLANG_RAW(" let tmax = Ray.TMax;\n") +SLANG_RAW(" spirv_asm \n") +SLANG_RAW(" {\n") +SLANG_RAW(" OpExtension \"SPV_NV_shader_invocation_reorder\";\n") +SLANG_RAW(" OpCapability ShaderInvocationReorderNV;\n") +SLANG_RAW(" OpHitObjectTraceRayNV\n") +SLANG_RAW(" /**/ &__return_val\n") +SLANG_RAW(" /**/ $AccelerationStructure\n") 
+SLANG_RAW(" /**/ $RayFlags\n") +SLANG_RAW(" /**/ $InstanceInclusionMask\n") +SLANG_RAW(" /**/ $RayContributionToHitGroupIndex\n") +SLANG_RAW(" /**/ $MultiplierForGeometryContributionToHitGroupIndex\n") +SLANG_RAW(" /**/ $MissShaderIndex\n") +SLANG_RAW(" /**/ $origin\n") +SLANG_RAW(" /**/ $tmin\n") +SLANG_RAW(" /**/ $direction\n") +SLANG_RAW(" /**/ $tmax\n") +SLANG_RAW(" /**/ &p;\n") +SLANG_RAW(" };\n") +SLANG_RAW("\n") +SLANG_RAW(" // Write the payload out\n") +SLANG_RAW(" Payload = p;\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW("\n") +SLANG_RAW(" /// Executes motion ray traversal (including anyhit and intersection shaders) like TraceRay, but returns the\n") +SLANG_RAW(" /// resulting hit information as a HitObject and does not trigger closesthit or miss shaders.\n") +SLANG_RAW(" [ForceInline]\n") +SLANG_RAW(" [require(glsl_hlsl_spirv, ser_motion_raygen_closesthit_miss)]\n") +SLANG_RAW(" static HitObject TraceMotionRay( \n") +SLANG_RAW(" RaytracingAccelerationStructure AccelerationStructure, \n") +SLANG_RAW(" uint RayFlags, \n") +SLANG_RAW(" uint InstanceInclusionMask, \n") +SLANG_RAW(" uint RayContributionToHitGroupIndex, \n") +SLANG_RAW(" uint MultiplierForGeometryContributionToHitGroupIndex, \n") +SLANG_RAW(" uint MissShaderIndex, \n") +SLANG_RAW(" RayDesc Ray,\n") +SLANG_RAW(" float CurrentTime,\n") +SLANG_RAW(" inout payload_t Payload)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl:\n") +SLANG_RAW(" __traceMotionRayHLSL(\n") +SLANG_RAW(" AccelerationStructure,\n") +SLANG_RAW(" RayFlags,\n") +SLANG_RAW(" InstanceInclusionMask,\n") +SLANG_RAW(" RayContributionToHitGroupIndex,\n") +SLANG_RAW(" MultiplierForGeometryContributionToHitGroupIndex,\n") +SLANG_RAW(" MissShaderIndex,\n") +SLANG_RAW(" Ray,\n") +SLANG_RAW(" CurrentTime,\n") +SLANG_RAW(" __forceVarIntoStructTemporarily(Payload));\n") +SLANG_RAW(" case glsl:\n") +SLANG_RAW(" {\n") +SLANG_RAW(" [__vulkanRayPayload]\n") +SLANG_RAW(" static payload_t p;\n") +SLANG_RAW("\n") +SLANG_RAW(" // Save the payload\n") +SLANG_RAW(" p = Payload;\n") +SLANG_RAW("\n") +SLANG_RAW(" __glslTraceMotionRay(\n") +SLANG_RAW(" __return_val,\n") +SLANG_RAW(" AccelerationStructure,\n") +SLANG_RAW(" RayFlags, // Assumes D3D/VK have some RayFlags values\n") +SLANG_RAW(" InstanceInclusionMask, // cullMask\n") +SLANG_RAW(" RayContributionToHitGroupIndex, // sbtRecordOffset\n") +SLANG_RAW(" MultiplierForGeometryContributionToHitGroupIndex, // sbtRecordStride\n") +SLANG_RAW(" MissShaderIndex,\n") +SLANG_RAW(" Ray.Origin,\n") +SLANG_RAW(" Ray.TMin,\n") +SLANG_RAW(" Ray.Direction, \n") +SLANG_RAW(" Ray.TMax,\n") +SLANG_RAW(" CurrentTime,\n") +SLANG_RAW(" __rayPayloadLocation(p));\n") +SLANG_RAW(" \n") +SLANG_RAW(" // Write the payload out\n") +SLANG_RAW(" Payload = p;\n") +SLANG_RAW(" }\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" {\n") +SLANG_RAW(" [__vulkanRayPayload]\n") +SLANG_RAW(" static payload_t p;\n") +SLANG_RAW("\n") +SLANG_RAW(" // Save the payload\n") +SLANG_RAW(" p = Payload;\n") +SLANG_RAW("\n") +SLANG_RAW(" let origin = Ray.Origin;\n") +SLANG_RAW(" let direction = Ray.Direction;\n") +SLANG_RAW(" let tmin = Ray.TMin;\n") +SLANG_RAW(" let tmax = Ray.TMax;\n") +SLANG_RAW(" spirv_asm \n") +SLANG_RAW(" {\n") +SLANG_RAW(" OpExtension \"SPV_NV_shader_invocation_reorder\";\n") +SLANG_RAW(" OpCapability ShaderInvocationReorderNV;\n") +SLANG_RAW(" OpExtension \"SPV_NV_ray_tracing_motion_blur\";\n") +SLANG_RAW(" OpCapability RayTracingMotionBlurNV;\n") +SLANG_RAW(" 
OpHitObjectTraceRayMotionNV\n") +SLANG_RAW(" /**/ &__return_val\n") +SLANG_RAW(" /**/ $AccelerationStructure\n") +SLANG_RAW(" /**/ $RayFlags\n") +SLANG_RAW(" /**/ $InstanceInclusionMask\n") +SLANG_RAW(" /**/ $RayContributionToHitGroupIndex\n") +SLANG_RAW(" /**/ $MultiplierForGeometryContributionToHitGroupIndex\n") +SLANG_RAW(" /**/ $MissShaderIndex\n") +SLANG_RAW(" /**/ $origin\n") +SLANG_RAW(" /**/ $tmin\n") +SLANG_RAW(" /**/ $direction\n") +SLANG_RAW(" /**/ $tmax\n") +SLANG_RAW(" /**/ $CurrentTime\n") +SLANG_RAW(" /**/ &p;\n") +SLANG_RAW(" };\n") +SLANG_RAW(" \n") +SLANG_RAW(" // Write the payload out\n") +SLANG_RAW(" Payload = p;\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW(" \n") +SLANG_RAW(" }\n") +SLANG_RAW("\n") +SLANG_RAW(" /// Creates a HitObject representing a hit based on values explicitly passed as arguments, without\n") +SLANG_RAW(" /// tracing a ray. The primitive specified by AccelerationStructure, InstanceIndex, GeometryIndex,\n") +SLANG_RAW(" /// and PrimitiveIndex must exist. The shader table index is computed using the formula used with\n") +SLANG_RAW(" /// TraceRay. The computed index must reference a valid hit group record in the shader table. The\n") +SLANG_RAW(" /// Attributes parameter must either be an attribute struct, such as\n") +SLANG_RAW(" /// BuiltInTriangleIntersectionAttributes, or another HitObject to copy the attributes from.\n") +SLANG_RAW(" [ForceInline]\n") +SLANG_RAW(" [require(glsl_hlsl_spirv, ser_raygen_closesthit_miss)]\n") +SLANG_RAW(" static HitObject MakeHit(\n") +SLANG_RAW(" RaytracingAccelerationStructure AccelerationStructure,\n") +SLANG_RAW(" uint InstanceIndex,\n") +SLANG_RAW(" uint GeometryIndex,\n") +SLANG_RAW(" uint PrimitiveIndex,\n") +SLANG_RAW(" uint HitKind,\n") +SLANG_RAW(" uint RayContributionToHitGroupIndex,\n") +SLANG_RAW(" uint MultiplierForGeometryContributionToHitGroupIndex,\n") +SLANG_RAW(" RayDesc Ray,\n") +SLANG_RAW(" attr_t attributes)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl:\n") +SLANG_RAW(" HitObject hitObj;\n") +SLANG_RAW(" __hlslMakeHit(\n") +SLANG_RAW(" AccelerationStructure, \n") +SLANG_RAW(" InstanceIndex,\n") +SLANG_RAW(" GeometryIndex,\n") +SLANG_RAW(" PrimitiveIndex,\n") +SLANG_RAW(" HitKind,\n") +SLANG_RAW(" RayContributionToHitGroupIndex,\n") +SLANG_RAW(" MultiplierForGeometryContributionToHitGroupIndex,\n") +SLANG_RAW(" Ray,\n") +SLANG_RAW(" attributes,\n") +SLANG_RAW(" hitObj);\n") +SLANG_RAW(" return hitObj;\n") +SLANG_RAW(" case glsl:\n") +SLANG_RAW(" {\n") +SLANG_RAW(" // Save the attributes\n") +SLANG_RAW(" __hitObjectAttributes() = attributes;\n") +SLANG_RAW("\n") +SLANG_RAW(" __glslMakeHit(\n") +SLANG_RAW(" __return_val,\n") +SLANG_RAW(" AccelerationStructure,\n") +SLANG_RAW(" InstanceIndex,\n") +SLANG_RAW(" PrimitiveIndex,\n") +SLANG_RAW(" GeometryIndex,\n") +SLANG_RAW(" HitKind,\n") +SLANG_RAW(" RayContributionToHitGroupIndex, /// sbtRecordOffset?\n") +SLANG_RAW(" MultiplierForGeometryContributionToHitGroupIndex, /// sbtRecordStride?\n") +SLANG_RAW(" Ray.Origin,\n") +SLANG_RAW(" Ray.TMin,\n") +SLANG_RAW(" Ray.Direction, \n") +SLANG_RAW(" Ray.TMax,\n") +SLANG_RAW(" __hitObjectAttributesLocation(__hitObjectAttributes()));\n") +SLANG_RAW(" }\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" {\n") +SLANG_RAW(" // Save the attributes\n") +SLANG_RAW(" __Addr attr = __allocHitObjectAttributes();\n") +SLANG_RAW("\n") +SLANG_RAW(" *attr = attributes;\n") +SLANG_RAW("\n") +SLANG_RAW(" let origin = Ray.Origin;\n") +SLANG_RAW(" let direction = 
Ray.Direction;\n") +SLANG_RAW(" let tmin = Ray.TMin;\n") +SLANG_RAW(" let tmax = Ray.TMax;\n") +SLANG_RAW(" spirv_asm \n") +SLANG_RAW(" {\n") +SLANG_RAW(" OpExtension \"SPV_NV_shader_invocation_reorder\";\n") +SLANG_RAW(" OpCapability ShaderInvocationReorderNV;\n") +SLANG_RAW(" OpHitObjectRecordHitNV\n") +SLANG_RAW(" /**/ &__return_val\n") +SLANG_RAW(" /**/ $AccelerationStructure\n") +SLANG_RAW(" /**/ $InstanceIndex\n") +SLANG_RAW(" /**/ $PrimitiveIndex\n") +SLANG_RAW(" /**/ $GeometryIndex\n") +SLANG_RAW(" /**/ $HitKind\n") +SLANG_RAW(" /**/ $RayContributionToHitGroupIndex\n") +SLANG_RAW(" /**/ $MultiplierForGeometryContributionToHitGroupIndex\n") +SLANG_RAW(" /**/ $origin\n") +SLANG_RAW(" /**/ $tmin\n") +SLANG_RAW(" /**/ $direction\n") +SLANG_RAW(" /**/ $tmax\n") +SLANG_RAW(" /**/ $attr;\n") +SLANG_RAW(" };\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW("\n") +SLANG_RAW(" /// See MakeHit but handles Motion \n") +SLANG_RAW(" /// Currently only supported on VK\n") +SLANG_RAW(" [ForceInline] \n") +SLANG_RAW(" [require(glsl_hlsl_spirv, ser_motion_raygen_closesthit_miss)]\n") +SLANG_RAW(" static HitObject MakeMotionHit( \n") +SLANG_RAW(" RaytracingAccelerationStructure AccelerationStructure, \n") +SLANG_RAW(" uint InstanceIndex, \n") +SLANG_RAW(" uint GeometryIndex, \n") +SLANG_RAW(" uint PrimitiveIndex, \n") +SLANG_RAW(" uint HitKind, \n") +SLANG_RAW(" uint RayContributionToHitGroupIndex, \n") +SLANG_RAW(" uint MultiplierForGeometryContributionToHitGroupIndex, \n") +SLANG_RAW(" RayDesc Ray,\n") +SLANG_RAW(" float CurrentTime,\n") +SLANG_RAW(" attr_t attributes)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"MakeMotionHit\";\n") +SLANG_RAW(" case glsl:\n") +SLANG_RAW(" {\n") +SLANG_RAW(" // Save the attributes\n") +SLANG_RAW(" __hitObjectAttributes() = attributes;\n") +SLANG_RAW("\n") +SLANG_RAW(" __glslMakeMotionHit(\n") +SLANG_RAW(" __return_val,\n") +SLANG_RAW(" AccelerationStructure,\n") +SLANG_RAW(" InstanceIndex,\n") +SLANG_RAW(" PrimitiveIndex,\n") +SLANG_RAW(" GeometryIndex,\n") +SLANG_RAW(" HitKind,\n") +SLANG_RAW(" RayContributionToHitGroupIndex, /// sbtRecordOffset?\n") +SLANG_RAW(" MultiplierForGeometryContributionToHitGroupIndex, /// sbtRecordStride?\n") +SLANG_RAW(" Ray.Origin,\n") +SLANG_RAW(" Ray.TMin,\n") +SLANG_RAW(" Ray.Direction, \n") +SLANG_RAW(" Ray.TMax,\n") +SLANG_RAW(" CurrentTime,\n") +SLANG_RAW(" __hitObjectAttributesLocation(__hitObjectAttributes()));\n") +SLANG_RAW(" }\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" {\n") +SLANG_RAW(" // Save the attributes\n") +SLANG_RAW(" __Addr attr = __allocHitObjectAttributes();\n") +SLANG_RAW("\n") +SLANG_RAW(" *attr = attributes;\n") +SLANG_RAW("\n") +SLANG_RAW(" let origin = Ray.Origin;\n") +SLANG_RAW(" let direction = Ray.Direction;\n") +SLANG_RAW(" let tmin = Ray.TMin;\n") +SLANG_RAW(" let tmax = Ray.TMax;\n") +SLANG_RAW(" spirv_asm \n") +SLANG_RAW(" {\n") +SLANG_RAW(" OpExtension \"SPV_NV_ray_tracing_motion_blur\";\n") +SLANG_RAW(" OpCapability RayTracingMotionBlurNV;\n") +SLANG_RAW(" OpExtension \"SPV_NV_shader_invocation_reorder\";\n") +SLANG_RAW(" OpCapability ShaderInvocationReorderNV;\n") +SLANG_RAW(" OpHitObjectRecordHitMotionNV\n") +SLANG_RAW(" /**/ &__return_val\n") +SLANG_RAW(" /**/ $AccelerationStructure\n") +SLANG_RAW(" /**/ $InstanceIndex\n") +SLANG_RAW(" /**/ $PrimitiveIndex\n") +SLANG_RAW(" /**/ $GeometryIndex\n") +SLANG_RAW(" /**/ $HitKind\n") +SLANG_RAW(" /**/ $RayContributionToHitGroupIndex\n") +SLANG_RAW(" /**/ 
$MultiplierForGeometryContributionToHitGroupIndex\n") +SLANG_RAW(" /**/ $origin\n") +SLANG_RAW(" /**/ $tmin\n") +SLANG_RAW(" /**/ $direction\n") +SLANG_RAW(" /**/ $tmax\n") +SLANG_RAW(" /**/ $CurrentTime\n") +SLANG_RAW(" /**/ $attr;\n") +SLANG_RAW(" };\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW("\n") +SLANG_RAW(" /// Creates a HitObject representing a hit based on values explicitly passed as arguments, without\n") +SLANG_RAW(" /// tracing a ray. The primitive specified by AccelerationStructure, InstanceIndex, GeometryIndex,\n") +SLANG_RAW(" /// and PrimitiveIndex must exist. The shader table index is explicitly provided as an argument\n") +SLANG_RAW(" /// instead of being computed from the indexing formula used in TraceRay. The provided index must\n") +SLANG_RAW(" /// reference a valid hit group record in the shader table. The Attributes parameter must either be an\n") +SLANG_RAW(" /// attribute struct, such as BuiltInTriangleIntersectionAttributes, or another HitObject to copy the\n") +SLANG_RAW(" /// attributes from.\n") +SLANG_RAW(" [ForceInline]\n") +SLANG_RAW(" [require(glsl_hlsl_spirv, ser_raygen_closesthit_miss)]\n") +SLANG_RAW(" static HitObject MakeHit(\n") +SLANG_RAW(" uint HitGroupRecordIndex,\n") +SLANG_RAW(" RaytracingAccelerationStructure AccelerationStructure,\n") +SLANG_RAW(" uint InstanceIndex,\n") +SLANG_RAW(" uint GeometryIndex,\n") +SLANG_RAW(" uint PrimitiveIndex,\n") +SLANG_RAW(" uint HitKind,\n") +SLANG_RAW(" RayDesc Ray,\n") +SLANG_RAW(" attr_t attributes)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl:\n") +SLANG_RAW(" HitObject hitObj;\n") +SLANG_RAW(" __hlslMakeHitWithRecordIndex(\n") +SLANG_RAW(" HitGroupRecordIndex, \n") +SLANG_RAW(" AccelerationStructure, \n") +SLANG_RAW(" InstanceIndex,\n") +SLANG_RAW(" GeometryIndex,\n") +SLANG_RAW(" PrimitiveIndex,\n") +SLANG_RAW(" HitKind,\n") +SLANG_RAW(" Ray,\n") +SLANG_RAW(" attributes,\n") +SLANG_RAW(" hitObj);\n") +SLANG_RAW(" return hitObj;\n") +SLANG_RAW(" case glsl:\n") +SLANG_RAW(" {\n") +SLANG_RAW(" // Save the attributes\n") +SLANG_RAW(" __hitObjectAttributes() = attributes;\n") +SLANG_RAW("\n") +SLANG_RAW(" __glslMakeHitWithIndex(\n") +SLANG_RAW(" __return_val,\n") +SLANG_RAW(" AccelerationStructure,\n") +SLANG_RAW(" InstanceIndex, ///? 
Same as instanceid ?\n") +SLANG_RAW(" PrimitiveIndex,\n") +SLANG_RAW(" GeometryIndex,\n") +SLANG_RAW(" HitKind, /// Assuming HitKinds are compatible\n") +SLANG_RAW(" HitGroupRecordIndex, /// sbtRecordIndex\n") +SLANG_RAW(" Ray.Origin,\n") +SLANG_RAW(" Ray.TMin,\n") +SLANG_RAW(" Ray.Direction, \n") +SLANG_RAW(" Ray.TMax,\n") +SLANG_RAW(" __hitObjectAttributesLocation(__hitObjectAttributes()));\n") +SLANG_RAW(" }\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" {\n") +SLANG_RAW(" // Save the attributes\n") +SLANG_RAW(" __Addr attr = __allocHitObjectAttributes();\n") +SLANG_RAW(" *attr = attributes;\n") +SLANG_RAW(" let origin = Ray.Origin;\n") +SLANG_RAW(" let direction = Ray.Direction;\n") +SLANG_RAW(" let tmin = Ray.TMin;\n") +SLANG_RAW(" let tmax = Ray.TMax;\n") +SLANG_RAW(" spirv_asm \n") +SLANG_RAW(" {\n") +SLANG_RAW(" OpExtension \"SPV_NV_shader_invocation_reorder\";\n") +SLANG_RAW(" OpCapability ShaderInvocationReorderNV;\n") +SLANG_RAW(" OpHitObjectRecordHitWithIndexNV\n") +SLANG_RAW(" /**/ &__return_val\n") +SLANG_RAW(" /**/ $AccelerationStructure\n") +SLANG_RAW(" /**/ $InstanceIndex\n") +SLANG_RAW(" /**/ $PrimitiveIndex\n") +SLANG_RAW(" /**/ $GeometryIndex\n") +SLANG_RAW(" /**/ $HitKind\n") +SLANG_RAW(" /**/ $HitGroupRecordIndex\n") +SLANG_RAW(" /**/ $origin\n") +SLANG_RAW(" /**/ $tmin\n") +SLANG_RAW(" /**/ $direction\n") +SLANG_RAW(" /**/ $tmax\n") +SLANG_RAW(" /**/ $attr;\n") +SLANG_RAW(" };\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW(" /// See MakeHit but handles Motion \n") +SLANG_RAW(" /// Currently only supported on VK\n") +SLANG_RAW(" [ForceInline]\n") +SLANG_RAW(" [require(glsl_spirv, ser_motion_raygen_closesthit_miss)]\n") +SLANG_RAW(" static HitObject MakeMotionHit( \n") +SLANG_RAW(" uint HitGroupRecordIndex, \n") +SLANG_RAW(" RaytracingAccelerationStructure AccelerationStructure, \n") +SLANG_RAW(" uint InstanceIndex, \n") +SLANG_RAW(" uint GeometryIndex, \n") +SLANG_RAW(" uint PrimitiveIndex, \n") +SLANG_RAW(" uint HitKind, \n") +SLANG_RAW(" RayDesc Ray, \n") +SLANG_RAW(" float CurrentTime,\n") +SLANG_RAW(" attr_t attributes)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case glsl:\n") +SLANG_RAW(" {\n") +SLANG_RAW(" // Save the attributes\n") +SLANG_RAW(" __hitObjectAttributes() = attributes;\n") +SLANG_RAW("\n") +SLANG_RAW(" __glslMakeMotionHitWithIndex(\n") +SLANG_RAW(" __return_val,\n") +SLANG_RAW(" AccelerationStructure,\n") +SLANG_RAW(" InstanceIndex, ///? 
Same as instanceid ?\n") +SLANG_RAW(" PrimitiveIndex, \n") +SLANG_RAW(" GeometryIndex,\n") +SLANG_RAW(" HitKind, /// Assuming HitKinds are compatible\n") +SLANG_RAW(" HitGroupRecordIndex, /// sbtRecordIndex\n") +SLANG_RAW(" Ray.Origin,\n") +SLANG_RAW(" Ray.TMin,\n") +SLANG_RAW(" Ray.Direction, \n") +SLANG_RAW(" Ray.TMax,\n") +SLANG_RAW(" CurrentTime,\n") +SLANG_RAW(" __hitObjectAttributesLocation(__hitObjectAttributes()));\n") +SLANG_RAW(" }\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" {\n") +SLANG_RAW(" // Save the attributes\n") +SLANG_RAW(" __Addr attr = __allocHitObjectAttributes();\n") +SLANG_RAW(" *attr = attributes;\n") +SLANG_RAW(" let origin = Ray.Origin;\n") +SLANG_RAW(" let direction = Ray.Direction;\n") +SLANG_RAW(" let tmin = Ray.TMin;\n") +SLANG_RAW(" let tmax = Ray.TMax;\n") +SLANG_RAW(" spirv_asm \n") +SLANG_RAW(" {\n") +SLANG_RAW(" OpExtension \"SPV_NV_ray_tracing_motion_blur\";\n") +SLANG_RAW(" OpCapability RayTracingMotionBlurNV;\n") +SLANG_RAW(" OpHitObjectRecordHitWithIndexMotionNV\n") +SLANG_RAW(" /**/ &__return_val\n") +SLANG_RAW(" /**/ $AccelerationStructure\n") +SLANG_RAW(" /**/ $InstanceIndex\n") +SLANG_RAW(" /**/ $PrimitiveIndex\n") +SLANG_RAW(" /**/ $GeometryIndex\n") +SLANG_RAW(" /**/ $HitKind\n") +SLANG_RAW(" /**/ $HitGroupRecordIndex\n") +SLANG_RAW(" /**/ $origin\n") +SLANG_RAW(" /**/ $tmin\n") +SLANG_RAW(" /**/ $direction\n") +SLANG_RAW(" /**/ $tmax\n") +SLANG_RAW(" /**/ $CurrentTime\n") +SLANG_RAW(" /**/ $attr;\n") +SLANG_RAW(" };\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW("\n") +SLANG_RAW(" /// Creates a HitObject representing a miss based on values explicitly passed as arguments, without\n") +SLANG_RAW(" /// tracing a ray. The provided shader table index must reference a valid miss record in the shader\n") +SLANG_RAW(" /// table.\n") +SLANG_RAW(" [__requiresNVAPI]\n") +SLANG_RAW(" [ForceInline]\n") +SLANG_RAW(" [require(glsl_hlsl_spirv, ser_raygen_closesthit_miss)]\n") +SLANG_RAW(" static HitObject MakeMiss( \n") +SLANG_RAW(" uint MissShaderIndex, \n") +SLANG_RAW(" RayDesc Ray)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"($2=NvMakeMiss($0,$1))\";\n") +SLANG_RAW(" case glsl:\n") +SLANG_RAW(" __glslMakeMiss(__return_val, MissShaderIndex, Ray.Origin, Ray.TMin, Ray.Direction, Ray.TMax);\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" {\n") +SLANG_RAW(" let origin = Ray.Origin;\n") +SLANG_RAW(" let direction = Ray.Direction;\n") +SLANG_RAW(" let tmin = Ray.TMin;\n") +SLANG_RAW(" let tmax = Ray.TMax;\n") +SLANG_RAW(" spirv_asm \n") +SLANG_RAW(" {\n") +SLANG_RAW(" OpExtension \"SPV_NV_shader_invocation_reorder\";\n") +SLANG_RAW(" OpCapability ShaderInvocationReorderNV;\n") +SLANG_RAW(" OpHitObjectRecordMissNV\n") +SLANG_RAW(" /**/ &__return_val\n") +SLANG_RAW(" /**/ $MissShaderIndex\n") +SLANG_RAW(" /**/ $origin\n") +SLANG_RAW(" /**/ $tmin\n") +SLANG_RAW(" /**/ $direction\n") +SLANG_RAW(" /**/ $tmax;\n") +SLANG_RAW(" };\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW("\n") +SLANG_RAW(" /// See MakeMiss but handles Motion \n") +SLANG_RAW(" /// Currently only supported on VK\n") +SLANG_RAW(" [ForceInline]\n") +SLANG_RAW(" [require(glsl_hlsl_spirv, ser_motion_raygen_closesthit_miss)]\n") +SLANG_RAW(" static HitObject MakeMotionMiss( \n") +SLANG_RAW(" uint MissShaderIndex, \n") +SLANG_RAW(" RayDesc Ray,\n") +SLANG_RAW(" float CurrentTime)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl: 
__intrinsic_asm \"($3=NvMakeMotionMiss($0,$1,$2))\";\n") +SLANG_RAW(" case glsl:\n") +SLANG_RAW(" __glslMakeMotionMiss(__return_val, MissShaderIndex, Ray.Origin, Ray.TMin, Ray.Direction, Ray.TMax, CurrentTime);\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" {\n") +SLANG_RAW(" let origin = Ray.Origin;\n") +SLANG_RAW(" let direction = Ray.Direction;\n") +SLANG_RAW(" let tmin = Ray.TMin;\n") +SLANG_RAW(" let tmax = Ray.TMax;\n") +SLANG_RAW(" spirv_asm \n") +SLANG_RAW(" {\n") +SLANG_RAW(" OpExtension \"SPV_NV_ray_tracing_motion_blur\";\n") +SLANG_RAW(" OpCapability RayTracingMotionBlurNV;\n") +SLANG_RAW(" OpExtension \"SPV_NV_shader_invocation_reorder\";\n") +SLANG_RAW(" OpCapability ShaderInvocationReorderNV;\n") +SLANG_RAW(" OpHitObjectRecordMissMotionNV\n") +SLANG_RAW(" /**/ &__return_val\n") +SLANG_RAW(" /**/ $MissShaderIndex\n") +SLANG_RAW(" /**/ $origin\n") +SLANG_RAW(" /**/ $tmin\n") +SLANG_RAW(" /**/ $direction\n") +SLANG_RAW(" /**/ $tmax\n") +SLANG_RAW(" /**/ $CurrentTime;\n") +SLANG_RAW(" };\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW("\n") +SLANG_RAW(" /// Creates a HitObject representing 429496726642949671684294967196NOP429496726642949671684294967197 (no operation) which is neither a hit nor a miss. Invoking a\n") +SLANG_RAW(" /// NOP hit object using HitObject::Invoke has no effect. Reordering by hit objects using\n") +SLANG_RAW(" /// ReorderThread will group NOP hit objects together. This can be useful in some reordering\n") +SLANG_RAW(" /// scenarios where future control flow for some threads is known to process neither a hit nor a\n") +SLANG_RAW(" /// miss.\n") +SLANG_RAW(" [__requiresNVAPI]\n") +SLANG_RAW(" [ForceInline]\n") +SLANG_RAW(" [require(glsl_hlsl_spirv, ser_raygen_closesthit_miss)]\n") +SLANG_RAW(" static HitObject MakeNop()\n") +SLANG_RAW(" {\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl:\n") +SLANG_RAW(" __intrinsic_asm \"($0 = NvMakeNop())\";\n") +SLANG_RAW(" case glsl:\n") +SLANG_RAW(" __glslMakeNop(__return_val);\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" spirv_asm \n") +SLANG_RAW(" {\n") +SLANG_RAW(" OpExtension \"SPV_NV_shader_invocation_reorder\";\n") +SLANG_RAW(" OpCapability ShaderInvocationReorderNV;\n") +SLANG_RAW(" OpHitObjectRecordEmptyNV\n") +SLANG_RAW(" /**/ &__return_val;\n") +SLANG_RAW(" };\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW("\n") +SLANG_RAW(" [require(hlsl, ser)]\n") +SLANG_RAW(" __generic\n") +SLANG_RAW(" static void __InvokeHLSL(\n") +SLANG_RAW(" RaytracingAccelerationStructure AccelerationStructure,\n") +SLANG_RAW(" HitObject HitOrMiss,\n") +SLANG_RAW(" inout payload_t Payload)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"NvInvokeHitObject\";\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW("\n") +SLANG_RAW(" /// Invokes closesthit or miss shading for the specified hit object. 
In case of a NOP HitObject, no\n") +SLANG_RAW(" /// shader is invoked.\n") +SLANG_RAW(" [__requiresNVAPI]\n") +SLANG_RAW(" [ForceInline]\n") +SLANG_RAW(" [require(glsl_hlsl_spirv, ser_raygen_closesthit_miss)]\n") +SLANG_RAW(" static void Invoke(\n") +SLANG_RAW(" RaytracingAccelerationStructure AccelerationStructure,\n") +SLANG_RAW(" HitObject HitOrMiss,\n") +SLANG_RAW(" inout payload_t Payload)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl:\n") +SLANG_RAW(" __InvokeHLSL(\n") +SLANG_RAW(" AccelerationStructure,\n") +SLANG_RAW(" HitOrMiss,\n") +SLANG_RAW(" __forceVarIntoStructTemporarily(Payload));\n") +SLANG_RAW(" case glsl:\n") +SLANG_RAW(" {\n") +SLANG_RAW(" [__vulkanRayPayload]\n") +SLANG_RAW(" static payload_t p;\n") +SLANG_RAW("\n") +SLANG_RAW(" // Save the payload\n") +SLANG_RAW(" p = Payload;\n") +SLANG_RAW("\n") +SLANG_RAW(" __glslInvoke(HitOrMiss, __rayPayloadLocation(p));\n") +SLANG_RAW("\n") +SLANG_RAW(" // Write payload result\n") +SLANG_RAW(" Payload = p;\n") +SLANG_RAW(" }\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" {\n") +SLANG_RAW(" [__vulkanRayPayload]\n") +SLANG_RAW(" static payload_t p;\n") +SLANG_RAW("\n") +SLANG_RAW(" // Save the payload\n") +SLANG_RAW(" p = Payload;\n") +SLANG_RAW("\n") +SLANG_RAW(" spirv_asm \n") +SLANG_RAW(" { \n") +SLANG_RAW(" OpExtension \"SPV_NV_shader_invocation_reorder\";\n") +SLANG_RAW(" OpCapability ShaderInvocationReorderNV; \n") +SLANG_RAW(" OpHitObjectExecuteShaderNV\n") +SLANG_RAW(" /**/ &HitOrMiss\n") +SLANG_RAW(" /**/ &p;\n") +SLANG_RAW(" };\n") +SLANG_RAW("\n") +SLANG_RAW(" // Write payload result\n") +SLANG_RAW(" Payload = p;\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW("\n") +SLANG_RAW(" /// Returns true if the HitObject encodes a miss, otherwise returns false.\n") +SLANG_RAW(" [__requiresNVAPI]\n") +SLANG_RAW(" [ForceInline]\n") +SLANG_RAW(" [require(glsl_hlsl_spirv, ser_raygen_closesthit_miss)]\n") +SLANG_RAW(" bool IsMiss()\n") +SLANG_RAW(" {\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \".IsMiss\";\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"hitObjectIsMissNV($0)\";\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" return spirv_asm \n") +SLANG_RAW(" {\n") +SLANG_RAW(" OpExtension \"SPV_NV_shader_invocation_reorder\";\n") +SLANG_RAW(" OpCapability ShaderInvocationReorderNV;\n") +SLANG_RAW(" result:$$bool = OpHitObjectIsMissNV &this;\n") +SLANG_RAW(" };\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW("\n") +SLANG_RAW(" /// Returns true if the HitObject encodes a hit, otherwise returns false.\n") +SLANG_RAW(" [__requiresNVAPI]\n") +SLANG_RAW(" [ForceInline]\n") +SLANG_RAW(" [require(glsl_hlsl_spirv, ser_raygen_closesthit_miss)]\n") +SLANG_RAW(" bool IsHit()\n") +SLANG_RAW(" {\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \".IsHit\";\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"hitObjectIsHitNV($0)\";\n") +SLANG_RAW(" case spirv: \n") +SLANG_RAW(" return spirv_asm \n") +SLANG_RAW(" {\n") +SLANG_RAW(" OpExtension \"SPV_NV_shader_invocation_reorder\";\n") +SLANG_RAW(" OpCapability ShaderInvocationReorderNV;\n") +SLANG_RAW(" result:$$bool = OpHitObjectIsHitNV &this;\n") +SLANG_RAW(" };\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW("\n") +SLANG_RAW(" /// Returns true if the HitObject encodes a nop, otherwise returns false.\n") +SLANG_RAW(" [__requiresNVAPI]\n") +SLANG_RAW(" [ForceInline]\n") +SLANG_RAW(" [require(glsl_hlsl_spirv, 
ser_raygen_closesthit_miss)]\n") +SLANG_RAW(" bool IsNop()\n") +SLANG_RAW(" {\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \".IsNop\";\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"hitObjectIsEmptyNV($0)\";\n") +SLANG_RAW(" case spirv: \n") +SLANG_RAW(" return spirv_asm \n") +SLANG_RAW(" {\n") +SLANG_RAW(" OpExtension \"SPV_NV_shader_invocation_reorder\";\n") +SLANG_RAW(" OpCapability ShaderInvocationReorderNV;\n") +SLANG_RAW(" result:$$bool = OpHitObjectIsEmptyNV &this;\n") +SLANG_RAW(" };\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW("\n") +SLANG_RAW(" /// Queries ray properties from HitObject. Valid if the hit object represents a hit or a miss.\n") +SLANG_RAW(" [__requiresNVAPI]\n") +SLANG_RAW(" [ForceInline]\n") +SLANG_RAW(" [require(glsl_hlsl_spirv, ser_raygen_closesthit_miss)]\n") +SLANG_RAW(" RayDesc GetRayDesc()\n") +SLANG_RAW(" {\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl:\n") +SLANG_RAW(" __intrinsic_asm \".GetRayDesc\";\n") +SLANG_RAW(" case glsl:\n") +SLANG_RAW(" {\n") +SLANG_RAW(" RayDesc ray = { __glslGetRayWorldOrigin(), __glslGetTMin(), __glslGetRayWorldDirection(), __glslGetTMax() };\n") +SLANG_RAW(" return ray;\n") +SLANG_RAW(" }\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" return spirv_asm \n") +SLANG_RAW(" {\n") +SLANG_RAW(" OpExtension \"SPV_NV_shader_invocation_reorder\";\n") +SLANG_RAW(" OpCapability ShaderInvocationReorderNV;\n") +SLANG_RAW(" %origin:$$float3 = OpHitObjectGetWorldRayOriginNV &this;\n") +SLANG_RAW(" %tmin:$$float = OpHitObjectGetRayTMinNV &this;\n") +SLANG_RAW(" %direction:$$float3 = OpHitObjectGetWorldRayDirectionNV &this;\n") +SLANG_RAW(" %tmax:$$float = OpHitObjectGetRayTMaxNV &this;\n") +SLANG_RAW(" result:$$RayDesc = OpCompositeConstruct %origin %tmin %direction %tmax;\n") +SLANG_RAW(" };\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW("\n") +SLANG_RAW(" /// Queries shader table index from HitObject. Valid if the hit object represents a hit or a miss.\n") +SLANG_RAW(" [__requiresNVAPI]\n") +SLANG_RAW(" __glsl_extension(GL_EXT_ray_tracing)\n") +SLANG_RAW(" [ForceInline]\n") +SLANG_RAW(" [require(glsl_hlsl_spirv, ser_raygen_closesthit_miss)]\n") +SLANG_RAW(" uint GetShaderTableIndex()\n") +SLANG_RAW(" {\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \".GetShaderTableIndex\";\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"hitObjectGetShaderBindingTableRecordIndexNV($0)\";\n") +SLANG_RAW(" case spirv: \n") +SLANG_RAW(" return spirv_asm \n") +SLANG_RAW(" {\n") +SLANG_RAW(" OpExtension \"SPV_NV_shader_invocation_reorder\";\n") +SLANG_RAW(" OpCapability ShaderInvocationReorderNV;\n") +SLANG_RAW(" result:$$uint = OpHitObjectGetShaderBindingTableRecordIndexNV &this;\n") +SLANG_RAW(" };\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW("\n") +SLANG_RAW(" /// Returns the instance index of a hit. 
Valid if the hit object represents a hit.\n") +SLANG_RAW(" [__requiresNVAPI]\n") +SLANG_RAW(" __glsl_extension(GL_EXT_ray_tracing)\n") +SLANG_RAW(" [ForceInline]\n") +SLANG_RAW(" [require(glsl_hlsl_spirv, ser_raygen_closesthit_miss)]\n") +SLANG_RAW(" uint GetInstanceIndex()\n") +SLANG_RAW(" {\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \".GetInstanceIndex\";\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"hitObjectGetInstanceIdNV($0)\";\n") +SLANG_RAW(" case spirv: \n") +SLANG_RAW(" return spirv_asm \n") +SLANG_RAW(" {\n") +SLANG_RAW(" OpExtension \"SPV_NV_shader_invocation_reorder\";\n") +SLANG_RAW(" OpCapability ShaderInvocationReorderNV;\n") +SLANG_RAW(" result:$$uint = OpHitObjectGetInstanceIdNV &this;\n") +SLANG_RAW(" };\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW("\n") +SLANG_RAW(" /// Returns the instance ID of a hit. Valid if the hit object represents a hit.\n") +SLANG_RAW(" [__requiresNVAPI]\n") +SLANG_RAW(" __glsl_extension(GL_EXT_ray_tracing)\n") +SLANG_RAW(" [ForceInline]\n") +SLANG_RAW(" [require(glsl_hlsl_spirv, ser_raygen_closesthit_miss)]\n") +SLANG_RAW(" uint GetInstanceID()\n") +SLANG_RAW(" {\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \".GetInstanceID\";\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"hitObjectGetInstanceCustomIndexNV($0)\";\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" return spirv_asm \n") +SLANG_RAW(" {\n") +SLANG_RAW(" OpExtension \"SPV_NV_shader_invocation_reorder\";\n") +SLANG_RAW(" OpCapability ShaderInvocationReorderNV;\n") +SLANG_RAW(" result:$$uint = OpHitObjectGetInstanceCustomIndexNV &this;\n") +SLANG_RAW(" };\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW("\n") +SLANG_RAW(" /// Returns the geometry index of a hit. Valid if the hit object represents a hit.\n") +SLANG_RAW(" [__requiresNVAPI]\n") +SLANG_RAW(" __glsl_extension(GL_EXT_ray_tracing)\n") +SLANG_RAW(" [ForceInline]\n") +SLANG_RAW(" [require(glsl_hlsl_spirv, ser_raygen_closesthit_miss)]\n") +SLANG_RAW(" uint GetGeometryIndex()\n") +SLANG_RAW(" {\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \".GetGeometryIndex\";\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"hitObjectGetGeometryIndexNV($0)\";\n") +SLANG_RAW(" case spirv: \n") +SLANG_RAW(" return spirv_asm \n") +SLANG_RAW(" {\n") +SLANG_RAW(" OpExtension \"SPV_NV_shader_invocation_reorder\";\n") +SLANG_RAW(" OpCapability ShaderInvocationReorderNV;\n") +SLANG_RAW(" result:$$uint = OpHitObjectGetGeometryIndexNV &this;\n") +SLANG_RAW(" };\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW("\n") +SLANG_RAW(" /// Returns the primitive index of a hit. 
Valid if the hit object represents a hit.\n") +SLANG_RAW(" [__requiresNVAPI]\n") +SLANG_RAW(" __glsl_extension(GL_EXT_ray_tracing)\n") +SLANG_RAW(" [ForceInline]\n") +SLANG_RAW(" [require(glsl_hlsl_spirv, ser_raygen_closesthit_miss)]\n") +SLANG_RAW(" uint GetPrimitiveIndex()\n") +SLANG_RAW(" {\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \".GetPrimitiveIndex\";\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"hitObjectGetPrimitiveIndexNV($0)\";\n") +SLANG_RAW(" case spirv: \n") +SLANG_RAW(" return spirv_asm \n") +SLANG_RAW(" {\n") +SLANG_RAW(" OpExtension \"SPV_NV_shader_invocation_reorder\";\n") +SLANG_RAW(" OpCapability ShaderInvocationReorderNV;\n") +SLANG_RAW(" result:$$uint = OpHitObjectGetPrimitiveIndexNV &this;\n") +SLANG_RAW(" };\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW("\n") +SLANG_RAW(" /// Returns the hit kind. Valid if the hit object represents a hit.\n") +SLANG_RAW(" [__requiresNVAPI]\n") +SLANG_RAW(" __glsl_extension(GL_EXT_ray_tracing)\n") +SLANG_RAW(" [ForceInline]\n") +SLANG_RAW(" [require(glsl_hlsl_spirv, ser_raygen_closesthit_miss)]\n") +SLANG_RAW(" uint GetHitKind()\n") +SLANG_RAW(" {\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \".GetHitKind\";\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"hitObjectGetHitKindNV($0)\";\n") +SLANG_RAW(" case spirv: \n") +SLANG_RAW(" return spirv_asm \n") +SLANG_RAW(" {\n") +SLANG_RAW(" OpExtension \"SPV_NV_shader_invocation_reorder\";\n") +SLANG_RAW(" OpCapability ShaderInvocationReorderNV;\n") +SLANG_RAW(" result:$$uint = OpHitObjectGetHitKindNV &this;\n") +SLANG_RAW(" };\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW("\n") +SLANG_RAW(" [__requiresNVAPI]\n") +SLANG_RAW(" __glsl_extension(GL_EXT_ray_tracing)\n") +SLANG_RAW(" [ForceInline]\n") +SLANG_RAW(" [require(glsl_hlsl_spirv, ser_raygen_closesthit_miss)]\n") +SLANG_RAW(" float4x3 GetWorldToObject()\n") +SLANG_RAW(" {\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \".GetWorldToObject\";\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"hitObjectGetWorldToObjectNV($0)\";\n") +SLANG_RAW(" case spirv: \n") +SLANG_RAW(" return spirv_asm \n") +SLANG_RAW(" {\n") +SLANG_RAW(" OpExtension \"SPV_NV_shader_invocation_reorder\";\n") +SLANG_RAW(" OpCapability ShaderInvocationReorderNV;\n") +SLANG_RAW(" result:$$float4x3 = OpHitObjectGetWorldToObjectNV &this;\n") +SLANG_RAW(" };\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW("\n") +SLANG_RAW(" [__requiresNVAPI]\n") +SLANG_RAW(" __glsl_extension(GL_EXT_ray_tracing)\n") +SLANG_RAW(" [ForceInline]\n") +SLANG_RAW(" [require(glsl_hlsl_spirv, ser_raygen_closesthit_miss)]\n") +SLANG_RAW(" float4x3 GetObjectToWorld()\n") +SLANG_RAW(" {\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \".GetObjectToWorld\";\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"hitObjectGetObjectToWorldNV($0)\";\n") +SLANG_RAW(" case spirv: \n") +SLANG_RAW(" return spirv_asm \n") +SLANG_RAW(" {\n") +SLANG_RAW(" OpExtension \"SPV_NV_shader_invocation_reorder\";\n") +SLANG_RAW(" OpCapability ShaderInvocationReorderNV;\n") +SLANG_RAW(" result:$$float4x3 = OpHitObjectGetObjectToWorldNV &this;\n") +SLANG_RAW(" };\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW("\n") +SLANG_RAW(" [ForceInline]\n") +SLANG_RAW(" [require(glsl_spirv, ser_raygen_closesthit_miss)]\n") +SLANG_RAW(" float GetCurrentTime() {\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" 
case glsl:\n") +SLANG_RAW(" __intrinsic_asm \"hitObjectGetCurrentTimeNV($0)\";\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" return spirv_asm \n") +SLANG_RAW(" {\n") +SLANG_RAW(" OpExtension \"SPV_NV_shader_invocation_reorder\";\n") +SLANG_RAW(" OpCapability ShaderInvocationReorderNV;\n") +SLANG_RAW(" result:$$float = OpHitObjectGetCurrentTimeNV &this\n") +SLANG_RAW(" };\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW("\n") +SLANG_RAW(" [ForceInline]\n") +SLANG_RAW(" [require(glsl_spirv, ser_raygen_closesthit_miss)]\n") +SLANG_RAW(" float3 GetObjectRayOrigin() {\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case glsl:\n") +SLANG_RAW(" __intrinsic_asm \"hitObjectGetObjectRayOriginNV($0)\";\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" return spirv_asm \n") +SLANG_RAW(" {\n") +SLANG_RAW(" OpExtension \"SPV_NV_shader_invocation_reorder\";\n") +SLANG_RAW(" OpCapability ShaderInvocationReorderNV;\n") +SLANG_RAW(" result:$$float3 = OpHitObjectGetObjectRayOriginNV &this\n") +SLANG_RAW(" };\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW("\n") +SLANG_RAW(" [ForceInline]\n") +SLANG_RAW(" [require(glsl_spirv, ser_raygen_closesthit_miss)]\n") +SLANG_RAW(" float3 GetObjectRayDirection() {\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case glsl:\n") +SLANG_RAW(" __intrinsic_asm \"hitObjectGetObjectRayDirectionNV($0)\";\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" return spirv_asm \n") +SLANG_RAW(" {\n") +SLANG_RAW(" OpExtension \"SPV_NV_shader_invocation_reorder\";\n") +SLANG_RAW(" OpCapability ShaderInvocationReorderNV;\n") +SLANG_RAW(" result:$$float3 = OpHitObjectGetObjectRayDirectionNV &this\n") +SLANG_RAW(" };\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW("\n") +SLANG_RAW(" [ForceInline]\n") +SLANG_RAW(" [require(glsl_spirv, ser_raygen_closesthit_miss)]\n") +SLANG_RAW(" uint2 GetShaderRecordBufferHandle() {\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case glsl:\n") +SLANG_RAW(" __intrinsic_asm \"hitObjectGetShaderRecordBufferHandleNV($0)\";\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" return spirv_asm \n") +SLANG_RAW(" {\n") +SLANG_RAW(" OpExtension \"SPV_NV_shader_invocation_reorder\";\n") +SLANG_RAW(" OpCapability ShaderInvocationReorderNV;\n") +SLANG_RAW(" result:$$uint2 = OpHitObjectGetShaderRecordBufferHandleNV &this\n") +SLANG_RAW(" };\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW("\n") +SLANG_RAW(" /// Returns the attributes of a hit. 
Valid if the hit object represents a hit or a miss.\n") +SLANG_RAW(" [ForceInline]\n") +SLANG_RAW(" [require(glsl_hlsl_spirv, ser_raygen_closesthit_miss)]\n") +SLANG_RAW(" attr_t GetAttributes()\n") +SLANG_RAW(" {\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl:\n") +SLANG_RAW(" {\n") +SLANG_RAW(" attr_t v;\n") +SLANG_RAW(" __hlslGetAttributesFromHitObject(v);\n") +SLANG_RAW(" return v;\n") +SLANG_RAW(" }\n") +SLANG_RAW(" case glsl:\n") +SLANG_RAW(" {\n") +SLANG_RAW(" // Work out the location\n") +SLANG_RAW(" int attributeLocation = __hitObjectAttributesLocation(__hitObjectAttributes());\n") +SLANG_RAW("\n") +SLANG_RAW(" // Load the attributes from the location\n") +SLANG_RAW(" __glslGetAttributes(attributeLocation);\n") +SLANG_RAW("\n") +SLANG_RAW(" // Return the attributes\n") +SLANG_RAW(" return __hitObjectAttributes();\n") +SLANG_RAW(" }\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" {\n") +SLANG_RAW(" __Addr attr = __allocHitObjectAttributes();\n") +SLANG_RAW(" spirv_asm \n") +SLANG_RAW(" {\n") +SLANG_RAW(" OpExtension \"SPV_NV_shader_invocation_reorder\";\n") +SLANG_RAW(" OpCapability ShaderInvocationReorderNV;\n") +SLANG_RAW(" OpHitObjectGetAttributesNV &this $attr;\n") +SLANG_RAW(" };\n") +SLANG_RAW(" return *attr;\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW(" /// Loads a root constant from the local root table referenced by the hit object. Valid if the hit object\n") +SLANG_RAW(" /// represents a hit or a miss. RootConstantOffsetInBytes must be a multiple of 4.\n") +SLANG_RAW(" [__requiresNVAPI]\n") +SLANG_RAW(" [require(hlsl, ser)]\n") +SLANG_RAW(" uint LoadLocalRootTableConstant(uint RootConstantOffsetInBytes)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \".LoadLocalRootTableConstant\";\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW("\n") +SLANG_RAW(" /// \n") +SLANG_RAW(" /// !!!! Internal NVAPI HLSL impl. Not part of interface! 
!!!!!!!!!!!!\n") +SLANG_RAW(" /// \n") +SLANG_RAW("\n") +SLANG_RAW(" [__requiresNVAPI]\n") +SLANG_RAW(" [require(hlsl, ser_raygen_closesthit_miss)]\n") +SLANG_RAW(" void __hlslGetAttributesFromHitObject(out T t)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"NvGetAttributesFromHitObject($0, $1)\";\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW("\n") +SLANG_RAW(" [__requiresNVAPI]\n") +SLANG_RAW(" [require(hlsl, ser_raygen_closesthit_miss)]\n") +SLANG_RAW(" static void __hlslMakeHitWithRecordIndex(\n") +SLANG_RAW(" uint HitGroupRecordIndex, \n") +SLANG_RAW(" RaytracingAccelerationStructure AccelerationStructure, \n") +SLANG_RAW(" uint InstanceIndex, \n") +SLANG_RAW(" uint GeometryIndex, \n") +SLANG_RAW(" uint PrimitiveIndex, \n") +SLANG_RAW(" uint HitKind, \n") +SLANG_RAW(" RayDesc Ray, \n") +SLANG_RAW(" attr_t attributes, \n") +SLANG_RAW(" out HitObject hitObj)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"NvMakeHitWithRecordIndex\";\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW("\n") +SLANG_RAW(" [__requiresNVAPI]\n") +SLANG_RAW(" [require(hlsl, ser_raygen_closesthit_miss)]\n") +SLANG_RAW(" static void __hlslMakeHit(RaytracingAccelerationStructure AccelerationStructure, \n") +SLANG_RAW(" uint InstanceIndex, \n") +SLANG_RAW(" uint GeometryIndex, \n") +SLANG_RAW(" uint PrimitiveIndex, \n") +SLANG_RAW(" uint HitKind, \n") +SLANG_RAW(" uint RayContributionToHitGroupIndex, \n") +SLANG_RAW(" uint MultiplierForGeometryContributionToHitGroupIndex, \n") +SLANG_RAW(" RayDesc Ray, \n") +SLANG_RAW(" attr_t attributes, \n") +SLANG_RAW(" out HitObject hitObj)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"NvMakeHit\";\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW("\n") +SLANG_RAW(" [__requiresNVAPI]\n") +SLANG_RAW(" [require(hlsl, ser_raygen_closesthit_miss)]\n") +SLANG_RAW(" static void __hlslTraceRay( \n") +SLANG_RAW(" RaytracingAccelerationStructure AccelerationStructure, \n") +SLANG_RAW(" uint RayFlags, \n") +SLANG_RAW(" uint InstanceInclusionMask, \n") +SLANG_RAW(" uint RayContributionToHitGroupIndex, \n") +SLANG_RAW(" uint MultiplierForGeometryContributionToHitGroupIndex, \n") +SLANG_RAW(" uint MissShaderIndex, \n") +SLANG_RAW(" RayDesc Ray, \n") +SLANG_RAW(" inout payload_t Payload,\n") +SLANG_RAW(" out HitObject hitObj)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"NvTraceRayHitObject\";\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW("\n") +SLANG_RAW(" /// \n") +SLANG_RAW(" /// !!!! Internal GLSL GL_NV_shader_invocation_reorder impl. Not part of interface! 
!!!!!!!!!!!!\n") +SLANG_RAW(" /// \n") +SLANG_RAW("\n") +SLANG_RAW(" __glsl_extension(GL_NV_shader_invocation_reorder)\n") +SLANG_RAW(" __glsl_extension(GL_EXT_ray_tracing)\n") +SLANG_RAW(" [require(glsl, ser_raygen_closesthit_miss)]\n") +SLANG_RAW(" static void __glslMakeMiss(\n") +SLANG_RAW(" out HitObject hitObj,\n") +SLANG_RAW(" uint MissShaderIndex,\n") +SLANG_RAW(" float3 Origin,\n") +SLANG_RAW(" float TMin,\n") +SLANG_RAW(" float3 Direction,\n") +SLANG_RAW(" float TMax)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"hitObjectRecordMissNV\";\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW("\n") +SLANG_RAW(" // \"void hitObjectRecordMissNV(hitObjectNV, uint, vec3, float, vec3, float);\"\n") +SLANG_RAW(" __glsl_extension(GL_NV_shader_invocation_reorder)\n") +SLANG_RAW(" __glsl_extension(GL_EXT_ray_tracing)\n") +SLANG_RAW(" __glsl_extension(GL_NV_ray_tracing_motion_blur) \n") +SLANG_RAW(" [require(glsl, ser_motion_raygen_closesthit_miss)]\n") +SLANG_RAW(" static void __glslMakeMotionMiss(\n") +SLANG_RAW(" out HitObject hitObj,\n") +SLANG_RAW(" uint MissShaderIndex,\n") +SLANG_RAW(" float3 Origin,\n") +SLANG_RAW(" float TMin,\n") +SLANG_RAW(" float3 Direction,\n") +SLANG_RAW(" float TMax, \n") +SLANG_RAW(" float CurrentTime)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"hitObjectRecordMissMotionNV\";\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW("\n") +SLANG_RAW(" __glsl_extension(GL_EXT_ray_tracing)\n") +SLANG_RAW(" __glsl_extension(GL_NV_shader_invocation_reorder)\n") +SLANG_RAW(" [require(glsl, ser_raygen_closesthit_miss)]\n") +SLANG_RAW(" static void __glslMakeNop(out HitObject hitObj)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"hitObjectRecordEmptyNV\";\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW("\n") +SLANG_RAW(" __glsl_extension(GL_EXT_ray_tracing)\n") +SLANG_RAW(" __glsl_extension(GL_NV_shader_invocation_reorder)\n") +SLANG_RAW(" [require(glsl, ser_raygen_closesthit_miss)]\n") +SLANG_RAW(" float3 __glslGetRayDirection()\n") +SLANG_RAW(" {\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"hitObjectGetObjectRayDirectionNV($0)\";\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW(" \n") +SLANG_RAW(" __glsl_extension(GL_EXT_ray_tracing)\n") +SLANG_RAW(" __glsl_extension(GL_NV_shader_invocation_reorder)\n") +SLANG_RAW(" [require(glsl, ser_raygen_closesthit_miss)]\n") +SLANG_RAW(" float3 __glslGetRayWorldDirection()\n") +SLANG_RAW(" {\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"hitObjectGetWorldRayDirectionNV($0)\";\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW("\n") +SLANG_RAW(" __glsl_extension(GL_EXT_ray_tracing)\n") +SLANG_RAW(" __glsl_extension(GL_NV_shader_invocation_reorder)\n") +SLANG_RAW(" [require(glsl, ser_raygen_closesthit_miss)]\n") +SLANG_RAW(" float3 __glslGetRayWorldOrigin()\n") +SLANG_RAW(" {\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"hitObjectGetWorldRayOriginNV($0)\";\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW("\n") +SLANG_RAW(" __glsl_extension(GL_EXT_ray_tracing)\n") +SLANG_RAW(" __glsl_extension(GL_NV_shader_invocation_reorder)\n") +SLANG_RAW(" [require(glsl, ser_raygen_closesthit_miss)]\n") +SLANG_RAW(" float __glslGetTMax()\n") +SLANG_RAW(" {\n") 
+SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"hitObjectGetRayTMaxNV($0)\";\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW("\n") +SLANG_RAW(" __glsl_extension(GL_EXT_ray_tracing)\n") +SLANG_RAW(" __glsl_extension(GL_NV_shader_invocation_reorder)\n") +SLANG_RAW(" [require(glsl, ser_raygen_closesthit_miss)]\n") +SLANG_RAW(" float __glslGetTMin()\n") +SLANG_RAW(" {\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"hitObjectGetRayTMinNV($0)\";\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW("\n") +SLANG_RAW(" // \"void hitObjectRecordHitWithIndexNV(hitObjectNV, accelerationStructureEXT,int,int,int,uint,uint,vec3,float,vec3,float,int);\"\n") +SLANG_RAW(" __glsl_extension(GL_EXT_ray_tracing)\n") +SLANG_RAW(" __glsl_extension(GL_NV_shader_invocation_reorder)\n") +SLANG_RAW(" [require(glsl, ser_raygen_closesthit_miss)]\n") +SLANG_RAW(" static void __glslMakeHitWithIndex(\n") +SLANG_RAW(" out HitObject hitObj,\n") +SLANG_RAW(" RaytracingAccelerationStructure accelerationStructure,\n") +SLANG_RAW(" int instanceid,\n") +SLANG_RAW(" int primitiveid,\n") +SLANG_RAW(" int geometryindex,\n") +SLANG_RAW(" uint hitKind,\n") +SLANG_RAW(" uint sbtRecordIndex,\n") +SLANG_RAW(" float3 origin,\n") +SLANG_RAW(" float Tmin,\n") +SLANG_RAW(" float3 direction,\n") +SLANG_RAW(" float Tmax,\n") +SLANG_RAW(" int attributeLocation)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"hitObjectRecordHitWithIndexNV\";\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW("\n") +SLANG_RAW(" // \"void hitObjectRecordHitWithIndexMotionNV(hitObjectNV, accelerationStructureEXT,int,int,int,uint,uint,vec3,float,vec3,float,float,int);\"\n") +SLANG_RAW(" __glsl_extension(GL_EXT_ray_tracing)\n") +SLANG_RAW(" __glsl_extension(GL_NV_shader_invocation_reorder)\n") +SLANG_RAW(" __glsl_extension(GL_NV_ray_tracing_motion_blur)\n") +SLANG_RAW(" [require(glsl, ser_motion_raygen_closesthit_miss)]\n") +SLANG_RAW(" static void __glslMakeMotionHitWithIndex(\n") +SLANG_RAW(" out HitObject hitObj,\n") +SLANG_RAW(" RaytracingAccelerationStructure accelerationStructure,\n") +SLANG_RAW(" int instanceid,\n") +SLANG_RAW(" int primitiveid,\n") +SLANG_RAW(" int geometryindex,\n") +SLANG_RAW(" uint hitKind,\n") +SLANG_RAW(" uint sbtRecordIndex,\n") +SLANG_RAW(" float3 origin,\n") +SLANG_RAW(" float Tmin,\n") +SLANG_RAW(" float3 direction,\n") +SLANG_RAW(" float Tmax,\n") +SLANG_RAW(" float CurrentTime,\n") +SLANG_RAW(" int attributeLocation)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"hitObjectRecordHitWithIndexMotionNV\";\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW("\n") +SLANG_RAW(" // \"void hitObjectRecordHitNV(hitObjectNV,accelerationStructureEXT,int,int,int,uint,uint,uint,vec3,float,vec3,float,int);\"\n") +SLANG_RAW(" __glsl_extension(GL_EXT_ray_tracing)\n") +SLANG_RAW(" __glsl_extension(GL_NV_shader_invocation_reorder)\n") +SLANG_RAW(" [require(glsl, ser_raygen_closesthit_miss)]\n") +SLANG_RAW(" static void __glslMakeHit(\n") +SLANG_RAW(" out HitObject hitObj,\n") +SLANG_RAW(" RaytracingAccelerationStructure accelerationStructure,\n") +SLANG_RAW(" int instanceid,\n") +SLANG_RAW(" int primitiveid,\n") +SLANG_RAW(" int geometryindex,\n") +SLANG_RAW(" uint hitKind,\n") +SLANG_RAW(" uint sbtRecordOffset,\n") +SLANG_RAW(" uint sbtRecordStride,\n") +SLANG_RAW(" float3 origin,\n") +SLANG_RAW(" float 
Tmin,\n") +SLANG_RAW(" float3 direction,\n") +SLANG_RAW(" float Tmax,\n") +SLANG_RAW(" int attributeLocation)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"hitObjectRecordHitNV\";\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW("\n") +SLANG_RAW(" // \"void hitObjectRecordHitMotionNV(hitObjectNV,accelerationStructureEXT,int,int,int,uint,uint,uint,vec3,float,vec3,float,float,int);\"\n") +SLANG_RAW(" __glsl_extension(GL_EXT_ray_tracing)\n") +SLANG_RAW(" __glsl_extension(GL_NV_shader_invocation_reorder)\n") +SLANG_RAW(" __glsl_extension(GL_NV_ray_tracing_motion_blur)\n") +SLANG_RAW(" [require(glsl, ser_motion_raygen_closesthit_miss)]\n") +SLANG_RAW(" static void __glslMakeMotionHit(\n") +SLANG_RAW(" out HitObject hitObj,\n") +SLANG_RAW(" RaytracingAccelerationStructure accelerationStructure,\n") +SLANG_RAW(" int instanceid,\n") +SLANG_RAW(" int primitiveid,\n") +SLANG_RAW(" int geometryindex,\n") +SLANG_RAW(" uint hitKind,\n") +SLANG_RAW(" uint sbtRecordOffset,\n") +SLANG_RAW(" uint sbtRecordStride,\n") +SLANG_RAW(" float3 origin,\n") +SLANG_RAW(" float Tmin,\n") +SLANG_RAW(" float3 direction,\n") +SLANG_RAW(" float Tmax,\n") +SLANG_RAW(" float CurrentTime,\n") +SLANG_RAW(" int attributeLocation)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"hitObjectRecordHitMotionNV\";\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW("\n") +SLANG_RAW(" \n") +SLANG_RAW(" __glsl_extension(GL_EXT_ray_tracing)\n") +SLANG_RAW(" __glsl_extension(GL_NV_shader_invocation_reorder)\n") +SLANG_RAW(" [require(glsl, ser_raygen_closesthit_miss)]\n") +SLANG_RAW(" void __glslGetAttributes(int attributeLocation)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"hitObjectGetAttributesNV($0, $1)\";\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW("\n") +SLANG_RAW(" __glsl_extension(GL_EXT_ray_tracing)\n") +SLANG_RAW(" __glsl_extension(GL_NV_shader_invocation_reorder)\n") +SLANG_RAW(" [require(glsl, ser_raygen_closesthit_miss)]\n") +SLANG_RAW(" static void __glslTraceRay(\n") +SLANG_RAW(" out HitObject hitObject,\n") +SLANG_RAW(" RaytracingAccelerationStructure accelerationStructure,\n") +SLANG_RAW(" uint rayFlags,\n") +SLANG_RAW(" uint cullMask,\n") +SLANG_RAW(" uint sbtRecordOffset,\n") +SLANG_RAW(" uint sbtRecordStride,\n") +SLANG_RAW(" uint missIndex,\n") +SLANG_RAW(" float3 origin,\n") +SLANG_RAW(" float Tmin,\n") +SLANG_RAW(" float3 direction,\n") +SLANG_RAW(" float Tmax,\n") +SLANG_RAW(" int payload)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"hitObjectTraceRayNV\";\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW("\n") +SLANG_RAW(" __glsl_extension(GL_EXT_ray_tracing)\n") +SLANG_RAW(" __glsl_extension(GL_NV_shader_invocation_reorder)\n") +SLANG_RAW(" __glsl_extension(GL_NV_ray_tracing_motion_blur)\n") +SLANG_RAW(" [require(glsl, ser_motion_raygen_closesthit_miss)]\n") +SLANG_RAW(" static void __glslTraceMotionRay(\n") +SLANG_RAW(" out HitObject hitObject,\n") +SLANG_RAW(" RaytracingAccelerationStructure accelerationStructure,\n") +SLANG_RAW(" uint rayFlags,\n") +SLANG_RAW(" uint cullMask,\n") +SLANG_RAW(" uint sbtRecordOffset,\n") +SLANG_RAW(" uint sbtRecordStride,\n") +SLANG_RAW(" uint missIndex,\n") +SLANG_RAW(" float3 origin,\n") +SLANG_RAW(" float Tmin,\n") +SLANG_RAW(" float3 direction,\n") +SLANG_RAW(" float Tmax,\n") 
+SLANG_RAW(" float currentTime,\n") +SLANG_RAW(" int payload)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"hitObjectTraceRayMotionNV\";\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW("\n") +SLANG_RAW(" __glsl_extension(GL_EXT_ray_tracing)\n") +SLANG_RAW(" __glsl_extension(GL_NV_shader_invocation_reorder)\n") +SLANG_RAW(" [require(glsl, ser_raygen_closesthit_miss)]\n") +SLANG_RAW(" static void __glslInvoke(\n") +SLANG_RAW(" HitObject hitObj,\n") +SLANG_RAW(" int payload)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"hitObjectExecuteShaderNV\";\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW("};\n") +SLANG_RAW("\n") +SLANG_RAW(" /// Reorders threads based on a coherence hint value. NumCoherenceHintBits indicates how many of\n") +SLANG_RAW(" /// the least significant bits of CoherenceHint should be considered during reordering (max: 16).\n") +SLANG_RAW(" /// Applications should set this to the lowest value required to represent all possible values in\n") +SLANG_RAW(" /// CoherenceHint. For best performance, all threads should provide the same value for\n") +SLANG_RAW(" /// NumCoherenceHintBits.\n") +SLANG_RAW(" /// Where possible, reordering will also attempt to retain locality in the thread’s launch indices\n") +SLANG_RAW(" /// (DispatchRaysIndex in DXR).\n") +SLANG_RAW("[__requiresNVAPI]\n") +SLANG_RAW("__glsl_extension(GL_EXT_ray_tracing)\n") +SLANG_RAW("__glsl_extension(GL_NV_shader_invocation_reorder)\n") +SLANG_RAW("[ForceInline]\n") +SLANG_RAW("[require(glsl_hlsl_spirv, ser_raygen)]\n") +SLANG_RAW("void ReorderThread( uint CoherenceHint, uint NumCoherenceHintBitsFromLSB )\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"NvReorderThread\";\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"reorderThreadNV\";\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" spirv_asm \n") +SLANG_RAW(" {\n") +SLANG_RAW(" OpExtension \"SPV_NV_shader_invocation_reorder\";\n") +SLANG_RAW(" OpCapability ShaderInvocationReorderNV;\n") +SLANG_RAW(" OpReorderThreadWithHintNV $CoherenceHint $NumCoherenceHintBitsFromLSB;\n") +SLANG_RAW(" };\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW(" /// Reorders threads based on a hit object, optionally extended by a coherence hint value. Coherence\n") +SLANG_RAW(" /// hints behave as described in the generic variant of ReorderThread. The maximum number of\n") +SLANG_RAW(" /// coherence hint bits in this variant of ReorderThread is 8. If no coherence hint is desired, set\n") +SLANG_RAW(" /// NumCoherenceHintBits to zero.\n") +SLANG_RAW(" /// Reordering will consider information in the HitObject and coherence hint with the following\n") +SLANG_RAW(" /// priority:\n") +SLANG_RAW(" ///\n") +SLANG_RAW(" /// 1. Shader ID stored in the HitObject\n") +SLANG_RAW(" /// 2. Coherence hint, with the most significant hint bit having highest priority\n") +SLANG_RAW(" /// 3. Spatial information stored in the HitObject\n") +SLANG_RAW(" ///\n") +SLANG_RAW(" /// That is, ReorderThread will first attempt to group threads whose HitObject references the\n") +SLANG_RAW(" /// same shader ID. (Miss shaders and NOP HitObjects are grouped separately). Within each of these\n") +SLANG_RAW(" /// groups, it will attempt to order threads by the value of their coherence hints. 
And within ranges\n") +SLANG_RAW(" /// of equal coherence hints, it will attempt to maximize locality in 3D space of the ray hit (if any).\n") +SLANG_RAW("\n") +SLANG_RAW("[__requiresNVAPI]\n") +SLANG_RAW("__glsl_extension(GL_EXT_ray_tracing)\n") +SLANG_RAW("__glsl_extension(GL_NV_shader_invocation_reorder)\n") +SLANG_RAW("[ForceInline]\n") +SLANG_RAW("[require(glsl_hlsl_spirv, ser_raygen)]\n") +SLANG_RAW("void ReorderThread( HitObject HitOrMiss, uint CoherenceHint, uint NumCoherenceHintBitsFromLSB )\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"NvReorderThread\";\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"reorderThreadNV\";\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" spirv_asm \n") +SLANG_RAW(" {\n") +SLANG_RAW(" OpExtension \"SPV_NV_shader_invocation_reorder\";\n") +SLANG_RAW(" OpCapability ShaderInvocationReorderNV;\n") +SLANG_RAW(" OpReorderThreadWithHitObjectNV &HitOrMiss $CoherenceHint $NumCoherenceHintBitsFromLSB;\n") +SLANG_RAW(" };\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW(" /// Is equivalent to\n") +SLANG_RAW(" /// ```\n") +SLANG_RAW(" /// void ReorderThread( HitObject HitOrMiss, uint CoherenceHint, uint NumCoherenceHintBitsFromLSB );\n") +SLANG_RAW(" /// ```\n") +SLANG_RAW(" /// With CoherenceHint and NumCoherenceHintBitsFromLSB as 0, meaning they are ignored.\n") +SLANG_RAW("\n") +SLANG_RAW("[__requiresNVAPI]\n") +SLANG_RAW("__glsl_extension(GL_EXT_ray_tracing)\n") +SLANG_RAW("__glsl_extension(GL_NV_shader_invocation_reorder)\n") +SLANG_RAW("[ForceInline]\n") +SLANG_RAW("[require(glsl_hlsl_spirv, ser_raygen)]\n") +SLANG_RAW("void ReorderThread( HitObject HitOrMiss )\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"NvReorderThread\";\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"reorderThreadNV\";\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" spirv_asm \n") +SLANG_RAW(" {\n") +SLANG_RAW(" OpExtension \"SPV_NV_shader_invocation_reorder\";\n") +SLANG_RAW(" OpCapability ShaderInvocationReorderNV;\n") +SLANG_RAW(" OpReorderThreadWithHitObjectNV &HitOrMiss;\n") +SLANG_RAW(" };\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("///\n") +SLANG_RAW("/// DebugBreak support \n") +SLANG_RAW("///\n") +SLANG_RAW("/// There doesn't appear to be an equivalent for debugBreak for HLSL\n") +SLANG_RAW("\n") +SLANG_RAW("\n") +SLANG_RAW("__specialized_for_target(glsl)\n") +SLANG_RAW("[[vk::spirv_instruction(1, \"NonSemantic.DebugBreak\")]]\n") +SLANG_RAW("void __glslDebugBreak();\n") +SLANG_RAW("\n") +SLANG_RAW("[ForceInline]\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl, breakpoint)]\n") +SLANG_RAW("void debugBreak() \n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"/* debugBreak() not currently supported for HLSL */\";\n") +SLANG_RAW(" case cuda: __intrinsic_asm \"__brkpt()\";\n") +SLANG_RAW(" case cpp: __intrinsic_asm \"SLANG_BREAKPOINT(0)\";\n") +SLANG_RAW(" case glsl:\n") +SLANG_RAW(" __glslDebugBreak();\n") +SLANG_RAW(" return;\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("\n") +SLANG_RAW("// \n") +SLANG_RAW("// Realtime Clock support\n") +SLANG_RAW("//\n") +SLANG_RAW("\n") +SLANG_RAW("// https://github.com/KhronosGroup/GLSL/blob/master/extensions/ext/GL_EXT_shader_realtime_clock.txt\n") +SLANG_RAW("\n") +SLANG_RAW("[__requiresNVAPI]\n") +SLANG_RAW("__glsl_extension(GL_EXT_shader_realtime_clock)\n") 
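+//
+// A minimal usage sketch for the realtime-clock helpers defined in this section
+// (comment only, nothing here is emitted into the module; `timings` and `doWork`
+// are hypothetical names):
+//
+//     RWStructuredBuffer<uint> timings;
+//
+//     [shader("compute")]
+//     [numthreads(64, 1, 1)]
+//     void main(uint3 tid : SV_DispatchThreadID)
+//     {
+//         uint2 t0 = getRealtimeClock();
+//         doWork(tid.x);
+//         uint2 t1 = getRealtimeClock();
+//         // For short intervals the low 32 bits are usually sufficient.
+//         timings[tid.x] = t1.x - t0.x;
+//     }
+//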
+SLANG_RAW("[NonUniformReturn]\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_spirv, shaderclock)]\n") +SLANG_RAW("uint getRealtimeClockLow()\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl:\n") +SLANG_RAW(" __intrinsic_asm \"NvGetSpecial( NV_SPECIALOP_GLOBAL_TIMER_LO)\";\n") +SLANG_RAW(" case glsl:\n") +SLANG_RAW(" return getRealtimeClock().x;\n") +SLANG_RAW(" case cuda:\n") +SLANG_RAW(" __intrinsic_asm \"clock\";\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" return getRealtimeClock().x;\n") +SLANG_RAW(" case cpp:\n") +SLANG_RAW(" __intrinsic_asm \"(uint32_t)std::chrono::high_resolution_clock::now().time_since_epoch().count()\";\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("[NonUniformReturn]\n") +SLANG_RAW("[require(cpp_cuda, shaderclock)]\n") +SLANG_RAW("int64_t __cudaCppGetRealtimeClock()\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case cpp: __intrinsic_asm \"std::chrono::high_resolution_clock::now().time_since_epoch().count()\";\n") +SLANG_RAW(" case cuda: __intrinsic_asm \"clock64\";\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("[__requiresNVAPI]\n") +SLANG_RAW("__glsl_extension(GL_EXT_shader_realtime_clock)\n") +SLANG_RAW("[NonUniformReturn]\n") +SLANG_RAW("[require(cpp_cuda_glsl_hlsl_spirv, shaderclock)]\n") +SLANG_RAW("uint2 getRealtimeClock()\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl:\n") +SLANG_RAW(" __intrinsic_asm \"uint2(NvGetSpecial(NV_SPECIALOP_GLOBAL_TIMER_LO), NvGetSpecial( NV_SPECIALOP_GLOBAL_TIMER_HI))\";\n") +SLANG_RAW(" case glsl:\n") +SLANG_RAW(" __intrinsic_asm \"clockRealtime2x32EXT()\";\n") +SLANG_RAW(" case cuda:\n") +SLANG_RAW(" case cpp:\n") +SLANG_RAW(" int64_t ticks = __cudaCppGetRealtimeClock();\n") +SLANG_RAW(" return uint2(uint(ticks), uint(uint64_t(ticks) >> 32));\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" return spirv_asm\n") +SLANG_RAW(" {\n") +SLANG_RAW(" OpCapability ShaderClockKHR;\n") +SLANG_RAW(" OpExtension \"SPV_KHR_shader_clock\";\n") +SLANG_RAW(" result : $$uint2 = OpReadClockKHR Device\n") +SLANG_RAW(" };\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("// \n") +SLANG_RAW("// CUDA specific \n") +SLANG_RAW("// \n") +SLANG_RAW("\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[NonUniformReturn]\n") +SLANG_RAW("[require(cuda)]\n") +SLANG_RAW("uint3 cudaThreadIdx()\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case cuda: __intrinsic_asm \"(threadIdx)\";\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[NonUniformReturn]\n") +SLANG_RAW("[require(cuda)]\n") +SLANG_RAW("uint3 cudaBlockIdx()\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case cuda: __intrinsic_asm \"(blockIdx)\";\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("[__readNone]\n") +SLANG_RAW("[NonUniformReturn]\n") +SLANG_RAW("[require(cuda)]\n") +SLANG_RAW("uint3 cudaBlockDim()\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case cuda: __intrinsic_asm \"(blockDim)\";\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("//\n") +SLANG_RAW("// Workgroup cooperation\n") +SLANG_RAW("//\n") +SLANG_RAW("\n") +SLANG_RAW("//\n") +SLANG_RAW("// `saturated_cooperation(c, f, s, u)` will call `f(s, u)` if not all lanes in the\n") +SLANG_RAW("// workgroup are currently executing. 
however if all lanes are saturated, then\n") +SLANG_RAW("// for each unique `s` across all the active lanes `c(s, u)` is called. The\n") +SLANG_RAW("// return value is the one corresponding to the input `s` from this lane.\n") +SLANG_RAW("//\n") +SLANG_RAW("// Adjacent calls to saturated_cooperation are subject to fusion, i.e.\n") +SLANG_RAW("// saturated_cooperation(c1, f1, s, u1);\n") +SLANG_RAW("// saturated_cooperation(c2, f2, s, u2);\n") +SLANG_RAW("// will be transformed to:\n") +SLANG_RAW("// saturated_cooperation(c1c2, f1f2, s, u1u2);\n") +SLANG_RAW("// where\n") +SLANG_RAW("// c1c2 is a function which calls c1(s, u1) and then c2(s, u2);\n") +SLANG_RAW("// f1f2 is a function which calls f1(s, u1) and then f2(s, u2);\n") +SLANG_RAW("//\n") +SLANG_RAW("// When the input differs, calls are fused\n") +SLANG_RAW("// saturated_cooperation(c1, f1, s1, u1);\n") +SLANG_RAW("// saturated_cooperation(c2, f2, s2, u2);\n") +SLANG_RAW("// will be transformed to:\n") +SLANG_RAW("// saturated_cooperation(c1c2, f1f2, s1s2, u1u2);\n") +SLANG_RAW("// where\n") +SLANG_RAW("// s1s2 is a tuple of s1 and s2\n") +SLANG_RAW("// c1c2 is a function which calls c1(s1, u1) and then c2(s2, u2);\n") +SLANG_RAW("// f1f2 is a function which calls f1(s1, u1) and then f2(s2, u2);\n") +SLANG_RAW("// Note that in this case, we will make a call to c1c2 for every unique pair\n") +SLANG_RAW("// s1s2 across all lanes\n") +SLANG_RAW("//\n") +SLANG_RAW("// (This fusion takes place in the fuse-satcoop pass, and as such any changes to\n") +SLANG_RAW("// the signature or behavior of this function should be adjusted for there).\n") +SLANG_RAW("//\n") +SLANG_RAW("//@hidden:\n") +SLANG_RAW("[KnownBuiltin(\"saturated_cooperation\")]\n") +SLANG_RAW("func saturated_cooperation(\n") +SLANG_RAW(" cooperate : functype (A, B) -> C,\n") +SLANG_RAW(" fallback : functype (A, B) -> C,\n") +SLANG_RAW(" A input,\n") +SLANG_RAW(" B otherArg)\n") +SLANG_RAW(" -> C\n") +SLANG_RAW("{\n") +SLANG_RAW(" return saturated_cooperation_using(cooperate, fallback, __WaveMatchBuitin, __WaveReadLaneAtBuiltin, input, otherArg);\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("// These two functions are a temporary (circa May 2023) workaround to the fact\n") +SLANG_RAW("// that we can't deduce which overload to pass to saturated_cooperation_using\n") +SLANG_RAW("// in the call above\n") +SLANG_RAW("[__unsafeForceInlineEarly]\n") +SLANG_RAW("func __WaveMatchBuitin(T t) -> uint4\n") +SLANG_RAW("{\n") +SLANG_RAW(" return WaveMatch(t);\n") +SLANG_RAW("}\n") +SLANG_RAW("[__unsafeForceInlineEarly]\n") +SLANG_RAW("func __WaveReadLaneAtBuiltin(T t, int i) -> T\n") +SLANG_RAW("{\n") +SLANG_RAW(" return WaveReadLaneAt(t, i);\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("//\n") +SLANG_RAW("// saturated_cooperation, but you're able to specify manually the functions:\n") +SLANG_RAW("//\n") +SLANG_RAW("// waveMatch: a function to return a mask of lanes with the same input as this one\n") +SLANG_RAW("// broadcast: a function which returns the value passed into it on the specified lane\n") +SLANG_RAW("//\n") +SLANG_RAW("[KnownBuiltin(\"saturated_cooperation_using\")]\n") +SLANG_RAW("func saturated_cooperation_using(\n") +SLANG_RAW(" cooperate : functype (A, B) -> C,\n") +SLANG_RAW(" fallback : functype (A, B) -> C,\n") +SLANG_RAW(" waveMatch : functype (A) -> uint4,\n") +SLANG_RAW(" broadcast : functype (A, int) -> A,\n") +SLANG_RAW(" A input,\n") +SLANG_RAW(" B otherArg)\n") +SLANG_RAW(" -> C\n") +SLANG_RAW("{\n") +SLANG_RAW(" const bool isWaveSaturated = 
WaveActiveCountBits(true) == WaveGetLaneCount();\n") +SLANG_RAW(" if(isWaveSaturated)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" let lanesWithSameInput = waveMatch(input).x;\n") +SLANG_RAW(" // Keep least significant lane in our set\n") +SLANG_RAW(" let ourRepresentative = lanesWithSameInput & -lanesWithSameInput;\n") +SLANG_RAW(" // The representative lanes for all lanes\n") +SLANG_RAW(" var allRepresentatives = WaveActiveBitOr(ourRepresentative);\n") +SLANG_RAW("\n") +SLANG_RAW(" C ret;\n") +SLANG_RAW("\n") +SLANG_RAW(" // Iterate over set bits in mask from low to high.\n") +SLANG_RAW(" // In each iteration the lowest bit is cleared.\n") +SLANG_RAW(" while(bool(allRepresentatives))\n") +SLANG_RAW(" {\n") +SLANG_RAW(" // Broadcast input across warp.\n") +SLANG_RAW(" let laneIdx = firstbitlow(allRepresentatives);\n") +SLANG_RAW(" let uniformInput = broadcast(input, int(laneIdx));\n") +SLANG_RAW("\n") +SLANG_RAW(" // All lanes perform some cooperative computation with dynamic\n") +SLANG_RAW(" // uniform input\n") +SLANG_RAW(" C c = cooperate(uniformInput, otherArg);\n") +SLANG_RAW("\n") +SLANG_RAW(" // Update our return value until it\n") +SLANG_RAW(" if(bool(allRepresentatives & ourRepresentative))\n") +SLANG_RAW(" ret = c;\n") +SLANG_RAW("\n") +SLANG_RAW(" // Clear the lowest bit\n") +SLANG_RAW(" allRepresentatives &= allRepresentatives - 1;\n") +SLANG_RAW(" }\n") +SLANG_RAW("\n") +SLANG_RAW(" return ret;\n") +SLANG_RAW(" }\n") +SLANG_RAW(" else\n") +SLANG_RAW(" {\n") +SLANG_RAW(" return fallback(input, otherArg);\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("\n") + +// The NVAPI operations are defined to take the space/register +// indices of their texture and sampler parameters, rather than +// taking the texture/sampler objects directly. +// +// In order to support this approach, we need intrinsics that +// can magically fetch the binding information for a resource. +// +// TODO: These operations are kind of *screaming* for us to +// have a built-in `interface` that all of the opaque resource +// types conform to, so that we can define builtins that work +// for any resource type. +SLANG_RAW("#line 19543 \"hlsl.meta.slang\"") +SLANG_RAW("\n") +SLANG_RAW("\n") +SLANG_RAW("__intrinsic_op(") +SLANG_SPLICE(kIROp_GetRegisterSpace +) +SLANG_RAW(") uint __getRegisterSpace(_Texture texture);\n") +SLANG_RAW("__intrinsic_op(") +SLANG_SPLICE(kIROp_GetRegisterSpace +) +SLANG_RAW(") uint __getRegisterSpace(SamplerState sampler);\n") +SLANG_RAW("\n") +SLANG_RAW("__intrinsic_op(") +SLANG_SPLICE(kIROp_GetRegisterIndex +) +SLANG_RAW(") uint __getRegisterIndex(_Texture texture);\n") +SLANG_RAW("__intrinsic_op(") +SLANG_SPLICE(kIROp_GetRegisterIndex +) +SLANG_RAW(") uint __getRegisterIndex(SamplerState sampler);\n") +SLANG_RAW("\n") +SLANG_RAW("//@public:\n") +SLANG_RAW("\n") + +// +// Texture Footprint Queries +// +// This section introduces the types and methods related +// to the `GL_NV_shader_texture_footprint` GLSL extension, +// and the matching NVAPI operations. +// +// Footprint queries are allowed on both 2D and 3D textures, +// and are structurally similar for the two, so we will +// use a meta-loop to deduplicate the code for the two +// cases. +// + +// A footprint query yields a data structure +// that describes blocks of texels that +// conservatively cover the data that might +// be fetched in the query. +// +// A given sampling operation might access two +// mip levels of a texture when, e.g., trilinear +// filtering is on. 
A footprint query may ask for +// a footprint in either the coarse or fine level +// of the pair. +// +// We first define a `struct` type that closely maps +// to how a footprint is defined for each of the +// implementations we support, and then wrap that +// in a derived `struct` that includes the extra +// data that is returned by the GLSL API via the +// function reuslt. +// +SLANG_RAW("#line 19585 \"hlsl.meta.slang\"") +SLANG_RAW("\n") +SLANG_RAW("\n") +SLANG_RAW("[__NoSideEffect]\n") +SLANG_RAW("[__requiresNVAPI]\n") +SLANG_RAW("[require(glsl_hlsl_spirv, texturefootprint)]\n") +SLANG_RAW("vector __textureFootprintGetAnchor(__TextureFootprintData data, int nd)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl:\n") +SLANG_RAW(" __intrinsic_asm \"NvFootprintExtractAnchorTileLoc$!1D($0)\";\n") +SLANG_RAW(" case glsl:\n") +SLANG_RAW(" __intrinsic_asm \"$0.anchor\";\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" return spirv_asm {\n") +SLANG_RAW(" result:$$vector = OpCompositeExtract $data 1;\n") +SLANG_RAW(" };\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("[__NoSideEffect]\n") +SLANG_RAW("[__requiresNVAPI]\n") +SLANG_RAW("[require(glsl_hlsl_spirv, texturefootprint)]\n") +SLANG_RAW("vector __textureFootprintGetOffset(__TextureFootprintData data, int nd)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl:\n") +SLANG_RAW(" __intrinsic_asm \"NvFootprintExtractOffset$!1D($0)\";\n") +SLANG_RAW(" case glsl:\n") +SLANG_RAW(" __intrinsic_asm \"$0.offset\";\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" return spirv_asm {\n") +SLANG_RAW(" result:$$vector = OpCompositeExtract $data 2;\n") +SLANG_RAW(" };\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("//@public:\n") +SLANG_RAW("__intrinsic_type(") +SLANG_SPLICE(kIROp_TextureFootprintType +) +SLANG_RAW(")\n") +SLANG_RAW("[require(glsl_hlsl_spirv, texturefootprint)]\n") +SLANG_RAW("struct __TextureFootprintData\n") +SLANG_RAW("{\n") +SLANG_RAW(" typealias Anchor = vector;\n") +SLANG_RAW(" typealias Offset = vector;\n") +SLANG_RAW(" typealias Mask = uint2;\n") +SLANG_RAW(" typealias LOD = uint;\n") +SLANG_RAW(" typealias Granularity = uint;\n") +SLANG_RAW("\n") +SLANG_RAW(" property anchor : Anchor\n") +SLANG_RAW(" {\n") +SLANG_RAW(" [__NoSideEffect]\n") +SLANG_RAW(" [__requiresNVAPI]\n") +SLANG_RAW(" [ForceInline]\n") +SLANG_RAW(" get { return __textureFootprintGetAnchor(this, ND); }\n") +SLANG_RAW(" }\n") +SLANG_RAW("\n") +SLANG_RAW(" property offset : Offset\n") +SLANG_RAW(" {\n") +SLANG_RAW(" [__NoSideEffect]\n") +SLANG_RAW(" [__requiresNVAPI]\n") +SLANG_RAW(" [ForceInline]\n") +SLANG_RAW(" get { return __textureFootprintGetOffset(this, ND); }\n") +SLANG_RAW(" }\n") +SLANG_RAW("\n") +SLANG_RAW(" property mask : Mask\n") +SLANG_RAW(" {\n") +SLANG_RAW(" [__NoSideEffect]\n") +SLANG_RAW(" [__requiresNVAPI]\n") +SLANG_RAW(" get\n") +SLANG_RAW(" {\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl:\n") +SLANG_RAW(" __intrinsic_asm \"NvFootprintExtractBitmask\";\n") +SLANG_RAW(" case glsl:\n") +SLANG_RAW(" __intrinsic_asm \"$0.mask\";\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" return spirv_asm {\n") +SLANG_RAW(" result:$$Mask = OpCompositeExtract $this 3;\n") +SLANG_RAW(" };\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW("\n") +SLANG_RAW(" property lod : LOD\n") +SLANG_RAW(" {\n") +SLANG_RAW(" [__NoSideEffect]\n") +SLANG_RAW(" [__requiresNVAPI]\n") +SLANG_RAW(" 
get\n") +SLANG_RAW(" {\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl:\n") +SLANG_RAW(" __intrinsic_asm \"NvFootprintExtractLOD\";\n") +SLANG_RAW(" case glsl:\n") +SLANG_RAW(" __intrinsic_asm \"$0.lod\";\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" return spirv_asm {\n") +SLANG_RAW(" result:$$LOD = OpCompositeExtract $this 4;\n") +SLANG_RAW(" };\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW("\n") +SLANG_RAW(" property granularity : Granularity\n") +SLANG_RAW(" {\n") +SLANG_RAW(" [__NoSideEffect]\n") +SLANG_RAW(" [__requiresNVAPI]\n") +SLANG_RAW(" get\n") +SLANG_RAW(" {\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl:\n") +SLANG_RAW(" __intrinsic_asm \"NvFootprintExtractReturnGran\";\n") +SLANG_RAW(" case glsl:\n") +SLANG_RAW(" __intrinsic_asm \"$0.granularity\";\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" return spirv_asm {\n") +SLANG_RAW(" result:$$Granularity = OpCompositeExtract $this 5;\n") +SLANG_RAW(" };\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("///@category stage_io\n") +SLANG_RAW("struct TextureFootprint : __TextureFootprintData\n") +SLANG_RAW("{\n") +SLANG_RAW(" bool _isSingleLevel;\n") +SLANG_RAW("\n") +SLANG_RAW(" property isSingleLevel : bool\n") +SLANG_RAW(" {\n") +SLANG_RAW(" [__NoSideEffect]\n") +SLANG_RAW(" get\n") +SLANG_RAW(" {\n") +SLANG_RAW(" return _isSingleLevel;\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("///@category stage_io\n") +SLANG_RAW("typealias TextureFootprint2D = TextureFootprint<2>;\n") +SLANG_RAW("\n") +SLANG_RAW("///@category stage_io\n") +SLANG_RAW("typealias TextureFootprint3D = TextureFootprint<3>;\n") +SLANG_RAW("\n") + +// We define the new operations via an `extension` +// on the relevant texture type(s), rather than +// further clutter the original type declarations. +SLANG_RAW("#line 19736 \"hlsl.meta.slang\"") +SLANG_RAW("\n") +SLANG_RAW("\n") +SLANG_RAW("__generic\n") +SLANG_RAW("extension _Texture\n") +SLANG_RAW("{\n") + +// We introduce a few convenience type aliases here, +// which both keep our declarations simpler and easier +// to understand, but which might *also* be useful to +// users of the standard module, so that they can write things +// like `Texture2D.Footprint`, and also have auto-complete +// help them find such members. +// +// TODO: The `Coords` type really ought to be something +// defined on the base texture types, rather than via +// this `extension`. +SLANG_RAW("#line 19752 \"hlsl.meta.slang\"") +SLANG_RAW("\n") +SLANG_RAW(" typealias Coords = vector;\n") +SLANG_RAW(" typealias Footprint = TextureFootprint;\n") +SLANG_RAW(" typealias __FootprintData = __TextureFootprintData;\n") +SLANG_RAW(" typealias FootprintGranularity = Footprint.Granularity;\n") +SLANG_RAW("\n") + +// For the GLSL extension, the choice between the +// coarse and fine level is modeled as a `bool` +// parameter to the query operation(s). We define +// the GLSL functions here as intrinsics, so that +// we can refer to them later in the definitions +// of our standard module operaitons; not just in glsl module. +// +// Note: despite the GLSL extension defining the `granularity` +// member of the query result as having type `uint`, the +// function signatures all take `int` parameters for the +// granularity instead. 
+// +SLANG_RAW("#line 19771 \"hlsl.meta.slang\"") +SLANG_RAW("\n") +SLANG_RAW("\n") +SLANG_RAW(" [__NoSideEffect]\n") +SLANG_RAW(" __glsl_version(450)\n") +SLANG_RAW(" __glsl_extension(GL_NV_shader_texture_footprint)\n") +SLANG_RAW(" [require(glsl_spirv, texturefootprint)]\n") +SLANG_RAW(" bool __queryFootprintGLSL(\n") +SLANG_RAW(" SamplerState sampler,\n") +SLANG_RAW(" Coords coords,\n") +SLANG_RAW(" int granularity,\n") +SLANG_RAW(" bool useCoarseLevel,\n") +SLANG_RAW(" out __FootprintData footprint)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case glsl:\n") +SLANG_RAW(" __intrinsic_asm \"textureFootprintNV($p, $*2)\";\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" return spirv_asm {\n") +SLANG_RAW(" OpCapability ImageFootprintNV;\n") +SLANG_RAW(" OpExtension \"SPV_NV_shader_image_footprint\";\n") +SLANG_RAW(" %resultVal:$$__FootprintData = OpImageSampleFootprintNV &this $coords $granularity $useCoarseLevel;\n") +SLANG_RAW(" OpStore &footprint %resultVal;\n") +SLANG_RAW(" result:$$bool = OpCompositeExtract %resultVal 0;\n") +SLANG_RAW(" };\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW("\n") +SLANG_RAW(" [__NoSideEffect]\n") +SLANG_RAW(" __glsl_version(450)\n") +SLANG_RAW(" __glsl_extension(GL_NV_shader_texture_footprint)\n") +SLANG_RAW(" [require(glsl_spirv, texturefootprint)]\n") +SLANG_RAW(" bool __queryFootprintGLSL(\n") +SLANG_RAW(" SamplerState sampler,\n") +SLANG_RAW(" Coords coords,\n") +SLANG_RAW(" int granularity,\n") +SLANG_RAW(" bool useCoarseLevel,\n") +SLANG_RAW(" out __FootprintData footprint,\n") +SLANG_RAW(" float bias)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case glsl:\n") +SLANG_RAW(" __intrinsic_asm \"textureFootprintNV($p, $*2)\";\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" return spirv_asm {\n") +SLANG_RAW(" OpCapability ImageFootprintNV;\n") +SLANG_RAW(" OpExtension \"SPV_NV_shader_image_footprint\";\n") +SLANG_RAW(" %resultVal:$$__FootprintData = OpImageSampleFootprintNV &this $coords $granularity $useCoarseLevel Bias $bias;\n") +SLANG_RAW(" OpStore &footprint %resultVal;\n") +SLANG_RAW(" result:$$bool = OpCompositeExtract %resultVal 0;\n") +SLANG_RAW(" };\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW("\n") +SLANG_RAW(" [__NoSideEffect]\n") +SLANG_RAW(" __glsl_version(450)\n") +SLANG_RAW(" __glsl_extension(GL_NV_shader_texture_footprint)\n") +SLANG_RAW(" __glsl_extension(GL_ARB_sparse_texture_clamp)\n") +SLANG_RAW(" [require(glsl_spirv, texturefootprintclamp)]\n") +SLANG_RAW(" bool __queryFootprintClampGLSL(\n") +SLANG_RAW(" SamplerState sampler,\n") +SLANG_RAW(" Coords coords,\n") +SLANG_RAW(" float lodClamp,\n") +SLANG_RAW(" int granularity,\n") +SLANG_RAW(" bool useCoarseLevel,\n") +SLANG_RAW(" out __FootprintData footprint)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case glsl:\n") +SLANG_RAW(" __intrinsic_asm \"textureFootprintClampNV($p, $*2)\";\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" return spirv_asm {\n") +SLANG_RAW(" OpCapability ImageFootprintNV;\n") +SLANG_RAW(" OpCapability MinLod;\n") +SLANG_RAW(" OpExtension \"SPV_NV_shader_image_footprint\";\n") +SLANG_RAW(" %resultVal:$$__FootprintData = OpImageSampleFootprintNV &this $coords $granularity $useCoarseLevel MinLod $lodClamp;\n") +SLANG_RAW(" OpStore &footprint %resultVal;\n") +SLANG_RAW(" result:$$bool = OpCompositeExtract %resultVal 0;\n") +SLANG_RAW(" };\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW("\n") +SLANG_RAW(" [__NoSideEffect]\n") 
+SLANG_RAW(" __glsl_version(450)\n") +SLANG_RAW(" __glsl_extension(GL_NV_shader_texture_footprint)\n") +SLANG_RAW(" __glsl_extension(GL_ARB_sparse_texture_clamp)\n") +SLANG_RAW(" [require(glsl_spirv, texturefootprintclamp)]\n") +SLANG_RAW(" bool __queryFootprintClampGLSL(\n") +SLANG_RAW(" SamplerState sampler,\n") +SLANG_RAW(" Coords coords,\n") +SLANG_RAW(" float lodClamp,\n") +SLANG_RAW(" int granularity,\n") +SLANG_RAW(" bool useCoarseLevel,\n") +SLANG_RAW(" out __FootprintData footprint,\n") +SLANG_RAW(" float bias)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case glsl:\n") +SLANG_RAW(" __intrinsic_asm \"textureFootprintClampNV($p, $*2)\";\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" return spirv_asm {\n") +SLANG_RAW(" OpCapability ImageFootprintNV;\n") +SLANG_RAW(" OpCapability MinLod;\n") +SLANG_RAW(" OpExtension \"SPV_NV_shader_image_footprint\";\n") +SLANG_RAW(" %resultVal:$$__FootprintData = OpImageSampleFootprintNV &this $coords $granularity $useCoarseLevel Bias|MinLod $bias $lodClamp;\n") +SLANG_RAW(" OpStore &footprint %resultVal;\n") +SLANG_RAW(" result:$$bool = OpCompositeExtract %resultVal 0;\n") +SLANG_RAW(" };\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW("\n") +SLANG_RAW(" [__NoSideEffect]\n") +SLANG_RAW(" __glsl_version(450)\n") +SLANG_RAW(" __glsl_extension(GL_NV_shader_texture_footprint)\n") +SLANG_RAW(" [__requiresNVAPI]\n") +SLANG_RAW(" [require(glsl_spirv, texturefootprint)]\n") +SLANG_RAW(" bool __queryFootprintLodGLSL(\n") +SLANG_RAW(" SamplerState sampler,\n") +SLANG_RAW(" Coords coords,\n") +SLANG_RAW(" float lod,\n") +SLANG_RAW(" int granularity,\n") +SLANG_RAW(" bool useCoarseLevel,\n") +SLANG_RAW(" out __FootprintData footprint)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case glsl:\n") +SLANG_RAW(" __intrinsic_asm \"textureFootprintLodNV($p, $*2)\";\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" return spirv_asm {\n") +SLANG_RAW(" OpCapability ImageFootprintNV;\n") +SLANG_RAW(" OpExtension \"SPV_NV_shader_image_footprint\";\n") +SLANG_RAW(" %resultVal:$$__FootprintData = OpImageSampleFootprintNV &this $coords $granularity $useCoarseLevel Lod $lod;\n") +SLANG_RAW(" OpStore &footprint %resultVal;\n") +SLANG_RAW(" result:$$bool = OpCompositeExtract %resultVal 0;\n") +SLANG_RAW(" };\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW("\n") +SLANG_RAW("\n") + + // Texture sampling with gradient is only available for 2D textures. 
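+//
+// The gradient variants below follow the same pattern as the sketch above,
+// mirroring SampleGrad instead of Sample (comment only; names are hypothetical):
+//
+//     let fp = t.queryFootprintCoarseGrad(granularity, s, uv, dx, dy);
+//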
+SLANG_RAW("#line 19916 \"hlsl.meta.slang\"") +SLANG_RAW("\n") +SLANG_RAW(" [__NoSideEffect]\n") +SLANG_RAW(" __glsl_version(450)\n") +SLANG_RAW(" __glsl_extension(GL_NV_shader_texture_footprint)\n") +SLANG_RAW(" [__requiresNVAPI]\n") +SLANG_RAW(" [require(glsl_spirv, texturefootprint)]\n") +SLANG_RAW(" bool __queryFootprintGradGLSL(\n") +SLANG_RAW(" SamplerState sampler,\n") +SLANG_RAW(" Coords coords,\n") +SLANG_RAW(" Coords dx,\n") +SLANG_RAW(" Coords dy,\n") +SLANG_RAW(" int granularity,\n") +SLANG_RAW(" bool useCoarseLevel,\n") +SLANG_RAW(" out __FootprintData footprint)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case glsl:\n") +SLANG_RAW(" __intrinsic_asm \"textureFootprintGradNV($p, $*2)\";\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" return spirv_asm {\n") +SLANG_RAW(" OpCapability ImageFootprintNV;\n") +SLANG_RAW(" OpExtension \"SPV_NV_shader_image_footprint\";\n") +SLANG_RAW(" %resultVal:$$__FootprintData = OpImageSampleFootprintNV &this $coords $granularity $useCoarseLevel Grad $dx $dy;\n") +SLANG_RAW(" OpStore &footprint %resultVal;\n") +SLANG_RAW(" result:$$bool = OpCompositeExtract %resultVal 0;\n") +SLANG_RAW(" };\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW("\n") +SLANG_RAW(" [__NoSideEffect]\n") +SLANG_RAW(" __glsl_version(450)\n") +SLANG_RAW(" __glsl_extension(GL_NV_shader_texture_footprint)\n") +SLANG_RAW(" __glsl_extension(GL_ARB_sparse_texture_clamp)\n") +SLANG_RAW(" [require(glsl_spirv, texturefootprintclamp)]\n") +SLANG_RAW(" bool __queryFootprintGradClampGLSL(\n") +SLANG_RAW(" SamplerState sampler,\n") +SLANG_RAW(" Coords coords,\n") +SLANG_RAW(" Coords dx,\n") +SLANG_RAW(" Coords dy,\n") +SLANG_RAW(" float lodClamp,\n") +SLANG_RAW(" int granularity,\n") +SLANG_RAW(" bool useCoarseLevel,\n") +SLANG_RAW(" out __FootprintData footprint)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case glsl:\n") +SLANG_RAW(" __intrinsic_asm \"textureFootprintGradClampNV($p, $*2)\";\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" return spirv_asm {\n") +SLANG_RAW(" OpCapability ImageFootprintNV;\n") +SLANG_RAW(" OpCapability MinLod;\n") +SLANG_RAW(" OpExtension \"SPV_NV_shader_image_footprint\";\n") +SLANG_RAW(" %resultVal:$$__FootprintData = OpImageSampleFootprintNV &this $coords $granularity $useCoarseLevel Grad|MinLod $dx $dy $lodClamp;\n") +SLANG_RAW(" OpStore &footprint %resultVal;\n") +SLANG_RAW(" result:$$bool = OpCompositeExtract %resultVal 0;\n") +SLANG_RAW(" };\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") + + // End texture2D specific functions. +SLANG_RAW("#line 19978 \"hlsl.meta.slang\"") +SLANG_RAW("\n") +SLANG_RAW("\n") +SLANG_RAW("\n") + +// The NVAPI texture query operations encode the choice +// between coarse and fine levels as part of the function +// name, and so we are forced to match this convention +// if we want to provide a more portable API. +// +// TODO: We could conceivably define the functions to use +// a parameter for the coarse/fine choice, which is required +// to be `constexpr` for the HLSL/NVAPI target. +// +static const struct LevelChoice +{ +char const* name; +char const* isCoarseVal; +} kLevelChoices[] = +{ + { "Coarse", "true" }, + { "Fine", "false" }, +}; +for(auto levelChoice : kLevelChoices) +{ + auto CoarseOrFine = levelChoice.name; + auto isCoarseVal = levelChoice.isCoarseVal; + +// We now go ahead and define the intrinsics provided by NVAPI, +// which have a very different signature from the GLSL ones. 
+// +// Note: the NVAPI functions also support an optional texel +// offset parameter. For now we are not including overloads +// with that parameter, since they have no equivalent in +// the GLSL extension. +// +SLANG_RAW("#line 20013 \"hlsl.meta.slang\"") +SLANG_RAW("\n") +SLANG_RAW("\n") +SLANG_RAW(" [__NoSideEffect]\n") +SLANG_RAW(" [__requiresNVAPI]\n") +SLANG_RAW(" [require(hlsl, texturefootprint)]\n") +SLANG_RAW(" static __FootprintData __queryFootprint") +SLANG_SPLICE(CoarseOrFine +) +SLANG_RAW("NVAPI(\n") +SLANG_RAW(" int nd,\n") +SLANG_RAW(" uint textureSpace,\n") +SLANG_RAW(" uint textureIndex,\n") +SLANG_RAW(" uint samplerSpace,\n") +SLANG_RAW(" uint samplerIndex,\n") +SLANG_RAW(" float3 coords,\n") +SLANG_RAW(" FootprintGranularity granularity, \n") +SLANG_RAW(" out uint isSingleLod)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"NvFootprint") +SLANG_SPLICE(CoarseOrFine +) +SLANG_RAW("($1, $2, $3, $4, NV_EXTN_TEXTURE_$!0D, $*5)\";\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW("\n") +SLANG_RAW(" [__NoSideEffect]\n") +SLANG_RAW(" [__requiresNVAPI]\n") +SLANG_RAW(" [require(hlsl, texturefootprint)]\n") +SLANG_RAW(" static __FootprintData __queryFootprint") +SLANG_SPLICE(CoarseOrFine +) +SLANG_RAW("BiasNVAPI(\n") +SLANG_RAW(" int nd,\n") +SLANG_RAW(" uint textureSpace,\n") +SLANG_RAW(" uint textureIndex,\n") +SLANG_RAW(" uint samplerSpace,\n") +SLANG_RAW(" uint samplerIndex,\n") +SLANG_RAW(" float3 coords,\n") +SLANG_RAW(" FootprintGranularity granularity,\n") +SLANG_RAW(" float lodBias, \n") +SLANG_RAW(" out uint isSingleLod)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"NvFootprint") +SLANG_SPLICE(CoarseOrFine +) +SLANG_RAW("Bias($1, $2, $3, $4, NV_EXTN_TEXTURE_$!0D, $*5)\";\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW("\n") +SLANG_RAW(" [__NoSideEffect]\n") +SLANG_RAW(" [__requiresNVAPI]\n") +SLANG_RAW(" [require(hlsl, texturefootprint)]\n") +SLANG_RAW(" static __FootprintData __queryFootprint") +SLANG_SPLICE(CoarseOrFine +) +SLANG_RAW("LevelNVAPI(\n") +SLANG_RAW(" int nd,\n") +SLANG_RAW(" uint textureSpace,\n") +SLANG_RAW(" uint textureIndex,\n") +SLANG_RAW(" uint samplerSpace,\n") +SLANG_RAW(" uint samplerIndex,\n") +SLANG_RAW(" float3 coords,\n") +SLANG_RAW(" FootprintGranularity granularity,\n") +SLANG_RAW(" float lod, \n") +SLANG_RAW(" out uint isSingleLod)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"NvFootprint") +SLANG_SPLICE(CoarseOrFine +) +SLANG_RAW("Level($1, $2, $3, $4, NV_EXTN_TEXTURE_$!0D, $*5)\";\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW("\n") +SLANG_RAW(" [__NoSideEffect]\n") +SLANG_RAW(" [__requiresNVAPI]\n") +SLANG_RAW(" [require(hlsl, texturefootprint)]\n") +SLANG_RAW(" static __FootprintData __queryFootprint") +SLANG_SPLICE(CoarseOrFine +) +SLANG_RAW("GradNVAPI(\n") +SLANG_RAW(" int nd,\n") +SLANG_RAW(" uint textureSpace,\n") +SLANG_RAW(" uint textureIndex,\n") +SLANG_RAW(" uint samplerSpace,\n") +SLANG_RAW(" uint samplerIndex,\n") +SLANG_RAW(" float3 coords,\n") +SLANG_RAW(" FootprintGranularity granularity,\n") +SLANG_RAW(" float3 dx,\n") +SLANG_RAW(" float3 dy, \n") +SLANG_RAW(" out uint isSingleLod)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case hlsl: __intrinsic_asm \"NvFootprint") +SLANG_SPLICE(CoarseOrFine +) +SLANG_RAW("Grad($1, $2, $3, $4, NV_EXTN_TEXTURE_$!0D, 
$*5)\";\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW("\n") + +// We now define the portable operations that will be officially +// supported by the standard module. For each operation, we +// need to provide both a version that maps to the GLSL extension, +// and a version that uses the NVAPI functions. +// +// Some function variations are only available with one extension +// or the other, so we try our best to only define them where +// each is available. +// +// Note that these functions cannot be marked as [ForceInline] for now +// because the texture resource may get removed after DCE, since the only +// use of those resources are done through __GetRegisterIndex/Space, which is +// replaced early with their binding slot in the compilation process. +// Not inlining these function is a quick way to make sure the texture always +// has live uses. +// +SLANG_RAW("#line 20112 \"hlsl.meta.slang\"") +SLANG_RAW("\n") +SLANG_RAW("\n") +SLANG_RAW(" /// Query the footprint that would be accessed by a texture sampling operation.\n") +SLANG_RAW(" ///\n") +SLANG_RAW(" /// This operation queries the footprint that would be accessed\n") +SLANG_RAW(" /// by a comparable call to:\n") +SLANG_RAW(" ///\n") +SLANG_RAW(" /// t.Sample(sampler, coords);\n") +SLANG_RAW(" ///\n") +SLANG_RAW(" [__NoSideEffect]\n") +SLANG_RAW(" Footprint queryFootprint") +SLANG_SPLICE(CoarseOrFine +) +SLANG_RAW("(\n") +SLANG_RAW(" FootprintGranularity granularity,\n") +SLANG_RAW(" SamplerState sampler,\n") +SLANG_RAW(" Coords coords)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case glsl:\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" Footprint footprint;\n") +SLANG_RAW(" footprint._isSingleLevel = __queryFootprintGLSL(sampler, coords, granularity, ") +SLANG_SPLICE(isCoarseVal +) +SLANG_RAW(", footprint);\n") +SLANG_RAW(" return footprint;\n") +SLANG_RAW("\n") +SLANG_RAW(" case hlsl:\n") +SLANG_RAW(" uint isSingleLod = 0;\n") +SLANG_RAW(" Footprint footprint = {__queryFootprint") +SLANG_SPLICE(CoarseOrFine +) +SLANG_RAW("NVAPI(\n") +SLANG_RAW(" Shape.dimensions,\n") +SLANG_RAW(" __getRegisterSpace(this), __getRegisterIndex(this),\n") +SLANG_RAW(" __getRegisterSpace(sampler), __getRegisterIndex(sampler),\n") +SLANG_RAW(" __vectorReshape<3>(coords), granularity, /* out */isSingleLod), false};\n") +SLANG_RAW(" footprint._isSingleLevel = (isSingleLod != 0);\n") +SLANG_RAW(" return footprint;\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW("\n") +SLANG_RAW(" /// Query the footprint that would be accessed by a texture sampling operation.\n") +SLANG_RAW(" ///\n") +SLANG_RAW(" /// This operation queries the footprint that would be accessed\n") +SLANG_RAW(" /// by a comparable call to:\n") +SLANG_RAW(" ///\n") +SLANG_RAW(" /// t.SampleBias(sampler, coords, lodBias);\n") +SLANG_RAW(" ///\n") +SLANG_RAW(" [__NoSideEffect]\n") +SLANG_RAW(" Footprint queryFootprint") +SLANG_SPLICE(CoarseOrFine +) +SLANG_RAW("Bias(\n") +SLANG_RAW(" FootprintGranularity granularity,\n") +SLANG_RAW(" SamplerState sampler,\n") +SLANG_RAW(" Coords coords,\n") +SLANG_RAW(" float lodBias)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case glsl:\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" Footprint footprint;\n") +SLANG_RAW(" footprint._isSingleLevel = __queryFootprintGLSL(sampler, coords, granularity, ") +SLANG_SPLICE(isCoarseVal +) +SLANG_RAW(", footprint, lodBias);\n") +SLANG_RAW(" return footprint;\n") +SLANG_RAW(" case hlsl:\n") +SLANG_RAW(" uint isSingleLod = 
0;\n") +SLANG_RAW(" Footprint footprint = {__queryFootprint") +SLANG_SPLICE(CoarseOrFine +) +SLANG_RAW("BiasNVAPI(\n") +SLANG_RAW(" Shape.dimensions,\n") +SLANG_RAW(" __getRegisterSpace(this), __getRegisterIndex(this),\n") +SLANG_RAW(" __getRegisterSpace(sampler), __getRegisterIndex(sampler),\n") +SLANG_RAW(" __vectorReshape<3>(coords), granularity, lodBias, /* out */isSingleLod), false}; \n") +SLANG_RAW(" \n") +SLANG_RAW(" footprint._isSingleLevel = (isSingleLod != 0);\n") +SLANG_RAW(" return footprint;\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW("\n") +SLANG_RAW(" /// Query the footprint that would be accessed by a texture sampling operation.\n") +SLANG_RAW(" ///\n") +SLANG_RAW(" /// This operation queries the footprint that would be accessed\n") +SLANG_RAW(" /// by a comparable call to:\n") +SLANG_RAW(" ///\n") +SLANG_RAW(" /// t.SampleClamp(sampler, coords, lodClamp);\n") +SLANG_RAW(" ///\n") +SLANG_RAW(" [__NoSideEffect]\n") +SLANG_RAW(" Footprint queryFootprint") +SLANG_SPLICE(CoarseOrFine +) +SLANG_RAW("Clamp(\n") +SLANG_RAW(" FootprintGranularity granularity,\n") +SLANG_RAW(" SamplerState sampler,\n") +SLANG_RAW(" Coords coords,\n") +SLANG_RAW(" float lodClamp)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case glsl:\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" Footprint footprint;\n") +SLANG_RAW(" footprint._isSingleLevel = __queryFootprintClampGLSL(sampler, coords, lodClamp, granularity, ") +SLANG_SPLICE(isCoarseVal +) +SLANG_RAW(", footprint);\n") +SLANG_RAW(" return footprint;\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW("\n") +SLANG_RAW(" /// Query the footprint that would be accessed by a texture sampling operation.\n") +SLANG_RAW(" ///\n") +SLANG_RAW(" /// This operation queries the footprint that would be accessed\n") +SLANG_RAW(" /// by a comparable call to:\n") +SLANG_RAW(" ///\n") +SLANG_RAW(" /// t.SampleBiasClamp(sampler, coords, lodBias, lodClamp);\n") +SLANG_RAW(" ///\n") +SLANG_RAW(" [__NoSideEffect]\n") +SLANG_RAW(" Footprint queryFootprint") +SLANG_SPLICE(CoarseOrFine +) +SLANG_RAW("BiasClamp(\n") +SLANG_RAW(" FootprintGranularity granularity,\n") +SLANG_RAW(" SamplerState sampler,\n") +SLANG_RAW(" Coords coords,\n") +SLANG_RAW(" float lodBias,\n") +SLANG_RAW(" float lodClamp)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case glsl:\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" Footprint footprint;\n") +SLANG_RAW(" footprint._isSingleLevel = __queryFootprintClampGLSL(sampler, coords, lodClamp, granularity, ") +SLANG_SPLICE(isCoarseVal +) +SLANG_RAW(", footprint, lodBias);\n") +SLANG_RAW(" return footprint;\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW("\n") +SLANG_RAW(" /// Query the footprint that would be accessed by a texture sampling operation.\n") +SLANG_RAW(" ///\n") +SLANG_RAW(" /// This operation queries the footprint that would be accessed\n") +SLANG_RAW(" /// by a comparable call to:\n") +SLANG_RAW(" ///\n") +SLANG_RAW(" /// t.SampleLevel(sampler, coords, lod);\n") +SLANG_RAW(" ///\n") +SLANG_RAW(" [__NoSideEffect]\n") +SLANG_RAW(" Footprint queryFootprint") +SLANG_SPLICE(CoarseOrFine +) +SLANG_RAW("Level(\n") +SLANG_RAW(" FootprintGranularity granularity,\n") +SLANG_RAW(" SamplerState sampler,\n") +SLANG_RAW(" Coords coords,\n") +SLANG_RAW(" float lod)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case glsl:\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" Footprint footprint;\n") +SLANG_RAW(" 
footprint._isSingleLevel = __queryFootprintLodGLSL(sampler, coords, lod, granularity, ") +SLANG_SPLICE(isCoarseVal +) +SLANG_RAW(", footprint);\n") +SLANG_RAW(" return footprint;\n") +SLANG_RAW(" case hlsl:\n") +SLANG_RAW(" uint isSingleLod = 0;\n") +SLANG_RAW(" Footprint footprint = {__queryFootprint") +SLANG_SPLICE(CoarseOrFine +) +SLANG_RAW("LevelNVAPI(\n") +SLANG_RAW(" Shape.dimensions,\n") +SLANG_RAW(" __getRegisterSpace(this), __getRegisterIndex(this),\n") +SLANG_RAW(" __getRegisterSpace(sampler), __getRegisterIndex(sampler),\n") +SLANG_RAW(" __vectorReshape<3>(coords), granularity, lod, /* out */isSingleLod), false};\n") +SLANG_RAW(" \n") +SLANG_RAW(" footprint._isSingleLevel = (isSingleLod != 0);\n") +SLANG_RAW(" return footprint;\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW("\n") + + // TODO: Texture sampling with gradient is only available for 2D textures. +SLANG_RAW("#line 20266 \"hlsl.meta.slang\"") +SLANG_RAW("\n") +SLANG_RAW("\n") +SLANG_RAW(" /// Query the footprint that would be accessed by a texture sampling operation.\n") +SLANG_RAW(" ///\n") +SLANG_RAW(" /// This operation queries the footprint that would be accessed\n") +SLANG_RAW(" /// by a comparable call to:\n") +SLANG_RAW(" ///\n") +SLANG_RAW(" /// t.SampleGrad(sampler, coords, dx, dy);\n") +SLANG_RAW(" ///\n") +SLANG_RAW(" [__NoSideEffect] [ForceInline]\n") +SLANG_RAW(" Footprint queryFootprint") +SLANG_SPLICE(CoarseOrFine +) +SLANG_RAW("Grad(\n") +SLANG_RAW(" FootprintGranularity granularity,\n") +SLANG_RAW(" SamplerState sampler,\n") +SLANG_RAW(" Coords coords,\n") +SLANG_RAW(" Coords dx,\n") +SLANG_RAW(" Coords dy)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case glsl:\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" Footprint footprint;\n") +SLANG_RAW(" footprint._isSingleLevel = __queryFootprintGradGLSL(sampler, coords, dx, dy, granularity, ") +SLANG_SPLICE(isCoarseVal +) +SLANG_RAW(", footprint);\n") +SLANG_RAW(" return footprint;\n") +SLANG_RAW(" case hlsl:\n") +SLANG_RAW(" uint isSingleLod = 0;\n") +SLANG_RAW(" Footprint footprint = {__queryFootprint") +SLANG_SPLICE(CoarseOrFine +) +SLANG_RAW("GradNVAPI(\n") +SLANG_RAW(" Shape.dimensions,\n") +SLANG_RAW(" __getRegisterSpace(this), __getRegisterIndex(this),\n") +SLANG_RAW(" __getRegisterSpace(sampler), __getRegisterIndex(sampler),\n") +SLANG_RAW(" __vectorReshape<3>(coords), granularity, __vectorReshape<3>(dx), __vectorReshape<3>(dy), /* out */isSingleLod), false};\n") +SLANG_RAW("\n") +SLANG_RAW(" footprint._isSingleLevel = (isSingleLod != 0);\n") +SLANG_RAW(" return footprint;\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW("\n") +SLANG_RAW(" /// Query the footprint that would be accessed by a texture sampling operation.\n") +SLANG_RAW(" ///\n") +SLANG_RAW(" /// This operation queries the footprint that would be accessed\n") +SLANG_RAW(" /// by a comparable call to:\n") +SLANG_RAW(" ///\n") +SLANG_RAW(" /// t.SampleGradClamp(sampler, coords, dx, dy, lodClamp);\n") +SLANG_RAW(" ///\n") +SLANG_RAW(" [__NoSideEffect][ForceInline]\n") +SLANG_RAW(" Footprint queryFootprint") +SLANG_SPLICE(CoarseOrFine +) +SLANG_RAW("GradClamp(\n") +SLANG_RAW(" FootprintGranularity granularity,\n") +SLANG_RAW(" SamplerState sampler,\n") +SLANG_RAW(" Coords coords,\n") +SLANG_RAW(" Coords dx,\n") +SLANG_RAW(" Coords dy,\n") +SLANG_RAW(" float lodClamp)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case glsl:\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" Footprint footprint;\n") 
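+// For reference, a fragment shader might drive the portable queryFootprint*
+// entry points defined in this section roughly as follows. This is an
+// illustrative sketch only: `t`, `s`, `uv`, and `granularity` stand in for a
+// user-declared texture, sampler, coordinate, and FootprintGranularity value,
+// and are not names defined here.
+//
+//     let fp     = t.queryFootprintCoarse(granularity, s, uv);
+//     let fpLod  = t.queryFootprintFineLevel(granularity, s, uv, /* lod */ 2.0);
+//     let fpGrad = t.queryFootprintCoarseGrad(granularity, s, uv, ddx(uv), ddy(uv));
+//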
+SLANG_RAW(" footprint._isSingleLevel = __queryFootprintGradClampGLSL(sampler, coords, dx, dy, lodClamp, granularity, ") +SLANG_SPLICE(isCoarseVal +) +SLANG_RAW(", footprint);\n") +SLANG_RAW(" return footprint;\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW("\n") + + // TODO: end texture2D specific functions. +SLANG_RAW("#line 20331 \"hlsl.meta.slang\"") +SLANG_RAW("\n") +SLANG_RAW("\n") + +} +SLANG_RAW("#line 20335 \"hlsl.meta.slang\"") +SLANG_RAW("\n") +SLANG_RAW("\n") +SLANG_RAW("} // extension\n") +SLANG_RAW("\n") +SLANG_RAW("//\n") +SLANG_RAW("__generic\n") +SLANG_RAW("extension _Texture\n") +SLANG_RAW("{\n") +SLANG_RAW(" [__requiresNVAPI]\n") +SLANG_RAW(" [ForceInline]\n") +SLANG_RAW(" __glsl_extension(GL_EXT_shader_atomic_float)\n") +SLANG_RAW(" [require(glsl_hlsl_spirv, atomic_glsl_hlsl_nvapi_cuda_metal_float1)]\n") +SLANG_RAW(" void InterlockedAddF32(vector coord, float value, out float originalValue)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" originalValue = __atomic_add(this[coord], value);\n") +SLANG_RAW(" return;\n") +SLANG_RAW(" case hlsl:\n") +SLANG_RAW(" __intrinsic_asm \"$3 = NvInterlockedAddFp32($0, $1, $2)\";\n") +SLANG_RAW(" case glsl:\n") +SLANG_RAW(" __intrinsic_asm \"$3 = imageAtomicAdd($0, $1, $2)\";\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW("\n") +SLANG_RAW(" [ForceInline]\n") +SLANG_RAW(" float InterlockedAddF32(vector coord, float value)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" float originalValue;\n") +SLANG_RAW(" InterlockedAddF32(coord, value, originalValue);\n") +SLANG_RAW(" return originalValue;\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("// Buffer Pointer\n") +SLANG_RAW("\n") +SLANG_RAW("//@hidden:\n") +SLANG_RAW("\n") +SLANG_RAW("namespace vk\n") +SLANG_RAW("{\n") +SLANG_RAW(" // Partial implementation of the vk::buffer_ref proposal:\n") +SLANG_RAW(" // https://github.com/microsoft/hlsl-specs/blob/main/proposals/0010-vk-buffer-ref.md\n") +SLANG_RAW(" struct BufferPointer\n") +SLANG_RAW(" {\n") +SLANG_RAW(" T *_ptr;\n") +SLANG_RAW(" [ForceInline] __init(T *ptr) { _ptr = ptr; }\n") +SLANG_RAW(" [ForceInline] __init(uint64_t val) { _ptr = (T *)val; }\n") +SLANG_RAW(" [ForceInline] Ref Get() { return *_ptr; }\n") +SLANG_RAW(" [ForceInline] T *getPtr() { return _ptr;}\n") +SLANG_RAW(" }\n") +SLANG_RAW(" [ForceInline]\n") +SLANG_RAW(" BufferPointer static_pointer_cast(BufferPointer src)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" return BufferPointer((U*)(src.getPtr()));\n") +SLANG_RAW(" }\n") +SLANG_RAW(" [ForceInline]\n") +SLANG_RAW(" BufferPointer reinterpret_pointer_cast(BufferPointer src)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" return BufferPointer((U *)(src.getPtr()));\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("attribute_syntax[vk_aliased_pointer] : VkAliasedPointerAttribute;\n") +SLANG_RAW("attribute_syntax[vk_restrict_pointer] : VkRestrictPointerAttribute;\n") +SLANG_RAW("\n") +SLANG_RAW("extension uint64_t\n") +SLANG_RAW("{\n") +SLANG_RAW(" __init(vk::BufferPointer ptr)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" this = (uint64_t)ptr._ptr;\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("__generic\n") +SLANG_RAW("__intrinsic_type(") +SLANG_SPLICE(kIROp_HLSLConstBufferPointerType +) +SLANG_RAW(")\n") +SLANG_RAW("__glsl_extension(GL_EXT_buffer_reference)\n") +SLANG_RAW("__magic_type(ConstBufferPointerType)\n") +SLANG_RAW("[require(glsl_spirv, bufferreference)]\n") +SLANG_RAW("struct ConstBufferPointer\n") 
+SLANG_RAW("{\n") +SLANG_RAW(" __glsl_version(450)\n") +SLANG_RAW(" __glsl_extension(GL_EXT_buffer_reference)\n") +SLANG_RAW(" [__NoSideEffect]\n") +SLANG_RAW(" T get()\n") +SLANG_RAW(" {\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case glsl:\n") +SLANG_RAW(" __intrinsic_asm \"$0._data\";\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" return spirv_asm {\n") +SLANG_RAW(" result:$$T = OpLoad $this Aligned !Alignment;\n") +SLANG_RAW(" };\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW("\n") +SLANG_RAW("\n") +SLANG_RAW("\n") +SLANG_RAW(" __subscript(int index) -> T\n") +SLANG_RAW(" {\n") +SLANG_RAW(" [ForceInline]\n") +SLANG_RAW(" get {return ConstBufferPointer.fromUInt(toUInt() + __naturalStrideOf() * index).get(); }\n") +SLANG_RAW(" }\n") +SLANG_RAW("\n") +SLANG_RAW(" __glsl_version(450)\n") +SLANG_RAW(" __glsl_extension(GL_EXT_shader_explicit_arithmetic_types_int64)\n") +SLANG_RAW(" __glsl_extension(GL_EXT_buffer_reference)\n") +SLANG_RAW(" [require(glsl_spirv, bufferreference_int64)]\n") +SLANG_RAW(" static ConstBufferPointer fromUInt(uint64_t val)\n") +SLANG_RAW(" {\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case glsl:\n") +SLANG_RAW(" __intrinsic_asm \"$TR($0)\";\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" return spirv_asm {\n") +SLANG_RAW(" result:$$ConstBufferPointer = OpConvertUToPtr $val;\n") +SLANG_RAW(" };\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW("\n") +SLANG_RAW(" __glsl_version(450)\n") +SLANG_RAW(" __glsl_extension(GL_EXT_shader_explicit_arithmetic_types_int64)\n") +SLANG_RAW(" __glsl_extension(GL_EXT_buffer_reference)\n") +SLANG_RAW(" [require(glsl_spirv, bufferreference_int64)]\n") +SLANG_RAW(" uint64_t toUInt()\n") +SLANG_RAW(" {\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case glsl:\n") +SLANG_RAW(" __intrinsic_asm \"uint64_t($0)\";\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" return spirv_asm {\n") +SLANG_RAW(" result:$$uint64_t = OpConvertPtrToU $this;\n") +SLANG_RAW(" };\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW("\n") +SLANG_RAW(" __glsl_version(450)\n") +SLANG_RAW(" __glsl_extension(GL_EXT_shader_explicit_arithmetic_types_int64)\n") +SLANG_RAW(" __glsl_extension(GL_EXT_buffer_reference)\n") +SLANG_RAW(" [__NoSideEffect]\n") +SLANG_RAW(" [ForceInline]\n") +SLANG_RAW(" [require(glsl_spirv, bufferreference_int64)]\n") +SLANG_RAW(" bool isValid()\n") +SLANG_RAW(" {\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case glsl:\n") +SLANG_RAW(" __intrinsic_asm \"(uint64_t($0) != 0)\";\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" uint64_t zero = 0ULL;\n") +SLANG_RAW(" return spirv_asm {\n") +SLANG_RAW(" %ptrval:$$uint64_t = OpConvertPtrToU $this;\n") +SLANG_RAW(" result:$$bool = OpINotEqual %ptrval $zero;\n") +SLANG_RAW(" };\n") +SLANG_RAW(" }\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("//\n") +SLANG_RAW("// HLSL-like dynamic resources\n") +SLANG_RAW("// https://microsoft.github.io/DirectX-Specs/d3d/HLSL_SM_6_6_DynamicResources.html\n") +SLANG_RAW("//\n") +SLANG_RAW("// For Khronos targets, `__DynamicResource` can be used to declare \"untyped\" global bindings as\n") +SLANG_RAW("// usual (e.g. 
unsized arrays for descriptor indexing), which will then be materialized into \n") +SLANG_RAW("// new aliased bindings for each distinct cast type.\n") +SLANG_RAW("//\n") +SLANG_RAW("\n") +SLANG_RAW("__magic_type(DynamicResourceType)\n") +SLANG_RAW("__intrinsic_type(") +SLANG_SPLICE(kIROp_DynamicResourceType +) +SLANG_RAW(")\n") +SLANG_RAW("struct __DynamicResource\n") +SLANG_RAW("{\n") +SLANG_RAW(" __intrinsic_op(") +SLANG_SPLICE(kIROp_CastDynamicResource +) +SLANG_RAW(")\n") +SLANG_RAW(" T as>();\n") +SLANG_RAW("}\n") +SLANG_RAW("interface __IDynamicResourceCastable\n") +SLANG_RAW("{\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("enum __DynamicResourceKind\n") +SLANG_RAW("{\n") +SLANG_RAW(" General = 0, // CBV_SRV_UAV\n") +SLANG_RAW(" Sampler = 1\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("__generic\n") +SLANG_RAW("extension _Texture : __IDynamicResourceCastable<__DynamicResourceKind.General>\n") +SLANG_RAW("{\n") +SLANG_RAW(" __intrinsic_op(") +SLANG_SPLICE(kIROp_CastDynamicResource +) +SLANG_RAW(")\n") +SLANG_RAW(" __implicit_conversion(") +SLANG_SPLICE(kConversionCost_GenericParamUpcast +) +SLANG_RAW(")\n") +SLANG_RAW(" __init(__DynamicResource res);\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") + +const char* kDynamicResourceCastableTypes[] = { + "StructuredBuffer", "RWStructuredBuffer", + "AppendStructuredBuffer", "ConsumeStructuredBuffer", "RasterizerOrderedStructuredBuffer", + "ByteAddressBuffer", "RWByteAddressBuffer", "RasterizerOrderedByteAddressBuffer", + + "SamplerState", "SamplerComparisonState", + + "ConstantBuffer", "TextureBuffer", +}; + +for (auto typeName : kDynamicResourceCastableTypes) { + auto kind = strstr(typeName, "Sampler") ? "Sampler" : "General"; + + if (strstr(typeName, "StructuredBuffer")) + sb << "__generic\n"; + else if (strstr(typeName, "ConstantBuffer")) + sb << "__generic\n"; + else if (strstr(typeName, "Buffer")) + sb << "__generic\n"; +SLANG_RAW("#line 20550 \"hlsl.meta.slang\"") +SLANG_RAW("\n") +SLANG_RAW("\n") +SLANG_RAW("extension ") +SLANG_SPLICE(typeName +) +SLANG_RAW(" : __IDynamicResourceCastable<__DynamicResourceKind.") +SLANG_SPLICE(kind +) +SLANG_RAW(">\n") +SLANG_RAW("{\n") +SLANG_RAW(" __intrinsic_op(") +SLANG_SPLICE(kIROp_CastDynamicResource +) +SLANG_RAW(")\n") +SLANG_RAW(" __implicit_conversion(") +SLANG_SPLICE(kConversionCost_GenericParamUpcast +) +SLANG_RAW(")\n") +SLANG_RAW(" __init(__DynamicResource res);\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") + +} +SLANG_RAW("#line 20561 \"hlsl.meta.slang\"") +SLANG_RAW("\n") +SLANG_RAW("\n") +SLANG_RAW("\n") +SLANG_RAW("__glsl_version(450)\n") +SLANG_RAW("__glsl_extension(GL_ARB_shader_clock)\n") +SLANG_RAW("[require(glsl_spirv, GL_ARB_shader_clock)]\n") +SLANG_RAW("uint2 clock2x32ARB()\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case glsl: __intrinsic_asm \"clock2x32ARB\";\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" const uint32_t scopeId_subgroup = 3;\n") +SLANG_RAW(" return spirv_asm {\n") +SLANG_RAW(" OpCapability ShaderClockKHR;\n") +SLANG_RAW(" OpExtension \"SPV_KHR_shader_clock\";\n") +SLANG_RAW(" result:$$uint2 = OpReadClockKHR $scopeId_subgroup;\n") +SLANG_RAW(" };\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("__glsl_version(450)\n") +SLANG_RAW("__glsl_extension(GL_ARB_shader_clock)\n") +SLANG_RAW("__glsl_extension(GL_ARB_gpu_shader_int64)\n") +SLANG_RAW("[require(glsl_spirv, GL_ARB_shader_clock64)]\n") +SLANG_RAW("uint64_t clockARB()\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") 
+SLANG_RAW(" case glsl: __intrinsic_asm \"clockARB\";\n") +SLANG_RAW(" case spirv:\n") +SLANG_RAW(" const uint32_t scopeId_subgroup = 3;\n") +SLANG_RAW(" return spirv_asm {\n") +SLANG_RAW(" OpCapability ShaderClockKHR;\n") +SLANG_RAW(" OpExtension \"SPV_KHR_shader_clock\";\n") +SLANG_RAW(" result:$$uint64_t = OpReadClockKHR $scopeId_subgroup;\n") +SLANG_RAW(" };\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("extension StructuredBuffer : IArray\n") +SLANG_RAW("{\n") +SLANG_RAW(" int getCount() { uint count; uint stride; this.GetDimensions(count, stride); return count; }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("extension RWStructuredBuffer : IRWArray\n") +SLANG_RAW("{\n") +SLANG_RAW(" int getCount() { uint count; uint stride; this.GetDimensions(count, stride); return count; }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("extension RasterizerOrderedStructuredBuffer : IRWArray\n") +SLANG_RAW("{\n") +SLANG_RAW(" int getCount() { uint count; uint stride; this.GetDimensions(count, stride); return count; }\n") +SLANG_RAW("}\n") +SLANG_RAW("\n") +SLANG_RAW("//@public:\n") +SLANG_RAW("\n") +SLANG_RAW("/// Mark a variable as being workgroup uniform.\n") +SLANG_RAW("/// @param v The variable to mark as workgroup uniform.\n") +SLANG_RAW("/// @return The value of `v`.\n") +SLANG_RAW("/// @remarks This intrinsic maps to `workgroupUniformLoad` when targeting WGSL and is a no-op on other targets.\n") +SLANG_RAW("/// WGSL is strict on uniformity, and this intrinsic is needed to mark a variable as workgroup uniform in order\n") +SLANG_RAW("// to silence uniformity errors in certain cases.\n") +SLANG_RAW("T workgroupUniformLoad(__ref T v)\n") +SLANG_RAW("{\n") +SLANG_RAW(" __target_switch\n") +SLANG_RAW(" {\n") +SLANG_RAW(" case wgsl:\n") +SLANG_RAW(" __intrinsic_asm \"workgroupUniformLoad(&($0))\";\n") +SLANG_RAW(" default:\n") +SLANG_RAW(" return v;\n") +SLANG_RAW(" }\n") +SLANG_RAW("}\n") diff --git a/tools/slang-cpp-parser/node.cpp b/tools/slang-cpp-parser/node.cpp index 16484ead39..af15508580 100644 --- a/tools/slang-cpp-parser/node.cpp +++ b/tools/slang-cpp-parser/node.cpp @@ -688,6 +688,26 @@ void ClassLikeNode::dump(int indentCount, StringBuilder& out) out << " {\n"; + if (m_guid != Guid()) + { + _indent(indentCount + 1, out); + StringUtil::appendFormat( + out, + "COM_INTERFACE(0x%08lx, 0x%04hx, 0x%04hx, {0x%02hhx, 0x%02hhx, 0x%02hhx, 0x%02hhx, " + "0x%02hhx, 0x%02hhx, 0x%02hhx, 0x%02hhx})\n", + m_guid.data1, + m_guid.data2, + m_guid.data3, + m_guid.data4[0], + m_guid.data4[1], + m_guid.data4[2], + m_guid.data4[3], + m_guid.data4[4], + m_guid.data4[5], + m_guid.data4[6], + m_guid.data4[7]); + } + for (Node* child : m_children) { child->dump(indentCount + 1, out); diff --git a/tools/slang-cpp-parser/node.h b/tools/slang-cpp-parser/node.h index 4dad9473e0..04cee8d715 100644 --- a/tools/slang-cpp-parser/node.h +++ b/tools/slang-cpp-parser/node.h @@ -316,7 +316,7 @@ struct ClassLikeNode : public ScopeNode virtual void dump(int indent, StringBuilder& out) SLANG_OVERRIDE; ClassLikeNode(Kind kind) - : Super(kind), m_origin(nullptr), m_typeSet(nullptr), m_superNode(nullptr) + : Super(kind), m_origin(nullptr), m_typeSet(nullptr), m_superNode(nullptr), m_guid(Guid()) { SLANG_ASSERT(kind == Kind::ClassType || kind == Kind::StructType); } @@ -333,6 +333,8 @@ struct ClassLikeNode : public ScopeNode Token m_super; ///< Super class name ClassLikeNode* m_superNode; ///< If this is a class/struct, the type it is derived from (or ///< nullptr if base) + + Guid m_guid; ///< The guid 
associated with this type }; struct CallableNode : public Node diff --git a/tools/slang-cpp-parser/parser.cpp b/tools/slang-cpp-parser/parser.cpp index 9fcf6a6b98..32f68216b3 100644 --- a/tools/slang-cpp-parser/parser.cpp +++ b/tools/slang-cpp-parser/parser.cpp @@ -1109,32 +1109,58 @@ SlangResult Parser::_parseSpecialMacro() Token name; SLANG_RETURN_ON_FAIL(expect(TokenType::Identifier, &name)); - List params; + const UnownedStringSlice suffix = name.getContent().tail(m_options->m_markPrefix.getLength()); - if (m_reader.peekTokenType() == TokenType::LParent) + if (suffix == "COM_INTERFACE") { - // Mark the start - auto startCursor = m_reader.getCursor(); + return _parseGuid(); + } - // Consume the params + if (m_reader.peekTokenType() == TokenType::LParent) + { SLANG_RETURN_ON_FAIL(_consumeBalancedParens()); + } - auto endCursor = m_reader.getCursor(); - m_reader.setCursor(startCursor); + return SLANG_OK; +} - while (!m_reader.isAtCursor(endCursor)) +SlangResult Parser::_parseGuid() +{ + Guid guid{}; + Token guidToken; + Int value; + + SLANG_RETURN_ON_FAIL(expect(TokenType::LParent)); + + SLANG_RETURN_ON_FAIL(expect(TokenType::IntegerLiteral, &guidToken)); + StringUtil::parseInt(guidToken.getContent(), value); + guid.data1 = value; + SLANG_RETURN_ON_FAIL(expect(TokenType::Comma)); + SLANG_RETURN_ON_FAIL(expect(TokenType::IntegerLiteral, &guidToken)); + StringUtil::parseInt(guidToken.getContent(), value); + guid.data2 = value; + SLANG_RETURN_ON_FAIL(expect(TokenType::Comma)); + SLANG_RETURN_ON_FAIL(expect(TokenType::IntegerLiteral, &guidToken)); + StringUtil::parseInt(guidToken.getContent(), value); + guid.data3 = value; + SLANG_RETURN_ON_FAIL(expect(TokenType::Comma)); + SLANG_RETURN_ON_FAIL(expect(TokenType::LBrace)); + for (Index i = 0; i < 8; ++i) + { + SLANG_RETURN_ON_FAIL(expect(TokenType::IntegerLiteral, &guidToken)); + StringUtil::parseInt(guidToken.getContent(), value); + guid.data4[i] = value; + if (i < 7) { - params.add(m_reader.advanceToken()); + SLANG_RETURN_ON_FAIL(expect(TokenType::Comma)); } } + SLANG_RETURN_ON_FAIL(expect(TokenType::RBrace)); + SLANG_RETURN_ON_FAIL(expect(TokenType::RParent)); - // Can do special handling here - const UnownedStringSlice suffix = name.getContent().tail(m_options->m_markPrefix.getLength()); + ClassLikeNode* node = as(m_currentScope); - if (suffix == "COM_INTERFACE") - { - // TODO(JS): It's a com interface. Extact the GUID - } + node->m_guid = guid; return SLANG_OK; } diff --git a/tools/slang-cpp-parser/parser.h b/tools/slang-cpp-parser/parser.h index c3ccf9656a..675fdccde1 100644 --- a/tools/slang-cpp-parser/parser.h +++ b/tools/slang-cpp-parser/parser.h @@ -44,6 +44,7 @@ class Parser SlangResult _parseTypeDef(); SlangResult _parseEnum(); + SlangResult _parseGuid(); SlangResult _parseMarker(); SlangResult _parseSpecialMacro(); diff --git a/tools/slang-cpp-parser/unit-test.cpp b/tools/slang-cpp-parser/unit-test.cpp index 7320e708e8..da8542be45 100644 --- a/tools/slang-cpp-parser/unit-test.cpp +++ b/tools/slang-cpp-parser/unit-test.cpp @@ -42,6 +42,8 @@ struct TestState static const char someSource[] = "class ISomeInterface\n" "{\n" + " SLANG_COM_INTERFACE(0x514027d8, 0x23d1, 0x4093, " + "{0x94,0x85,0xb9,0x2c,0x06,0x95,0x7f,0x5e})\n" " public:\n" " virtual int SLANG_MCALL someMethod(int a, int b) const = 0;\n" " virtual float SLANG_MCALL anotherMethod(float a) = 0;\n"