/*
 * Copyright 2015-2021 Arm Limited
 * SPDX-License-Identifier: Apache-2.0 OR MIT
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/*
 * At your option, you may choose to accept this material under either:
 * 1. The Apache License, Version 2.0, found at <http://www.apache.org/licenses/LICENSE-2.0>, or
 * 2. The MIT License, found at <http://opensource.org/licenses/MIT>.
 */

#ifndef SPIRV_CROSS_COMMON_HPP
#define SPIRV_CROSS_COMMON_HPP

#ifndef SPV_ENABLE_UTILITY_CODE
#define SPV_ENABLE_UTILITY_CODE
#endif
#include "spirv.hpp"

#include "spirv_cross_containers.hpp"
#include "spirv_cross_error_handling.hpp"
#include <functional>

// A bit crude, but allows projects which embed SPIRV-Cross statically to
// effectively hide all the symbols from other projects.
// There is a case where we have:
// - Project A links against SPIRV-Cross statically.
// - Project A links against Project B statically.
// - Project B links against SPIRV-Cross statically (might be a different version).
// This leads to a conflict with extremely bizarre results.
// By overriding the namespace in one of the project builds, we can work around this.
// If SPIRV-Cross is embedded in dynamic libraries,
// prefer using -fvisibility=hidden on GCC/Clang instead.
#ifdef SPIRV_CROSS_NAMESPACE_OVERRIDE
#define SPIRV_CROSS_NAMESPACE SPIRV_CROSS_NAMESPACE_OVERRIDE
#else
#define SPIRV_CROSS_NAMESPACE spirv_cross
#endif
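// Illustrative sketch (not part of the upstream header): a project embedding its own
// static copy could compile every SPIRV-Cross translation unit with
//   -DSPIRV_CROSS_NAMESPACE_OVERRIDE=my_project_spirv_cross
// so that its symbols live in ::my_project_spirv_cross and cannot collide with another
// statically linked copy that uses the default ::spirv_cross namespace.
// (The namespace name above is hypothetical; any unique identifier works.)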

namespace SPIRV_CROSS_NAMESPACE
{
namespace inner
{
template <typename T>
void join_helper(StringStream<> &stream, T &&t)
{
	stream << std::forward<T>(t);
}

template <typename T, typename... Ts>
void join_helper(StringStream<> &stream, T &&t, Ts &&... ts)
{
	stream << std::forward<T>(t);
	join_helper(stream, std::forward<Ts>(ts)...);
}
} // namespace inner

class Bitset
{
public:
	Bitset() = default;
	explicit inline Bitset(uint64_t lower_)
	    : lower(lower_)
	{
	}

	inline bool get(uint32_t bit) const
	{
		if (bit < 64)
			return (lower & (1ull << bit)) != 0;
		else
			return higher.count(bit) != 0;
	}

	inline void set(uint32_t bit)
	{
		if (bit < 64)
			lower |= 1ull << bit;
		else
			higher.insert(bit);
	}

	inline void clear(uint32_t bit)
	{
		if (bit < 64)
			lower &= ~(1ull << bit);
		else
			higher.erase(bit);
	}

	inline uint64_t get_lower() const
	{
		return lower;
	}

	inline void reset()
	{
		lower = 0;
		higher.clear();
	}

	inline void merge_and(const Bitset &other)
	{
		lower &= other.lower;
		std::unordered_set<uint32_t> tmp_set;
		for (auto &v : higher)
			if (other.higher.count(v) != 0)
				tmp_set.insert(v);
		higher = std::move(tmp_set);
	}

	inline void merge_or(const Bitset &other)
	{
		lower |= other.lower;
		for (auto &v : other.higher)
			higher.insert(v);
	}

	inline bool operator==(const Bitset &other) const
	{
		if (lower != other.lower)
			return false;

		if (higher.size() != other.higher.size())
			return false;

		for (auto &v : higher)
			if (other.higher.count(v) == 0)
				return false;

		return true;
	}

	inline bool operator!=(const Bitset &other) const
	{
		return !(*this == other);
	}

	template <typename Op>
	void for_each_bit(const Op &op) const
	{
		// TODO: Add ctz-based iteration.
		for (uint32_t i = 0; i < 64; i++)
		{
			if (lower & (1ull << i))
				op(i);
		}

		if (higher.empty())
			return;

		// Need to enforce an order here for reproducible results,
		// but hitting this path should happen extremely rarely, so having this slow path is fine.
		SmallVector<uint32_t> bits;
		bits.reserve(higher.size());
		for (auto &v : higher)
			bits.push_back(v);
		std::sort(std::begin(bits), std::end(bits));

		for (auto &v : bits)
			op(v);
	}

	inline bool empty() const
	{
		return lower == 0 && higher.empty();
	}

private:
	// The most common bits to set are all lower than 64,
	// so optimize for this case. Bits spilling outside 64 go into a slower data structure.
	// In almost all cases, higher data structure will not be used.
	uint64_t lower = 0;
	std::unordered_set<uint32_t> higher;
};
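
// Usage sketch (illustrative, not part of the upstream header): decoration masks are kept
// as Bitset values, and the common SPIR-V decorations fit in the fast 64-bit path.
//
//   Bitset flags;
//   flags.set(3);   // stays in the packed 'lower' word
//   flags.set(100); // spills into the 'higher' set
//   flags.for_each_bit([](uint32_t bit) { /* visits 3, then 100 (higher bits are sorted) */ });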

// Helper template to avoid lots of nasty string temporary munging.
template <typename... Ts>
std::string join(Ts &&... ts)
{
	StringStream<> stream;
	inner::join_helper(stream, std::forward<Ts>(ts)...);
	return stream.str();
}
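
// Illustrative usage sketch: join() streams all arguments into a single string, so mixed
// string/integer arguments are fine, e.g.
//   join("layout(location = ", 3, ") in vec4 v", 3, ";")
// builds "layout(location = 3) in vec4 v3;" without intermediate std::string temporaries.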

inline std::string merge(const SmallVector<std::string> &list, const char *between = ", ")
{
	StringStream<> stream;
	for (auto &elem : list)
	{
		stream << elem;
		if (&elem != &list.back())
			stream << between;
	}
	return stream.str();
}
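
// For example (illustrative): merging a list holding "a", "b", "c" with the default
// separator yields "a, b, c"; merge(list, " + ") would yield "a + b + c". Comparing
// element addresses against list.back() is what suppresses a trailing separator.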

// Make sure we don't accidentally call this with floats or doubles, enforced with SFINAE.
// Have to use the radix-aware overload.
template <typename T, typename std::enable_if<!std::is_floating_point<T>::value, int>::type = 0>
inline std::string convert_to_string(const T &t)
{
	return std::to_string(t);
}

static inline std::string convert_to_string(int32_t value)
{
	// INT_MIN is ... special on some backends. If we use a decimal literal, and negate it, we
	// could accidentally promote the literal to long first, then negate.
	// To work around it, emit int(0x80000000) instead.
	if (value == (std::numeric_limits<int32_t>::min)())
		return "int(0x80000000)";
	else
		return std::to_string(value);
}
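
// Worked example of the issue above (illustrative): emitting the decimal text "-2147483648"
// is really negate(2147483648), and the literal 2147483648 does not fit in a signed 32-bit
// int, so depending on the target language it may be promoted to a wider type or rejected
// before the unary minus is applied. int(0x80000000) expresses the same bit pattern without
// ever spelling an out-of-range decimal literal.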

static inline std::string convert_to_string(int64_t value, const std::string &int64_type, bool long_long_literal_suffix)
{
	// INT64_MIN is ... special on some backends.
	// If we use a decimal literal, and negate it, we might overflow the representable numbers.
	// To work around it, emit the 64-bit type constructed from 0x8000000000000000 instead.
	if (value == (std::numeric_limits<int64_t>::min)())
		return join(int64_type, "(0x8000000000000000u", (long_long_literal_suffix ? "ll" : "l"), ")");
	else
		return std::to_string(value) + (long_long_literal_suffix ? "ll" : "l");
}

// Allow implementations to set a convenient standard precision
#ifndef SPIRV_CROSS_FLT_FMT
#define SPIRV_CROSS_FLT_FMT "%.32g"
#endif
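
// Illustrative override (an assumption about how an embedder might use the #ifndef guard,
// not a documented default): defining
//   #define SPIRV_CROSS_FLT_FMT "%.17g"
// before this header is included keeps emitted literals shorter while still round-tripping
// doubles; the "%.32g" default simply errs on the side of precision.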

// Disable sprintf and strcat warnings.
// We cannot rely on snprintf and family existing because, ..., MSVC.
#if defined(__clang__) || defined(__GNUC__)
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wdeprecated-declarations"
#elif defined(_MSC_VER)
#pragma warning(push)
#pragma warning(disable : 4996)
#endif

static inline void fixup_radix_point(char *str, char radix_point)
{
	// Setting locales is a very risky business in a multi-threaded program,
	// so just fix up the output instead. We only need to care about the radix point.
	if (radix_point != '.')
	{
		while (*str != '\0')
		{
			if (*str == radix_point)
				*str = '.';
			str++;
		}
	}
}
269
270inline std::string convert_to_string(float t, char locale_radix_point)
271{
272 // std::to_string for floating point values is broken.
273 // Fallback to something more sane.
274 char buf[64];
275 sprintf(s: buf, SPIRV_CROSS_FLT_FMT, t);
276 fixup_radix_point(str: buf, radix_point: locale_radix_point);
277
278 // Ensure that the literal is float.
279 if (!strchr(s: buf, c: '.') && !strchr(s: buf, c: 'e'))
280 strcat(dest: buf, src: ".0");
281 return buf;
282}
283
284inline std::string convert_to_string(double t, char locale_radix_point)
285{
286 // std::to_string for floating point values is broken.
287 // Fallback to something more sane.
288 char buf[64];
289 sprintf(s: buf, SPIRV_CROSS_FLT_FMT, t);
290 fixup_radix_point(str: buf, radix_point: locale_radix_point);
291
292 // Ensure that the literal is float.
293 if (!strchr(s: buf, c: '.') && !strchr(s: buf, c: 'e'))
294 strcat(dest: buf, src: ".0");
295 return buf;
296}
297
298#if defined(__clang__) || defined(__GNUC__)
299#pragma GCC diagnostic pop
300#elif defined(_MSC_VER)
301#pragma warning(pop)
302#endif

class FloatFormatter
{
public:
	virtual ~FloatFormatter() = default;
	virtual std::string format_float(float value) = 0;
	virtual std::string format_double(double value) = 0;
};

template <typename T>
struct ValueSaver
{
	explicit ValueSaver(T &current_)
	    : current(current_)
	    , saved(current_)
	{
	}

	void release()
	{
		current = saved;
	}

	~ValueSaver()
	{
		release();
	}

	T &current;
	T saved;
};
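
// Usage sketch (illustrative; some_state_flag is a hypothetical variable): ValueSaver is a
// small RAII guard that restores a value on scope exit, e.g. to temporarily override a
// piece of compiler state while emitting a sub-expression:
//
//   ValueSaver<bool> saver(some_state_flag);
//   some_state_flag = true;
//   // ... emit code that depends on the override ...
//   // The destructor (or an explicit saver.release()) writes the original value back.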

struct Instruction
{
	uint16_t op = 0;
	uint16_t count = 0;
	// If offset is 0 (not a valid offset into the instruction stream),
	// we have an instruction stream which is embedded in the object.
	uint32_t offset = 0;
	uint32_t length = 0;

	inline bool is_embedded() const
	{
		return offset == 0;
	}
};

struct EmbeddedInstruction : Instruction
{
	SmallVector<uint32_t> ops;
};

enum Types
{
	TypeNone,
	TypeType,
	TypeVariable,
	TypeConstant,
	TypeFunction,
	TypeFunctionPrototype,
	TypeBlock,
	TypeExtension,
	TypeExpression,
	TypeConstantOp,
	TypeCombinedImageSampler,
	TypeAccessChain,
	TypeUndef,
	TypeString,
	TypeCount
};

template <Types type>
class TypedID;

template <>
class TypedID<TypeNone>
{
public:
	TypedID() = default;
	TypedID(uint32_t id_)
	    : id(id_)
	{
	}

	template <Types U>
	TypedID(const TypedID<U> &other)
	{
		*this = other;
	}

	template <Types U>
	TypedID &operator=(const TypedID<U> &other)
	{
		id = uint32_t(other);
		return *this;
	}

	// Implicit conversion to u32 is desired here.
	// As long as we block implicit conversion between TypedID<A> and TypedID<B> we're good.
	operator uint32_t() const
	{
		return id;
	}

	template <Types U>
	operator TypedID<U>() const
	{
		return TypedID<U>(*this);
	}

private:
	uint32_t id = 0;
};

template <Types type>
class TypedID
{
public:
	TypedID() = default;
	TypedID(uint32_t id_)
	    : id(id_)
	{
	}

	explicit TypedID(const TypedID<TypeNone> &other)
	    : id(uint32_t(other))
	{
	}

	operator uint32_t() const
	{
		return id;
	}

private:
	uint32_t id = 0;
};

using VariableID = TypedID<TypeVariable>;
using TypeID = TypedID<TypeType>;
using ConstantID = TypedID<TypeConstant>;
using FunctionID = TypedID<TypeFunction>;
using BlockID = TypedID<TypeBlock>;
using ID = TypedID<TypeNone>;
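
// Illustrative sketch of the intent described above: every strongly typed ID converts
// implicitly to uint32_t and to/from the untyped ID, but two different typed IDs do not
// convert into each other, so mixing ID kinds is a compile-time error:
//
//   TypeID type_id = 10;
//   ID generic = type_id;           // fine, TypedID<TypeNone> accepts any typed ID
//   uint32_t raw = type_id;         // fine, implicit conversion to u32 is intentional
//   // VariableID var_id = type_id; // would not compile without an explicit round-trip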

// Helper for Variant interface.
struct IVariant
{
	virtual ~IVariant() = default;
	virtual IVariant *clone(ObjectPoolBase *pool) = 0;
	ID self = 0;

protected:
	IVariant() = default;
	IVariant(const IVariant&) = default;
	IVariant &operator=(const IVariant&) = default;
};

#define SPIRV_CROSS_DECLARE_CLONE(T) \
	IVariant *clone(ObjectPoolBase *pool) override \
	{ \
		return static_cast<ObjectPool<T> *>(pool)->allocate(*this); \
	}

struct SPIRUndef : IVariant
{
	enum
	{
		type = TypeUndef
	};

	explicit SPIRUndef(TypeID basetype_)
	    : basetype(basetype_)
	{
	}
	TypeID basetype;

	SPIRV_CROSS_DECLARE_CLONE(SPIRUndef)
};

struct SPIRString : IVariant
{
	enum
	{
		type = TypeString
	};

	explicit SPIRString(std::string str_)
	    : str(std::move(str_))
	{
	}

	std::string str;

	SPIRV_CROSS_DECLARE_CLONE(SPIRString)
};

// This type is only used by backends which need to access the combined image and sampler IDs separately after
// the OpSampledImage opcode.
struct SPIRCombinedImageSampler : IVariant
{
	enum
	{
		type = TypeCombinedImageSampler
	};
	SPIRCombinedImageSampler(TypeID type_, VariableID image_, VariableID sampler_)
	    : combined_type(type_)
	    , image(image_)
	    , sampler(sampler_)
	{
	}
	TypeID combined_type;
	VariableID image;
	VariableID sampler;

	SPIRV_CROSS_DECLARE_CLONE(SPIRCombinedImageSampler)
};

struct SPIRConstantOp : IVariant
{
	enum
	{
		type = TypeConstantOp
	};

	SPIRConstantOp(TypeID result_type, spv::Op op, const uint32_t *args, uint32_t length)
	    : opcode(op)
	    , basetype(result_type)
	{
		arguments.reserve(length);
		for (uint32_t i = 0; i < length; i++)
			arguments.push_back(args[i]);
	}

	spv::Op opcode;
	SmallVector<uint32_t> arguments;
	TypeID basetype;

	SPIRV_CROSS_DECLARE_CLONE(SPIRConstantOp)
};

struct SPIRType : IVariant
{
	enum
	{
		type = TypeType
	};

	spv::Op op = spv::Op::OpNop;
	explicit SPIRType(spv::Op op_) : op(op_) {}

	enum BaseType
	{
		Unknown,
		Void,
		Boolean,
		SByte,
		UByte,
		Short,
		UShort,
		Int,
		UInt,
		Int64,
		UInt64,
		AtomicCounter,
		Half,
		Float,
		Double,
		Struct,
		Image,
		SampledImage,
		Sampler,
		AccelerationStructure,
		RayQuery,

		// Keep internal types at the end.
		ControlPointArray,
		Interpolant,
		Char,
		// MSL-specific type used by the 'object' shader stage (the analog of a 'task' shader in GLSL).
		MeshGridProperties
	};

	// Scalar/vector/matrix support.
	BaseType basetype = Unknown;
	uint32_t width = 0;
	uint32_t vecsize = 1;
	uint32_t columns = 1;

	// Arrays, support array of arrays by having a vector of array sizes.
	SmallVector<uint32_t> array;

	// Array elements can be either specialization constants or specialization ops.
	// This array determines how to interpret the array size.
	// If an element is true, the element is a literal,
	// otherwise, it's an expression, which must be resolved on demand.
	// The actual size is not really known until runtime.
	SmallVector<bool> array_size_literal;

	// Pointers
	// Keep track of how many pointer layers we have.
	uint32_t pointer_depth = 0;
	bool pointer = false;
	bool forward_pointer = false;

	spv::StorageClass storage = spv::StorageClassGeneric;

	SmallVector<TypeID> member_types;

	// If member order has been rewritten to handle certain scenarios with Offset,
	// allow codegen to rewrite the index.
	SmallVector<uint32_t> member_type_index_redirection;

	struct ImageType
	{
		TypeID type;
		spv::Dim dim;
		bool depth;
		bool arrayed;
		bool ms;
		uint32_t sampled;
		spv::ImageFormat format;
		spv::AccessQualifier access;
	} image = {};

	// Structs can be declared multiple times if they are used as part of interface blocks.
	// We want to detect this so that we only emit the struct definition once.
	// Since we cannot rely on OpName to be equal, we need to figure out aliases.
	TypeID type_alias = 0;

	// Denotes the type which this type is based on.
	// Allows the backend to traverse how a complex type is built up during access chains.
	TypeID parent_type = 0;

	// Used in backends to avoid emitting members with conflicting names.
	std::unordered_set<std::string> member_name_cache;

	SPIRV_CROSS_DECLARE_CLONE(SPIRType)
};

struct SPIRExtension : IVariant
{
	enum
	{
		type = TypeExtension
	};

	enum Extension
	{
		Unsupported,
		GLSL,
		SPV_debug_info,
		SPV_AMD_shader_ballot,
		SPV_AMD_shader_explicit_vertex_parameter,
		SPV_AMD_shader_trinary_minmax,
		SPV_AMD_gcn_shader,
		NonSemanticDebugPrintf,
		NonSemanticShaderDebugInfo,
		NonSemanticGeneric
	};

	explicit SPIRExtension(Extension ext_)
	    : ext(ext_)
	{
	}

	Extension ext;
	SPIRV_CROSS_DECLARE_CLONE(SPIRExtension)
};

// SPIREntryPoint is not a variant since its IDs are used to decorate OpFunction,
// so in order to avoid conflicts, we can't stick them in the ids array.
struct SPIREntryPoint
{
	SPIREntryPoint(FunctionID self_, spv::ExecutionModel execution_model, const std::string &entry_name)
	    : self(self_)
	    , name(entry_name)
	    , orig_name(entry_name)
	    , model(execution_model)
	{
	}
	SPIREntryPoint() = default;

	FunctionID self = 0;
	std::string name;
	std::string orig_name;
	SmallVector<VariableID> interface_variables;

	Bitset flags;
	struct WorkgroupSize
	{
		uint32_t x = 0, y = 0, z = 0;
		uint32_t id_x = 0, id_y = 0, id_z = 0;
		uint32_t constant = 0; // Workgroup size can be expressed as a constant/spec-constant instead.
	} workgroup_size;
	uint32_t invocations = 0;
	uint32_t output_vertices = 0;
	uint32_t output_primitives = 0;
	spv::ExecutionModel model = spv::ExecutionModelMax;
	bool geometry_passthrough = false;
};

struct SPIRExpression : IVariant
{
	enum
	{
		type = TypeExpression
	};

	// Only created by the backend target to avoid creating tons of temporaries.
	SPIRExpression(std::string expr, TypeID expression_type_, bool immutable_)
	    : expression(std::move(expr))
	    , expression_type(expression_type_)
	    , immutable(immutable_)
	{
	}

	// If non-zero, prepend expression with to_expression(base_expression).
	// Used in amortizing multiple calls to to_expression()
	// where in certain cases that would quickly force a temporary when not needed.
	ID base_expression = 0;

	std::string expression;
	TypeID expression_type = 0;

	// If this expression is a forwarded load,
	// allow us to reference the original variable.
	ID loaded_from = 0;

	// If this expression will never change, we can avoid lots of temporaries
	// in high level source.
	// An expression being immutable can be speculative,
	// it is assumed that this is true almost always.
	bool immutable = false;

	// Before use, this expression must be transposed.
	// This is needed for targets which don't support row_major layouts.
	bool need_transpose = false;

	// Whether or not this is an access chain expression.
	bool access_chain = false;

	// Whether or not gl_MeshVerticesEXT[].gl_Position (as a whole or .y) is referenced
	bool access_meshlet_position_y = false;

	// A list of expressions which this expression depends on.
	SmallVector<ID> expression_dependencies;

	// Similar as expression dependencies, but does not stop the tracking for force-temporary variables.
	// We need to know the full chain from store back to any SSA variable.
	SmallVector<ID> invariance_dependencies;

	// By reading this expression, we implicitly read these expressions as well.
	// Used by access chain Store and Load since we read multiple expressions in this case.
	SmallVector<ID> implied_read_expressions;

	// The expression was emitted at a certain scope. Lets us track when an expression read means multiple reads.
	uint32_t emitted_loop_level = 0;

	SPIRV_CROSS_DECLARE_CLONE(SPIRExpression)
};

struct SPIRFunctionPrototype : IVariant
{
	enum
	{
		type = TypeFunctionPrototype
	};

	explicit SPIRFunctionPrototype(TypeID return_type_)
	    : return_type(return_type_)
	{
	}

	TypeID return_type;
	SmallVector<uint32_t> parameter_types;

	SPIRV_CROSS_DECLARE_CLONE(SPIRFunctionPrototype)
};

struct SPIRBlock : IVariant
{
	enum
	{
		type = TypeBlock
	};

	enum Terminator
	{
		Unknown,
		Direct, // Emit next block directly without a particular condition.

		Select, // Block ends with an if/else block.
		MultiSelect, // Block ends with switch statement.

		Return, // Block ends with return.
		Unreachable, // Noop
		Kill, // Discard
		IgnoreIntersection, // Ray Tracing
		TerminateRay, // Ray Tracing
		EmitMeshTasks // Mesh shaders
	};

	enum Merge
	{
		MergeNone,
		MergeLoop,
		MergeSelection
	};

	enum Hints
	{
		HintNone,
		HintUnroll,
		HintDontUnroll,
		HintFlatten,
		HintDontFlatten
	};

	enum Method
	{
		MergeToSelectForLoop,
		MergeToDirectForLoop,
		MergeToSelectContinueForLoop
	};

	enum ContinueBlockType
	{
		ContinueNone,

		// Continue block is branchless and has at least one instruction.
		ForLoop,

		// Noop continue block.
		WhileLoop,

		// Continue block is conditional.
		DoWhileLoop,

		// Highly unlikely that anything will use this,
		// since it is really awkward/impossible to express in GLSL.
		ComplexLoop
	};

	enum : uint32_t
	{
		NoDominator = 0xffffffffu
	};

	Terminator terminator = Unknown;
	Merge merge = MergeNone;
	Hints hint = HintNone;
	BlockID next_block = 0;
	BlockID merge_block = 0;
	BlockID continue_block = 0;

	ID return_value = 0; // If 0, return nothing (void).
	ID condition = 0;
	BlockID true_block = 0;
	BlockID false_block = 0;
	BlockID default_block = 0;

	// If terminator is EmitMeshTasksEXT.
	struct
	{
		ID groups[3];
		ID payload;
	} mesh = {};

	SmallVector<Instruction> ops;

	struct Phi
	{
		ID local_variable; // flush local variable ...
		BlockID parent; // If we're in from_block and want to branch into this block ...
		VariableID function_variable; // to this function-global "phi" variable first.
	};

	// Before entering this block flush out local variables to magical "phi" variables.
	SmallVector<Phi> phi_variables;

	// Declare these temporaries before beginning the block.
	// Used for handling complex continue blocks which have side effects.
	SmallVector<std::pair<TypeID, ID>> declare_temporary;

	// Declare these temporaries, but only conditionally if this block turns out to be
	// a complex loop header.
	SmallVector<std::pair<TypeID, ID>> potential_declare_temporary;

	struct Case
	{
		uint64_t value;
		BlockID block;
	};
	SmallVector<Case> cases_32bit;
	SmallVector<Case> cases_64bit;

	// If we have tried to optimize code for this block but failed,
	// keep track of this.
	bool disable_block_optimization = false;

	// If the continue block is complex, fallback to "dumb" for loops.
	bool complex_continue = false;

	// Do we need a ladder variable to defer breaking out of a loop construct after a switch block?
	bool need_ladder_break = false;

	// If marked, we have explicitly handled Phi from this block, so skip any flushes related to that on a branch.
	// Used to handle an edge case with switch and case-label fallthrough where fall-through writes to Phi.
	BlockID ignore_phi_from_block = 0;

	// The dominating block which this block might be within.
	// Used in continue; blocks to determine if we really need to write continue.
	BlockID loop_dominator = 0;

	// All access to these variables are dominated by this block,
	// so before branching anywhere we need to make sure that we declare these variables.
	SmallVector<VariableID> dominated_variables;

	// These are variables which should be declared in a for loop header, if we
	// fail to use a classic for-loop,
	// we remove these variables, and fall back to regular variables outside the loop.
	SmallVector<VariableID> loop_variables;

	// Some expressions are control-flow dependent, i.e. any instruction which relies on derivatives or
	// sub-group-like operations.
	// Make sure that we only use these expressions in the original block.
	SmallVector<ID> invalidate_expressions;

	SPIRV_CROSS_DECLARE_CLONE(SPIRBlock)
};

struct SPIRFunction : IVariant
{
	enum
	{
		type = TypeFunction
	};

	SPIRFunction(TypeID return_type_, TypeID function_type_)
	    : return_type(return_type_)
	    , function_type(function_type_)
	{
	}

	struct Parameter
	{
		TypeID type;
		ID id;
		uint32_t read_count;
		uint32_t write_count;

		// Set to true if this parameter aliases a global variable,
		// used mostly in Metal where global variables
		// have to be passed down to functions as regular arguments.
		// However, for this kind of variable, we should not care about
		// read and write counts as access to the function arguments
		// is not local to the function in question.
		bool alias_global_variable;
	};

	// When calling a function, and we're remapping separate image samplers,
	// resolve these arguments into combined image samplers and pass them
	// as additional arguments in this order.
	// It gets more complicated as functions can pull in their own globals
	// and combine them with parameters,
	// so we need to distinguish if something is local parameter index
	// or a global ID.
	struct CombinedImageSamplerParameter
	{
		VariableID id;
		VariableID image_id;
		VariableID sampler_id;
		bool global_image;
		bool global_sampler;
		bool depth;
	};

	TypeID return_type;
	TypeID function_type;
	SmallVector<Parameter> arguments;

	// Can be used by backends to add magic arguments.
	// Currently used by combined image/sampler implementation.

	SmallVector<Parameter> shadow_arguments;
	SmallVector<VariableID> local_variables;
	BlockID entry_block = 0;
	SmallVector<BlockID> blocks;
	SmallVector<CombinedImageSamplerParameter> combined_parameters;

	struct EntryLine
	{
		uint32_t file_id = 0;
		uint32_t line_literal = 0;
	};
	EntryLine entry_line;

	void add_local_variable(VariableID id)
	{
		local_variables.push_back(id);
	}

	void add_parameter(TypeID parameter_type, ID id, bool alias_global_variable = false)
	{
		// Arguments are read-only until proven otherwise.
		arguments.push_back({ parameter_type, id, 0u, 0u, alias_global_variable });
	}

	// Hooks to be run when the function returns.
	// Mostly used for lowering internal data structures onto flattened structures.
	// Need to defer this, because they might rely on things which change during compilation.
	// Intentionally not a small vector, this one is rare, and std::function can be large.
	Vector<std::function<void()>> fixup_hooks_out;

	// Hooks to be run when the function begins.
	// Mostly used for populating internal data structures from flattened structures.
	// Need to defer this, because they might rely on things which change during compilation.
	// Intentionally not a small vector, this one is rare, and std::function can be large.
	Vector<std::function<void()>> fixup_hooks_in;

	// On function entry, make sure to copy a constant array into thread addr space to work around
	// the case where we are passing a constant array by value to a function on backends which do not
	// consider arrays value types.
	SmallVector<ID> constant_arrays_needed_on_stack;

	bool active = false;
	bool flush_undeclared = true;
	bool do_combined_parameters = true;

	SPIRV_CROSS_DECLARE_CLONE(SPIRFunction)
};

struct SPIRAccessChain : IVariant
{
	enum
	{
		type = TypeAccessChain
	};

	SPIRAccessChain(TypeID basetype_, spv::StorageClass storage_, std::string base_, std::string dynamic_index_,
	                int32_t static_index_)
	    : basetype(basetype_)
	    , storage(storage_)
	    , base(std::move(base_))
	    , dynamic_index(std::move(dynamic_index_))
	    , static_index(static_index_)
	{
	}

	// The access chain represents an offset into a buffer.
	// Some backends need more complicated handling of access chains to be able to use buffers, like HLSL
	// which has no usable buffer type ala GLSL SSBOs.
	// StructuredBuffer is too limited, so our only option is to deal with ByteAddressBuffer which works with raw addresses.

	TypeID basetype;
	spv::StorageClass storage;
	std::string base;
	std::string dynamic_index;
	int32_t static_index;

	VariableID loaded_from = 0;
	uint32_t matrix_stride = 0;
	uint32_t array_stride = 0;
	bool row_major_matrix = false;
	bool immutable = false;

	// By reading this expression, we implicitly read these expressions as well.
	// Used by access chain Store and Load since we read multiple expressions in this case.
	SmallVector<ID> implied_read_expressions;

	SPIRV_CROSS_DECLARE_CLONE(SPIRAccessChain)
};

struct SPIRVariable : IVariant
{
	enum
	{
		type = TypeVariable
	};

	SPIRVariable() = default;
	SPIRVariable(TypeID basetype_, spv::StorageClass storage_, ID initializer_ = 0, VariableID basevariable_ = 0)
	    : basetype(basetype_)
	    , storage(storage_)
	    , initializer(initializer_)
	    , basevariable(basevariable_)
	{
	}

	TypeID basetype = 0;
	spv::StorageClass storage = spv::StorageClassGeneric;
	uint32_t decoration = 0;
	ID initializer = 0;
	VariableID basevariable = 0;

	SmallVector<uint32_t> dereference_chain;
	bool compat_builtin = false;

	// If a variable is shadowed, we only statically assign to it
	// and never actually emit a statement for it.
	// When we read the variable as an expression, just forward
	// shadowed_id as the expression.
	bool statically_assigned = false;
	ID static_expression = 0;

	// Temporaries which can remain forwarded as long as this variable is not modified.
	SmallVector<ID> dependees;

	bool deferred_declaration = false;
	bool phi_variable = false;

	// Used to deal with Phi variable flushes. See flush_phi().
	bool allocate_temporary_copy = false;

	bool remapped_variable = false;
	uint32_t remapped_components = 0;

	// The block which dominates all access to this variable.
	BlockID dominator = 0;
	// If true, this variable is a loop variable, when accessing the variable
	// outside a loop,
	// we should statically forward it.
	bool loop_variable = false;
	// Set to true while we're inside the for loop.
	bool loop_variable_enable = false;

	// Used to find global LUTs
	bool is_written_to = false;

	SPIRFunction::Parameter *parameter = nullptr;

	SPIRV_CROSS_DECLARE_CLONE(SPIRVariable)
};

struct SPIRConstant : IVariant
{
	enum
	{
		type = TypeConstant
	};

	union Constant
	{
		uint32_t u32;
		int32_t i32;
		float f32;

		uint64_t u64;
		int64_t i64;
		double f64;
	};

	struct ConstantVector
	{
		Constant r[4];
		// If != 0, this element is a specialization constant, and we should keep track of it as such.
		ID id[4];
		uint32_t vecsize = 1;

		ConstantVector()
		{
			memset(r, 0, sizeof(r));
		}
	};

	struct ConstantMatrix
	{
		ConstantVector c[4];
		// If != 0, this column is a specialization constant, and we should keep track of it as such.
		ID id[4];
		uint32_t columns = 1;
	};

	static inline float f16_to_f32(uint16_t u16_value)
	{
		// Based on the GLM implementation.
		int s = (u16_value >> 15) & 0x1;
		int e = (u16_value >> 10) & 0x1f;
		int m = (u16_value >> 0) & 0x3ff;

		union
		{
			float f32;
			uint32_t u32;
		} u;

		if (e == 0)
		{
			if (m == 0)
			{
				u.u32 = uint32_t(s) << 31;
				return u.f32;
			}
			else
			{
				while ((m & 0x400) == 0)
				{
					m <<= 1;
					e--;
				}

				e++;
				m &= ~0x400;
			}
		}
		else if (e == 31)
		{
			if (m == 0)
			{
				u.u32 = (uint32_t(s) << 31) | 0x7f800000u;
				return u.f32;
			}
			else
			{
				u.u32 = (uint32_t(s) << 31) | 0x7f800000u | (m << 13);
				return u.f32;
			}
		}

		e += 127 - 15;
		m <<= 13;
		u.u32 = (uint32_t(s) << 31) | (e << 23) | m;
		return u.f32;
	}
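
	// Worked examples of the conversion above (illustrative): 0x3C00 (sign 0, exponent 15,
	// mantissa 0) maps to 1.0f, 0xC000 maps to -2.0f, 0x7C00 maps to +infinity, and
	// denormals such as 0x0001 are renormalized by the loop before the exponent is rebased
	// from bias 15 to bias 127.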

	inline uint32_t specialization_constant_id(uint32_t col, uint32_t row) const
	{
		return m.c[col].id[row];
	}

	inline uint32_t specialization_constant_id(uint32_t col) const
	{
		return m.id[col];
	}

	inline uint32_t scalar(uint32_t col = 0, uint32_t row = 0) const
	{
		return m.c[col].r[row].u32;
	}

	inline int16_t scalar_i16(uint32_t col = 0, uint32_t row = 0) const
	{
		return int16_t(m.c[col].r[row].u32 & 0xffffu);
	}

	inline uint16_t scalar_u16(uint32_t col = 0, uint32_t row = 0) const
	{
		return uint16_t(m.c[col].r[row].u32 & 0xffffu);
	}

	inline int8_t scalar_i8(uint32_t col = 0, uint32_t row = 0) const
	{
		return int8_t(m.c[col].r[row].u32 & 0xffu);
	}

	inline uint8_t scalar_u8(uint32_t col = 0, uint32_t row = 0) const
	{
		return uint8_t(m.c[col].r[row].u32 & 0xffu);
	}

	inline float scalar_f16(uint32_t col = 0, uint32_t row = 0) const
	{
		return f16_to_f32(scalar_u16(col, row));
	}

	inline float scalar_f32(uint32_t col = 0, uint32_t row = 0) const
	{
		return m.c[col].r[row].f32;
	}

	inline int32_t scalar_i32(uint32_t col = 0, uint32_t row = 0) const
	{
		return m.c[col].r[row].i32;
	}

	inline double scalar_f64(uint32_t col = 0, uint32_t row = 0) const
	{
		return m.c[col].r[row].f64;
	}

	inline int64_t scalar_i64(uint32_t col = 0, uint32_t row = 0) const
	{
		return m.c[col].r[row].i64;
	}

	inline uint64_t scalar_u64(uint32_t col = 0, uint32_t row = 0) const
	{
		return m.c[col].r[row].u64;
	}

	inline const ConstantVector &vector() const
	{
		return m.c[0];
	}

	inline uint32_t vector_size() const
	{
		return m.c[0].vecsize;
	}

	inline uint32_t columns() const
	{
		return m.columns;
	}

	inline void make_null(const SPIRType &constant_type_)
	{
		m = {};
		m.columns = constant_type_.columns;
		for (auto &c : m.c)
			c.vecsize = constant_type_.vecsize;
	}

	inline bool constant_is_null() const
	{
		if (specialization)
			return false;
		if (!subconstants.empty())
			return false;

		for (uint32_t col = 0; col < columns(); col++)
			for (uint32_t row = 0; row < vector_size(); row++)
				if (scalar_u64(col, row) != 0)
					return false;

		return true;
	}

	explicit SPIRConstant(uint32_t constant_type_)
	    : constant_type(constant_type_)
	{
	}

	SPIRConstant() = default;

	SPIRConstant(TypeID constant_type_, const uint32_t *elements, uint32_t num_elements, bool specialized)
	    : constant_type(constant_type_)
	    , specialization(specialized)
	{
		subconstants.reserve(num_elements);
		for (uint32_t i = 0; i < num_elements; i++)
			subconstants.push_back(elements[i]);
		specialization = specialized;
	}

	// Construct scalar (32-bit).
	SPIRConstant(TypeID constant_type_, uint32_t v0, bool specialized)
	    : constant_type(constant_type_)
	    , specialization(specialized)
	{
		m.c[0].r[0].u32 = v0;
		m.c[0].vecsize = 1;
		m.columns = 1;
	}

	// Construct scalar (64-bit).
	SPIRConstant(TypeID constant_type_, uint64_t v0, bool specialized)
	    : constant_type(constant_type_)
	    , specialization(specialized)
	{
		m.c[0].r[0].u64 = v0;
		m.c[0].vecsize = 1;
		m.columns = 1;
	}

	// Construct vectors and matrices.
	SPIRConstant(TypeID constant_type_, const SPIRConstant *const *vector_elements, uint32_t num_elements,
	             bool specialized)
	    : constant_type(constant_type_)
	    , specialization(specialized)
	{
		bool matrix = vector_elements[0]->m.c[0].vecsize > 1;

		if (matrix)
		{
			m.columns = num_elements;

			for (uint32_t i = 0; i < num_elements; i++)
			{
				m.c[i] = vector_elements[i]->m.c[0];
				if (vector_elements[i]->specialization)
					m.id[i] = vector_elements[i]->self;
			}
		}
		else
		{
			m.c[0].vecsize = num_elements;
			m.columns = 1;

			for (uint32_t i = 0; i < num_elements; i++)
			{
				m.c[0].r[i] = vector_elements[i]->m.c[0].r[0];
				if (vector_elements[i]->specialization)
					m.c[0].id[i] = vector_elements[i]->self;
			}
		}
	}

	TypeID constant_type = 0;
	ConstantMatrix m;

	// If this constant is a specialization constant (i.e. created with OpSpecConstant*).
	bool specialization = false;
	// If this constant is used as an array length which creates specialization restrictions on some backends.
	bool is_used_as_array_length = false;

	// If true, this is a LUT, and should always be declared in the outer scope.
	bool is_used_as_lut = false;

	// For composites which are constant arrays, etc.
	SmallVector<ConstantID> subconstants;

	// Non-Vulkan GLSL, HLSL and sometimes MSL emit defines for each specialization constant,
	// and use them to initialize the constant. This allows the user
	// to still be able to specialize the value by supplying corresponding
	// preprocessor directives before compiling the shader.
	std::string specialization_constant_macro_name;

	SPIRV_CROSS_DECLARE_CLONE(SPIRConstant)
};

// Variants have a very specific allocation scheme.
struct ObjectPoolGroup
{
	std::unique_ptr<ObjectPoolBase> pools[TypeCount];
};

class Variant
{
public:
	explicit Variant(ObjectPoolGroup *group_)
	    : group(group_)
	{
	}

	~Variant()
	{
		if (holder)
			group->pools[type]->deallocate_opaque(holder);
	}

	// Marking custom move constructor as noexcept is important.
	Variant(Variant &&other) SPIRV_CROSS_NOEXCEPT
	{
		*this = std::move(other);
	}

	// We cannot copy from other variant without our own pool group.
	// Have to explicitly copy.
	Variant(const Variant &variant) = delete;

	// Marking the custom move assignment operator as noexcept is important.
	Variant &operator=(Variant &&other) SPIRV_CROSS_NOEXCEPT
	{
		if (this != &other)
		{
			if (holder)
				group->pools[type]->deallocate_opaque(holder);
			holder = other.holder;
			group = other.group;
			type = other.type;
			allow_type_rewrite = other.allow_type_rewrite;

			other.holder = nullptr;
			other.type = TypeNone;
		}
		return *this;
	}

	// This copy/clone should only be called in the Compiler constructor.
	// If this is called inside ::compile(), we invalidate any references we took higher in the stack.
	// This should never happen.
	Variant &operator=(const Variant &other)
	{
//#define SPIRV_CROSS_COPY_CONSTRUCTOR_SANITIZE
#ifdef SPIRV_CROSS_COPY_CONSTRUCTOR_SANITIZE
		abort();
#endif
		if (this != &other)
		{
			if (holder)
				group->pools[type]->deallocate_opaque(holder);

			if (other.holder)
				holder = other.holder->clone(group->pools[other.type].get());
			else
				holder = nullptr;

			type = other.type;
			allow_type_rewrite = other.allow_type_rewrite;
		}
		return *this;
	}

	void set(IVariant *val, Types new_type)
	{
		if (holder)
			group->pools[type]->deallocate_opaque(holder);
		holder = nullptr;

		if (!allow_type_rewrite && type != TypeNone && type != new_type)
		{
			if (val)
				group->pools[new_type]->deallocate_opaque(val);
			SPIRV_CROSS_THROW("Overwriting a variant with new type.");
		}

		holder = val;
		type = new_type;
		allow_type_rewrite = false;
	}

	template <typename T, typename... Ts>
	T *allocate_and_set(Types new_type, Ts &&... ts)
	{
		T *val = static_cast<ObjectPool<T> &>(*group->pools[new_type]).allocate(std::forward<Ts>(ts)...);
		set(val, new_type);
		return val;
	}

	template <typename T>
	T &get()
	{
		if (!holder)
			SPIRV_CROSS_THROW("nullptr");
		if (static_cast<Types>(T::type) != type)
			SPIRV_CROSS_THROW("Bad cast");
		return *static_cast<T *>(holder);
	}

	template <typename T>
	const T &get() const
	{
		if (!holder)
			SPIRV_CROSS_THROW("nullptr");
		if (static_cast<Types>(T::type) != type)
			SPIRV_CROSS_THROW("Bad cast");
		return *static_cast<const T *>(holder);
	}

	Types get_type() const
	{
		return type;
	}

	ID get_id() const
	{
		return holder ? holder->self : ID(0);
	}

	bool empty() const
	{
		return !holder;
	}

	void reset()
	{
		if (holder)
			group->pools[type]->deallocate_opaque(holder);
		holder = nullptr;
		type = TypeNone;
	}

	void set_allow_type_rewrite()
	{
		allow_type_rewrite = true;
	}

private:
	ObjectPoolGroup *group = nullptr;
	IVariant *holder = nullptr;
	Types type = TypeNone;
	bool allow_type_rewrite = false;
};

template <typename T>
T &variant_get(Variant &var)
{
	return var.get<T>();
}

template <typename T>
const T &variant_get(const Variant &var)
{
	return var.get<T>();
}

template <typename T, typename... P>
T &variant_set(Variant &var, P &&... args)
{
	auto *ptr = var.allocate_and_set<T>(static_cast<Types>(T::type), std::forward<P>(args)...);
	return *ptr;
}

struct AccessChainMeta
{
	uint32_t storage_physical_type = 0;
	bool need_transpose = false;
	bool storage_is_packed = false;
	bool storage_is_invariant = false;
	bool flattened_struct = false;
	bool relaxed_precision = false;
	bool access_meshlet_position_y = false;
	bool chain_is_builtin = false;
	spv::BuiltIn builtin = {};
};

enum ExtendedDecorations
{
	// Marks if a buffer block is re-packed, i.e. member declaration might be subject to PhysicalTypeID remapping and padding.
	SPIRVCrossDecorationBufferBlockRepacked = 0,

	// A type in a buffer block might be declared with a different physical type than the logical type.
	// If this is not set, PhysicalTypeID == the SPIR-V type as declared.
	SPIRVCrossDecorationPhysicalTypeID,

	// Marks if the physical type is to be declared with tight packing rules, i.e. packed_floatN on MSL and friends.
	// If this is set, PhysicalTypeID might also be set. It can be set to same as logical type if all we're doing
	// is converting float3 to packed_float3 for example.
	// If this is marked on a struct, it means the struct itself must use only Packed types for all its members.
	SPIRVCrossDecorationPhysicalTypePacked,

	// The padding in bytes before declaring this struct member.
	// If used on a struct type, marks the target size of a struct.
	SPIRVCrossDecorationPaddingTarget,

	SPIRVCrossDecorationInterfaceMemberIndex,
	SPIRVCrossDecorationInterfaceOrigID,
	SPIRVCrossDecorationResourceIndexPrimary,
	// Used for decorations like resource indices for samplers when part of combined image samplers.
	// A variable might need to hold two resource indices in this case.
	SPIRVCrossDecorationResourceIndexSecondary,
	// Used for resource indices for multiplanar images when part of combined image samplers.
	SPIRVCrossDecorationResourceIndexTertiary,
	SPIRVCrossDecorationResourceIndexQuaternary,

	// Marks a buffer block for using explicit offsets (GLSL/HLSL).
	SPIRVCrossDecorationExplicitOffset,

	// Apply to a variable in the Input storage class; marks it as holding the base group passed to vkCmdDispatchBase(),
	// or the base vertex and instance indices passed to vkCmdDrawIndexed().
	// In MSL, this is used to adjust the WorkgroupId and GlobalInvocationId variables in compute shaders,
	// and to hold the BaseVertex and BaseInstance variables in vertex shaders.
	SPIRVCrossDecorationBuiltInDispatchBase,

	// Apply to a variable that is a function parameter; marks it as being a "dynamic"
	// combined image-sampler. In MSL, this is used when a function parameter might hold
	// either a regular combined image-sampler or one that has an attached sampler
	// Y'CbCr conversion.
	SPIRVCrossDecorationDynamicImageSampler,

	// Apply to a variable in the Input storage class; marks it as holding the size of the stage
	// input grid.
	// In MSL, this is used to hold the vertex and instance counts in a tessellation pipeline
	// vertex shader.
	SPIRVCrossDecorationBuiltInStageInputSize,

	// Apply to any access chain of a tessellation I/O variable; stores the type of the sub-object
	// that was chained to, as recorded in the input variable itself. This is used in case the pointer
	// is itself used as the base of an access chain, to calculate the original type of the sub-object
	// chained to, in case a swizzle needs to be applied. This should not happen normally with valid
	// SPIR-V, but the MSL backend can change the type of input variables, necessitating the
	// addition of swizzles to keep the generated code compiling.
	SPIRVCrossDecorationTessIOOriginalInputTypeID,

	// Apply to any access chain of an interface variable used with pull-model interpolation, where the variable is a
	// vector but the resulting pointer is a scalar; stores the component index that is to be accessed by the chain.
	// This is used when emitting calls to interpolation functions on the chain in MSL: in this case, the component
	// must be applied to the result, since pull-model interpolants in MSL cannot be swizzled directly, but the
	// results of interpolation can.
	SPIRVCrossDecorationInterpolantComponentExpr,

	// Apply to any struct type that is used in the Workgroup storage class.
	// This causes matrices in MSL prior to Metal 3.0 to be emitted using a special
	// class that is convertible to the standard matrix type, to work around the
	// lack of constructors in the 'threadgroup' address space.
	SPIRVCrossDecorationWorkgroupStruct,

	SPIRVCrossDecorationOverlappingBinding,

	SPIRVCrossDecorationCount
};

struct Meta
{
	struct Decoration
	{
		std::string alias;
		std::string qualified_alias;
		std::string hlsl_semantic;
		std::string user_type;
		Bitset decoration_flags;
		spv::BuiltIn builtin_type = spv::BuiltInMax;
		uint32_t location = 0;
		uint32_t component = 0;
		uint32_t set = 0;
		uint32_t binding = 0;
		uint32_t offset = 0;
		uint32_t xfb_buffer = 0;
		uint32_t xfb_stride = 0;
		uint32_t stream = 0;
		uint32_t array_stride = 0;
		uint32_t matrix_stride = 0;
		uint32_t input_attachment = 0;
		uint32_t spec_id = 0;
		uint32_t index = 0;
		spv::FPRoundingMode fp_rounding_mode = spv::FPRoundingModeMax;
		bool builtin = false;
		bool qualified_alias_explicit_override = false;

		struct Extended
		{
			Extended()
			{
				// MSVC 2013 workaround to init like this.
				for (auto &v : values)
					v = 0;
			}

			Bitset flags;
			uint32_t values[SPIRVCrossDecorationCount];
		} extended;
	};

	Decoration decoration;

	// Intentionally not a SmallVector. Decoration is large and somewhat rare.
	Vector<Decoration> members;

	std::unordered_map<uint32_t, uint32_t> decoration_word_offset;

	// For SPV_GOOGLE_hlsl_functionality1.
	bool hlsl_is_magic_counter_buffer = false;
	// ID for the sibling counter buffer.
	uint32_t hlsl_magic_counter_buffer = 0;
};

// A user callback that remaps the type of any variable.
// var_name is the declared name of the variable.
// name_of_type is the textual name of the type which will be used in the code unless written to by the callback.
using VariableTypeRemapCallback =
    std::function<void(const SPIRType &type, const std::string &var_name, std::string &name_of_type)>;

class Hasher
{
public:
	inline void u32(uint32_t value)
	{
		h = (h * 0x100000001b3ull) ^ value;
	}

	inline uint64_t get() const
	{
		return h;
	}

private:
	uint64_t h = 0xcbf29ce484222325ull;
};
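
// Note on the constants above: this is an FNV-1-style hash (64-bit FNV offset basis and
// prime), fed 32-bit words at a time instead of bytes. A typical use is folding a sequence
// of IDs or decoration words into one key, e.g. (illustrative; the variables are hypothetical):
//
//   Hasher hasher;
//   hasher.u32(type_id);
//   hasher.u32(member_index);
//   uint64_t key = hasher.get();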

static inline bool type_is_floating_point(const SPIRType &type)
{
	return type.basetype == SPIRType::Half || type.basetype == SPIRType::Float || type.basetype == SPIRType::Double;
}

static inline bool type_is_integral(const SPIRType &type)
{
	return type.basetype == SPIRType::SByte || type.basetype == SPIRType::UByte || type.basetype == SPIRType::Short ||
	       type.basetype == SPIRType::UShort || type.basetype == SPIRType::Int || type.basetype == SPIRType::UInt ||
	       type.basetype == SPIRType::Int64 || type.basetype == SPIRType::UInt64;
}

static inline SPIRType::BaseType to_signed_basetype(uint32_t width)
{
	switch (width)
	{
	case 8:
		return SPIRType::SByte;
	case 16:
		return SPIRType::Short;
	case 32:
		return SPIRType::Int;
	case 64:
		return SPIRType::Int64;
	default:
		SPIRV_CROSS_THROW("Invalid bit width.");
	}
}

static inline SPIRType::BaseType to_unsigned_basetype(uint32_t width)
{
	switch (width)
	{
	case 8:
		return SPIRType::UByte;
	case 16:
		return SPIRType::UShort;
	case 32:
		return SPIRType::UInt;
	case 64:
		return SPIRType::UInt64;
	default:
		SPIRV_CROSS_THROW("Invalid bit width.");
	}
}

// Returns true if an arithmetic operation does not change behavior depending on signedness.
static inline bool opcode_is_sign_invariant(spv::Op opcode)
{
	switch (opcode)
	{
	case spv::OpIEqual:
	case spv::OpINotEqual:
	case spv::OpISub:
	case spv::OpIAdd:
	case spv::OpIMul:
	case spv::OpShiftLeftLogical:
	case spv::OpBitwiseOr:
	case spv::OpBitwiseXor:
	case spv::OpBitwiseAnd:
		return true;

	default:
		return false;
	}
}

static inline bool opcode_can_promote_integer_implicitly(spv::Op opcode)
{
	switch (opcode)
	{
	case spv::OpSNegate:
	case spv::OpNot:
	case spv::OpBitwiseAnd:
	case spv::OpBitwiseOr:
	case spv::OpBitwiseXor:
	case spv::OpShiftLeftLogical:
	case spv::OpShiftRightLogical:
	case spv::OpShiftRightArithmetic:
	case spv::OpIAdd:
	case spv::OpISub:
	case spv::OpIMul:
	case spv::OpSDiv:
	case spv::OpUDiv:
	case spv::OpSRem:
	case spv::OpUMod:
	case spv::OpSMod:
		return true;

	default:
		return false;
	}
}

struct SetBindingPair
{
	uint32_t desc_set;
	uint32_t binding;

	inline bool operator==(const SetBindingPair &other) const
	{
		return desc_set == other.desc_set && binding == other.binding;
	}

	inline bool operator<(const SetBindingPair &other) const
	{
		return desc_set < other.desc_set || (desc_set == other.desc_set && binding < other.binding);
	}
};

struct LocationComponentPair
{
	uint32_t location;
	uint32_t component;

	inline bool operator==(const LocationComponentPair &other) const
	{
		return location == other.location && component == other.component;
	}

	inline bool operator<(const LocationComponentPair &other) const
	{
		return location < other.location || (location == other.location && component < other.component);
	}
};

struct StageSetBinding
{
	spv::ExecutionModel model;
	uint32_t desc_set;
	uint32_t binding;

	inline bool operator==(const StageSetBinding &other) const
	{
		return model == other.model && desc_set == other.desc_set && binding == other.binding;
	}
};

struct InternalHasher
{
	inline size_t operator()(const SetBindingPair &value) const
	{
		// Quality of hash doesn't really matter here.
		auto hash_set = std::hash<uint32_t>()(value.desc_set);
		auto hash_binding = std::hash<uint32_t>()(value.binding);
		return (hash_set * 0x10001b31) ^ hash_binding;
	}

	inline size_t operator()(const LocationComponentPair &value) const
	{
		// Quality of hash doesn't really matter here.
		auto hash_set = std::hash<uint32_t>()(value.location);
		auto hash_binding = std::hash<uint32_t>()(value.component);
		return (hash_set * 0x10001b31) ^ hash_binding;
	}

	inline size_t operator()(const StageSetBinding &value) const
	{
		// Quality of hash doesn't really matter here.
		auto hash_model = std::hash<uint32_t>()(value.model);
		auto hash_set = std::hash<uint32_t>()(value.desc_set);
		auto tmp_hash = (hash_model * 0x10001b31) ^ hash_set;
		return (tmp_hash * 0x10001b31) ^ value.binding;
	}
};

// Special constant used in a {MSL,HLSL}ResourceBinding desc_set
// element to indicate the bindings for the push constants.
static const uint32_t ResourceBindingPushConstantDescriptorSet = ~(0u);

// Special constant used in a {MSL,HLSL}ResourceBinding binding
// element to indicate the bindings for the push constants.
static const uint32_t ResourceBindingPushConstantBinding = 0;
} // namespace SPIRV_CROSS_NAMESPACE

namespace std
{
template <SPIRV_CROSS_NAMESPACE::Types type>
struct hash<SPIRV_CROSS_NAMESPACE::TypedID<type>>
{
	size_t operator()(const SPIRV_CROSS_NAMESPACE::TypedID<type> &value) const
	{
		return std::hash<uint32_t>()(value);
	}
};
} // namespace std

#endif