/*
 * Copyright 2018-2021 Arm Limited
 * SPDX-License-Identifier: Apache-2.0 OR MIT
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/*
 * At your option, you may choose to accept this material under either:
 * 1. The Apache License, Version 2.0, found at <http://www.apache.org/licenses/LICENSE-2.0>, or
 * 2. The MIT License, found at <http://opensource.org/licenses/MIT>.
 */

#include "spirv_parser.hpp"
#include <assert.h>

using namespace std;
using namespace spv;

namespace SPIRV_CROSS_NAMESPACE
{
Parser::Parser(vector<uint32_t> spirv)
{
	ir.spirv = std::move(spirv);
}

Parser::Parser(const uint32_t *spirv_data, size_t word_count)
{
	ir.spirv = vector<uint32_t>(spirv_data, spirv_data + word_count);
}
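
// Typical usage, as a sketch (assumes the get_parsed_ir() accessor declared in
// spirv_parser.hpp):
//
//   Parser parser(std::move(spirv_words));
//   parser.parse();
//   ParsedIR parsed_ir = std::move(parser.get_parsed_ir());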

static bool decoration_is_string(Decoration decoration)
{
	switch (decoration)
	{
	case DecorationHlslSemanticGOOGLE:
		return true;

	default:
		return false;
	}
}

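// Byte-swaps one 32-bit word; e.g. the SPIR-V magic number 0x07230203 read on a
// machine of the opposite endianness appears as 0x03022307 and swaps back to
// 0x07230203.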
static inline uint32_t swap_endian(uint32_t v)
{
	return ((v >> 24) & 0x000000ffu) | ((v >> 8) & 0x0000ff00u) | ((v << 8) & 0x00ff0000u) | ((v << 24) & 0xff000000u);
}

static bool is_valid_spirv_version(uint32_t version)
{
	switch (version)
	{
	// Allow v99 since it tends to just work.
	case 99:
	case 0x10000: // SPIR-V 1.0
	case 0x10100: // SPIR-V 1.1
	case 0x10200: // SPIR-V 1.2
	case 0x10300: // SPIR-V 1.3
	case 0x10400: // SPIR-V 1.4
	case 0x10500: // SPIR-V 1.5
	case 0x10600: // SPIR-V 1.6
		return true;

	default:
		return false;
	}
}

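// A SPIR-V module begins with a five-word header:
//   word 0: magic number (0x07230203)
//   word 1: version (e.g. 0x10300 for SPIR-V 1.3)
//   word 2: generator magic number
//   word 3: ID bound (every result ID is less than this)
//   word 4: reserved schema, zero
// parse() validates this header before decoding the instruction stream.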
void Parser::parse()
{
	auto &spirv = ir.spirv;

	auto len = spirv.size();
	if (len < 5)
		SPIRV_CROSS_THROW("SPIRV file too small.");

	auto s = spirv.data();

	// Endian-swap if we need to.
	if (s[0] == swap_endian(MagicNumber))
		transform(begin(spirv), end(spirv), begin(spirv), [](uint32_t c) { return swap_endian(c); });

	if (s[0] != MagicNumber || !is_valid_spirv_version(s[1]))
		SPIRV_CROSS_THROW("Invalid SPIRV format.");

	uint32_t bound = s[3];

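	// 0x3fffff (4,194,303) is the universal ID bound limit from the SPIR-V specification.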
	const uint32_t MaximumNumberOfIDs = 0x3fffff;
	if (bound > MaximumNumberOfIDs)
		SPIRV_CROSS_THROW("ID bound exceeds limit of 0x3fffff.\n");

	ir.set_id_bounds(bound);

	uint32_t offset = 5;

	SmallVector<Instruction> instructions;
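
	// The first word of every instruction packs the total word count (including
	// this word) in the upper 16 bits and the opcode in the lower 16 bits;
	// e.g. "%1 = OpTypeVoid" is two words and encodes as 0x00020013.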
	while (offset < len)
	{
		Instruction instr = {};
		instr.op = spirv[offset] & 0xffff;
		instr.count = (spirv[offset] >> 16) & 0xffff;

		if (instr.count == 0)
			SPIRV_CROSS_THROW("SPIR-V instructions cannot consume 0 words. Invalid SPIR-V file.");

		instr.offset = offset + 1;
		instr.length = instr.count - 1;

		offset += instr.count;

		if (offset > spirv.size())
			SPIRV_CROSS_THROW("SPIR-V instruction goes out of bounds.");

		instructions.push_back(instr);
	}

	for (auto &i : instructions)
		parse(i);

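	// OpTypeForwardPointer declarations were parsed before their pointee type
	// existed, so types that copied from them are now patched with the real
	// member list and base type.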
	for (auto &fixup : forward_pointer_fixups)
	{
		auto &target = get<SPIRType>(fixup.first);
		auto &source = get<SPIRType>(fixup.second);
		target.member_types = source.member_types;
		target.basetype = source.basetype;
		target.self = source.self;
	}
	forward_pointer_fixups.clear();

	if (current_function)
		SPIRV_CROSS_THROW("Function was not terminated.");
	if (current_block)
		SPIRV_CROSS_THROW("Block was not terminated.");
	if (ir.default_entry_point == 0)
		SPIRV_CROSS_THROW("There is no entry point in the SPIR-V module.");
}

const uint32_t *Parser::stream(const Instruction &instr) const
{
	// If we're not going to use any arguments, just return nullptr.
	// We want to avoid the case where we return an out-of-range pointer
	// that trips debug assertions on some platforms.
	if (!instr.length)
		return nullptr;

	if (instr.offset + instr.length > ir.spirv.size())
		SPIRV_CROSS_THROW("Compiler::stream() out of range.");
	return &ir.spirv[instr.offset];
}

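// SPIR-V string literals are nul-terminated UTF-8, packed little-endian into
// successive words and zero-padded up to a word boundary; e.g. "main" occupies
// two words: 0x6e69616d and 0x00000000.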
static string extract_string(const vector<uint32_t> &spirv, uint32_t offset)
{
	string ret;
	for (uint32_t i = offset; i < spirv.size(); i++)
	{
		uint32_t w = spirv[i];

		for (uint32_t j = 0; j < 4; j++, w >>= 8)
		{
			char c = w & 0xff;
			if (c == '\0')
				return ret;
			ret += c;
		}
	}

	SPIRV_CROSS_THROW("String was not terminated before EOF");
}

void Parser::parse(const Instruction &instruction)
{
	auto *ops = stream(instruction);
	auto op = static_cast<Op>(instruction.op);
	uint32_t length = instruction.length;

	switch (op)
	{
	case OpSourceContinued:
	case OpSourceExtension:
	case OpNop:
	case OpModuleProcessed:
		break;

	case OpString:
	{
		set<SPIRString>(ops[0], extract_string(ir.spirv, instruction.offset + 1));
		break;
	}

	case OpMemoryModel:
		ir.addressing_model = static_cast<AddressingModel>(ops[0]);
		ir.memory_model = static_cast<MemoryModel>(ops[1]);
		break;

	case OpSource:
	{
		auto lang = static_cast<SourceLanguage>(ops[0]);
		switch (lang)
		{
		case SourceLanguageESSL:
			ir.source.es = true;
			ir.source.version = ops[1];
			ir.source.known = true;
			ir.source.hlsl = false;
			break;

		case SourceLanguageGLSL:
			ir.source.es = false;
			ir.source.version = ops[1];
			ir.source.known = true;
			ir.source.hlsl = false;
			break;

		case SourceLanguageHLSL:
			// For purposes of cross-compiling, this is GLSL 450.
			ir.source.es = false;
			ir.source.version = 450;
			ir.source.known = true;
			ir.source.hlsl = true;
			break;

		default:
			ir.source.known = false;
			break;
		}
		break;
	}

	case OpUndef:
	{
		uint32_t result_type = ops[0];
		uint32_t id = ops[1];
		set<SPIRUndef>(id, result_type);
		if (current_block)
			current_block->ops.push_back(instruction);
		break;
	}

	case OpCapability:
	{
		uint32_t cap = ops[0];
		if (cap == CapabilityKernel)
			SPIRV_CROSS_THROW("Kernel capability not supported.");

		ir.declared_capabilities.push_back(static_cast<Capability>(ops[0]));
		break;
	}

	case OpExtension:
	{
		auto ext = extract_string(ir.spirv, instruction.offset);
		ir.declared_extensions.push_back(std::move(ext));
		break;
	}

	case OpExtInstImport:
	{
		uint32_t id = ops[0];
		auto ext = extract_string(ir.spirv, instruction.offset + 1);
		if (ext == "GLSL.std.450")
			set<SPIRExtension>(id, SPIRExtension::GLSL);
		else if (ext == "DebugInfo")
			set<SPIRExtension>(id, SPIRExtension::SPV_debug_info);
		else if (ext == "SPV_AMD_shader_ballot")
			set<SPIRExtension>(id, SPIRExtension::SPV_AMD_shader_ballot);
		else if (ext == "SPV_AMD_shader_explicit_vertex_parameter")
			set<SPIRExtension>(id, SPIRExtension::SPV_AMD_shader_explicit_vertex_parameter);
		else if (ext == "SPV_AMD_shader_trinary_minmax")
			set<SPIRExtension>(id, SPIRExtension::SPV_AMD_shader_trinary_minmax);
		else if (ext == "SPV_AMD_gcn_shader")
			set<SPIRExtension>(id, SPIRExtension::SPV_AMD_gcn_shader);
		else if (ext == "NonSemantic.DebugPrintf")
			set<SPIRExtension>(id, SPIRExtension::NonSemanticDebugPrintf);
		else
			set<SPIRExtension>(id, SPIRExtension::Unsupported);

		// Other SPIR-V extensions which have ExtInstrs are currently not supported.

		break;
	}

	case OpExtInst:
	{
		// The SPIR-V debug information extended instructions might come at global scope.
		if (current_block)
		{
			current_block->ops.push_back(instruction);
			if (length >= 2)
			{
				const auto *type = maybe_get<SPIRType>(ops[0]);
				if (type)
					ir.load_type_width.insert({ ops[1], type->width });
			}
		}
		break;
	}

	case OpEntryPoint:
	{
		auto itr =
		    ir.entry_points.insert(make_pair(ops[1], SPIREntryPoint(ops[1], static_cast<ExecutionModel>(ops[0]),
		                                                            extract_string(ir.spirv, instruction.offset + 2))));
		auto &e = itr.first->second;

		// Strings need a nul-terminator and consume whole words.
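		// For example, the name "main" is 5 bytes including its terminator,
		// so it consumes (4 + 1 + 3) >> 2 == 2 words.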
		uint32_t strlen_words = uint32_t((e.name.size() + 1 + 3) >> 2);

		for (uint32_t i = strlen_words + 2; i < instruction.length; i++)
			e.interface_variables.push_back(ops[i]);

		// Set the name of the entry point in case OpName is not provided later.
		ir.set_name(ops[1], e.name);

		// If we don't have an entry, make the first one our "default".
		if (!ir.default_entry_point)
			ir.default_entry_point = ops[1];
		break;
	}

	case OpExecutionMode:
	{
		auto &execution = ir.entry_points[ops[0]];
		auto mode = static_cast<ExecutionMode>(ops[1]);
		execution.flags.set(mode);

		switch (mode)
		{
		case ExecutionModeInvocations:
			execution.invocations = ops[2];
			break;

		case ExecutionModeLocalSize:
			execution.workgroup_size.x = ops[2];
			execution.workgroup_size.y = ops[3];
			execution.workgroup_size.z = ops[4];
			break;

		case ExecutionModeOutputVertices:
			execution.output_vertices = ops[2];
			break;

		default:
			break;
		}
		break;
	}

	case OpExecutionModeId:
	{
		auto &execution = ir.entry_points[ops[0]];
		auto mode = static_cast<ExecutionMode>(ops[1]);
		execution.flags.set(mode);

		if (mode == ExecutionModeLocalSizeId)
		{
			execution.workgroup_size.id_x = ops[2];
			execution.workgroup_size.id_y = ops[3];
			execution.workgroup_size.id_z = ops[4];
		}

		break;
	}

	case OpName:
	{
		uint32_t id = ops[0];
		ir.set_name(id, extract_string(ir.spirv, instruction.offset + 1));
		break;
	}

	case OpMemberName:
	{
		uint32_t id = ops[0];
		uint32_t member = ops[1];
		ir.set_member_name(id, member, extract_string(ir.spirv, instruction.offset + 2));
		break;
	}

	case OpDecorationGroup:
	{
		// Noop, this simply means an ID should be a collector of decorations.
		// The meta array is already a flat array of decorations which will contain the relevant decorations.
		break;
	}

	case OpGroupDecorate:
	{
		uint32_t group_id = ops[0];
		auto &decorations = ir.meta[group_id].decoration;
		auto &flags = decorations.decoration_flags;

		// Copies decorations from one ID to another. Only copy decorations which are set in the group,
		// i.e., we cannot just copy the meta structure directly.
		for (uint32_t i = 1; i < length; i++)
		{
			uint32_t target = ops[i];
			flags.for_each_bit([&](uint32_t bit) {
				auto decoration = static_cast<Decoration>(bit);

				if (decoration_is_string(decoration))
				{
					ir.set_decoration_string(target, decoration, ir.get_decoration_string(group_id, decoration));
				}
				else
				{
					ir.meta[target].decoration_word_offset[decoration] =
					    ir.meta[group_id].decoration_word_offset[decoration];
					ir.set_decoration(target, decoration, ir.get_decoration(group_id, decoration));
				}
			});
		}
		break;
	}

	case OpGroupMemberDecorate:
	{
		uint32_t group_id = ops[0];
		auto &flags = ir.meta[group_id].decoration.decoration_flags;

		// Copies decorations from one ID to another. Only copy decorations which are set in the group,
		// i.e., we cannot just copy the meta structure directly.
		for (uint32_t i = 1; i + 1 < length; i += 2)
		{
			uint32_t target = ops[i + 0];
			uint32_t index = ops[i + 1];
			flags.for_each_bit([&](uint32_t bit) {
				auto decoration = static_cast<Decoration>(bit);

				if (decoration_is_string(decoration))
					ir.set_member_decoration_string(target, index, decoration,
					                                ir.get_decoration_string(group_id, decoration));
				else
					ir.set_member_decoration(target, index, decoration, ir.get_decoration(group_id, decoration));
			});
		}
		break;
	}

	case OpDecorate:
	case OpDecorateId:
	{
		// OpDecorateId technically supports an array of arguments, but our only supported decorations are single uint,
		// so merge decorate and decorate-id here.
		uint32_t id = ops[0];

		auto decoration = static_cast<Decoration>(ops[1]);
		if (length >= 3)
		{
			ir.meta[id].decoration_word_offset[decoration] = uint32_t(&ops[2] - ir.spirv.data());
			ir.set_decoration(id, decoration, ops[2]);
		}
		else
			ir.set_decoration(id, decoration);

		break;
	}

	case OpDecorateStringGOOGLE:
	{
		uint32_t id = ops[0];
		auto decoration = static_cast<Decoration>(ops[1]);
		ir.set_decoration_string(id, decoration, extract_string(ir.spirv, instruction.offset + 2));
		break;
	}

	case OpMemberDecorate:
	{
		uint32_t id = ops[0];
		uint32_t member = ops[1];
		auto decoration = static_cast<Decoration>(ops[2]);
		if (length >= 4)
			ir.set_member_decoration(id, member, decoration, ops[3]);
		else
			ir.set_member_decoration(id, member, decoration);
		break;
	}

	case OpMemberDecorateStringGOOGLE:
	{
		uint32_t id = ops[0];
		uint32_t member = ops[1];
		auto decoration = static_cast<Decoration>(ops[2]);
		ir.set_member_decoration_string(id, member, decoration, extract_string(ir.spirv, instruction.offset + 3));
		break;
	}

	// Build up basic types.
	case OpTypeVoid:
	{
		uint32_t id = ops[0];
		auto &type = set<SPIRType>(id);
		type.basetype = SPIRType::Void;
		break;
	}

	case OpTypeBool:
	{
		uint32_t id = ops[0];
		auto &type = set<SPIRType>(id);
		type.basetype = SPIRType::Boolean;
		type.width = 1;
		break;
	}

	case OpTypeFloat:
	{
		uint32_t id = ops[0];
		uint32_t width = ops[1];
		auto &type = set<SPIRType>(id);
		if (width == 64)
			type.basetype = SPIRType::Double;
		else if (width == 32)
			type.basetype = SPIRType::Float;
		else if (width == 16)
			type.basetype = SPIRType::Half;
		else
			SPIRV_CROSS_THROW("Unrecognized bit-width of floating point type.");
		type.width = width;
		break;
	}

	case OpTypeInt:
	{
		uint32_t id = ops[0];
		uint32_t width = ops[1];
		bool signedness = ops[2] != 0;
		auto &type = set<SPIRType>(id);
		type.basetype = signedness ? to_signed_basetype(width) : to_unsigned_basetype(width);
		type.width = width;
		break;
	}

	// Build composite types by "inheriting".
	// NOTE: The self member is also copied! For pointers and array modifiers this is a good thing,
	// since we can refer to decorations on pointee classes, which is needed for UBO/SSBO and I/O blocks in geometry/tess, etc.
	case OpTypeVector:
	{
		uint32_t id = ops[0];
		uint32_t vecsize = ops[2];

		auto &base = get<SPIRType>(ops[1]);
		auto &vecbase = set<SPIRType>(id);

		vecbase = base;
		vecbase.vecsize = vecsize;
		vecbase.self = id;
		vecbase.parent_type = ops[1];
		break;
	}

	case OpTypeMatrix:
	{
		uint32_t id = ops[0];
		uint32_t colcount = ops[2];

		auto &base = get<SPIRType>(ops[1]);
		auto &matrixbase = set<SPIRType>(id);

		matrixbase = base;
		matrixbase.columns = colcount;
		matrixbase.self = id;
		matrixbase.parent_type = ops[1];
		break;
	}

	case OpTypeArray:
	{
		uint32_t id = ops[0];
		auto &arraybase = set<SPIRType>(id);

		uint32_t tid = ops[1];
		auto &base = get<SPIRType>(tid);

		arraybase = base;
		arraybase.parent_type = tid;

		uint32_t cid = ops[2];
		ir.mark_used_as_array_length(cid);
		auto *c = maybe_get<SPIRConstant>(cid);
		bool literal = c && !c->specialization;

		// We're copying type information into Array types, so we'll need a fixup for any physical pointer
		// references.
		if (base.forward_pointer)
			forward_pointer_fixups.push_back({ id, tid });

		arraybase.array_size_literal.push_back(literal);
		arraybase.array.push_back(literal ? c->scalar() : cid);
		// Do NOT set arraybase.self!
		break;
	}

	case OpTypeRuntimeArray:
	{
		uint32_t id = ops[0];

		auto &base = get<SPIRType>(ops[1]);
		auto &arraybase = set<SPIRType>(id);

		// We're copying type information into Array types, so we'll need a fixup for any physical pointer
		// references.
		if (base.forward_pointer)
			forward_pointer_fixups.push_back({ id, ops[1] });

		arraybase = base;
		arraybase.array.push_back(0);
		arraybase.array_size_literal.push_back(true);
		arraybase.parent_type = ops[1];
		// Do NOT set arraybase.self!
		break;
	}

	case OpTypeImage:
	{
		uint32_t id = ops[0];
		auto &type = set<SPIRType>(id);
		type.basetype = SPIRType::Image;
		type.image.type = ops[1];
		type.image.dim = static_cast<Dim>(ops[2]);
		type.image.depth = ops[3] == 1;
		type.image.arrayed = ops[4] != 0;
		type.image.ms = ops[5] != 0;
		type.image.sampled = ops[6];
		type.image.format = static_cast<ImageFormat>(ops[7]);
		type.image.access = (length >= 9) ? static_cast<AccessQualifier>(ops[8]) : AccessQualifierMax;
		break;
	}

	case OpTypeSampledImage:
	{
		uint32_t id = ops[0];
		uint32_t imagetype = ops[1];
		auto &type = set<SPIRType>(id);
		type = get<SPIRType>(imagetype);
		type.basetype = SPIRType::SampledImage;
		type.self = id;
		break;
	}

	case OpTypeSampler:
	{
		uint32_t id = ops[0];
		auto &type = set<SPIRType>(id);
		type.basetype = SPIRType::Sampler;
		break;
	}

	case OpTypePointer:
	{
		uint32_t id = ops[0];

		// Very rarely, we might receive a FunctionPrototype here.
		// We won't be able to compile it, but we shouldn't crash when parsing.
		// We should be able to reflect.
		auto *base = maybe_get<SPIRType>(ops[2]);
		auto &ptrbase = set<SPIRType>(id);

		if (base)
			ptrbase = *base;

		ptrbase.pointer = true;
		ptrbase.pointer_depth++;
		ptrbase.storage = static_cast<StorageClass>(ops[1]);

		if (ptrbase.storage == StorageClassAtomicCounter)
			ptrbase.basetype = SPIRType::AtomicCounter;

		if (base && base->forward_pointer)
			forward_pointer_fixups.push_back({ id, ops[2] });

		ptrbase.parent_type = ops[2];

		// Do NOT set ptrbase.self!
		break;
	}

	case OpTypeForwardPointer:
	{
		uint32_t id = ops[0];
		auto &ptrbase = set<SPIRType>(id);
		ptrbase.pointer = true;
		ptrbase.pointer_depth++;
		ptrbase.storage = static_cast<StorageClass>(ops[1]);
		ptrbase.forward_pointer = true;

		if (ptrbase.storage == StorageClassAtomicCounter)
			ptrbase.basetype = SPIRType::AtomicCounter;

		break;
	}

	case OpTypeStruct:
	{
		uint32_t id = ops[0];
		auto &type = set<SPIRType>(id);
		type.basetype = SPIRType::Struct;
		for (uint32_t i = 1; i < length; i++)
			type.member_types.push_back(ops[i]);

		// Check if we have seen this struct type before, with just different
		// decorations.
		//
		// Add workaround for issue #17 as well by looking at OpName for the struct
		// types, which we shouldn't normally do.
		// We should not normally have to consider type aliases like this to begin with
		// however ... glslang issues #304, #307 cover this.

		// For stripped names, never consider struct type aliasing.
		// We risk declaring the same struct multiple times, but type-punning is not allowed
		// so this is safe.
		bool consider_aliasing = !ir.get_name(type.self).empty();
		if (consider_aliasing)
		{
			for (auto &other : global_struct_cache)
			{
				if (ir.get_name(type.self) == ir.get_name(other) &&
				    types_are_logically_equivalent(type, get<SPIRType>(other)))
				{
					type.type_alias = other;
					break;
				}
			}

			if (type.type_alias == TypeID(0))
				global_struct_cache.push_back(id);
		}
		break;
	}

	case OpTypeFunction:
	{
		uint32_t id = ops[0];
		uint32_t ret = ops[1];

		auto &func = set<SPIRFunctionPrototype>(id, ret);
		for (uint32_t i = 2; i < length; i++)
			func.parameter_types.push_back(ops[i]);
		break;
	}

	case OpTypeAccelerationStructureKHR:
	{
		uint32_t id = ops[0];
		auto &type = set<SPIRType>(id);
		type.basetype = SPIRType::AccelerationStructure;
		break;
	}

	case OpTypeRayQueryKHR:
	{
		uint32_t id = ops[0];
		auto &type = set<SPIRType>(id);
		type.basetype = SPIRType::RayQuery;
		break;
	}

	// Variable declaration
	// All variables are essentially pointers with a storage qualifier.
	case OpVariable:
	{
		uint32_t type = ops[0];
		uint32_t id = ops[1];
		auto storage = static_cast<StorageClass>(ops[2]);
		uint32_t initializer = length == 4 ? ops[3] : 0;

		if (storage == StorageClassFunction)
		{
			if (!current_function)
				SPIRV_CROSS_THROW("No function currently in scope");
			current_function->add_local_variable(id);
		}

		set<SPIRVariable>(id, type, storage, initializer);
		break;
	}

	// OpPhi
	// OpPhi is a fairly magical opcode.
	// It selects temporary variables based on which parent block we *came from*.
	// In high-level languages we can "de-SSA" by creating a function local, and flush out temporaries to this function-local
	// variable to emulate SSA Phi.
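	// For example, "%x = OpPhi %int %a %blockA %b %blockB" becomes a function-local
	// variable which each predecessor block writes (%a or %b) just before branching here.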
	case OpPhi:
	{
		if (!current_function)
			SPIRV_CROSS_THROW("No function currently in scope");
		if (!current_block)
			SPIRV_CROSS_THROW("No block currently in scope");

		uint32_t result_type = ops[0];
		uint32_t id = ops[1];

		// Instead of a temporary, create a new function-wide temporary with this ID instead.
		auto &var = set<SPIRVariable>(id, result_type, spv::StorageClassFunction);
		var.phi_variable = true;

		current_function->add_local_variable(id);

		for (uint32_t i = 2; i + 2 <= length; i += 2)
			current_block->phi_variables.push_back({ ops[i], ops[i + 1], id });
		break;
	}

	// Constants
	case OpSpecConstant:
	case OpConstant:
	{
		uint32_t id = ops[1];
		auto &type = get<SPIRType>(ops[0]);

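		// Literals wider than one word are encoded low-order word first,
		// so ops[2] holds the low 32 bits and ops[3] the high 32 bits.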
		if (type.width > 32)
			set<SPIRConstant>(id, ops[0], ops[2] | (uint64_t(ops[3]) << 32), op == OpSpecConstant);
		else
			set<SPIRConstant>(id, ops[0], ops[2], op == OpSpecConstant);
		break;
	}

	case OpSpecConstantFalse:
	case OpConstantFalse:
	{
		uint32_t id = ops[1];
		set<SPIRConstant>(id, ops[0], uint32_t(0), op == OpSpecConstantFalse);
		break;
	}

	case OpSpecConstantTrue:
	case OpConstantTrue:
	{
		uint32_t id = ops[1];
		set<SPIRConstant>(id, ops[0], uint32_t(1), op == OpSpecConstantTrue);
		break;
	}

	case OpConstantNull:
	{
		uint32_t id = ops[1];
		uint32_t type = ops[0];
		ir.make_constant_null(id, type, true);
		break;
	}

	case OpSpecConstantComposite:
	case OpConstantComposite:
	{
		uint32_t id = ops[1];
		uint32_t type = ops[0];

		auto &ctype = get<SPIRType>(type);

		// We can have constants which are structs and arrays.
		// In this case, our SPIRConstant will be a list of other SPIRConstant ids which we
		// can refer to.
		if (ctype.basetype == SPIRType::Struct || !ctype.array.empty())
		{
			set<SPIRConstant>(id, type, ops + 2, length - 2, op == OpSpecConstantComposite);
		}
		else
		{
			uint32_t elements = length - 2;
			if (elements > 4)
				SPIRV_CROSS_THROW("OpConstantComposite only supports 1, 2, 3 and 4 elements.");

			SPIRConstant remapped_constant_ops[4];
			const SPIRConstant *c[4];
			for (uint32_t i = 0; i < elements; i++)
			{
				// Specialization constant operations can also be part of this.
				// We do not know their value, so any attempt to query SPIRConstant later
				// will fail. We can only propagate the ID of the expression and use to_expression on it.
				auto *constant_op = maybe_get<SPIRConstantOp>(ops[2 + i]);
				auto *undef_op = maybe_get<SPIRUndef>(ops[2 + i]);
				if (constant_op)
				{
					if (op == OpConstantComposite)
						SPIRV_CROSS_THROW("Specialization constant operation used in OpConstantComposite.");

					remapped_constant_ops[i].make_null(get<SPIRType>(constant_op->basetype));
					remapped_constant_ops[i].self = constant_op->self;
					remapped_constant_ops[i].constant_type = constant_op->basetype;
					remapped_constant_ops[i].specialization = true;
					c[i] = &remapped_constant_ops[i];
				}
				else if (undef_op)
				{
					// Undefined, just pick 0.
					remapped_constant_ops[i].make_null(get<SPIRType>(undef_op->basetype));
					remapped_constant_ops[i].constant_type = undef_op->basetype;
					c[i] = &remapped_constant_ops[i];
				}
				else
					c[i] = &get<SPIRConstant>(ops[2 + i]);
			}
			set<SPIRConstant>(id, type, c, elements, op == OpSpecConstantComposite);
		}
		break;
	}

	// Functions
	case OpFunction:
	{
		uint32_t res = ops[0];
		uint32_t id = ops[1];
		// Control
		uint32_t type = ops[3];

		if (current_function)
			SPIRV_CROSS_THROW("Must end a function before starting a new one!");

		current_function = &set<SPIRFunction>(id, res, type);
		break;
	}

	case OpFunctionParameter:
	{
		uint32_t type = ops[0];
		uint32_t id = ops[1];

		if (!current_function)
			SPIRV_CROSS_THROW("Must be in a function!");

		current_function->add_parameter(type, id);
		set<SPIRVariable>(id, type, StorageClassFunction);
		break;
	}

	case OpFunctionEnd:
	{
		if (current_block)
		{
			// Very specific error message, but seems to come up quite often.
			SPIRV_CROSS_THROW(
			    "Cannot end a function before ending the current block.\n"
			    "Likely cause: If this SPIR-V was created from glslang HLSL, make sure the entry point is valid.");
		}
		current_function = nullptr;
		break;
	}

	// Blocks
	case OpLabel:
	{
		// OpLabel always starts a block.
		if (!current_function)
			SPIRV_CROSS_THROW("Blocks cannot exist outside functions!");

		uint32_t id = ops[0];

		current_function->blocks.push_back(id);
		if (!current_function->entry_block)
			current_function->entry_block = id;

		if (current_block)
			SPIRV_CROSS_THROW("Cannot start a block before ending the current block.");

		current_block = &set<SPIRBlock>(id);
		break;
	}

	// Branch instructions end blocks.
	case OpBranch:
	{
		if (!current_block)
			SPIRV_CROSS_THROW("Trying to end a non-existing block.");

		uint32_t target = ops[0];
		current_block->terminator = SPIRBlock::Direct;
		current_block->next_block = target;
		current_block = nullptr;
		break;
	}

	case OpBranchConditional:
	{
		if (!current_block)
			SPIRV_CROSS_THROW("Trying to end a non-existing block.");

		current_block->condition = ops[0];
		current_block->true_block = ops[1];
		current_block->false_block = ops[2];

		current_block->terminator = SPIRBlock::Select;

		if (current_block->true_block == current_block->false_block)
		{
			// Bogus conditional, translate to a direct branch.
			// Avoids some ugly edge cases later when analyzing CFGs.

			// There are some super jank cases where the merge block is different from the true/false,
			// and later branches can "break" out of the selection construct this way.
			// This is complete nonsense, but CTS hits this case.
			// In this scenario, we should see the selection construct as more of a Switch with one default case.
			// The problem here is that this breaks any attempt to break out of outer switch statements,
			// but it's theoretically solvable if this ever comes up using the ladder breaking system ...

			if (current_block->true_block != current_block->next_block &&
			    current_block->merge == SPIRBlock::MergeSelection)
			{
				uint32_t ids = ir.increase_bound_by(2);

				SPIRType type;
				type.basetype = SPIRType::Int;
				type.width = 32;
				set<SPIRType>(ids, type);
				auto &c = set<SPIRConstant>(ids + 1, ids);

				current_block->condition = c.self;
				current_block->default_block = current_block->true_block;
				current_block->terminator = SPIRBlock::MultiSelect;
				ir.block_meta[current_block->next_block] &= ~ParsedIR::BLOCK_META_SELECTION_MERGE_BIT;
				ir.block_meta[current_block->next_block] |= ParsedIR::BLOCK_META_MULTISELECT_MERGE_BIT;
			}
			else
			{
				ir.block_meta[current_block->next_block] &= ~ParsedIR::BLOCK_META_SELECTION_MERGE_BIT;
				current_block->next_block = current_block->true_block;
				current_block->condition = 0;
				current_block->true_block = 0;
				current_block->false_block = 0;
				current_block->merge_block = 0;
				current_block->merge = SPIRBlock::MergeNone;
				current_block->terminator = SPIRBlock::Direct;
			}
		}

		current_block = nullptr;
		break;
	}

	case OpSwitch:
	{
		if (!current_block)
			SPIRV_CROSS_THROW("Trying to end a non-existing block.");

		current_block->terminator = SPIRBlock::MultiSelect;

		current_block->condition = ops[0];
		current_block->default_block = ops[1];

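		// The width of each case literal follows the selector's type, which may not
		// have been resolved yet at this point, so record both plausible decodings
		// (32-bit pairs and 64-bit triples); presumably the consumer later picks the
		// list matching the condition type.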
		uint32_t remaining_ops = length - 2;
		if ((remaining_ops % 2) == 0)
		{
			for (uint32_t i = 2; i + 2 <= length; i += 2)
				current_block->cases_32bit.push_back({ ops[i], ops[i + 1] });
		}

		if ((remaining_ops % 3) == 0)
		{
			for (uint32_t i = 2; i + 3 <= length; i += 3)
			{
				uint64_t value = (static_cast<uint64_t>(ops[i + 1]) << 32) | ops[i];
				current_block->cases_64bit.push_back({ value, ops[i + 2] });
			}
		}

		// If we jump to the next block, make it break instead, since we're inside a switch case block at that point.
		ir.block_meta[current_block->next_block] |= ParsedIR::BLOCK_META_MULTISELECT_MERGE_BIT;

		current_block = nullptr;
		break;
	}

	case OpKill:
	case OpTerminateInvocation:
	{
		if (!current_block)
			SPIRV_CROSS_THROW("Trying to end a non-existing block.");
		current_block->terminator = SPIRBlock::Kill;
		current_block = nullptr;
		break;
	}

	case OpTerminateRayKHR:
		// NV variant is not a terminator.
		if (!current_block)
			SPIRV_CROSS_THROW("Trying to end a non-existing block.");
		current_block->terminator = SPIRBlock::TerminateRay;
		current_block = nullptr;
		break;

	case OpIgnoreIntersectionKHR:
		// NV variant is not a terminator.
		if (!current_block)
			SPIRV_CROSS_THROW("Trying to end a non-existing block.");
		current_block->terminator = SPIRBlock::IgnoreIntersection;
		current_block = nullptr;
		break;

	case OpReturn:
	{
		if (!current_block)
			SPIRV_CROSS_THROW("Trying to end a non-existing block.");
		current_block->terminator = SPIRBlock::Return;
		current_block = nullptr;
		break;
	}

	case OpReturnValue:
	{
		if (!current_block)
			SPIRV_CROSS_THROW("Trying to end a non-existing block.");
		current_block->terminator = SPIRBlock::Return;
		current_block->return_value = ops[0];
		current_block = nullptr;
		break;
	}

	case OpUnreachable:
	{
		if (!current_block)
			SPIRV_CROSS_THROW("Trying to end a non-existing block.");
		current_block->terminator = SPIRBlock::Unreachable;
		current_block = nullptr;
		break;
	}

	case OpSelectionMerge:
	{
		if (!current_block)
			SPIRV_CROSS_THROW("Trying to modify a non-existing block.");

		current_block->next_block = ops[0];
		current_block->merge = SPIRBlock::MergeSelection;
		ir.block_meta[current_block->next_block] |= ParsedIR::BLOCK_META_SELECTION_MERGE_BIT;

		if (length >= 2)
		{
			if (ops[1] & SelectionControlFlattenMask)
				current_block->hint = SPIRBlock::HintFlatten;
			else if (ops[1] & SelectionControlDontFlattenMask)
				current_block->hint = SPIRBlock::HintDontFlatten;
		}
		break;
	}

	case OpLoopMerge:
	{
		if (!current_block)
			SPIRV_CROSS_THROW("Trying to modify a non-existing block.");

		current_block->merge_block = ops[0];
		current_block->continue_block = ops[1];
		current_block->merge = SPIRBlock::MergeLoop;

		ir.block_meta[current_block->self] |= ParsedIR::BLOCK_META_LOOP_HEADER_BIT;
		ir.block_meta[current_block->merge_block] |= ParsedIR::BLOCK_META_LOOP_MERGE_BIT;

		ir.continue_block_to_loop_header[current_block->continue_block] = BlockID(current_block->self);

		// Don't add loop headers to continue blocks,
		// which would make it impossible to branch into the loop header since
		// they are treated as continues.
		if (current_block->continue_block != BlockID(current_block->self))
			ir.block_meta[current_block->continue_block] |= ParsedIR::BLOCK_META_CONTINUE_BIT;

		if (length >= 3)
		{
			if (ops[2] & LoopControlUnrollMask)
				current_block->hint = SPIRBlock::HintUnroll;
			else if (ops[2] & LoopControlDontUnrollMask)
				current_block->hint = SPIRBlock::HintDontUnroll;
		}
		break;
	}

	case OpSpecConstantOp:
	{
		if (length < 3)
			SPIRV_CROSS_THROW("OpSpecConstantOp not enough arguments.");

		uint32_t result_type = ops[0];
		uint32_t id = ops[1];
		auto spec_op = static_cast<Op>(ops[2]);

		set<SPIRConstantOp>(id, result_type, spec_op, ops + 3, length - 3);
		break;
	}

	case OpLine:
	{
		// OpLine might come at global scope, but we don't care about those, since they
		// will not be declared in any meaningful, correct order.
		// Ignore all OpLine directives which live outside a function.
		if (current_block)
			current_block->ops.push_back(instruction);

		// Line directives may arrive before first OpLabel.
		// Treat this as the line of the function declaration,
		// so warnings for arguments can propagate properly.
		if (current_function)
		{
			// Store the first one we find and emit it before creating the function prototype.
			if (current_function->entry_line.file_id == 0)
			{
				current_function->entry_line.file_id = ops[0];
				current_function->entry_line.line_literal = ops[1];
			}
		}
		break;
	}

	case OpNoLine:
	{
		// OpNoLine might come at global scope.
		if (current_block)
			current_block->ops.push_back(instruction);
		break;
	}

	// Actual opcodes.
	default:
	{
		if (length >= 2)
		{
			const auto *type = maybe_get<SPIRType>(ops[0]);
			if (type)
				ir.load_type_width.insert({ ops[1], type->width });
		}

		if (!current_block)
			SPIRV_CROSS_THROW("Currently no block to insert opcode.");

		current_block->ops.push_back(instruction);
		break;
	}
	}
}

bool Parser::types_are_logically_equivalent(const SPIRType &a, const SPIRType &b) const
{
	if (a.basetype != b.basetype)
		return false;
	if (a.width != b.width)
		return false;
	if (a.vecsize != b.vecsize)
		return false;
	if (a.columns != b.columns)
		return false;
	if (a.array.size() != b.array.size())
		return false;

	size_t array_count = a.array.size();
	if (array_count && memcmp(a.array.data(), b.array.data(), array_count * sizeof(uint32_t)) != 0)
		return false;

	if (a.basetype == SPIRType::Image || a.basetype == SPIRType::SampledImage)
	{
		if (memcmp(&a.image, &b.image, sizeof(SPIRType::Image)) != 0)
			return false;
	}

	if (a.member_types.size() != b.member_types.size())
		return false;

	size_t member_types = a.member_types.size();
	for (size_t i = 0; i < member_types; i++)
	{
		if (!types_are_logically_equivalent(get<SPIRType>(a.member_types[i]), get<SPIRType>(b.member_types[i])))
			return false;
	}

	return true;
}

bool Parser::variable_storage_is_aliased(const SPIRVariable &v) const
{
	auto &type = get<SPIRType>(v.basetype);

	auto *type_meta = ir.find_meta(type.self);

	bool ssbo = v.storage == StorageClassStorageBuffer ||
	            (type_meta && type_meta->decoration.decoration_flags.get(DecorationBufferBlock));
	bool image = type.basetype == SPIRType::Image;
	bool counter = type.basetype == SPIRType::AtomicCounter;

	bool is_restrict;
	if (ssbo)
		is_restrict = ir.get_buffer_block_flags(v).get(DecorationRestrict);
	else
		is_restrict = ir.has_decoration(v.self, DecorationRestrict);

	return !is_restrict && (ssbo || image || counter);
}
} // namespace SPIRV_CROSS_NAMESPACE