/*
 * Copyright 2018-2021 Arm Limited
 * SPDX-License-Identifier: Apache-2.0 OR MIT
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/*
 * At your option, you may choose to accept this material under either:
 * 1. The Apache License, Version 2.0, found at <http://www.apache.org/licenses/LICENSE-2.0>, or
 * 2. The MIT License, found at <http://opensource.org/licenses/MIT>.
 */

#include "spirv_parser.hpp"
#include <assert.h>

using namespace std;
using namespace spv;

namespace SPIRV_CROSS_NAMESPACE
{
Parser::Parser(vector<uint32_t> spirv)
{
	ir.spirv = std::move(spirv);
}

Parser::Parser(const uint32_t *spirv_data, size_t word_count)
{
	ir.spirv = vector<uint32_t>(spirv_data, spirv_data + word_count);
}

static bool decoration_is_string(Decoration decoration)
{
	switch (decoration)
	{
	case DecorationHlslSemanticGOOGLE:
		return true;

	default:
		return false;
	}
}

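// Byte-swaps one 32-bit word. For example, an endian-swapped module begins with
// 0x03022307 rather than the SPIR-V magic number 0x07230203.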
static inline uint32_t swap_endian(uint32_t v)
{
	return ((v >> 24) & 0x000000ffu) | ((v >> 8) & 0x0000ff00u) | ((v << 8) & 0x00ff0000u) | ((v << 24) & 0xff000000u);
}

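// The version word encodes the major version in byte 2 and the minor version in byte 1,
// e.g. SPIR-V 1.3 is 0x00010300.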
static bool is_valid_spirv_version(uint32_t version)
{
	switch (version)
	{
	// Allow v99 since it tends to just work.
	case 99:
	case 0x10000: // SPIR-V 1.0
	case 0x10100: // SPIR-V 1.1
	case 0x10200: // SPIR-V 1.2
	case 0x10300: // SPIR-V 1.3
	case 0x10400: // SPIR-V 1.4
	case 0x10500: // SPIR-V 1.5
	case 0x10600: // SPIR-V 1.6
		return true;

	default:
		return false;
	}
}

void Parser::parse()
{
	auto &spirv = ir.spirv;

	auto len = spirv.size();
	if (len < 5)
		SPIRV_CROSS_THROW("SPIRV file too small.");

	auto s = spirv.data();

	// Endian-swap if we need to.
	if (s[0] == swap_endian(MagicNumber))
		transform(begin(spirv), end(spirv), begin(spirv), [](uint32_t c) { return swap_endian(c); });

	if (s[0] != MagicNumber || !is_valid_spirv_version(s[1]))
		SPIRV_CROSS_THROW("Invalid SPIRV format.");

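	// The five-word SPIR-V header is [magic, version, generator, ID bound, schema];
	// every result ID in the module must be less than the bound.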
	uint32_t bound = s[3];

	const uint32_t MaximumNumberOfIDs = 0x3fffff;
	if (bound > MaximumNumberOfIDs)
		SPIRV_CROSS_THROW("ID bound exceeds limit of 0x3fffff.\n");

	ir.set_id_bounds(bound);

	uint32_t offset = 5;

	SmallVector<Instruction> instructions;
	while (offset < len)
	{
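		// The first word of an instruction packs the total word count (including this
		// word) in the upper 16 bits and the opcode in the lower 16 bits.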
		Instruction instr = {};
		instr.op = spirv[offset] & 0xffff;
		instr.count = (spirv[offset] >> 16) & 0xffff;

		if (instr.count == 0)
			SPIRV_CROSS_THROW("SPIR-V instructions cannot consume 0 words. Invalid SPIR-V file.");

		instr.offset = offset + 1;
		instr.length = instr.count - 1;

		offset += instr.count;

		if (offset > spirv.size())
			SPIRV_CROSS_THROW("SPIR-V instruction goes out of bounds.");

		instructions.push_back(instr);
	}

	for (auto &i : instructions)
		parse(i);

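	// OpTypeForwardPointer only declared a stub. Now that every type has been parsed,
	// copy the real pointee information into the types which referenced the forward declaration.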
	for (auto &fixup : forward_pointer_fixups)
	{
		auto &target = get<SPIRType>(fixup.first);
		auto &source = get<SPIRType>(fixup.second);
		target.member_types = source.member_types;
		target.basetype = source.basetype;
		target.self = source.self;
	}
	forward_pointer_fixups.clear();

	if (current_function)
		SPIRV_CROSS_THROW("Function was not terminated.");
	if (current_block)
		SPIRV_CROSS_THROW("Block was not terminated.");
	if (ir.default_entry_point == 0)
		SPIRV_CROSS_THROW("There is no entry point in the SPIR-V module.");
}

const uint32_t *Parser::stream(const Instruction &instr) const
{
	// If we're not going to use any arguments, just return nullptr.
	// We want to avoid the case where we return an out-of-range pointer
	// that trips debug assertions on some platforms.
	if (!instr.length)
		return nullptr;

	if (instr.offset + instr.length > ir.spirv.size())
		SPIRV_CROSS_THROW("Compiler::stream() out of range.");
	return &ir.spirv[instr.offset];
}

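// Decodes a nul-terminated string literal starting at the given word offset.
// SPIR-V packs four little-endian bytes per word and zero-pads the final word.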
static string extract_string(const vector<uint32_t> &spirv, uint32_t offset)
{
	string ret;
	for (uint32_t i = offset; i < spirv.size(); i++)
	{
		uint32_t w = spirv[i];

		for (uint32_t j = 0; j < 4; j++, w >>= 8)
		{
			char c = w & 0xff;
			if (c == '\0')
				return ret;
			ret += c;
		}
	}

	SPIRV_CROSS_THROW("String was not terminated before EOF");
}

void Parser::parse(const Instruction &instruction)
{
	auto *ops = stream(instruction);
	auto op = static_cast<Op>(instruction.op);
	uint32_t length = instruction.length;

	// HACK for glslang that might emit OpEmitMeshTasksEXT followed by return / branch.
	// Instead of failing hard, just ignore it.
	if (ignore_trailing_block_opcodes)
	{
		ignore_trailing_block_opcodes = false;
		if (op == OpReturn || op == OpBranch || op == OpUnreachable)
			return;
	}

	switch (op)
	{
	case OpSourceContinued:
	case OpSourceExtension:
	case OpNop:
	case OpModuleProcessed:
		break;

	case OpString:
	{
		set<SPIRString>(ops[0], extract_string(ir.spirv, instruction.offset + 1));
		break;
	}

	case OpMemoryModel:
		ir.addressing_model = static_cast<AddressingModel>(ops[0]);
		ir.memory_model = static_cast<MemoryModel>(ops[1]);
		break;

	case OpSource:
	{
		auto lang = static_cast<SourceLanguage>(ops[0]);
		switch (lang)
		{
		case SourceLanguageESSL:
			ir.source.es = true;
			ir.source.version = ops[1];
			ir.source.known = true;
			ir.source.hlsl = false;
			break;

		case SourceLanguageGLSL:
			ir.source.es = false;
			ir.source.version = ops[1];
			ir.source.known = true;
			ir.source.hlsl = false;
			break;

		case SourceLanguageHLSL:
			// For purposes of cross-compiling, this is GLSL 450.
			ir.source.es = false;
			ir.source.version = 450;
			ir.source.known = true;
			ir.source.hlsl = true;
			break;

		default:
			ir.source.known = false;
			break;
		}
		break;
	}

	case OpUndef:
	{
		uint32_t result_type = ops[0];
		uint32_t id = ops[1];
		set<SPIRUndef>(id, result_type);
		if (current_block)
			current_block->ops.push_back(instruction);
		break;
	}

	case OpCapability:
	{
		uint32_t cap = ops[0];
		if (cap == CapabilityKernel)
			SPIRV_CROSS_THROW("Kernel capability not supported.");

		ir.declared_capabilities.push_back(static_cast<Capability>(ops[0]));
		break;
	}

	case OpExtension:
	{
		auto ext = extract_string(ir.spirv, instruction.offset);
		ir.declared_extensions.push_back(std::move(ext));
		break;
	}

	case OpExtInstImport:
	{
		uint32_t id = ops[0];

		SPIRExtension::Extension spirv_ext = SPIRExtension::Unsupported;

		auto ext = extract_string(ir.spirv, instruction.offset + 1);
		if (ext == "GLSL.std.450")
			spirv_ext = SPIRExtension::GLSL;
		else if (ext == "DebugInfo")
			spirv_ext = SPIRExtension::SPV_debug_info;
		else if (ext == "SPV_AMD_shader_ballot")
			spirv_ext = SPIRExtension::SPV_AMD_shader_ballot;
		else if (ext == "SPV_AMD_shader_explicit_vertex_parameter")
			spirv_ext = SPIRExtension::SPV_AMD_shader_explicit_vertex_parameter;
		else if (ext == "SPV_AMD_shader_trinary_minmax")
			spirv_ext = SPIRExtension::SPV_AMD_shader_trinary_minmax;
		else if (ext == "SPV_AMD_gcn_shader")
			spirv_ext = SPIRExtension::SPV_AMD_gcn_shader;
		else if (ext == "NonSemantic.DebugPrintf")
			spirv_ext = SPIRExtension::NonSemanticDebugPrintf;
		else if (ext == "NonSemantic.Shader.DebugInfo.100")
			spirv_ext = SPIRExtension::NonSemanticShaderDebugInfo;
		else if (ext.find("NonSemantic.") == 0)
			spirv_ext = SPIRExtension::NonSemanticGeneric;

		set<SPIRExtension>(id, spirv_ext);
		// Other SPIR-V extensions which have ExtInstrs are currently not supported.

		break;
	}

	case OpExtInst:
	{
		// The SPIR-V debug information extended instructions might come at global scope.
		if (current_block)
		{
			current_block->ops.push_back(instruction);
			if (length >= 2)
			{
				const auto *type = maybe_get<SPIRType>(ops[0]);
				if (type)
					ir.load_type_width.insert({ ops[1], type->width });
			}
		}
		break;
	}

	case OpEntryPoint:
	{
		auto itr =
		    ir.entry_points.insert(make_pair(ops[1], SPIREntryPoint(ops[1], static_cast<ExecutionModel>(ops[0]),
		                                                            extract_string(ir.spirv, instruction.offset + 2))));
		auto &e = itr.first->second;

		// Strings need a nul-terminator and consume whole words.
		uint32_t strlen_words = uint32_t((e.name.size() + 1 + 3) >> 2);
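		// e.g. the name "main" is 5 bytes including the nul terminator and thus
		// occupies (5 + 3) >> 2 = 2 operand words, so interface IDs start at ops[4].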

		for (uint32_t i = strlen_words + 2; i < instruction.length; i++)
			e.interface_variables.push_back(ops[i]);

		// Set the name of the entry point in case OpName is not provided later.
		ir.set_name(ops[1], e.name);

		// If we don't have an entry, make the first one our "default".
		if (!ir.default_entry_point)
			ir.default_entry_point = ops[1];
		break;
	}

	case OpExecutionMode:
	{
		auto &execution = ir.entry_points[ops[0]];
		auto mode = static_cast<ExecutionMode>(ops[1]);
		execution.flags.set(mode);

		switch (mode)
		{
		case ExecutionModeInvocations:
			execution.invocations = ops[2];
			break;

		case ExecutionModeLocalSize:
			execution.workgroup_size.x = ops[2];
			execution.workgroup_size.y = ops[3];
			execution.workgroup_size.z = ops[4];
			break;

		case ExecutionModeOutputVertices:
			execution.output_vertices = ops[2];
			break;

		case ExecutionModeOutputPrimitivesEXT:
			execution.output_primitives = ops[2];
			break;

		default:
			break;
		}
		break;
	}

	case OpExecutionModeId:
	{
		auto &execution = ir.entry_points[ops[0]];
		auto mode = static_cast<ExecutionMode>(ops[1]);
		execution.flags.set(mode);

		if (mode == ExecutionModeLocalSizeId)
		{
			execution.workgroup_size.id_x = ops[2];
			execution.workgroup_size.id_y = ops[3];
			execution.workgroup_size.id_z = ops[4];
		}

		break;
	}

	case OpName:
	{
		uint32_t id = ops[0];
		ir.set_name(id, extract_string(ir.spirv, instruction.offset + 1));
		break;
	}

	case OpMemberName:
	{
		uint32_t id = ops[0];
		uint32_t member = ops[1];
		ir.set_member_name(id, member, extract_string(ir.spirv, instruction.offset + 2));
		break;
	}

	case OpDecorationGroup:
	{
		// Noop, this simply means an ID should be a collector of decorations.
		// The meta array is already a flat array of decorations which will contain the relevant decorations.
		break;
	}

	case OpGroupDecorate:
	{
		uint32_t group_id = ops[0];
		auto &decorations = ir.meta[group_id].decoration;
		auto &flags = decorations.decoration_flags;

		// Copies decorations from one ID to another. Only copy decorations which are set in the group,
		// i.e., we cannot just copy the meta structure directly.
		for (uint32_t i = 1; i < length; i++)
		{
			uint32_t target = ops[i];
			flags.for_each_bit([&](uint32_t bit) {
				auto decoration = static_cast<Decoration>(bit);

				if (decoration_is_string(decoration))
				{
					ir.set_decoration_string(target, decoration, ir.get_decoration_string(group_id, decoration));
				}
				else
				{
					ir.meta[target].decoration_word_offset[decoration] =
					    ir.meta[group_id].decoration_word_offset[decoration];
					ir.set_decoration(target, decoration, ir.get_decoration(group_id, decoration));
				}
			});
		}
		break;
	}

	case OpGroupMemberDecorate:
	{
		uint32_t group_id = ops[0];
		auto &flags = ir.meta[group_id].decoration.decoration_flags;

		// Copies decorations from one ID to another. Only copy decorations which are set in the group,
		// i.e., we cannot just copy the meta structure directly.
		for (uint32_t i = 1; i + 1 < length; i += 2)
		{
			uint32_t target = ops[i + 0];
			uint32_t index = ops[i + 1];
			flags.for_each_bit([&](uint32_t bit) {
				auto decoration = static_cast<Decoration>(bit);

				if (decoration_is_string(decoration))
					ir.set_member_decoration_string(target, index, decoration,
					                                ir.get_decoration_string(group_id, decoration));
				else
					ir.set_member_decoration(target, index, decoration, ir.get_decoration(group_id, decoration));
			});
		}
		break;
	}

	case OpDecorate:
	case OpDecorateId:
	{
		// OpDecorateId technically supports an array of arguments, but our only supported decorations are single uint,
		// so merge decorate and decorate-id here.
		uint32_t id = ops[0];

		auto decoration = static_cast<Decoration>(ops[1]);
		if (length >= 3)
		{
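			// Remember where in the binary the decoration argument lives, presumably so
			// that the exact word can be located and patched in place later.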
			ir.meta[id].decoration_word_offset[decoration] = uint32_t(&ops[2] - ir.spirv.data());
			ir.set_decoration(id, decoration, ops[2]);
		}
		else
			ir.set_decoration(id, decoration);

		break;
	}

	case OpDecorateStringGOOGLE:
	{
		uint32_t id = ops[0];
		auto decoration = static_cast<Decoration>(ops[1]);
		ir.set_decoration_string(id, decoration, extract_string(ir.spirv, instruction.offset + 2));
		break;
	}

	case OpMemberDecorate:
	{
		uint32_t id = ops[0];
		uint32_t member = ops[1];
		auto decoration = static_cast<Decoration>(ops[2]);
		if (length >= 4)
			ir.set_member_decoration(id, member, decoration, ops[3]);
		else
			ir.set_member_decoration(id, member, decoration);
		break;
	}

	case OpMemberDecorateStringGOOGLE:
	{
		uint32_t id = ops[0];
		uint32_t member = ops[1];
		auto decoration = static_cast<Decoration>(ops[2]);
		ir.set_member_decoration_string(id, member, decoration, extract_string(ir.spirv, instruction.offset + 3));
		break;
	}

	// Build up basic types.
	case OpTypeVoid:
	{
		uint32_t id = ops[0];
		auto &type = set<SPIRType>(id, op);
		type.basetype = SPIRType::Void;
		break;
	}

	case OpTypeBool:
	{
		uint32_t id = ops[0];
		auto &type = set<SPIRType>(id, op);
		type.basetype = SPIRType::Boolean;
		type.width = 1;
		break;
	}

	case OpTypeFloat:
	{
		uint32_t id = ops[0];
		uint32_t width = ops[1];
		auto &type = set<SPIRType>(id, op);
		if (width == 64)
			type.basetype = SPIRType::Double;
		else if (width == 32)
			type.basetype = SPIRType::Float;
		else if (width == 16)
			type.basetype = SPIRType::Half;
		else
			SPIRV_CROSS_THROW("Unrecognized bit-width of floating point type.");
		type.width = width;
		break;
	}

	case OpTypeInt:
	{
		uint32_t id = ops[0];
		uint32_t width = ops[1];
		bool signedness = ops[2] != 0;
		auto &type = set<SPIRType>(id, op);
		type.basetype = signedness ? to_signed_basetype(width) : to_unsigned_basetype(width);
		type.width = width;
		break;
	}

	// Build composite types by "inheriting".
	// NOTE: The self member is also copied! For pointers and array modifiers this is a good thing
	// since we can refer to decorations on pointee classes which is needed for UBO/SSBO, I/O blocks in geometry/tess etc.
	case OpTypeVector:
	{
		uint32_t id = ops[0];
		uint32_t vecsize = ops[2];

		auto &base = get<SPIRType>(ops[1]);
		auto &vecbase = set<SPIRType>(id, base);

		vecbase.op = op;
		vecbase.vecsize = vecsize;
		vecbase.self = id;
		vecbase.parent_type = ops[1];
		break;
	}

	case OpTypeMatrix:
	{
		uint32_t id = ops[0];
		uint32_t colcount = ops[2];

		auto &base = get<SPIRType>(ops[1]);
		auto &matrixbase = set<SPIRType>(id, base);

		matrixbase.op = op;
		matrixbase.columns = colcount;
		matrixbase.self = id;
		matrixbase.parent_type = ops[1];
		break;
	}

	case OpTypeArray:
	{
		uint32_t id = ops[0];
		uint32_t tid = ops[1];
		auto &base = get<SPIRType>(tid);
		auto &arraybase = set<SPIRType>(id, base);

		arraybase.op = op;
		arraybase.parent_type = tid;

		uint32_t cid = ops[2];
		ir.mark_used_as_array_length(cid);
		auto *c = maybe_get<SPIRConstant>(cid);
		bool literal = c && !c->specialization;

		// We're copying type information into Array types, so we'll need a fixup for any physical pointer
		// references.
		if (base.forward_pointer)
			forward_pointer_fixups.push_back({ id, tid });

		arraybase.array_size_literal.push_back(literal);
		arraybase.array.push_back(literal ? c->scalar() : cid);

		// .self resolves down to non-array/non-pointer type.
		arraybase.self = base.self;
		break;
	}

	case OpTypeRuntimeArray:
	{
		uint32_t id = ops[0];

		auto &base = get<SPIRType>(ops[1]);
		auto &arraybase = set<SPIRType>(id, base);

		// We're copying type information into Array types, so we'll need a fixup for any physical pointer
		// references.
		if (base.forward_pointer)
			forward_pointer_fixups.push_back({ id, ops[1] });

		arraybase.op = op;
		arraybase.array.push_back(0);
		arraybase.array_size_literal.push_back(true);
		arraybase.parent_type = ops[1];

		// .self resolves down to non-array/non-pointer type.
		arraybase.self = base.self;
		break;
	}

	case OpTypeImage:
	{
		uint32_t id = ops[0];
		auto &type = set<SPIRType>(id, op);
		type.basetype = SPIRType::Image;
		type.image.type = ops[1];
		type.image.dim = static_cast<Dim>(ops[2]);
		type.image.depth = ops[3] == 1;
		type.image.arrayed = ops[4] != 0;
		type.image.ms = ops[5] != 0;
		type.image.sampled = ops[6];
		type.image.format = static_cast<ImageFormat>(ops[7]);
		type.image.access = (length >= 9) ? static_cast<AccessQualifier>(ops[8]) : AccessQualifierMax;
		break;
	}

	case OpTypeSampledImage:
	{
		uint32_t id = ops[0];
		uint32_t imagetype = ops[1];
		auto &type = set<SPIRType>(id, op);
		type = get<SPIRType>(imagetype);
		type.basetype = SPIRType::SampledImage;
		type.self = id;
		break;
	}

	case OpTypeSampler:
	{
		uint32_t id = ops[0];
		auto &type = set<SPIRType>(id, op);
		type.basetype = SPIRType::Sampler;
		break;
	}

	case OpTypePointer:
	{
		uint32_t id = ops[0];

		// Very rarely, we might receive a FunctionPrototype here.
		// We won't be able to compile it, but we shouldn't crash when parsing.
		// We should be able to reflect.
		auto *base = maybe_get<SPIRType>(ops[2]);
		auto &ptrbase = set<SPIRType>(id, op);

		if (base)
		{
			ptrbase = *base;
			ptrbase.op = op;
		}

		ptrbase.pointer = true;
		ptrbase.pointer_depth++;
		ptrbase.storage = static_cast<StorageClass>(ops[1]);

		if (ptrbase.storage == StorageClassAtomicCounter)
			ptrbase.basetype = SPIRType::AtomicCounter;

		if (base && base->forward_pointer)
			forward_pointer_fixups.push_back({ id, ops[2] });

		ptrbase.parent_type = ops[2];

		// Do NOT set ptrbase.self!
		break;
	}

	case OpTypeForwardPointer:
	{
		uint32_t id = ops[0];
		auto &ptrbase = set<SPIRType>(id, op);
		ptrbase.pointer = true;
		ptrbase.pointer_depth++;
		ptrbase.storage = static_cast<StorageClass>(ops[1]);
		ptrbase.forward_pointer = true;

		if (ptrbase.storage == StorageClassAtomicCounter)
			ptrbase.basetype = SPIRType::AtomicCounter;

		break;
	}

	case OpTypeStruct:
	{
		uint32_t id = ops[0];
		auto &type = set<SPIRType>(id, op);
		type.basetype = SPIRType::Struct;
		for (uint32_t i = 1; i < length; i++)
			type.member_types.push_back(ops[i]);

		// Check if we have seen this struct type before, with just different
		// decorations.
		//
		// Add workaround for issue #17 as well by looking at OpName for the struct
		// types, which we shouldn't normally do.
		// We should not normally have to consider type aliases like this to begin with
		// however ... glslang issues #304, #307 cover this.

		// For stripped names, never consider struct type aliasing.
		// We risk declaring the same struct multiple times, but type-punning is not allowed
		// so this is safe.
		bool consider_aliasing = !ir.get_name(type.self).empty();
		if (consider_aliasing)
		{
			for (auto &other : global_struct_cache)
			{
				if (ir.get_name(type.self) == ir.get_name(other) &&
				    types_are_logically_equivalent(type, get<SPIRType>(other)))
				{
					type.type_alias = other;
					break;
				}
			}

			if (type.type_alias == TypeID(0))
				global_struct_cache.push_back(id);
		}
		break;
	}

	case OpTypeFunction:
	{
		uint32_t id = ops[0];
		uint32_t ret = ops[1];

		auto &func = set<SPIRFunctionPrototype>(id, ret);
		for (uint32_t i = 2; i < length; i++)
			func.parameter_types.push_back(ops[i]);
		break;
	}

	case OpTypeAccelerationStructureKHR:
	{
		uint32_t id = ops[0];
		auto &type = set<SPIRType>(id, op);
		type.basetype = SPIRType::AccelerationStructure;
		break;
	}

	case OpTypeRayQueryKHR:
	{
		uint32_t id = ops[0];
		auto &type = set<SPIRType>(id, op);
		type.basetype = SPIRType::RayQuery;
		break;
	}

	// Variable declaration
	// All variables are essentially pointers with a storage qualifier.
	case OpVariable:
	{
		uint32_t type = ops[0];
		uint32_t id = ops[1];
		auto storage = static_cast<StorageClass>(ops[2]);
		uint32_t initializer = length == 4 ? ops[3] : 0;

		if (storage == StorageClassFunction)
		{
			if (!current_function)
				SPIRV_CROSS_THROW("No function currently in scope");
			current_function->add_local_variable(id);
		}

		set<SPIRVariable>(id, type, storage, initializer);
		break;
	}

	// OpPhi
	// OpPhi is a fairly magical opcode.
	// It selects temporary variables based on which parent block we *came from*.
	// In high-level languages we can "de-SSA" by creating a function-local variable
	// and flushing temporaries out to it to emulate an SSA Phi.
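	// For example, "%x = OpPhi %int %a %blockA %b %blockB" yields %a when entered
	// from %blockA and %b when entered from %blockB.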
	case OpPhi:
	{
		if (!current_function)
			SPIRV_CROSS_THROW("No function currently in scope");
		if (!current_block)
			SPIRV_CROSS_THROW("No block currently in scope");

		uint32_t result_type = ops[0];
		uint32_t id = ops[1];

		// Instead of a temporary, create a new function-wide temporary with this ID instead.
		auto &var = set<SPIRVariable>(id, result_type, spv::StorageClassFunction);
		var.phi_variable = true;

		current_function->add_local_variable(id);

		for (uint32_t i = 2; i + 2 <= length; i += 2)
			current_block->phi_variables.push_back({ ops[i], ops[i + 1], id });
		break;
	}

	// Constants
	case OpSpecConstant:
	case OpConstant:
	{
		uint32_t id = ops[1];
		auto &type = get<SPIRType>(ops[0]);

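		// Literals wider than 32 bits span two operand words, low-order word first.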
		if (type.width > 32)
			set<SPIRConstant>(id, ops[0], ops[2] | (uint64_t(ops[3]) << 32), op == OpSpecConstant);
		else
			set<SPIRConstant>(id, ops[0], ops[2], op == OpSpecConstant);
		break;
	}

	case OpSpecConstantFalse:
	case OpConstantFalse:
	{
		uint32_t id = ops[1];
		set<SPIRConstant>(id, ops[0], uint32_t(0), op == OpSpecConstantFalse);
		break;
	}

	case OpSpecConstantTrue:
	case OpConstantTrue:
	{
		uint32_t id = ops[1];
		set<SPIRConstant>(id, ops[0], uint32_t(1), op == OpSpecConstantTrue);
		break;
	}

	case OpConstantNull:
	{
		uint32_t id = ops[1];
		uint32_t type = ops[0];
		ir.make_constant_null(id, type, true);
		break;
	}

	case OpSpecConstantComposite:
	case OpConstantComposite:
	{
		uint32_t id = ops[1];
		uint32_t type = ops[0];

		auto &ctype = get<SPIRType>(type);

		// We can have constants which are structs and arrays.
		// In this case, our SPIRConstant will be a list of other SPIRConstant ids which we
		// can refer to.
		if (ctype.basetype == SPIRType::Struct || !ctype.array.empty())
		{
			set<SPIRConstant>(id, type, ops + 2, length - 2, op == OpSpecConstantComposite);
		}
		else
		{
			uint32_t elements = length - 2;
			if (elements > 4)
				SPIRV_CROSS_THROW("OpConstantComposite only supports 1, 2, 3 and 4 elements.");

			SPIRConstant remapped_constant_ops[4];
			const SPIRConstant *c[4];
			for (uint32_t i = 0; i < elements; i++)
			{
				// Specialization constant operations can also be part of this.
				// We do not know their value, so any attempt to query SPIRConstant later
				// will fail. We can only propagate the ID of the expression and use to_expression on it.
				auto *constant_op = maybe_get<SPIRConstantOp>(ops[2 + i]);
				auto *undef_op = maybe_get<SPIRUndef>(ops[2 + i]);
				if (constant_op)
				{
					if (op == OpConstantComposite)
						SPIRV_CROSS_THROW("Specialization constant operation used in OpConstantComposite.");

					remapped_constant_ops[i].make_null(get<SPIRType>(constant_op->basetype));
					remapped_constant_ops[i].self = constant_op->self;
					remapped_constant_ops[i].constant_type = constant_op->basetype;
					remapped_constant_ops[i].specialization = true;
					c[i] = &remapped_constant_ops[i];
				}
				else if (undef_op)
				{
					// Undefined, just pick 0.
					remapped_constant_ops[i].make_null(get<SPIRType>(undef_op->basetype));
					remapped_constant_ops[i].constant_type = undef_op->basetype;
					c[i] = &remapped_constant_ops[i];
				}
				else
					c[i] = &get<SPIRConstant>(ops[2 + i]);
			}
			set<SPIRConstant>(id, type, c, elements, op == OpSpecConstantComposite);
		}
		break;
	}

	// Functions
	case OpFunction:
	{
		uint32_t res = ops[0];
		uint32_t id = ops[1];
		// Control
		uint32_t type = ops[3];

		if (current_function)
			SPIRV_CROSS_THROW("Must end a function before starting a new one!");

		current_function = &set<SPIRFunction>(id, res, type);
		break;
	}

	case OpFunctionParameter:
	{
		uint32_t type = ops[0];
		uint32_t id = ops[1];

		if (!current_function)
			SPIRV_CROSS_THROW("Must be in a function!");

		current_function->add_parameter(type, id);
		set<SPIRVariable>(id, type, StorageClassFunction);
		break;
	}

	case OpFunctionEnd:
	{
		if (current_block)
		{
			// Very specific error message, but seems to come up quite often.
			SPIRV_CROSS_THROW(
			    "Cannot end a function before ending the current block.\n"
			    "Likely cause: If this SPIR-V was created from glslang HLSL, make sure the entry point is valid.");
		}
		current_function = nullptr;
		break;
	}

	// Blocks
	case OpLabel:
	{
		// OpLabel always starts a block.
		if (!current_function)
			SPIRV_CROSS_THROW("Blocks cannot exist outside functions!");

		uint32_t id = ops[0];

		current_function->blocks.push_back(id);
		if (!current_function->entry_block)
			current_function->entry_block = id;

		if (current_block)
			SPIRV_CROSS_THROW("Cannot start a block before ending the current block.");

		current_block = &set<SPIRBlock>(id);
		break;
	}

	// Branch instructions end blocks.
	case OpBranch:
	{
		if (!current_block)
			SPIRV_CROSS_THROW("Trying to end a non-existing block.");

		uint32_t target = ops[0];
		current_block->terminator = SPIRBlock::Direct;
		current_block->next_block = target;
		current_block = nullptr;
		break;
	}

	case OpBranchConditional:
	{
		if (!current_block)
			SPIRV_CROSS_THROW("Trying to end a non-existing block.");

		current_block->condition = ops[0];
		current_block->true_block = ops[1];
		current_block->false_block = ops[2];

		current_block->terminator = SPIRBlock::Select;

		if (current_block->true_block == current_block->false_block)
		{
			// Bogus conditional, translate to a direct branch.
			// Avoids some ugly edge cases later when analyzing CFGs.

			// There are some super jank cases where the merge block is different from the true/false,
			// and later branches can "break" out of the selection construct this way.
			// This is complete nonsense, but CTS hits this case.
			// In this scenario, we should see the selection construct as more of a Switch with one default case.
			// The problem here is that this breaks any attempt to break out of outer switch statements,
			// but it's theoretically solvable if this ever comes up using the ladder breaking system ...

			if (current_block->true_block != current_block->next_block &&
			    current_block->merge == SPIRBlock::MergeSelection)
			{
				uint32_t ids = ir.increase_bound_by(2);

				auto &type = set<SPIRType>(ids, OpTypeInt);
				type.basetype = SPIRType::Int;
				type.width = 32;
				auto &c = set<SPIRConstant>(ids + 1, ids);

				current_block->condition = c.self;
				current_block->default_block = current_block->true_block;
				current_block->terminator = SPIRBlock::MultiSelect;
				ir.block_meta[current_block->next_block] &= ~ParsedIR::BLOCK_META_SELECTION_MERGE_BIT;
				ir.block_meta[current_block->next_block] |= ParsedIR::BLOCK_META_MULTISELECT_MERGE_BIT;
			}
			else
			{
				// Collapse loops if we have to.
				bool collapsed_loop = current_block->true_block == current_block->merge_block &&
				                      current_block->merge == SPIRBlock::MergeLoop;

				if (collapsed_loop)
				{
					ir.block_meta[current_block->merge_block] &= ~ParsedIR::BLOCK_META_LOOP_MERGE_BIT;
					ir.block_meta[current_block->continue_block] &= ~ParsedIR::BLOCK_META_CONTINUE_BIT;
				}

				current_block->next_block = current_block->true_block;
				current_block->condition = 0;
				current_block->true_block = 0;
				current_block->false_block = 0;
				current_block->merge_block = 0;
				current_block->merge = SPIRBlock::MergeNone;
				current_block->terminator = SPIRBlock::Direct;
			}
		}

		current_block = nullptr;
		break;
	}

	case OpSwitch:
	{
		if (!current_block)
			SPIRV_CROSS_THROW("Trying to end a non-existing block.");

		current_block->terminator = SPIRBlock::MultiSelect;

		current_block->condition = ops[0];
		current_block->default_block = ops[1];

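		// Case literals are 32 or 64 bits wide depending on the selector's type, which
		// may not be resolvable at this point. Record both plausible interpretations
		// based on the operand count and let the consumer pick the correct list.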
		uint32_t remaining_ops = length - 2;
		if ((remaining_ops % 2) == 0)
		{
			for (uint32_t i = 2; i + 2 <= length; i += 2)
				current_block->cases_32bit.push_back({ ops[i], ops[i + 1] });
		}

		if ((remaining_ops % 3) == 0)
		{
			for (uint32_t i = 2; i + 3 <= length; i += 3)
			{
				uint64_t value = (static_cast<uint64_t>(ops[i + 1]) << 32) | ops[i];
				current_block->cases_64bit.push_back({ value, ops[i + 2] });
			}
		}

		// If we jump to the next block, make it break instead, since we're inside a switch case block at that point.
		ir.block_meta[current_block->next_block] |= ParsedIR::BLOCK_META_MULTISELECT_MERGE_BIT;

		current_block = nullptr;
		break;
	}

	case OpKill:
	case OpTerminateInvocation:
	{
		if (!current_block)
			SPIRV_CROSS_THROW("Trying to end a non-existing block.");
		current_block->terminator = SPIRBlock::Kill;
		current_block = nullptr;
		break;
	}

	case OpTerminateRayKHR:
		// NV variant is not a terminator.
		if (!current_block)
			SPIRV_CROSS_THROW("Trying to end a non-existing block.");
		current_block->terminator = SPIRBlock::TerminateRay;
		current_block = nullptr;
		break;

	case OpIgnoreIntersectionKHR:
		// NV variant is not a terminator.
		if (!current_block)
			SPIRV_CROSS_THROW("Trying to end a non-existing block.");
		current_block->terminator = SPIRBlock::IgnoreIntersection;
		current_block = nullptr;
		break;

	case OpEmitMeshTasksEXT:
		if (!current_block)
			SPIRV_CROSS_THROW("Trying to end a non-existing block.");
		current_block->terminator = SPIRBlock::EmitMeshTasks;
		for (uint32_t i = 0; i < 3; i++)
			current_block->mesh.groups[i] = ops[i];
		current_block->mesh.payload = length >= 4 ? ops[3] : 0;
		current_block = nullptr;
		// Currently glslang is bugged and does not treat EmitMeshTasksEXT as a terminator.
		ignore_trailing_block_opcodes = true;
		break;

	case OpReturn:
	{
		if (!current_block)
			SPIRV_CROSS_THROW("Trying to end a non-existing block.");
		current_block->terminator = SPIRBlock::Return;
		current_block = nullptr;
		break;
	}

	case OpReturnValue:
	{
		if (!current_block)
			SPIRV_CROSS_THROW("Trying to end a non-existing block.");
		current_block->terminator = SPIRBlock::Return;
		current_block->return_value = ops[0];
		current_block = nullptr;
		break;
	}

	case OpUnreachable:
	{
		if (!current_block)
			SPIRV_CROSS_THROW("Trying to end a non-existing block.");
		current_block->terminator = SPIRBlock::Unreachable;
		current_block = nullptr;
		break;
	}

	case OpSelectionMerge:
	{
		if (!current_block)
			SPIRV_CROSS_THROW("Trying to modify a non-existing block.");

		current_block->next_block = ops[0];
		current_block->merge = SPIRBlock::MergeSelection;
		ir.block_meta[current_block->next_block] |= ParsedIR::BLOCK_META_SELECTION_MERGE_BIT;

		if (length >= 2)
		{
			if (ops[1] & SelectionControlFlattenMask)
				current_block->hint = SPIRBlock::HintFlatten;
			else if (ops[1] & SelectionControlDontFlattenMask)
				current_block->hint = SPIRBlock::HintDontFlatten;
		}
		break;
	}

	case OpLoopMerge:
	{
		if (!current_block)
			SPIRV_CROSS_THROW("Trying to modify a non-existing block.");

		current_block->merge_block = ops[0];
		current_block->continue_block = ops[1];
		current_block->merge = SPIRBlock::MergeLoop;

		ir.block_meta[current_block->self] |= ParsedIR::BLOCK_META_LOOP_HEADER_BIT;
		ir.block_meta[current_block->merge_block] |= ParsedIR::BLOCK_META_LOOP_MERGE_BIT;

		ir.continue_block_to_loop_header[current_block->continue_block] = BlockID(current_block->self);

		// Don't add loop headers to continue blocks,
		// which would make it impossible to branch into the loop header since
		// they are treated as continues.
		if (current_block->continue_block != BlockID(current_block->self))
			ir.block_meta[current_block->continue_block] |= ParsedIR::BLOCK_META_CONTINUE_BIT;

		if (length >= 3)
		{
			if (ops[2] & LoopControlUnrollMask)
				current_block->hint = SPIRBlock::HintUnroll;
			else if (ops[2] & LoopControlDontUnrollMask)
				current_block->hint = SPIRBlock::HintDontUnroll;
		}
		break;
	}

	case OpSpecConstantOp:
	{
		if (length < 3)
			SPIRV_CROSS_THROW("OpSpecConstantOp not enough arguments.");

		uint32_t result_type = ops[0];
		uint32_t id = ops[1];
		auto spec_op = static_cast<Op>(ops[2]);

		set<SPIRConstantOp>(id, result_type, spec_op, ops + 3, length - 3);
		break;
	}

	case OpLine:
	{
		// OpLine might come at global scope, but we don't care about those since they will not be declared in any
		// meaningful correct order.
		// Ignore all OpLine directives which live outside a function.
		if (current_block)
			current_block->ops.push_back(instruction);

		// Line directives may arrive before first OpLabel.
		// Treat this as the line of the function declaration,
		// so warnings for arguments can propagate properly.
		if (current_function)
		{
			// Store the first one we find and emit it before creating the function prototype.
			if (current_function->entry_line.file_id == 0)
			{
				current_function->entry_line.file_id = ops[0];
				current_function->entry_line.line_literal = ops[1];
			}
		}
		break;
	}

	case OpNoLine:
	{
		// OpNoLine might come at global scope.
		if (current_block)
			current_block->ops.push_back(instruction);
		break;
	}

	// Actual opcodes.
	default:
	{
		if (length >= 2)
		{
			const auto *type = maybe_get<SPIRType>(ops[0]);
			if (type)
				ir.load_type_width.insert({ ops[1], type->width });
		}

		if (!current_block)
			SPIRV_CROSS_THROW("Currently no block to insert opcode.");

		current_block->ops.push_back(instruction);
		break;
	}
	}
}

bool Parser::types_are_logically_equivalent(const SPIRType &a, const SPIRType &b) const
{
	if (a.basetype != b.basetype)
		return false;
	if (a.width != b.width)
		return false;
	if (a.vecsize != b.vecsize)
		return false;
	if (a.columns != b.columns)
		return false;
	if (a.array.size() != b.array.size())
		return false;

	size_t array_count = a.array.size();
	if (array_count && memcmp(a.array.data(), b.array.data(), array_count * sizeof(uint32_t)) != 0)
		return false;

	if (a.basetype == SPIRType::Image || a.basetype == SPIRType::SampledImage)
	{
		if (memcmp(&a.image, &b.image, sizeof(SPIRType::Image)) != 0)
			return false;
	}

	if (a.member_types.size() != b.member_types.size())
		return false;

	size_t member_types = a.member_types.size();
	for (size_t i = 0; i < member_types; i++)
	{
		if (!types_are_logically_equivalent(get<SPIRType>(a.member_types[i]), get<SPIRType>(b.member_types[i])))
			return false;
	}

	return true;
}

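// A variable's storage is considered aliased when writes through one name may be
// observable through another: SSBOs, storage images and atomic counters alias
// unless they are decorated with Restrict.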
bool Parser::variable_storage_is_aliased(const SPIRVariable &v) const
{
	auto &type = get<SPIRType>(v.basetype);

	auto *type_meta = ir.find_meta(type.self);

	bool ssbo = v.storage == StorageClassStorageBuffer ||
	            (type_meta && type_meta->decoration.decoration_flags.get(DecorationBufferBlock));
	bool image = type.basetype == SPIRType::Image;
	bool counter = type.basetype == SPIRType::AtomicCounter;

	bool is_restrict;
	if (ssbo)
		is_restrict = ir.get_buffer_block_flags(v).get(DecorationRestrict);
	else
		is_restrict = ir.has_decoration(v.self, DecorationRestrict);

	return !is_restrict && (ssbo || image || counter);
}
} // namespace SPIRV_CROSS_NAMESPACE
