1 | /* |
2 | * Copyright 2015-2021 Arm Limited |
3 | * SPDX-License-Identifier: Apache-2.0 OR MIT |
4 | * |
5 | * Licensed under the Apache License, Version 2.0 (the "License"); |
6 | * you may not use this file except in compliance with the License. |
7 | * You may obtain a copy of the License at |
8 | * |
9 | * http://www.apache.org/licenses/LICENSE-2.0 |
10 | * |
11 | * Unless required by applicable law or agreed to in writing, software |
12 | * distributed under the License is distributed on an "AS IS" BASIS, |
13 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
14 | * See the License for the specific language governing permissions and |
15 | * limitations under the License. |
16 | */ |
17 | |
18 | /* |
19 | * At your option, you may choose to accept this material under either: |
20 | * 1. The Apache License, Version 2.0, found at <http://www.apache.org/licenses/LICENSE-2.0>, or |
21 | * 2. The MIT License, found at <http://opensource.org/licenses/MIT>. |
22 | */ |
23 | |
24 | #ifndef SPIRV_CROSS_GLSL_HPP |
25 | #define SPIRV_CROSS_GLSL_HPP |
26 | |
27 | #include "GLSL.std.450.h" |
28 | #include "spirv_cross.hpp" |
29 | #include <unordered_map> |
30 | #include <unordered_set> |
31 | #include <utility> |
32 | |
33 | namespace SPIRV_CROSS_NAMESPACE |
34 | { |
35 | enum PlsFormat |
36 | { |
37 | PlsNone = 0, |
38 | |
39 | PlsR11FG11FB10F, |
40 | PlsR32F, |
41 | PlsRG16F, |
42 | PlsRGB10A2, |
43 | PlsRGBA8, |
44 | PlsRG16, |
45 | |
46 | PlsRGBA8I, |
47 | PlsRG16I, |
48 | |
49 | PlsRGB10A2UI, |
50 | PlsRGBA8UI, |
51 | PlsRG16UI, |
52 | PlsR32UI |
53 | }; |
54 | |
55 | struct PlsRemap |
56 | { |
57 | uint32_t id; |
58 | PlsFormat format; |
59 | }; |
60 | |
61 | enum AccessChainFlagBits |
62 | { |
63 | ACCESS_CHAIN_INDEX_IS_LITERAL_BIT = 1 << 0, |
64 | ACCESS_CHAIN_CHAIN_ONLY_BIT = 1 << 1, |
65 | ACCESS_CHAIN_PTR_CHAIN_BIT = 1 << 2, |
66 | ACCESS_CHAIN_SKIP_REGISTER_EXPRESSION_READ_BIT = 1 << 3, |
67 | ACCESS_CHAIN_LITERAL_MSB_FORCE_ID = 1 << 4, |
68 | ACCESS_CHAIN_FLATTEN_ALL_MEMBERS_BIT = 1 << 5, |
69 | ACCESS_CHAIN_FORCE_COMPOSITE_BIT = 1 << 6 |
70 | }; |
71 | typedef uint32_t AccessChainFlags; |
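// Illustrative note (not from the upstream header): these are plain bit flags and are
// meant to be OR-ed together into an AccessChainFlags mask, e.g.
//
//   AccessChainFlags flags = ACCESS_CHAIN_INDEX_IS_LITERAL_BIT | ACCESS_CHAIN_CHAIN_ONLY_BIT;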
72 | |
73 | class CompilerGLSL : public Compiler |
74 | { |
75 | public: |
76 | struct Options |
77 | { |
78 | // The shading language version. Corresponds to #version $VALUE. |
79 | uint32_t version = 450; |
80 | |
81 | // Emit the OpenGL ES shading language instead of desktop OpenGL. |
82 | bool es = false; |
83 | |
84 | // Debug option to always emit temporary variables for all expressions. |
85 | bool force_temporary = false; |
// Debug option, can be increased in an attempt to work around SPIRV-Cross bugs temporarily.
87 | // If this limit has to be increased, it points to an implementation bug. |
88 | // In certain scenarios, the maximum number of debug iterations may increase beyond this limit |
89 | // as long as we can prove we're making certain kinds of forward progress. |
90 | uint32_t force_recompile_max_debug_iterations = 3; |
91 | |
92 | // If true, Vulkan GLSL features are used instead of GL-compatible features. |
93 | // Mostly useful for debugging SPIR-V files. |
94 | bool vulkan_semantics = false; |
95 | |
96 | // If true, gl_PerVertex is explicitly redeclared in vertex, geometry and tessellation shaders. |
// The members of gl_PerVertex are determined by which built-ins are declared by the shader.
98 | // This option is ignored in ES versions, as redeclaration in ES is not required, and it depends on a different extension |
99 | // (EXT_shader_io_blocks) which makes things a bit more fuzzy. |
100 | bool separate_shader_objects = false; |
101 | |
102 | // Flattens multidimensional arrays, e.g. float foo[a][b][c] into single-dimensional arrays, |
103 | // e.g. float foo[a * b * c]. |
// This option does not change the actual SPIRType of any object.
// Only the generated code, including declarations of interface variables, is changed to use a single array dimension.
106 | bool flatten_multidimensional_arrays = false; |
107 | |
108 | // For older desktop GLSL targets than version 420, the |
// GL_ARB_shading_language_420pack extension is used to be able to support
110 | // layout(binding) on UBOs and samplers. |
111 | // If disabled on older targets, binding decorations will be stripped. |
112 | bool enable_420pack_extension = true; |
113 | |
114 | // In non-Vulkan GLSL, emit push constant blocks as UBOs rather than plain uniforms. |
115 | bool emit_push_constant_as_uniform_buffer = false; |
116 | |
117 | // Always emit uniform blocks as plain uniforms, regardless of the GLSL version, even when UBOs are supported. |
118 | // Does not apply to shader storage or push constant blocks. |
119 | bool emit_uniform_buffer_as_plain_uniforms = false; |
120 | |
121 | // Emit OpLine directives if present in the module. |
122 | // May not correspond exactly to original source, but should be a good approximation. |
123 | bool emit_line_directives = false; |
124 | |
// In cases where readonly/writeonly decorations are not used at all,
// we try to deduce which qualifier(s) we should actually use, since actually emitting
// read-write decorations is very rare, and older glslang/HLSL compilers tend to just emit readwrite as a matter of fact.
128 | // The default (true) is to enable automatic deduction for these cases, but if you trust the decorations set |
129 | // by the SPIR-V, it's recommended to set this to false. |
130 | bool enable_storage_image_qualifier_deduction = true; |
131 | |
132 | // On some targets (WebGPU), uninitialized variables are banned. |
133 | // If this is enabled, all variables (temporaries, Private, Function) |
134 | // which would otherwise be uninitialized will now be initialized to 0 instead. |
135 | bool force_zero_initialized_variables = false; |
136 | |
137 | // In GLSL, force use of I/O block flattening, similar to |
138 | // what happens on legacy GLSL targets for blocks and structs. |
139 | bool force_flattened_io_blocks = false; |
140 | |
// For opcodes where we have to perform explicit additional NaN checks, very ugly code is generated.
// If we opt-in, ignore these requirements.
// In opcodes like NClamp/NMin/NMax and FP compare, ignore NaN behavior.
// Use FClamp/FMin/FMax semantics for clamps and let the implementation choose
// ordered or unordered compares.
146 | bool relax_nan_checks = false; |
147 | |
148 | // If non-zero, controls layout(num_views = N) in; in GL_OVR_multiview2. |
149 | uint32_t ovr_multiview_view_count = 0; |
150 | |
151 | enum Precision |
152 | { |
153 | DontCare, |
154 | Lowp, |
155 | Mediump, |
156 | Highp |
157 | }; |
158 | |
159 | struct VertexOptions |
160 | { |
161 | // "Vertex-like shader" here is any shader stage that can write BuiltInPosition. |
162 | |
163 | // GLSL: In vertex-like shaders, rewrite [0, w] depth (Vulkan/D3D style) to [-w, w] depth (GL style). |
164 | // MSL: In vertex-like shaders, rewrite [-w, w] depth (GL style) to [0, w] depth. |
165 | // HLSL: In vertex-like shaders, rewrite [-w, w] depth (GL style) to [0, w] depth. |
166 | bool fixup_clipspace = false; |
167 | |
168 | // In vertex-like shaders, inverts gl_Position.y or equivalent. |
169 | bool flip_vert_y = false; |
170 | |
171 | // GLSL only, for HLSL version of this option, see CompilerHLSL. |
172 | // If true, the backend will assume that InstanceIndex will need to apply |
173 | // a base instance offset. Set to false if you know you will never use base instance |
174 | // functionality as it might remove some internal uniforms. |
175 | bool support_nonzero_base_instance = true; |
176 | } vertex; |
177 | |
178 | struct FragmentOptions |
179 | { |
180 | // Add precision mediump float in ES targets when emitting GLES source. |
181 | // Add precision highp int in ES targets when emitting GLES source. |
182 | Precision default_float_precision = Mediump; |
183 | Precision default_int_precision = Highp; |
184 | } fragment; |
185 | }; |
186 | |
187 | void remap_pixel_local_storage(std::vector<PlsRemap> inputs, std::vector<PlsRemap> outputs) |
188 | { |
189 | pls_inputs = std::move(inputs); |
190 | pls_outputs = std::move(outputs); |
191 | remap_pls_variables(); |
192 | } |
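// Illustrative sketch of remapping pixel local storage (names such as `compiler`, `pls_in_id`
// and `pls_out_id` are assumptions, not part of this header):
//
//   std::vector<PlsRemap> ins  = { { pls_in_id,  PlsR11FG11FB10F } };
//   std::vector<PlsRemap> outs = { { pls_out_id, PlsRGBA8 } };
//   compiler.remap_pixel_local_storage(std::move(ins), std::move(outs));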
193 | |
194 | // Redirect a subpassInput reading from input_attachment_index to instead load its value from |
195 | // the color attachment at location = color_location. Requires ESSL. |
// If coherent is true, uses GL_EXT_shader_framebuffer_fetch; if not, uses the noncoherent variant.
197 | void remap_ext_framebuffer_fetch(uint32_t input_attachment_index, uint32_t color_location, bool coherent); |
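// Illustrative sketch, assuming input attachment index 0 should read from the color output at
// location 0 using the coherent framebuffer fetch extension:
//
//   compiler.remap_ext_framebuffer_fetch(0, 0, true);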
198 | |
199 | explicit CompilerGLSL(std::vector<uint32_t> spirv_) |
200 | : Compiler(std::move(spirv_)) |
201 | { |
202 | init(); |
203 | } |
204 | |
205 | CompilerGLSL(const uint32_t *ir_, size_t word_count) |
206 | : Compiler(ir_, word_count) |
207 | { |
208 | init(); |
209 | } |
210 | |
211 | explicit CompilerGLSL(const ParsedIR &ir_) |
212 | : Compiler(ir_) |
213 | { |
214 | init(); |
215 | } |
216 | |
217 | explicit CompilerGLSL(ParsedIR &&ir_) |
218 | : Compiler(std::move(ir_)) |
219 | { |
220 | init(); |
221 | } |
222 | |
223 | const Options &get_common_options() const |
224 | { |
225 | return options; |
226 | } |
227 | |
228 | void set_common_options(const Options &opts) |
229 | { |
230 | options = opts; |
231 | } |
232 | |
233 | std::string compile() override; |
234 | |
235 | // Returns the current string held in the conversion buffer. Useful for |
236 | // capturing what has been converted so far when compile() throws an error. |
237 | std::string get_partial_source(); |
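// Typical usage sketch (the variable `spirv_words` holding the SPIR-V binary is an assumption):
//
//   CompilerGLSL compiler(std::move(spirv_words));
//   CompilerGLSL::Options opts = compiler.get_common_options();
//   opts.es = true;
//   opts.version = 310;
//   compiler.set_common_options(opts);
//   std::string glsl = compiler.compile();
//
// If compile() throws, get_partial_source() can be used to inspect the partially generated GLSL.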
238 | |
239 | // Adds a line to be added right after #version in GLSL backend. |
240 | // This is useful for enabling custom extensions which are outside the scope of SPIRV-Cross. |
241 | // This can be combined with variable remapping. |
242 | // A new-line will be added. |
243 | // |
244 | // While add_header_line() is a more generic way of adding arbitrary text to the header |
245 | // of a GLSL file, require_extension() should be used when adding extensions since it will |
246 | // avoid creating collisions with SPIRV-Cross generated extensions. |
247 | // |
248 | // Code added via add_header_line() is typically backend-specific. |
void add_header_line(const std::string &str);
250 | |
251 | // Adds an extension which is required to run this shader, e.g. |
252 | // require_extension("GL_KHR_my_extension"); |
253 | void require_extension(const std::string &ext); |
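// Illustrative sketch of the intended split between the two (identifiers are examples only):
//
//   compiler.require_extension("GL_KHR_my_extension");    // deduplicated against generated extensions
//   compiler.add_header_line("#pragma optimize(off)");    // emitted verbatim after #version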
254 | |
255 | // Legacy GLSL compatibility method. |
256 | // Takes a uniform or push constant variable and flattens it into a (i|u)vec4 array[N]; array instead. |
257 | // For this to work, all types in the block must be the same basic type, e.g. mixing vec2 and vec4 is fine, but |
258 | // mixing int and float is not. |
259 | // The name of the uniform array will be the same as the interface block name. |
260 | void flatten_buffer_block(VariableID id); |
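// Illustrative sketch (assumes `ubo_id` is the VariableID of a uniform buffer, e.g. taken from
// get_shader_resources().uniform_buffers[n].id):
//
//   compiler.flatten_buffer_block(ubo_id);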
261 | |
262 | // After compilation, query if a variable ID was used as a depth resource. |
263 | // This is meaningful for MSL since descriptor types depend on this knowledge. |
264 | // Cases which return true: |
265 | // - Images which are declared with depth = 1 image type. |
266 | // - Samplers which are statically used at least once with Dref opcodes. |
267 | // - Images which are statically used at least once with Dref opcodes. |
268 | bool variable_is_depth_or_compare(VariableID id) const; |
269 | |
270 | // If a shader output is active in this stage, but inactive in a subsequent stage, |
271 | // this can be signalled here. This can be used to work around certain cross-stage matching problems |
// which plague MSL and HLSL in certain scenarios.
273 | // An output which matches one of these will not be emitted in stage output interfaces, but rather treated as a private |
274 | // variable. |
275 | // This option is only meaningful for MSL and HLSL, since GLSL matches by location directly. |
276 | // Masking builtins only takes effect if the builtin in question is part of the stage output interface. |
277 | void mask_stage_output_by_location(uint32_t location, uint32_t component); |
278 | void mask_stage_output_by_builtin(spv::BuiltIn builtin); |
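// Illustrative sketch: mask a user output at location = 1, component = 0, and the PointSize builtin,
// when a subsequent stage is known not to consume them (locations/builtins here are examples only):
//
//   compiler.mask_stage_output_by_location(1, 0);
//   compiler.mask_stage_output_by_builtin(spv::BuiltInPointSize);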
279 | |
280 | protected: |
281 | struct ShaderSubgroupSupportHelper |
282 | { |
283 | // lower enum value = greater priority |
284 | enum Candidate |
285 | { |
286 | KHR_shader_subgroup_ballot, |
287 | KHR_shader_subgroup_basic, |
288 | KHR_shader_subgroup_vote, |
289 | NV_gpu_shader_5, |
290 | NV_shader_thread_group, |
291 | NV_shader_thread_shuffle, |
292 | ARB_shader_ballot, |
293 | ARB_shader_group_vote, |
294 | AMD_gcn_shader, |
295 | |
296 | CandidateCount |
297 | }; |
298 | |
299 | static const char *get_extension_name(Candidate c); |
300 | static SmallVector<std::string> get_extra_required_extension_names(Candidate c); |
301 | static const char *get_extra_required_extension_predicate(Candidate c); |
302 | |
303 | enum Feature |
304 | { |
305 | SubgroupMask = 0, |
306 | SubgroupSize = 1, |
307 | SubgroupInvocationID = 2, |
308 | SubgroupID = 3, |
309 | NumSubgroups = 4, |
310 | SubgroupBroadcast_First = 5, |
311 | SubgroupBallotFindLSB_MSB = 6, |
312 | SubgroupAll_Any_AllEqualBool = 7, |
313 | SubgroupAllEqualT = 8, |
314 | SubgroupElect = 9, |
315 | SubgroupBarrier = 10, |
316 | SubgroupMemBarrier = 11, |
317 | SubgroupBallot = 12, |
318 | SubgroupInverseBallot_InclBitCount_ExclBitCout = 13, |
SubgroupBallotBitExtract = 14,
320 | SubgroupBallotBitCount = 15, |
321 | |
322 | FeatureCount |
323 | }; |
324 | |
325 | using FeatureMask = uint32_t; |
static_assert(sizeof(FeatureMask) * 8u >= FeatureCount, "Mask type needs more bits.");
327 | |
328 | using CandidateVector = SmallVector<Candidate, CandidateCount>; |
329 | using FeatureVector = SmallVector<Feature>; |
330 | |
331 | static FeatureVector get_feature_dependencies(Feature feature); |
332 | static FeatureMask get_feature_dependency_mask(Feature feature); |
333 | static bool can_feature_be_implemented_without_extensions(Feature feature); |
334 | static Candidate get_KHR_extension_for_feature(Feature feature); |
335 | |
336 | struct Result |
337 | { |
338 | Result(); |
339 | uint32_t weights[CandidateCount]; |
340 | }; |
341 | |
342 | void request_feature(Feature feature); |
343 | bool is_feature_requested(Feature feature) const; |
344 | Result resolve() const; |
345 | |
346 | static CandidateVector get_candidates_for_feature(Feature ft, const Result &r); |
347 | |
348 | private: |
349 | static CandidateVector get_candidates_for_feature(Feature ft); |
350 | static FeatureMask build_mask(const SmallVector<Feature> &features); |
351 | FeatureMask feature_mask = 0; |
352 | }; |
353 | |
354 | // TODO remove this function when all subgroup ops are supported (or make it always return true) |
355 | static bool is_supported_subgroup_op_in_opengl(spv::Op op); |
356 | |
357 | void reset(uint32_t iteration_count); |
358 | void emit_function(SPIRFunction &func, const Bitset &return_flags); |
359 | |
360 | bool has_extension(const std::string &ext) const; |
361 | void require_extension_internal(const std::string &ext); |
362 | |
363 | // Virtualize methods which need to be overridden by subclass targets like C++ and such. |
364 | virtual void emit_function_prototype(SPIRFunction &func, const Bitset &return_flags); |
365 | |
366 | SPIRBlock *current_emitting_block = nullptr; |
367 | SPIRBlock *current_emitting_switch = nullptr; |
368 | bool current_emitting_switch_fallthrough = false; |
369 | |
370 | virtual void emit_instruction(const Instruction &instr); |
371 | struct TemporaryCopy |
372 | { |
373 | uint32_t dst_id; |
374 | uint32_t src_id; |
375 | }; |
376 | TemporaryCopy handle_instruction_precision(const Instruction &instr); |
377 | void emit_block_instructions(SPIRBlock &block); |
378 | |
379 | // For relax_nan_checks. |
380 | GLSLstd450 get_remapped_glsl_op(GLSLstd450 std450_op) const; |
381 | spv::Op get_remapped_spirv_op(spv::Op op) const; |
382 | |
383 | virtual void emit_glsl_op(uint32_t result_type, uint32_t result_id, uint32_t op, const uint32_t *args, |
384 | uint32_t count); |
385 | virtual void emit_spv_amd_shader_ballot_op(uint32_t result_type, uint32_t result_id, uint32_t op, |
386 | const uint32_t *args, uint32_t count); |
387 | virtual void emit_spv_amd_shader_explicit_vertex_parameter_op(uint32_t result_type, uint32_t result_id, uint32_t op, |
388 | const uint32_t *args, uint32_t count); |
389 | virtual void emit_spv_amd_shader_trinary_minmax_op(uint32_t result_type, uint32_t result_id, uint32_t op, |
390 | const uint32_t *args, uint32_t count); |
391 | virtual void emit_spv_amd_gcn_shader_op(uint32_t result_type, uint32_t result_id, uint32_t op, const uint32_t *args, |
392 | uint32_t count); |
virtual void emit_header();
394 | void emit_line_directive(uint32_t file_id, uint32_t line_literal); |
395 | void build_workgroup_size(SmallVector<std::string> &arguments, const SpecializationConstant &x, |
396 | const SpecializationConstant &y, const SpecializationConstant &z); |
397 | |
398 | void request_subgroup_feature(ShaderSubgroupSupportHelper::Feature feature); |
399 | |
400 | virtual void emit_sampled_image_op(uint32_t result_type, uint32_t result_id, uint32_t image_id, uint32_t samp_id); |
401 | virtual void emit_texture_op(const Instruction &i, bool sparse); |
402 | virtual std::string to_texture_op(const Instruction &i, bool sparse, bool *forward, |
403 | SmallVector<uint32_t> &inherited_expressions); |
404 | virtual void emit_subgroup_op(const Instruction &i); |
405 | virtual std::string type_to_glsl(const SPIRType &type, uint32_t id = 0); |
406 | virtual std::string builtin_to_glsl(spv::BuiltIn builtin, spv::StorageClass storage); |
407 | virtual void emit_struct_member(const SPIRType &type, uint32_t member_type_id, uint32_t index, |
const std::string &qualifier = "", uint32_t base_offset = 0);
409 | virtual void emit_struct_padding_target(const SPIRType &type); |
410 | virtual std::string image_type_glsl(const SPIRType &type, uint32_t id = 0); |
411 | std::string constant_expression(const SPIRConstant &c, bool inside_block_like_struct_scope = false); |
412 | virtual std::string constant_op_expression(const SPIRConstantOp &cop); |
413 | virtual std::string constant_expression_vector(const SPIRConstant &c, uint32_t vector); |
414 | virtual void emit_fixup(); |
415 | virtual std::string variable_decl(const SPIRType &type, const std::string &name, uint32_t id = 0); |
416 | virtual bool variable_decl_is_remapped_storage(const SPIRVariable &var, spv::StorageClass storage) const; |
417 | virtual std::string to_func_call_arg(const SPIRFunction::Parameter &arg, uint32_t id); |
418 | |
419 | struct TextureFunctionBaseArguments |
420 | { |
421 | // GCC 4.8 workarounds, it doesn't understand '{}' constructor here, use explicit default constructor. |
422 | TextureFunctionBaseArguments() = default; |
423 | VariableID img = 0; |
424 | const SPIRType *imgtype = nullptr; |
425 | bool is_fetch = false, is_gather = false, is_proj = false; |
426 | }; |
427 | |
428 | struct TextureFunctionNameArguments |
429 | { |
430 | // GCC 4.8 workarounds, it doesn't understand '{}' constructor here, use explicit default constructor. |
431 | TextureFunctionNameArguments() = default; |
432 | TextureFunctionBaseArguments base; |
433 | bool has_array_offsets = false, has_offset = false, has_grad = false; |
434 | bool has_dref = false, is_sparse_feedback = false, has_min_lod = false; |
435 | uint32_t lod = 0; |
436 | }; |
437 | virtual std::string to_function_name(const TextureFunctionNameArguments &args); |
438 | |
439 | struct TextureFunctionArguments |
440 | { |
441 | // GCC 4.8 workarounds, it doesn't understand '{}' constructor here, use explicit default constructor. |
442 | TextureFunctionArguments() = default; |
443 | TextureFunctionBaseArguments base; |
444 | uint32_t coord = 0, coord_components = 0, dref = 0; |
445 | uint32_t grad_x = 0, grad_y = 0, lod = 0, coffset = 0, offset = 0; |
446 | uint32_t bias = 0, component = 0, sample = 0, sparse_texel = 0, min_lod = 0; |
447 | bool nonuniform_expression = false; |
448 | }; |
449 | virtual std::string to_function_args(const TextureFunctionArguments &args, bool *p_forward); |
450 | |
451 | void emit_sparse_feedback_temporaries(uint32_t result_type_id, uint32_t id, uint32_t &feedback_id, |
452 | uint32_t &texel_id); |
453 | uint32_t get_sparse_feedback_texel_id(uint32_t id) const; |
454 | virtual void emit_buffer_block(const SPIRVariable &type); |
455 | virtual void emit_push_constant_block(const SPIRVariable &var); |
456 | virtual void emit_uniform(const SPIRVariable &var); |
457 | virtual std::string unpack_expression_type(std::string expr_str, const SPIRType &type, uint32_t physical_type_id, |
458 | bool packed_type, bool row_major); |
459 | |
460 | virtual bool builtin_translates_to_nonarray(spv::BuiltIn builtin) const; |
461 | |
462 | void emit_copy_logical_type(uint32_t lhs_id, uint32_t lhs_type_id, uint32_t rhs_id, uint32_t rhs_type_id, |
463 | SmallVector<uint32_t> chain); |
464 | |
465 | StringStream<> buffer; |
466 | |
467 | template <typename T> |
468 | inline void statement_inner(T &&t) |
469 | { |
470 | buffer << std::forward<T>(t); |
471 | statement_count++; |
472 | } |
473 | |
474 | template <typename T, typename... Ts> |
475 | inline void statement_inner(T &&t, Ts &&... ts) |
476 | { |
477 | buffer << std::forward<T>(t); |
478 | statement_count++; |
479 | statement_inner(std::forward<Ts>(ts)...); |
480 | } |
481 | |
482 | template <typename... Ts> |
483 | inline void statement(Ts &&... ts) |
484 | { |
485 | if (is_forcing_recompilation()) |
486 | { |
487 | // Do not bother emitting code while force_recompile is active. |
488 | // We will compile again. |
489 | statement_count++; |
490 | return; |
491 | } |
492 | |
493 | if (redirect_statement) |
494 | { |
495 | redirect_statement->push_back(join(std::forward<Ts>(ts)...)); |
496 | statement_count++; |
497 | } |
498 | else |
499 | { |
500 | for (uint32_t i = 0; i < indent; i++) |
buffer << "\t";
502 | statement_inner(std::forward<Ts>(ts)...); |
503 | buffer << '\n'; |
504 | } |
505 | } |
506 | |
507 | template <typename... Ts> |
508 | inline void statement_no_indent(Ts &&... ts) |
509 | { |
510 | auto old_indent = indent; |
511 | indent = 0; |
512 | statement(std::forward<Ts>(ts)...); |
513 | indent = old_indent; |
514 | } |
515 | |
516 | // Used for implementing continue blocks where |
517 | // we want to obtain a list of statements we can merge |
518 | // on a single line separated by comma. |
519 | SmallVector<std::string> *redirect_statement = nullptr; |
520 | const SPIRBlock *current_continue_block = nullptr; |
521 | bool block_temporary_hoisting = false; |
522 | |
523 | void begin_scope(); |
524 | void end_scope(); |
525 | void end_scope(const std::string &trailer); |
526 | void end_scope_decl(); |
527 | void end_scope_decl(const std::string &decl); |
528 | |
529 | Options options; |
530 | |
531 | virtual std::string type_to_array_glsl( |
532 | const SPIRType &type); // Allow Metal to use the array<T> template to make arrays a value type |
533 | std::string to_array_size(const SPIRType &type, uint32_t index); |
534 | uint32_t to_array_size_literal(const SPIRType &type, uint32_t index) const; |
535 | uint32_t to_array_size_literal(const SPIRType &type) const; |
536 | virtual std::string variable_decl(const SPIRVariable &variable); // Threadgroup arrays can't have a wrapper type |
537 | std::string variable_decl_function_local(SPIRVariable &variable); |
538 | |
539 | void add_local_variable_name(uint32_t id); |
540 | void add_resource_name(uint32_t id); |
541 | void add_member_name(SPIRType &type, uint32_t name); |
542 | void add_function_overload(const SPIRFunction &func); |
543 | |
544 | virtual bool is_non_native_row_major_matrix(uint32_t id); |
545 | virtual bool member_is_non_native_row_major_matrix(const SPIRType &type, uint32_t index); |
546 | bool member_is_remapped_physical_type(const SPIRType &type, uint32_t index) const; |
547 | bool member_is_packed_physical_type(const SPIRType &type, uint32_t index) const; |
548 | virtual std::string convert_row_major_matrix(std::string exp_str, const SPIRType &exp_type, |
549 | uint32_t physical_type_id, bool is_packed); |
550 | |
551 | std::unordered_set<std::string> local_variable_names; |
552 | std::unordered_set<std::string> resource_names; |
553 | std::unordered_set<std::string> block_input_names; |
554 | std::unordered_set<std::string> block_output_names; |
555 | std::unordered_set<std::string> block_ubo_names; |
556 | std::unordered_set<std::string> block_ssbo_names; |
557 | std::unordered_set<std::string> block_names; // A union of all block_*_names. |
558 | std::unordered_map<std::string, std::unordered_set<uint64_t>> function_overloads; |
559 | std::unordered_map<uint32_t, std::string> preserved_aliases; |
560 | void preserve_alias_on_reset(uint32_t id); |
561 | void reset_name_caches(); |
562 | |
563 | bool processing_entry_point = false; |
564 | |
// Can be overridden by subclass backends for trivial things which
566 | // shouldn't need polymorphism. |
567 | struct BackendVariations |
568 | { |
std::string discard_literal = "discard";
std::string demote_literal = "demote";
std::string null_pointer_literal = "";
bool float_literal_suffix = false;
bool double_literal_suffix = true;
bool uint32_t_literal_suffix = true;
bool long_long_literal_suffix = false;
const char *basic_int_type = "int";
const char *basic_uint_type = "uint";
const char *basic_int8_type = "int8_t";
const char *basic_uint8_type = "uint8_t";
const char *basic_int16_type = "int16_t";
const char *basic_uint16_type = "uint16_t";
const char *int16_t_literal_suffix = "s";
const char *uint16_t_literal_suffix = "us";
const char *nonuniform_qualifier = "nonuniformEXT";
const char *boolean_mix_function = "mix";
586 | bool swizzle_is_function = false; |
587 | bool shared_is_implied = false; |
588 | bool unsized_array_supported = true; |
589 | bool explicit_struct_type = false; |
590 | bool use_initializer_list = false; |
591 | bool use_typed_initializer_list = false; |
592 | bool can_declare_struct_inline = true; |
593 | bool can_declare_arrays_inline = true; |
594 | bool native_row_major_matrix = true; |
595 | bool use_constructor_splatting = true; |
596 | bool allow_precision_qualifiers = false; |
597 | bool can_swizzle_scalar = false; |
598 | bool force_gl_in_out_block = false; |
599 | bool can_return_array = true; |
600 | bool allow_truncated_access_chain = false; |
601 | bool supports_extensions = false; |
602 | bool supports_empty_struct = false; |
603 | bool array_is_value_type = true; |
604 | bool array_is_value_type_in_buffer_blocks = true; |
605 | bool comparison_image_samples_scalar = false; |
606 | bool native_pointers = false; |
607 | bool support_small_type_sampling_result = false; |
608 | bool support_case_fallthrough = true; |
609 | bool use_array_constructor = false; |
610 | bool needs_row_major_load_workaround = false; |
611 | bool support_pointer_to_pointer = false; |
612 | bool support_precise_qualifier = false; |
613 | bool support_64bit_switch = false; |
614 | bool workgroup_size_is_hidden = false; |
615 | bool requires_relaxed_precision_analysis = false; |
616 | } backend; |
617 | |
618 | void emit_struct(SPIRType &type); |
619 | void emit_resources(); |
620 | void emit_extension_workarounds(spv::ExecutionModel model); |
621 | void emit_buffer_block_native(const SPIRVariable &var); |
622 | void emit_buffer_reference_block(uint32_t type_id, bool forward_declaration); |
623 | void emit_buffer_block_legacy(const SPIRVariable &var); |
624 | void emit_buffer_block_flattened(const SPIRVariable &type); |
625 | void fixup_implicit_builtin_block_names(); |
626 | void emit_declared_builtin_block(spv::StorageClass storage, spv::ExecutionModel model); |
627 | bool should_force_emit_builtin_block(spv::StorageClass storage); |
628 | void emit_push_constant_block_vulkan(const SPIRVariable &var); |
629 | void emit_push_constant_block_glsl(const SPIRVariable &var); |
630 | void emit_interface_block(const SPIRVariable &type); |
631 | void emit_flattened_io_block(const SPIRVariable &var, const char *qual); |
632 | void emit_flattened_io_block_struct(const std::string &basename, const SPIRType &type, const char *qual, |
633 | const SmallVector<uint32_t> &indices); |
634 | void emit_flattened_io_block_member(const std::string &basename, const SPIRType &type, const char *qual, |
635 | const SmallVector<uint32_t> &indices); |
636 | void emit_block_chain(SPIRBlock &block); |
637 | void emit_hoisted_temporaries(SmallVector<std::pair<TypeID, ID>> &temporaries); |
638 | std::string constant_value_macro_name(uint32_t id); |
639 | int get_constant_mapping_to_workgroup_component(const SPIRConstant &constant) const; |
640 | void emit_constant(const SPIRConstant &constant); |
641 | void emit_specialization_constant_op(const SPIRConstantOp &constant); |
642 | std::string emit_continue_block(uint32_t continue_block, bool follow_true_block, bool follow_false_block); |
bool attempt_emit_loop_header(SPIRBlock &block, SPIRBlock::Method method);
644 | |
645 | void branch(BlockID from, BlockID to); |
646 | void branch_to_continue(BlockID from, BlockID to); |
647 | void branch(BlockID from, uint32_t cond, BlockID true_block, BlockID false_block); |
648 | void flush_phi(BlockID from, BlockID to); |
649 | void flush_variable_declaration(uint32_t id); |
650 | void flush_undeclared_variables(SPIRBlock &block); |
651 | void emit_variable_temporary_copies(const SPIRVariable &var); |
652 | |
653 | bool should_dereference(uint32_t id); |
654 | bool should_forward(uint32_t id) const; |
655 | bool should_suppress_usage_tracking(uint32_t id) const; |
656 | void emit_mix_op(uint32_t result_type, uint32_t id, uint32_t left, uint32_t right, uint32_t lerp); |
657 | void emit_nminmax_op(uint32_t result_type, uint32_t id, uint32_t op0, uint32_t op1, GLSLstd450 op); |
658 | bool to_trivial_mix_op(const SPIRType &type, std::string &op, uint32_t left, uint32_t right, uint32_t lerp); |
659 | void emit_quaternary_func_op(uint32_t result_type, uint32_t result_id, uint32_t op0, uint32_t op1, uint32_t op2, |
660 | uint32_t op3, const char *op); |
661 | void emit_trinary_func_op(uint32_t result_type, uint32_t result_id, uint32_t op0, uint32_t op1, uint32_t op2, |
662 | const char *op); |
663 | void emit_binary_func_op(uint32_t result_type, uint32_t result_id, uint32_t op0, uint32_t op1, const char *op); |
664 | void emit_atomic_func_op(uint32_t result_type, uint32_t result_id, uint32_t op0, uint32_t op1, const char *op); |
665 | void emit_atomic_func_op(uint32_t result_type, uint32_t result_id, uint32_t op0, uint32_t op1, uint32_t op2, const char *op); |
666 | |
667 | void emit_unary_func_op_cast(uint32_t result_type, uint32_t result_id, uint32_t op0, const char *op, |
668 | SPIRType::BaseType input_type, SPIRType::BaseType expected_result_type); |
669 | void emit_binary_func_op_cast(uint32_t result_type, uint32_t result_id, uint32_t op0, uint32_t op1, const char *op, |
670 | SPIRType::BaseType input_type, bool skip_cast_if_equal_type); |
671 | void emit_binary_func_op_cast_clustered(uint32_t result_type, uint32_t result_id, uint32_t op0, uint32_t op1, |
672 | const char *op, SPIRType::BaseType input_type); |
673 | void emit_trinary_func_op_cast(uint32_t result_type, uint32_t result_id, uint32_t op0, uint32_t op1, uint32_t op2, |
674 | const char *op, SPIRType::BaseType input_type); |
void emit_trinary_func_op_bitextract(uint32_t result_type, uint32_t result_id, uint32_t op0, uint32_t op1,
676 | uint32_t op2, const char *op, SPIRType::BaseType expected_result_type, |
677 | SPIRType::BaseType input_type0, SPIRType::BaseType input_type1, |
678 | SPIRType::BaseType input_type2); |
679 | void emit_bitfield_insert_op(uint32_t result_type, uint32_t result_id, uint32_t op0, uint32_t op1, uint32_t op2, |
680 | uint32_t op3, const char *op, SPIRType::BaseType offset_count_type); |
681 | |
682 | void emit_unary_func_op(uint32_t result_type, uint32_t result_id, uint32_t op0, const char *op); |
683 | void emit_unrolled_unary_op(uint32_t result_type, uint32_t result_id, uint32_t operand, const char *op); |
684 | void emit_binary_op(uint32_t result_type, uint32_t result_id, uint32_t op0, uint32_t op1, const char *op); |
685 | void emit_unrolled_binary_op(uint32_t result_type, uint32_t result_id, uint32_t op0, uint32_t op1, const char *op, |
686 | bool negate, SPIRType::BaseType expected_type); |
687 | void emit_binary_op_cast(uint32_t result_type, uint32_t result_id, uint32_t op0, uint32_t op1, const char *op, |
688 | SPIRType::BaseType input_type, bool skip_cast_if_equal_type); |
689 | |
690 | SPIRType binary_op_bitcast_helper(std::string &cast_op0, std::string &cast_op1, SPIRType::BaseType &input_type, |
691 | uint32_t op0, uint32_t op1, bool skip_cast_if_equal_type); |
692 | |
693 | virtual bool emit_complex_bitcast(uint32_t result_type, uint32_t id, uint32_t op0); |
694 | |
695 | std::string to_ternary_expression(const SPIRType &result_type, uint32_t select, uint32_t true_value, |
696 | uint32_t false_value); |
697 | |
698 | void emit_unary_op(uint32_t result_type, uint32_t result_id, uint32_t op0, const char *op); |
699 | bool expression_is_forwarded(uint32_t id) const; |
700 | bool expression_suppresses_usage_tracking(uint32_t id) const; |
701 | bool expression_read_implies_multiple_reads(uint32_t id) const; |
702 | SPIRExpression &emit_op(uint32_t result_type, uint32_t result_id, const std::string &rhs, bool forward_rhs, |
703 | bool suppress_usage_tracking = false); |
704 | |
705 | void access_chain_internal_append_index(std::string &expr, uint32_t base, const SPIRType *type, |
706 | AccessChainFlags flags, bool &access_chain_is_arrayed, uint32_t index); |
707 | |
708 | std::string access_chain_internal(uint32_t base, const uint32_t *indices, uint32_t count, AccessChainFlags flags, |
709 | AccessChainMeta *meta); |
710 | |
711 | spv::StorageClass get_expression_effective_storage_class(uint32_t ptr); |
712 | virtual bool access_chain_needs_stage_io_builtin_translation(uint32_t base); |
713 | |
714 | virtual void prepare_access_chain_for_scalar_access(std::string &expr, const SPIRType &type, |
715 | spv::StorageClass storage, bool &is_packed); |
716 | |
717 | std::string access_chain(uint32_t base, const uint32_t *indices, uint32_t count, const SPIRType &target_type, |
718 | AccessChainMeta *meta = nullptr, bool ptr_chain = false); |
719 | |
720 | std::string flattened_access_chain(uint32_t base, const uint32_t *indices, uint32_t count, |
721 | const SPIRType &target_type, uint32_t offset, uint32_t matrix_stride, |
722 | uint32_t array_stride, bool need_transpose); |
723 | std::string flattened_access_chain_struct(uint32_t base, const uint32_t *indices, uint32_t count, |
724 | const SPIRType &target_type, uint32_t offset); |
725 | std::string flattened_access_chain_matrix(uint32_t base, const uint32_t *indices, uint32_t count, |
726 | const SPIRType &target_type, uint32_t offset, uint32_t matrix_stride, |
727 | bool need_transpose); |
728 | std::string flattened_access_chain_vector(uint32_t base, const uint32_t *indices, uint32_t count, |
729 | const SPIRType &target_type, uint32_t offset, uint32_t matrix_stride, |
730 | bool need_transpose); |
731 | std::pair<std::string, uint32_t> flattened_access_chain_offset(const SPIRType &basetype, const uint32_t *indices, |
732 | uint32_t count, uint32_t offset, |
733 | uint32_t word_stride, bool *need_transpose = nullptr, |
734 | uint32_t *matrix_stride = nullptr, |
735 | uint32_t *array_stride = nullptr, |
736 | bool ptr_chain = false); |
737 | |
738 | const char *index_to_swizzle(uint32_t index); |
739 | std::string remap_swizzle(const SPIRType &result_type, uint32_t input_components, const std::string &expr); |
740 | std::string declare_temporary(uint32_t type, uint32_t id); |
741 | void emit_uninitialized_temporary(uint32_t type, uint32_t id); |
742 | SPIRExpression &emit_uninitialized_temporary_expression(uint32_t type, uint32_t id); |
743 | void append_global_func_args(const SPIRFunction &func, uint32_t index, SmallVector<std::string> &arglist); |
744 | std::string to_non_uniform_aware_expression(uint32_t id); |
745 | std::string to_expression(uint32_t id, bool register_expression_read = true); |
746 | std::string to_composite_constructor_expression(uint32_t id, bool block_like_type); |
747 | std::string to_rerolled_array_expression(const std::string &expr, const SPIRType &type); |
748 | std::string to_enclosed_expression(uint32_t id, bool register_expression_read = true); |
749 | std::string to_unpacked_expression(uint32_t id, bool register_expression_read = true); |
750 | std::string to_unpacked_row_major_matrix_expression(uint32_t id); |
751 | std::string to_enclosed_unpacked_expression(uint32_t id, bool register_expression_read = true); |
752 | std::string to_dereferenced_expression(uint32_t id, bool register_expression_read = true); |
753 | std::string to_pointer_expression(uint32_t id, bool register_expression_read = true); |
754 | std::string to_enclosed_pointer_expression(uint32_t id, bool register_expression_read = true); |
std::string to_extract_component_expression(uint32_t id, uint32_t index);
std::string to_extract_constant_composite_expression(uint32_t result_type, const SPIRConstant &c,
757 | const uint32_t *chain, uint32_t length); |
758 | std::string enclose_expression(const std::string &expr); |
759 | std::string dereference_expression(const SPIRType &expression_type, const std::string &expr); |
760 | std::string address_of_expression(const std::string &expr); |
761 | void strip_enclosed_expression(std::string &expr); |
762 | std::string to_member_name(const SPIRType &type, uint32_t index); |
763 | virtual std::string to_member_reference(uint32_t base, const SPIRType &type, uint32_t index, bool ptr_chain); |
764 | std::string to_multi_member_reference(const SPIRType &type, const SmallVector<uint32_t> &indices); |
765 | std::string type_to_glsl_constructor(const SPIRType &type); |
766 | std::string argument_decl(const SPIRFunction::Parameter &arg); |
767 | virtual std::string to_qualifiers_glsl(uint32_t id); |
768 | void fixup_io_block_patch_qualifiers(const SPIRVariable &var); |
769 | void emit_output_variable_initializer(const SPIRVariable &var); |
770 | std::string to_precision_qualifiers_glsl(uint32_t id); |
771 | virtual const char *to_storage_qualifiers_glsl(const SPIRVariable &var); |
772 | std::string flags_to_qualifiers_glsl(const SPIRType &type, const Bitset &flags); |
773 | const char *format_to_glsl(spv::ImageFormat format); |
774 | virtual std::string layout_for_member(const SPIRType &type, uint32_t index); |
775 | virtual std::string to_interpolation_qualifiers(const Bitset &flags); |
776 | std::string layout_for_variable(const SPIRVariable &variable); |
777 | std::string to_combined_image_sampler(VariableID image_id, VariableID samp_id); |
778 | virtual bool skip_argument(uint32_t id) const; |
779 | virtual void emit_array_copy(const std::string &lhs, uint32_t lhs_id, uint32_t rhs_id, |
780 | spv::StorageClass lhs_storage, spv::StorageClass rhs_storage); |
781 | virtual void emit_block_hints(const SPIRBlock &block); |
782 | virtual std::string to_initializer_expression(const SPIRVariable &var); |
783 | virtual std::string to_zero_initialized_expression(uint32_t type_id); |
784 | bool type_can_zero_initialize(const SPIRType &type) const; |
785 | |
786 | bool buffer_is_packing_standard(const SPIRType &type, BufferPackingStandard packing, |
787 | uint32_t *failed_index = nullptr, uint32_t start_offset = 0, |
788 | uint32_t end_offset = ~(0u)); |
789 | std::string buffer_to_packing_standard(const SPIRType &type, bool support_std430_without_scalar_layout); |
790 | |
791 | uint32_t type_to_packed_base_size(const SPIRType &type, BufferPackingStandard packing); |
792 | uint32_t type_to_packed_alignment(const SPIRType &type, const Bitset &flags, BufferPackingStandard packing); |
793 | uint32_t type_to_packed_array_stride(const SPIRType &type, const Bitset &flags, BufferPackingStandard packing); |
794 | uint32_t type_to_packed_size(const SPIRType &type, const Bitset &flags, BufferPackingStandard packing); |
795 | uint32_t type_to_location_count(const SPIRType &type) const; |
796 | |
797 | std::string bitcast_glsl(const SPIRType &result_type, uint32_t arg); |
798 | virtual std::string bitcast_glsl_op(const SPIRType &result_type, const SPIRType &argument_type); |
799 | |
800 | std::string bitcast_expression(SPIRType::BaseType target_type, uint32_t arg); |
801 | std::string bitcast_expression(const SPIRType &target_type, SPIRType::BaseType expr_type, const std::string &expr); |
802 | |
803 | std::string build_composite_combiner(uint32_t result_type, const uint32_t *elems, uint32_t length); |
804 | bool remove_duplicate_swizzle(std::string &op); |
805 | bool remove_unity_swizzle(uint32_t base, std::string &op); |
806 | |
// Can modify flags to remove readonly/writeonly if image type
808 | // and force recompile. |
809 | bool check_atomic_image(uint32_t id); |
810 | |
811 | virtual void replace_illegal_names(); |
812 | void replace_illegal_names(const std::unordered_set<std::string> &keywords); |
813 | virtual void emit_entry_point_declarations(); |
814 | |
815 | void replace_fragment_output(SPIRVariable &var); |
816 | void replace_fragment_outputs(); |
817 | std::string legacy_tex_op(const std::string &op, const SPIRType &imgtype, uint32_t id); |
818 | |
819 | void forward_relaxed_precision(uint32_t dst_id, const uint32_t *args, uint32_t length); |
820 | void analyze_precision_requirements(uint32_t type_id, uint32_t dst_id, uint32_t *args, uint32_t length); |
821 | Options::Precision analyze_expression_precision(const uint32_t *args, uint32_t length) const; |
822 | |
823 | uint32_t indent = 0; |
824 | |
825 | std::unordered_set<uint32_t> emitted_functions; |
826 | |
827 | // Ensure that we declare phi-variable copies even if the original declaration isn't deferred |
828 | std::unordered_set<uint32_t> flushed_phi_variables; |
829 | |
830 | std::unordered_set<uint32_t> flattened_buffer_blocks; |
831 | std::unordered_map<uint32_t, bool> flattened_structs; |
832 | |
833 | ShaderSubgroupSupportHelper shader_subgroup_supporter; |
834 | |
835 | std::string load_flattened_struct(const std::string &basename, const SPIRType &type); |
836 | std::string to_flattened_struct_member(const std::string &basename, const SPIRType &type, uint32_t index); |
837 | void store_flattened_struct(uint32_t lhs_id, uint32_t value); |
838 | void store_flattened_struct(const std::string &basename, uint32_t rhs, const SPIRType &type, |
839 | const SmallVector<uint32_t> &indices); |
840 | std::string to_flattened_access_chain_expression(uint32_t id); |
841 | |
842 | // Usage tracking. If a temporary is used more than once, use the temporary instead to |
843 | // avoid AST explosion when SPIRV is generated with pure SSA and doesn't write stuff to variables. |
844 | std::unordered_map<uint32_t, uint32_t> expression_usage_counts; |
845 | void track_expression_read(uint32_t id); |
846 | |
847 | SmallVector<std::string> forced_extensions; |
SmallVector<std::string> header_lines;
849 | |
850 | // Used when expressions emit extra opcodes with their own unique IDs, |
851 | // and we need to reuse the IDs across recompilation loops. |
852 | // Currently used by NMin/Max/Clamp implementations. |
std::unordered_map<uint32_t, uint32_t> extra_sub_expressions;
854 | |
855 | SmallVector<TypeID> workaround_ubo_load_overload_types; |
856 | void request_workaround_wrapper_overload(TypeID id); |
857 | void rewrite_load_for_wrapped_row_major(std::string &expr, TypeID loaded_type, ID ptr); |
858 | |
859 | uint32_t statement_count = 0; |
860 | |
861 | inline bool is_legacy() const |
862 | { |
863 | return (options.es && options.version < 300) || (!options.es && options.version < 130); |
864 | } |
865 | |
866 | inline bool is_legacy_es() const |
867 | { |
868 | return options.es && options.version < 300; |
869 | } |
870 | |
871 | inline bool is_legacy_desktop() const |
872 | { |
873 | return !options.es && options.version < 130; |
874 | } |
875 | |
876 | bool requires_transpose_2x2 = false; |
877 | bool requires_transpose_3x3 = false; |
878 | bool requires_transpose_4x4 = false; |
879 | bool ray_tracing_is_khr = false; |
880 | bool barycentric_is_nv = false; |
881 | void ray_tracing_khr_fixup_locations(); |
882 | |
883 | bool args_will_forward(uint32_t id, const uint32_t *args, uint32_t num_args, bool pure); |
884 | void register_call_out_argument(uint32_t id); |
885 | void register_impure_function_call(); |
886 | void register_control_dependent_expression(uint32_t expr); |
887 | |
888 | // GL_EXT_shader_pixel_local_storage support. |
889 | std::vector<PlsRemap> pls_inputs; |
890 | std::vector<PlsRemap> pls_outputs; |
891 | std::string pls_decl(const PlsRemap &variable); |
892 | const char *to_pls_qualifiers_glsl(const SPIRVariable &variable); |
893 | void emit_pls(); |
894 | void remap_pls_variables(); |
895 | |
896 | // GL_EXT_shader_framebuffer_fetch support. |
897 | std::vector<std::pair<uint32_t, uint32_t>> subpass_to_framebuffer_fetch_attachment; |
898 | std::vector<std::pair<uint32_t, bool>> inout_color_attachments; |
899 | bool location_is_framebuffer_fetch(uint32_t location) const; |
900 | bool location_is_non_coherent_framebuffer_fetch(uint32_t location) const; |
901 | bool subpass_input_is_framebuffer_fetch(uint32_t id) const; |
902 | void emit_inout_fragment_outputs_copy_to_subpass_inputs(); |
903 | const SPIRVariable *find_subpass_input_by_attachment_index(uint32_t index) const; |
904 | const SPIRVariable *find_color_output_by_location(uint32_t location) const; |
905 | |
// A variant which takes two sets of names. The secondary is only used to verify there are no collisions,
907 | // but the set is not updated when we have found a new name. |
908 | // Used primarily when adding block interface names. |
909 | void add_variable(std::unordered_set<std::string> &variables_primary, |
910 | const std::unordered_set<std::string> &variables_secondary, std::string &name); |
911 | |
912 | void check_function_call_constraints(const uint32_t *args, uint32_t length); |
913 | void handle_invalid_expression(uint32_t id); |
914 | void force_temporary_and_recompile(uint32_t id); |
915 | void find_static_extensions(); |
916 | |
917 | uint32_t consume_temporary_in_precision_context(uint32_t type_id, uint32_t id, Options::Precision precision); |
918 | std::unordered_map<uint32_t, uint32_t> temporary_to_mirror_precision_alias; |
919 | std::unordered_set<uint32_t> composite_insert_overwritten; |
920 | std::unordered_set<uint32_t> block_composite_insert_overwrite; |
921 | |
922 | std::string emit_for_loop_initializers(const SPIRBlock &block); |
923 | void emit_while_loop_initializers(const SPIRBlock &block); |
924 | bool for_loop_initializers_are_same_type(const SPIRBlock &block); |
925 | bool optimize_read_modify_write(const SPIRType &type, const std::string &lhs, const std::string &rhs); |
926 | void fixup_image_load_store_access(); |
927 | |
928 | bool type_is_empty(const SPIRType &type); |
929 | |
930 | virtual void declare_undefined_values(); |
931 | |
932 | bool can_use_io_location(spv::StorageClass storage, bool block); |
933 | const Instruction *get_next_instruction_in_block(const Instruction &instr); |
934 | static uint32_t mask_relevant_memory_semantics(uint32_t semantics); |
935 | |
936 | std::string convert_half_to_string(const SPIRConstant &value, uint32_t col, uint32_t row); |
937 | std::string convert_float_to_string(const SPIRConstant &value, uint32_t col, uint32_t row); |
938 | std::string convert_double_to_string(const SPIRConstant &value, uint32_t col, uint32_t row); |
939 | |
940 | std::string convert_separate_image_to_expression(uint32_t id); |
941 | |
// Builtins in GLSL always have a specific signedness, but the SPIR-V can declare them
943 | // as either unsigned or signed. |
944 | // Sometimes we will need to automatically perform casts on load and store to make this work. |
945 | virtual void cast_to_variable_store(uint32_t target_id, std::string &expr, const SPIRType &expr_type); |
946 | virtual void cast_from_variable_load(uint32_t source_id, std::string &expr, const SPIRType &expr_type); |
947 | void unroll_array_from_complex_load(uint32_t target_id, uint32_t source_id, std::string &expr); |
948 | bool unroll_array_to_complex_store(uint32_t target_id, uint32_t source_id); |
949 | void convert_non_uniform_expression(std::string &expr, uint32_t ptr_id); |
950 | |
951 | void handle_store_to_invariant_variable(uint32_t store_id, uint32_t value_id); |
952 | void disallow_forwarding_in_expression_chain(const SPIRExpression &expr); |
953 | |
954 | bool expression_is_constant_null(uint32_t id) const; |
955 | bool expression_is_non_value_type_array(uint32_t ptr); |
956 | virtual void emit_store_statement(uint32_t lhs_expression, uint32_t rhs_expression); |
957 | |
958 | uint32_t get_integer_width_for_instruction(const Instruction &instr) const; |
959 | uint32_t get_integer_width_for_glsl_instruction(GLSLstd450 op, const uint32_t *arguments, uint32_t length) const; |
960 | |
961 | bool variable_is_lut(const SPIRVariable &var) const; |
962 | |
963 | char current_locale_radix_character = '.'; |
964 | |
965 | void fixup_type_alias(); |
966 | void reorder_type_alias(); |
967 | void fixup_anonymous_struct_names(); |
968 | void fixup_anonymous_struct_names(std::unordered_set<uint32_t> &visited, const SPIRType &type); |
969 | |
970 | static const char *vector_swizzle(int vecsize, int index); |
971 | |
972 | bool is_stage_output_location_masked(uint32_t location, uint32_t component) const; |
973 | bool is_stage_output_builtin_masked(spv::BuiltIn builtin) const; |
974 | bool is_stage_output_variable_masked(const SPIRVariable &var) const; |
975 | bool is_stage_output_block_member_masked(const SPIRVariable &var, uint32_t index, bool strip_array) const; |
976 | uint32_t get_accumulated_member_location(const SPIRVariable &var, uint32_t mbr_idx, bool strip_array) const; |
977 | uint32_t get_declared_member_location(const SPIRVariable &var, uint32_t mbr_idx, bool strip_array) const; |
978 | std::unordered_set<LocationComponentPair, InternalHasher> masked_output_locations; |
979 | std::unordered_set<uint32_t> masked_output_builtins; |
980 | |
981 | private: |
982 | void init(); |
983 | }; |
984 | } // namespace SPIRV_CROSS_NAMESPACE |
985 | |
986 | #endif |
987 | |