1 | //===- SPIR.cpp -----------------------------------------------------------===// |
2 | // |
3 | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. |
4 | // See https://llvm.org/LICENSE.txt for license information. |
5 | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception |
6 | // |
7 | //===----------------------------------------------------------------------===// |
8 | |
9 | #include "ABIInfoImpl.h" |
10 | #include "HLSLBufferLayoutBuilder.h" |
11 | #include "TargetInfo.h" |
12 | |
13 | using namespace clang; |
14 | using namespace clang::CodeGen; |
15 | |
16 | //===----------------------------------------------------------------------===// |
17 | // Base ABI and target codegen info implementation common between SPIR and |
18 | // SPIR-V. |
19 | //===----------------------------------------------------------------------===// |
20 | |
21 | namespace { |
22 | class CommonSPIRABIInfo : public DefaultABIInfo { |
23 | public: |
24 | CommonSPIRABIInfo(CodeGenTypes &CGT) : DefaultABIInfo(CGT) { setCCs(); } |
25 | |
26 | private: |
27 | void setCCs(); |
28 | }; |
29 | |
30 | class SPIRVABIInfo : public CommonSPIRABIInfo { |
31 | public: |
32 | SPIRVABIInfo(CodeGenTypes &CGT) : CommonSPIRABIInfo(CGT) {} |
33 | void computeInfo(CGFunctionInfo &FI) const override; |
34 | |
35 | private: |
36 | ABIArgInfo classifyReturnType(QualType RetTy) const; |
37 | ABIArgInfo classifyKernelArgumentType(QualType Ty) const; |
38 | ABIArgInfo classifyArgumentType(QualType Ty) const; |
39 | }; |
40 | } // end anonymous namespace |
41 | namespace { |
42 | class CommonSPIRTargetCodeGenInfo : public TargetCodeGenInfo { |
43 | public: |
44 | CommonSPIRTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT) |
      : TargetCodeGenInfo(std::make_unique<CommonSPIRABIInfo>(CGT)) {}
46 | CommonSPIRTargetCodeGenInfo(std::unique_ptr<ABIInfo> ABIInfo) |
47 | : TargetCodeGenInfo(std::move(ABIInfo)) {} |
48 | |
49 | LangAS getASTAllocaAddressSpace() const override { |
    return getLangASFromTargetAS(
        getABIInfo().getDataLayout().getAllocaAddrSpace());
52 | } |
53 | |
54 | unsigned getOpenCLKernelCallingConv() const override; |
55 | llvm::Type *getOpenCLType(CodeGenModule &CGM, const Type *T) const override; |
56 | llvm::Type * |
57 | getHLSLType(CodeGenModule &CGM, const Type *Ty, |
58 | const SmallVector<int32_t> *Packoffsets = nullptr) const override; |
59 | llvm::Type *getSPIRVImageTypeFromHLSLResource( |
60 | const HLSLAttributedResourceType::Attributes &attributes, |
61 | llvm::Type *ElementType, llvm::LLVMContext &Ctx) const; |
62 | void |
63 | setOCLKernelStubCallingConvention(const FunctionType *&FT) const override; |
64 | }; |
65 | class SPIRVTargetCodeGenInfo : public CommonSPIRTargetCodeGenInfo { |
66 | public: |
67 | SPIRVTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT) |
      : CommonSPIRTargetCodeGenInfo(std::make_unique<SPIRVABIInfo>(CGT)) {}
69 | void setCUDAKernelCallingConvention(const FunctionType *&FT) const override; |
70 | LangAS getGlobalVarAddressSpace(CodeGenModule &CGM, |
71 | const VarDecl *D) const override; |
72 | void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV, |
73 | CodeGen::CodeGenModule &M) const override; |
74 | llvm::SyncScope::ID getLLVMSyncScopeID(const LangOptions &LangOpts, |
75 | SyncScope Scope, |
76 | llvm::AtomicOrdering Ordering, |
77 | llvm::LLVMContext &Ctx) const override; |
78 | }; |
79 | |
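// Map a Clang atomic sync scope onto the name of the corresponding LLVM sync
// scope; the empty string denotes the default (system/CrossDevice) scope.
// Illustrative example (not part of this file): a HIP call such as
//   __hip_atomic_fetch_add(P, 1, __ATOMIC_SEQ_CST, __HIP_MEMORY_SCOPE_WORKGROUP)
// reaches this function as SyncScope::HIPWorkgroup and is ultimately emitted
// as `atomicrmw add ... syncscope("workgroup") seq_cst`.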
80 | inline StringRef mapClangSyncScopeToLLVM(SyncScope Scope) { |
81 | switch (Scope) { |
82 | case SyncScope::HIPSingleThread: |
83 | case SyncScope::SingleScope: |
84 | return "singlethread" ; |
85 | case SyncScope::HIPWavefront: |
86 | case SyncScope::OpenCLSubGroup: |
87 | case SyncScope::WavefrontScope: |
88 | return "subgroup" ; |
89 | case SyncScope::HIPWorkgroup: |
90 | case SyncScope::OpenCLWorkGroup: |
91 | case SyncScope::WorkgroupScope: |
92 | return "workgroup" ; |
93 | case SyncScope::HIPAgent: |
94 | case SyncScope::OpenCLDevice: |
95 | case SyncScope::DeviceScope: |
96 | return "device" ; |
97 | case SyncScope::SystemScope: |
98 | case SyncScope::HIPSystem: |
99 | case SyncScope::OpenCLAllSVMDevices: |
100 | return "" ; |
101 | } |
102 | return "" ; |
103 | } |
} // end anonymous namespace
105 | |
106 | void CommonSPIRABIInfo::setCCs() { |
107 | assert(getRuntimeCC() == llvm::CallingConv::C); |
108 | RuntimeCC = llvm::CallingConv::SPIR_FUNC; |
109 | } |
110 | |
111 | ABIArgInfo SPIRVABIInfo::classifyReturnType(QualType RetTy) const { |
112 | if (getTarget().getTriple().getVendor() != llvm::Triple::AMD) |
113 | return DefaultABIInfo::classifyReturnType(RetTy); |
  if (!isAggregateTypeForABI(RetTy) || getRecordArgABI(RetTy, getCXXABI()))
115 | return DefaultABIInfo::classifyReturnType(RetTy); |
116 | |
117 | if (const RecordType *RT = RetTy->getAs<RecordType>()) { |
118 | const RecordDecl *RD = RT->getDecl(); |
119 | if (RD->hasFlexibleArrayMember()) |
120 | return DefaultABIInfo::classifyReturnType(RetTy); |
121 | } |
122 | |
  // TODO: The AMDGPU ABI is non-trivial to represent in SPIR-V; in order to
  // avoid encoding various architecture-specific bits here, we return
  // everything as direct to retain type info for things like aggregates, for
  // later perusal when translating back to LLVM / lowering in the BE. This is
  // also why we disable flattening, as the outcomes can mismatch between
  // SPIR-V and AMDGPU. This will be revisited / optimised in the future.
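  // Illustrative example: for `struct Pair { float X; float Y; };` the return
  // value stays a single direct `%struct.Pair` here rather than being
  // flattened into two scalar floats.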
  return ABIArgInfo::getDirect(CGT.ConvertType(RetTy), /*Offset=*/0u,
                               /*Padding=*/nullptr, /*CanBeFlattened=*/false);
130 | } |
131 | |
132 | ABIArgInfo SPIRVABIInfo::classifyKernelArgumentType(QualType Ty) const { |
133 | if (getContext().getLangOpts().CUDAIsDevice) { |
134 | // Coerce pointer arguments with default address space to CrossWorkGroup |
135 | // pointers for HIPSPV/CUDASPV. When the language mode is HIP/CUDA, the |
136 | // SPIRTargetInfo maps cuda_device to SPIR-V's CrossWorkGroup address space. |
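    // Illustrative example: for a HIP kernel `__global__ void k(int *P)`
    // compiled for spirv64, `P` is lowered from the default (generic) address
    // space to a CrossWorkGroup pointer, e.g. `ptr addrspace(1)` in the
    // kernel's IR signature.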
    llvm::Type *LTy = CGT.ConvertType(Ty);
    auto DefaultAS = getContext().getTargetAddressSpace(LangAS::Default);
    auto GlobalAS = getContext().getTargetAddressSpace(LangAS::cuda_device);
    auto *PtrTy = llvm::dyn_cast<llvm::PointerType>(LTy);
    if (PtrTy && PtrTy->getAddressSpace() == DefaultAS) {
      LTy = llvm::PointerType::get(PtrTy->getContext(), GlobalAS);
      return ABIArgInfo::getDirect(LTy, /*Offset=*/0, /*Padding=*/nullptr,
                                   /*CanBeFlattened=*/false);
    }
145 | |
    if (isAggregateTypeForABI(Ty)) {
147 | if (getTarget().getTriple().getVendor() == llvm::Triple::AMD) |
148 | // TODO: The AMDGPU kernel ABI passes aggregates byref, which is not |
149 | // currently expressible in SPIR-V; SPIR-V passes aggregates byval, |
150 | // which the AMDGPU kernel ABI does not allow. Passing aggregates as |
151 | // direct works around this impedance mismatch, as it retains type info |
152 | // and can be correctly handled, post reverse-translation, by the AMDGPU |
153 | // BE, which has to support this CC for legacy OpenCL purposes. It can |
154 | // be brittle and does lead to performance degradation in certain |
155 | // pathological cases. This will be revisited / optimised in the future, |
156 | // once a way to deal with the byref/byval impedance mismatch is |
157 | // identified. |
        return ABIArgInfo::getDirect(LTy, /*Offset=*/0, /*Padding=*/nullptr,
                                     /*CanBeFlattened=*/false);
159 | // Force copying aggregate type in kernel arguments by value when |
160 | // compiling CUDA targeting SPIR-V. This is required for the object |
161 | // copied to be valid on the device. |
162 | // This behavior follows the CUDA spec |
163 | // https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#global-function-argument-processing, |
164 | // and matches the NVPTX implementation. TODO: hardcoding to 0 should be |
165 | // revisited if HIPSPV / byval starts making use of the AS of an indirect |
166 | // arg. |
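      // Illustrative example: with `struct S { int Data[16]; };`, a CUDA
      // kernel `__global__ void k(S Arg)` receives `Arg` as
      // `ptr byval(%struct.S)`.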
      return getNaturalAlignIndirect(Ty, /*AddrSpace=*/0, /*ByVal=*/true);
168 | } |
169 | } |
170 | return classifyArgumentType(Ty); |
171 | } |
172 | |
173 | ABIArgInfo SPIRVABIInfo::classifyArgumentType(QualType Ty) const { |
174 | if (getTarget().getTriple().getVendor() != llvm::Triple::AMD) |
    return DefaultABIInfo::classifyArgumentType(Ty);
  if (!isAggregateTypeForABI(Ty))
    return DefaultABIInfo::classifyArgumentType(Ty);
178 | |
179 | // Records with non-trivial destructors/copy-constructors should not be |
180 | // passed by value. |
  if (auto RAA = getRecordArgABI(Ty, getCXXABI()))
    return getNaturalAlignIndirect(Ty, getDataLayout().getAllocaAddrSpace(),
                                   RAA == CGCXXABI::RAA_DirectInMemory);
184 | |
185 | if (const RecordType *RT = Ty->getAs<RecordType>()) { |
186 | const RecordDecl *RD = RT->getDecl(); |
187 | if (RD->hasFlexibleArrayMember()) |
      return DefaultABIInfo::classifyArgumentType(Ty);
189 | } |
190 | |
  return ABIArgInfo::getDirect(CGT.ConvertType(Ty), /*Offset=*/0u,
                               /*Padding=*/nullptr, /*CanBeFlattened=*/false);
192 | } |
193 | |
194 | void SPIRVABIInfo::computeInfo(CGFunctionInfo &FI) const { |
  // The logic is the same as in DefaultABIInfo, with an exception for the
  // handling of kernel arguments.
197 | llvm::CallingConv::ID CC = FI.getCallingConvention(); |
198 | |
199 | if (!getCXXABI().classifyReturnType(FI)) |
    FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
201 | |
202 | for (auto &I : FI.arguments()) { |
203 | if (CC == llvm::CallingConv::SPIR_KERNEL) { |
      I.info = classifyKernelArgumentType(I.type);
    } else {
      I.info = classifyArgumentType(I.type);
207 | } |
208 | } |
209 | } |
210 | |
211 | namespace clang { |
212 | namespace CodeGen { |
213 | void computeSPIRKernelABIInfo(CodeGenModule &CGM, CGFunctionInfo &FI) { |
214 | if (CGM.getTarget().getTriple().isSPIRV()) |
215 | SPIRVABIInfo(CGM.getTypes()).computeInfo(FI); |
216 | else |
217 | CommonSPIRABIInfo(CGM.getTypes()).computeInfo(FI); |
218 | } |
219 | } |
220 | } |
221 | |
222 | unsigned CommonSPIRTargetCodeGenInfo::getOpenCLKernelCallingConv() const { |
223 | return llvm::CallingConv::SPIR_KERNEL; |
224 | } |
225 | |
226 | void SPIRVTargetCodeGenInfo::setCUDAKernelCallingConvention( |
227 | const FunctionType *&FT) const { |
228 | // Convert HIP kernels to SPIR-V kernels. |
229 | if (getABIInfo().getContext().getLangOpts().HIP) { |
230 | FT = getABIInfo().getContext().adjustFunctionType( |
        FT, FT->getExtInfo().withCallingConv(CC_DeviceKernel));
232 | return; |
233 | } |
234 | } |
235 | |
236 | void CommonSPIRTargetCodeGenInfo::setOCLKernelStubCallingConvention( |
237 | const FunctionType *&FT) const { |
238 | FT = getABIInfo().getContext().adjustFunctionType( |
      FT, FT->getExtInfo().withCallingConv(CC_SpirFunction));
240 | } |
241 | |
242 | LangAS |
243 | SPIRVTargetCodeGenInfo::getGlobalVarAddressSpace(CodeGenModule &CGM, |
244 | const VarDecl *D) const { |
245 | assert(!CGM.getLangOpts().OpenCL && |
246 | !(CGM.getLangOpts().CUDA && CGM.getLangOpts().CUDAIsDevice) && |
247 | "Address space agnostic languages only" ); |
248 | // If we're here it means that we're using the SPIRDefIsGen ASMap, hence for |
249 | // the global AS we can rely on either cuda_device or sycl_global to be |
250 | // correct; however, since this is not a CUDA Device context, we use |
251 | // sycl_global to prevent confusion with the assertion. |
  LangAS DefaultGlobalAS = getLangASFromTargetAS(
      CGM.getContext().getTargetAddressSpace(LangAS::sycl_global));
254 | if (!D) |
255 | return DefaultGlobalAS; |
256 | |
257 | LangAS AddrSpace = D->getType().getAddressSpace(); |
258 | if (AddrSpace != LangAS::Default) |
259 | return AddrSpace; |
260 | |
261 | return DefaultGlobalAS; |
262 | } |
263 | |
264 | void SPIRVTargetCodeGenInfo::setTargetAttributes( |
265 | const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &M) const { |
266 | if (!M.getLangOpts().HIP || |
267 | M.getTarget().getTriple().getVendor() != llvm::Triple::AMD) |
268 | return; |
269 | if (GV->isDeclaration()) |
270 | return; |
271 | |
  auto F = dyn_cast<llvm::Function>(GV);
273 | if (!F) |
274 | return; |
275 | |
  auto FD = dyn_cast_or_null<FunctionDecl>(D);
277 | if (!FD) |
278 | return; |
279 | if (!FD->hasAttr<CUDAGlobalAttr>()) |
280 | return; |
281 | |
282 | unsigned N = M.getLangOpts().GPUMaxThreadsPerBlock; |
283 | if (auto FlatWGS = FD->getAttr<AMDGPUFlatWorkGroupSizeAttr>()) |
284 | N = FlatWGS->getMax()->EvaluateKnownConstInt(M.getContext()).getExtValue(); |
285 | |
286 | // We encode the maximum flat WG size in the first component of the 3D |
287 | // max_work_group_size attribute, which will get reverse translated into the |
288 | // original AMDGPU attribute when targeting AMDGPU. |
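  // Illustrative example: with the default GPUMaxThreadsPerBlock of 1024 and
  // no explicit attribute, the kernel is annotated with
  //   !max_work_group_size !{i32 1024, i32 1, i32 1}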
289 | auto Int32Ty = llvm::IntegerType::getInt32Ty(C&: M.getLLVMContext()); |
290 | llvm::Metadata *AttrMDArgs[] = { |
291 | llvm::ConstantAsMetadata::get(C: llvm::ConstantInt::get(Ty: Int32Ty, V: N)), |
292 | llvm::ConstantAsMetadata::get(C: llvm::ConstantInt::get(Ty: Int32Ty, V: 1)), |
293 | llvm::ConstantAsMetadata::get(C: llvm::ConstantInt::get(Ty: Int32Ty, V: 1))}; |
294 | |
295 | F->setMetadata(Kind: "max_work_group_size" , |
296 | Node: llvm::MDNode::get(Context&: M.getLLVMContext(), MDs: AttrMDArgs)); |
297 | } |
298 | |
299 | llvm::SyncScope::ID |
300 | SPIRVTargetCodeGenInfo::getLLVMSyncScopeID(const LangOptions &, SyncScope Scope, |
301 | llvm::AtomicOrdering, |
302 | llvm::LLVMContext &Ctx) const { |
  return Ctx.getOrInsertSyncScopeID(mapClangSyncScopeToLLVM(Scope));
304 | } |
305 | |
306 | /// Construct a SPIR-V target extension type for the given OpenCL image type. |
307 | static llvm::Type *getSPIRVImageType(llvm::LLVMContext &Ctx, StringRef BaseType, |
308 | StringRef OpenCLName, |
309 | unsigned AccessQualifier) { |
  // These parameters correspond to the operands of OpTypeImage (see
  // https://registry.khronos.org/SPIR-V/specs/unified1/SPIRV.html#OpTypeImage
  // for more details). The first six integer parameters all default to 0 and
  // are set to 1 only for the image types that require it. The seventh
  // integer parameter is the access qualifier, which is tacked on at the end.
316 | SmallVector<unsigned, 7> IntParams = {0, 0, 0, 0, 0, 0}; |
317 | |
318 | // Choose the dimension of the image--this corresponds to the Dim enum in |
319 | // SPIR-V (first integer parameter of OpTypeImage). |
  if (OpenCLName.starts_with("image2d"))
    IntParams[0] = 1; // 2D
  else if (OpenCLName.starts_with("image3d"))
    IntParams[0] = 2; // 3D
  else if (OpenCLName == "image1d_buffer")
    IntParams[0] = 5; // Buffer
  else
    assert(OpenCLName.starts_with("image1d") && "Unknown image type");
328 | |
329 | // Set the other integer parameters of OpTypeImage if necessary. Note that the |
330 | // OpenCL image types don't provide any information for the Sampled or |
331 | // Image Format parameters. |
  if (OpenCLName.contains("_depth"))
    IntParams[1] = 1;
  if (OpenCLName.contains("_array"))
    IntParams[2] = 1;
  if (OpenCLName.contains("_msaa"))
337 | IntParams[3] = 1; |
338 | |
339 | // Access qualifier |
  IntParams.push_back(AccessQualifier);
341 | |
342 | return llvm::TargetExtType::get(Context&: Ctx, Name: BaseType, Types: {llvm::Type::getVoidTy(C&: Ctx)}, |
343 | Ints: IntParams); |
344 | } |
345 | |
346 | llvm::Type *CommonSPIRTargetCodeGenInfo::getOpenCLType(CodeGenModule &CGM, |
347 | const Type *Ty) const { |
348 | llvm::LLVMContext &Ctx = CGM.getLLVMContext(); |
  if (auto *PipeTy = dyn_cast<PipeType>(Ty))
    return llvm::TargetExtType::get(Ctx, "spirv.Pipe", {},
                                    {!PipeTy->isReadOnly()});
  if (auto *BuiltinTy = dyn_cast<BuiltinType>(Ty)) {
353 | enum AccessQualifier : unsigned { AQ_ro = 0, AQ_wo = 1, AQ_rw = 2 }; |
354 | switch (BuiltinTy->getKind()) { |
355 | #define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \ |
356 | case BuiltinType::Id: \ |
357 | return getSPIRVImageType(Ctx, "spirv.Image", #ImgType, AQ_##Suffix); |
358 | #include "clang/Basic/OpenCLImageTypes.def" |
    case BuiltinType::OCLSampler:
      return llvm::TargetExtType::get(Ctx, "spirv.Sampler");
    case BuiltinType::OCLEvent:
      return llvm::TargetExtType::get(Ctx, "spirv.Event");
    case BuiltinType::OCLClkEvent:
      return llvm::TargetExtType::get(Ctx, "spirv.DeviceEvent");
    case BuiltinType::OCLQueue:
      return llvm::TargetExtType::get(Ctx, "spirv.Queue");
    case BuiltinType::OCLReserveID:
      return llvm::TargetExtType::get(Ctx, "spirv.ReserveId");
369 | #define INTEL_SUBGROUP_AVC_TYPE(Name, Id) \ |
370 | case BuiltinType::OCLIntelSubgroupAVC##Id: \ |
371 | return llvm::TargetExtType::get(Ctx, "spirv.Avc" #Id "INTEL"); |
372 | #include "clang/Basic/OpenCLExtensionTypes.def" |
373 | default: |
374 | return nullptr; |
375 | } |
376 | } |
377 | |
378 | return nullptr; |
379 | } |
380 | |
381 | // Gets a spirv.IntegralConstant or spirv.Literal. If IntegralType is present, |
382 | // returns an IntegralConstant, otherwise returns a Literal. |
383 | static llvm::Type *getInlineSpirvConstant(CodeGenModule &CGM, |
384 | llvm::Type *IntegralType, |
385 | llvm::APInt Value) { |
386 | llvm::LLVMContext &Ctx = CGM.getLLVMContext(); |
387 | |
388 | // Convert the APInt value to an array of uint32_t words |
389 | llvm::SmallVector<uint32_t> Words; |
390 | |
  while (Value.ugt(0)) {
    uint32_t Word = Value.trunc(32).getZExtValue();
    Value.lshrInPlace(32);

    Words.push_back(Word);
396 | } |
  if (Words.empty())
    Words.push_back(0);
399 | |
400 | if (IntegralType) |
401 | return llvm::TargetExtType::get(Context&: Ctx, Name: "spirv.IntegralConstant" , |
402 | Types: {IntegralType}, Ints: Words); |
403 | return llvm::TargetExtType::get(Context&: Ctx, Name: "spirv.Literal" , Types: {}, Ints: Words); |
404 | } |
405 | |
406 | static llvm::Type *getInlineSpirvType(CodeGenModule &CGM, |
407 | const HLSLInlineSpirvType *SpirvType) { |
408 | llvm::LLVMContext &Ctx = CGM.getLLVMContext(); |
409 | |
410 | llvm::SmallVector<llvm::Type *> Operands; |
411 | |
412 | for (auto &Operand : SpirvType->getOperands()) { |
413 | using SpirvOperandKind = SpirvOperand::SpirvOperandKind; |
414 | |
415 | llvm::Type *Result = nullptr; |
416 | switch (Operand.getKind()) { |
417 | case SpirvOperandKind::ConstantId: { |
418 | llvm::Type *IntegralType = |
          CGM.getTypes().ConvertType(Operand.getResultType());
420 | llvm::APInt Value = Operand.getValue(); |
421 | |
422 | Result = getInlineSpirvConstant(CGM, IntegralType, Value); |
423 | break; |
424 | } |
425 | case SpirvOperandKind::Literal: { |
426 | llvm::APInt Value = Operand.getValue(); |
      Result = getInlineSpirvConstant(CGM, /*IntegralType=*/nullptr, Value);
428 | break; |
429 | } |
430 | case SpirvOperandKind::TypeId: { |
431 | QualType TypeOperand = Operand.getResultType(); |
432 | if (auto *RT = TypeOperand->getAs<RecordType>()) { |
433 | auto *RD = RT->getDecl(); |
434 | assert(RD->isCompleteDefinition() && |
435 | "Type completion should have been required in Sema" ); |
436 | |
437 | const FieldDecl *HandleField = RD->findFirstNamedDataMember(); |
438 | if (HandleField) { |
439 | QualType ResourceType = HandleField->getType(); |
440 | if (ResourceType->getAs<HLSLAttributedResourceType>()) { |
441 | TypeOperand = ResourceType; |
442 | } |
443 | } |
444 | } |
      Result = CGM.getTypes().ConvertType(TypeOperand);
446 | break; |
447 | } |
448 | default: |
449 | llvm_unreachable("HLSLInlineSpirvType had invalid operand!" ); |
450 | break; |
451 | } |
452 | |
453 | assert(Result); |
    Operands.push_back(Result);
455 | } |
456 | |
457 | return llvm::TargetExtType::get(Context&: Ctx, Name: "spirv.Type" , Types: Operands, |
458 | Ints: {SpirvType->getOpcode(), SpirvType->getSize(), |
459 | SpirvType->getAlignment()}); |
460 | } |
461 | |
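// Illustrative examples of the lowering performed below (assumed, for
// orientation only):
//   Buffer<float>         -> target("spirv.Image", float, 5, 2, 0, 0, 1, 0)
//   RWBuffer<float>       -> target("spirv.Image", float, 5, 2, 0, 0, 2, 0)
//   StructuredBuffer<S>   -> target("spirv.VulkanBuffer", [0 x %S], 12, 0)
//   RWStructuredBuffer<S> -> target("spirv.VulkanBuffer", [0 x %S], 12, 1)
//   cbuffer (struct S)    -> target("spirv.VulkanBuffer",
//                                   target("spirv.Layout", %S, ...), 2, 0)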
462 | llvm::Type *CommonSPIRTargetCodeGenInfo::getHLSLType( |
463 | CodeGenModule &CGM, const Type *Ty, |
464 | const SmallVector<int32_t> *Packoffsets) const { |
465 | llvm::LLVMContext &Ctx = CGM.getLLVMContext(); |
466 | |
  if (auto *SpirvType = dyn_cast<HLSLInlineSpirvType>(Ty))
468 | return getInlineSpirvType(CGM, SpirvType); |
469 | |
  auto *ResType = dyn_cast<HLSLAttributedResourceType>(Ty);
471 | if (!ResType) |
472 | return nullptr; |
473 | |
474 | const HLSLAttributedResourceType::Attributes &ResAttrs = ResType->getAttrs(); |
475 | switch (ResAttrs.ResourceClass) { |
476 | case llvm::dxil::ResourceClass::UAV: |
477 | case llvm::dxil::ResourceClass::SRV: { |
478 | // TypedBuffer and RawBuffer both need element type |
479 | QualType ContainedTy = ResType->getContainedType(); |
480 | if (ContainedTy.isNull()) |
481 | return nullptr; |
482 | |
483 | assert(!ResAttrs.IsROV && |
484 | "Rasterizer order views not implemented for SPIR-V yet" ); |
485 | |
    llvm::Type *ElemType = CGM.getTypes().ConvertType(ContainedTy);
    if (!ResAttrs.RawBuffer) {
      // Typed (non-raw) buffers are lowered to SPIR-V image types.
      return getSPIRVImageTypeFromHLSLResource(ResAttrs, ElemType, Ctx);
    }
490 | } |
491 | |
    llvm::ArrayType *RuntimeArrayType = llvm::ArrayType::get(ElemType, 0);
    uint32_t StorageClass = /* StorageBuffer storage class */ 12;
    bool IsWritable = ResAttrs.ResourceClass == llvm::dxil::ResourceClass::UAV;
    return llvm::TargetExtType::get(Ctx, "spirv.VulkanBuffer",
                                    {RuntimeArrayType},
                                    {StorageClass, IsWritable});
498 | } |
499 | case llvm::dxil::ResourceClass::CBuffer: { |
500 | QualType ContainedTy = ResType->getContainedType(); |
501 | if (ContainedTy.isNull() || !ContainedTy->isStructureType()) |
502 | return nullptr; |
503 | |
    llvm::Type *BufferLayoutTy =
        HLSLBufferLayoutBuilder(CGM, "spirv.Layout")
            .createLayoutType(ContainedTy->getAsStructureType(), Packoffsets);
    uint32_t StorageClass = /* Uniform storage class */ 2;
    return llvm::TargetExtType::get(Ctx, "spirv.VulkanBuffer", {BufferLayoutTy},
                                    {StorageClass, false});
511 | } |
512 | case llvm::dxil::ResourceClass::Sampler: |
513 | return llvm::TargetExtType::get(Context&: Ctx, Name: "spirv.Sampler" ); |
514 | } |
515 | return nullptr; |
516 | } |
517 | |
518 | llvm::Type *CommonSPIRTargetCodeGenInfo::getSPIRVImageTypeFromHLSLResource( |
519 | const HLSLAttributedResourceType::Attributes &attributes, |
520 | llvm::Type *ElementType, llvm::LLVMContext &Ctx) const { |
521 | |
522 | if (ElementType->isVectorTy()) |
523 | ElementType = ElementType->getScalarType(); |
524 | |
525 | assert((ElementType->isIntegerTy() || ElementType->isFloatingPointTy()) && |
526 | "The element type for a SPIR-V resource must be a scalar integer or " |
527 | "floating point type." ); |
528 | |
529 | // These parameters correspond to the operands to the OpTypeImage SPIR-V |
530 | // instruction. See |
531 | // https://registry.khronos.org/SPIR-V/specs/unified1/SPIRV.html#OpTypeImage. |
532 | SmallVector<unsigned, 6> IntParams(6, 0); |
533 | |
534 | // Dim |
535 | // For now we assume everything is a buffer. |
536 | IntParams[0] = 5; |
537 | |
538 | // Depth |
539 | // HLSL does not indicate if it is a depth texture or not, so we use unknown. |
540 | IntParams[1] = 2; |
541 | |
542 | // Arrayed |
543 | IntParams[2] = 0; |
544 | |
545 | // MS |
546 | IntParams[3] = 0; |
547 | |
548 | // Sampled |
549 | IntParams[4] = |
550 | attributes.ResourceClass == llvm::dxil::ResourceClass::UAV ? 2 : 1; |
551 | |
552 | // Image format. |
553 | // Setting to unknown for now. |
554 | IntParams[5] = 0; |
555 | |
556 | return llvm::TargetExtType::get(Context&: Ctx, Name: "spirv.Image" , Types: {ElementType}, Ints: IntParams); |
557 | } |
558 | |
559 | std::unique_ptr<TargetCodeGenInfo> |
560 | CodeGen::createCommonSPIRTargetCodeGenInfo(CodeGenModule &CGM) { |
561 | return std::make_unique<CommonSPIRTargetCodeGenInfo>(args&: CGM.getTypes()); |
562 | } |
563 | |
564 | std::unique_ptr<TargetCodeGenInfo> |
565 | CodeGen::createSPIRVTargetCodeGenInfo(CodeGenModule &CGM) { |
566 | return std::make_unique<SPIRVTargetCodeGenInfo>(args&: CGM.getTypes()); |
567 | } |
568 | |