//===- Hexagon.cpp --------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "ABIInfoImpl.h"
#include "TargetInfo.h"

using namespace clang;
using namespace clang::CodeGen;

//===----------------------------------------------------------------------===//
// Hexagon ABI Implementation
//===----------------------------------------------------------------------===//

namespace {

class HexagonABIInfo : public DefaultABIInfo {
public:
  HexagonABIInfo(CodeGenTypes &CGT) : DefaultABIInfo(CGT) {}

private:
  ABIArgInfo classifyReturnType(QualType RetTy) const;
  ABIArgInfo classifyArgumentType(QualType RetTy) const;
  ABIArgInfo classifyArgumentType(QualType RetTy, unsigned *RegsLeft) const;

  void computeInfo(CGFunctionInfo &FI) const override;

  Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                    QualType Ty) const override;
  Address EmitVAArgFromMemory(CodeGenFunction &CFG, Address VAListAddr,
                              QualType Ty) const;
  Address EmitVAArgForHexagon(CodeGenFunction &CFG, Address VAListAddr,
                              QualType Ty) const;
  Address EmitVAArgForHexagonLinux(CodeGenFunction &CFG, Address VAListAddr,
                                   QualType Ty) const;
};

class HexagonTargetCodeGenInfo : public TargetCodeGenInfo {
public:
  HexagonTargetCodeGenInfo(CodeGenTypes &CGT)
      : TargetCodeGenInfo(std::make_unique<HexagonABIInfo>(CGT)) {}

  int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
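    // R29 is the stack pointer register on Hexagon.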
    return 29;
  }

  void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
                           CodeGen::CodeGenModule &GCM) const override {
    if (GV->isDeclaration())
      return;
    const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D);
    if (!FD)
      return;
  }
};

} // namespace

void HexagonABIInfo::computeInfo(CGFunctionInfo &FI) const {
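  // Up to six 32-bit registers (r0-r5) are used for argument passing; 64-bit
  // values take an aligned register pair. Whatever does not fit is passed on
  // the stack.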
  unsigned RegsLeft = 6;
  if (!getCXXABI().classifyReturnType(FI))
    FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
  for (auto &I : FI.arguments())
    I.info = classifyArgumentType(I.type, &RegsLeft);
}

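// Try to allocate registers for an argument of the given bit-size. Returns
// true if the argument can be passed in the remaining registers (updating
// *RegsLeft), and false if it has to go on the stack.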
static bool HexagonAdjustRegsLeft(uint64_t Size, unsigned *RegsLeft) {
  assert(Size <= 64 && "Not expecting to pass arguments larger than 64 bits"
                       " through registers");

  if (*RegsLeft == 0)
    return false;

  if (Size <= 32) {
    (*RegsLeft)--;
    return true;
  }

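  // A 64-bit argument is passed in an even-aligned register pair; if the next
  // free register is odd, it is skipped (round RegsLeft down to even first).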
  if (2 <= (*RegsLeft & (~1U))) {
    *RegsLeft = (*RegsLeft & (~1U)) - 2;
    return true;
  }

  // The next available register was r5 but the candidate is wider than 32
  // bits, so it has to go on the stack. However, we still consume r5.
  if (*RegsLeft == 1)
    *RegsLeft = 0;

  return false;
}

ABIArgInfo HexagonABIInfo::classifyArgumentType(QualType Ty,
                                                unsigned *RegsLeft) const {
  if (!isAggregateTypeForABI(Ty)) {
    // Treat an enum type as its underlying type.
    if (const EnumType *EnumTy = Ty->getAs<EnumType>())
      Ty = EnumTy->getDecl()->getIntegerType();

    uint64_t Size = getContext().getTypeSize(Ty);
    if (Size <= 64)
      HexagonAdjustRegsLeft(Size, RegsLeft);

    if (Size > 64 && Ty->isBitIntType())
      return getNaturalAlignIndirect(Ty, /*ByVal=*/true);

    return isPromotableIntegerTypeForABI(Ty) ? ABIArgInfo::getExtend(Ty)
                                             : ABIArgInfo::getDirect();
  }

  if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
    return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);

  // Ignore empty records.
  if (isEmptyRecord(getContext(), Ty, true))
    return ABIArgInfo::getIgnore();

  uint64_t Size = getContext().getTypeSize(Ty);
  unsigned Align = getContext().getTypeAlign(Ty);

  if (Size > 64)
    return getNaturalAlignIndirect(Ty, /*ByVal=*/true);

  if (HexagonAdjustRegsLeft(Size, RegsLeft))
    Align = Size <= 32 ? 32 : 64;
  if (Size <= Align) {
    // Pass in the smallest viable integer type.
    Size = llvm::bit_ceil(Size);
    return ABIArgInfo::getDirect(llvm::Type::getIntNTy(getVMContext(), Size));
  }
  return DefaultABIInfo::classifyArgumentType(Ty);
}

ABIArgInfo HexagonABIInfo::classifyReturnType(QualType RetTy) const {
  if (RetTy->isVoidType())
    return ABIArgInfo::getIgnore();

  const TargetInfo &T = CGT.getTarget();
  uint64_t Size = getContext().getTypeSize(RetTy);

  if (RetTy->getAs<VectorType>()) {
    // HVX vectors are returned in vector registers or register pairs.
    if (T.hasFeature("hvx")) {
      assert(T.hasFeature("hvx-length64b") || T.hasFeature("hvx-length128b"));
      uint64_t VecSize = T.hasFeature("hvx-length64b") ? 64*8 : 128*8;
      if (Size == VecSize || Size == 2*VecSize)
        return ABIArgInfo::getDirectInReg();
    }
    // Large vector types should be returned via memory.
    if (Size > 64)
      return getNaturalAlignIndirect(RetTy);
  }

  if (!isAggregateTypeForABI(RetTy)) {
    // Treat an enum type as its underlying type.
    if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
      RetTy = EnumTy->getDecl()->getIntegerType();

    if (Size > 64 && RetTy->isBitIntType())
      return getNaturalAlignIndirect(RetTy, /*ByVal=*/false);

    return isPromotableIntegerTypeForABI(RetTy) ? ABIArgInfo::getExtend(RetTy)
                                                : ABIArgInfo::getDirect();
  }

  if (isEmptyRecord(getContext(), RetTy, true))
    return ABIArgInfo::getIgnore();

  // Aggregates <= 8 bytes are returned in registers, other aggregates
  // are returned indirectly.
  if (Size <= 64) {
    // Return in the smallest viable integer type.
    Size = llvm::bit_ceil(Size);
    return ABIArgInfo::getDirect(llvm::Type::getIntNTy(getVMContext(), Size));
  }
  return getNaturalAlignIndirect(RetTy, /*ByVal=*/true);
}

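// Fetch a va_arg from the overflow (stack) area: align the overflow pointer
// if the argument needs more than 4-byte alignment, compute the argument's
// address there, and advance the pointer past it.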
Address HexagonABIInfo::EmitVAArgFromMemory(CodeGenFunction &CGF,
                                            Address VAListAddr,
                                            QualType Ty) const {
  // Load the overflow area pointer.
  Address __overflow_area_pointer_p =
      CGF.Builder.CreateStructGEP(VAListAddr, 2, "__overflow_area_pointer_p");
  llvm::Value *__overflow_area_pointer = CGF.Builder.CreateLoad(
      __overflow_area_pointer_p, "__overflow_area_pointer");

  uint64_t Align = CGF.getContext().getTypeAlign(Ty) / 8;
  if (Align > 4) {
    // Alignment should be a power of 2.
    assert((Align & (Align - 1)) == 0 && "Alignment is not power of 2!");

    // overflow_arg_area = (overflow_arg_area + align - 1) & -align;
    llvm::Value *Offset = llvm::ConstantInt::get(CGF.Int64Ty, Align - 1);

    // Add offset to the current pointer to access the argument.
    __overflow_area_pointer =
        CGF.Builder.CreateGEP(CGF.Int8Ty, __overflow_area_pointer, Offset);
    llvm::Value *AsInt =
        CGF.Builder.CreatePtrToInt(__overflow_area_pointer, CGF.Int32Ty);

    // Create a mask which should be "AND"ed
    // with (overflow_arg_area + align - 1).
    llvm::Value *Mask = llvm::ConstantInt::get(CGF.Int32Ty, -(int)Align);
    __overflow_area_pointer = CGF.Builder.CreateIntToPtr(
        CGF.Builder.CreateAnd(AsInt, Mask), __overflow_area_pointer->getType(),
        "__overflow_area_pointer.align");
  }

  // Get the type of the argument from memory and bitcast
  // overflow area pointer to the argument type.
  llvm::Type *PTy = CGF.ConvertTypeForMem(Ty);
  Address AddrTyped =
      Address(__overflow_area_pointer, PTy, CharUnits::fromQuantity(Align));

  // Round up to the minimum stack alignment for varargs which is 4 bytes.
  uint64_t Offset = llvm::alignTo(CGF.getContext().getTypeSize(Ty) / 8, 4);

  __overflow_area_pointer = CGF.Builder.CreateGEP(
      CGF.Int8Ty, __overflow_area_pointer,
      llvm::ConstantInt::get(CGF.Int32Ty, Offset),
      "__overflow_area_pointer.next");
  CGF.Builder.CreateStore(__overflow_area_pointer, __overflow_area_pointer_p);

  return AddrTyped;
}

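// va_arg lowering used when not targeting Musl: va_list is a single pointer
// into the argument area, advanced past each argument (rounded up to at least
// 4 bytes).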
Address HexagonABIInfo::EmitVAArgForHexagon(CodeGenFunction &CGF,
                                            Address VAListAddr,
                                            QualType Ty) const {
  // FIXME: Need to handle alignment
  llvm::Type *BP = CGF.Int8PtrTy;
  CGBuilderTy &Builder = CGF.Builder;
  Address VAListAddrAsBPP = VAListAddr.withElementType(BP);
  llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur");
  // Handle address alignment for type alignment > 32 bits.
  uint64_t TyAlign = CGF.getContext().getTypeAlign(Ty) / 8;
  if (TyAlign > 4) {
    assert((TyAlign & (TyAlign - 1)) == 0 && "Alignment is not power of 2!");
    llvm::Value *AddrAsInt = Builder.CreatePtrToInt(Addr, CGF.Int32Ty);
    AddrAsInt = Builder.CreateAdd(AddrAsInt, Builder.getInt32(TyAlign - 1));
    AddrAsInt = Builder.CreateAnd(AddrAsInt, Builder.getInt32(~(TyAlign - 1)));
    Addr = Builder.CreateIntToPtr(AddrAsInt, BP);
  }
  Address AddrTyped =
      Address(Addr, CGF.ConvertType(Ty), CharUnits::fromQuantity(TyAlign));

  uint64_t Offset = llvm::alignTo(CGF.getContext().getTypeSize(Ty) / 8, 4);
  llvm::Value *NextAddr = Builder.CreateGEP(
      CGF.Int8Ty, Addr, llvm::ConstantInt::get(CGF.Int32Ty, Offset), "ap.next");
  Builder.CreateStore(NextAddr, VAListAddrAsBPP);

  return AddrTyped;
}

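// On Hexagon Linux (Musl), va_list is a structure of three pointers:
//   0: __current_saved_reg_area_pointer - next free slot in the register
//      save area
//   1: __saved_reg_area_end_pointer     - end of the register save area
//   2: __overflow_area_pointer          - next argument slot on the stack
// An argument is fetched from the register save area if it still fits there,
// otherwise from the overflow area.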
Address HexagonABIInfo::EmitVAArgForHexagonLinux(CodeGenFunction &CGF,
                                                 Address VAListAddr,
                                                 QualType Ty) const {
  int ArgSize = CGF.getContext().getTypeSize(Ty) / 8;

  if (ArgSize > 8)
    return EmitVAArgFromMemory(CGF, VAListAddr, Ty);

  // Here we have to check whether the argument is in the register area or in
  // the overflow area: if the saved register area pointer plus the argument
  // size (rounded up to its alignment) is greater than the saved register
  // area end pointer, the argument is in the overflow area.
  unsigned RegsLeft = 6;
  Ty = CGF.getContext().getCanonicalType(Ty);
  (void)classifyArgumentType(Ty, &RegsLeft);

  llvm::BasicBlock *MaybeRegBlock = CGF.createBasicBlock("vaarg.maybe_reg");
  llvm::BasicBlock *InRegBlock = CGF.createBasicBlock("vaarg.in_reg");
  llvm::BasicBlock *OnStackBlock = CGF.createBasicBlock("vaarg.on_stack");
  llvm::BasicBlock *ContBlock = CGF.createBasicBlock("vaarg.end");

  // Get the rounded size of the argument. GCC does not allow varargs of
  // size < 4 bytes; we follow the same logic here.
  ArgSize = (CGF.getContext().getTypeSize(Ty) <= 32) ? 4 : 8;
  int ArgAlign = (CGF.getContext().getTypeSize(Ty) <= 32) ? 4 : 8;

  // The argument may be in the saved register area.
  CGF.EmitBlock(MaybeRegBlock);

  // Load the current saved register area pointer.
  Address __current_saved_reg_area_pointer_p = CGF.Builder.CreateStructGEP(
      VAListAddr, 0, "__current_saved_reg_area_pointer_p");
  llvm::Value *__current_saved_reg_area_pointer = CGF.Builder.CreateLoad(
      __current_saved_reg_area_pointer_p, "__current_saved_reg_area_pointer");

  // Load the saved register area end pointer.
  Address __saved_reg_area_end_pointer_p = CGF.Builder.CreateStructGEP(
      VAListAddr, 1, "__saved_reg_area_end_pointer_p");
  llvm::Value *__saved_reg_area_end_pointer = CGF.Builder.CreateLoad(
      __saved_reg_area_end_pointer_p, "__saved_reg_area_end_pointer");

  // If the argument is larger than 4 bytes, align the current saved register
  // area pointer to 8 bytes.
  if (ArgAlign > 4) {

    llvm::Value *__current_saved_reg_area_pointer_int =
        CGF.Builder.CreatePtrToInt(__current_saved_reg_area_pointer,
                                   CGF.Int32Ty);

    __current_saved_reg_area_pointer_int = CGF.Builder.CreateAdd(
        __current_saved_reg_area_pointer_int,
        llvm::ConstantInt::get(CGF.Int32Ty, (ArgAlign - 1)),
        "align_current_saved_reg_area_pointer");

    __current_saved_reg_area_pointer_int =
        CGF.Builder.CreateAnd(__current_saved_reg_area_pointer_int,
                              llvm::ConstantInt::get(CGF.Int32Ty, -ArgAlign),
                              "align_current_saved_reg_area_pointer");

    __current_saved_reg_area_pointer =
        CGF.Builder.CreateIntToPtr(__current_saved_reg_area_pointer_int,
                                   __current_saved_reg_area_pointer->getType(),
                                   "align_current_saved_reg_area_pointer");
  }

  llvm::Value *__new_saved_reg_area_pointer =
      CGF.Builder.CreateGEP(CGF.Int8Ty, __current_saved_reg_area_pointer,
                            llvm::ConstantInt::get(CGF.Int32Ty, ArgSize),
                            "__new_saved_reg_area_pointer");

  llvm::Value *UsingStack = nullptr;
  UsingStack = CGF.Builder.CreateICmpSGT(__new_saved_reg_area_pointer,
                                         __saved_reg_area_end_pointer);

  CGF.Builder.CreateCondBr(UsingStack, OnStackBlock, InRegBlock);

  // Argument in saved register area.
  // Implement the block where the argument is in the register save area.
  CGF.EmitBlock(InRegBlock);

  llvm::Type *PTy = CGF.ConvertType(Ty);
  llvm::Value *__saved_reg_area_p = CGF.Builder.CreateBitCast(
      __current_saved_reg_area_pointer, llvm::PointerType::getUnqual(PTy));

  CGF.Builder.CreateStore(__new_saved_reg_area_pointer,
                          __current_saved_reg_area_pointer_p);

  CGF.EmitBranch(ContBlock);

  // Argument in overflow area.
  // Implement the block where the argument is in the overflow area.
  CGF.EmitBlock(OnStackBlock);

  // Load the overflow area pointer.
  Address __overflow_area_pointer_p =
      CGF.Builder.CreateStructGEP(VAListAddr, 2, "__overflow_area_pointer_p");
  llvm::Value *__overflow_area_pointer = CGF.Builder.CreateLoad(
      __overflow_area_pointer_p, "__overflow_area_pointer");

  // Align the overflow area pointer according to the alignment of the
  // argument.
  if (ArgAlign > 4) {
    llvm::Value *__overflow_area_pointer_int =
        CGF.Builder.CreatePtrToInt(__overflow_area_pointer, CGF.Int32Ty);

    __overflow_area_pointer_int =
        CGF.Builder.CreateAdd(__overflow_area_pointer_int,
                              llvm::ConstantInt::get(CGF.Int32Ty, ArgAlign - 1),
                              "align_overflow_area_pointer");

    __overflow_area_pointer_int =
        CGF.Builder.CreateAnd(__overflow_area_pointer_int,
                              llvm::ConstantInt::get(CGF.Int32Ty, -ArgAlign),
                              "align_overflow_area_pointer");

    __overflow_area_pointer = CGF.Builder.CreateIntToPtr(
        __overflow_area_pointer_int, __overflow_area_pointer->getType(),
        "align_overflow_area_pointer");
  }

  // Get the pointer to the next argument in the overflow area and store it
  // back to the overflow area pointer.
  llvm::Value *__new_overflow_area_pointer = CGF.Builder.CreateGEP(
      CGF.Int8Ty, __overflow_area_pointer,
      llvm::ConstantInt::get(CGF.Int32Ty, ArgSize),
      "__overflow_area_pointer.next");

  CGF.Builder.CreateStore(__new_overflow_area_pointer,
                          __overflow_area_pointer_p);

  CGF.Builder.CreateStore(__new_overflow_area_pointer,
                          __current_saved_reg_area_pointer_p);

  // Bitcast the overflow area pointer to the type of argument.
  llvm::Type *OverflowPTy = CGF.ConvertTypeForMem(Ty);
  llvm::Value *__overflow_area_p = CGF.Builder.CreateBitCast(
      __overflow_area_pointer, llvm::PointerType::getUnqual(OverflowPTy));

  CGF.EmitBranch(ContBlock);

  // Get the correct pointer to load the variable argument.
  // Implement the ContBlock.
  CGF.EmitBlock(ContBlock);

  llvm::Type *MemTy = CGF.ConvertTypeForMem(Ty);
  llvm::Type *MemPTy = llvm::PointerType::getUnqual(MemTy);
  llvm::PHINode *ArgAddr = CGF.Builder.CreatePHI(MemPTy, 2, "vaarg.addr");
  ArgAddr->addIncoming(__saved_reg_area_p, InRegBlock);
  ArgAddr->addIncoming(__overflow_area_p, OnStackBlock);

  return Address(ArgAddr, MemTy, CharUnits::fromQuantity(ArgAlign));
}

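// Musl targets use the three-pointer va_list scheme handled above; all other
// Hexagon targets use the simple single-pointer scheme.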
Address HexagonABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                                  QualType Ty) const {

  if (getTarget().getTriple().isMusl())
    return EmitVAArgForHexagonLinux(CGF, VAListAddr, Ty);

  return EmitVAArgForHexagon(CGF, VAListAddr, Ty);
}

std::unique_ptr<TargetCodeGenInfo>
CodeGen::createHexagonTargetCodeGenInfo(CodeGenModule &CGM) {
  return std::make_unique<HexagonTargetCodeGenInfo>(CGM.getTypes());
}