//===---- CGObjC.cpp - Emit LLVM Code for Objective-C ---------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This contains code to emit Objective-C code as LLVM code.
//
//===----------------------------------------------------------------------===//

#include "CGDebugInfo.h"
#include "CGObjCRuntime.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "ConstantEmitter.h"
#include "TargetInfo.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Attr.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/StmtObjC.h"
#include "clang/Basic/Diagnostic.h"
#include "clang/CodeGen/CGFunctionInfo.h"
#include "clang/CodeGen/CodeGenABITypes.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/Analysis/ObjCARCUtil.h"
#include "llvm/BinaryFormat/MachO.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/InlineAsm.h"
#include <optional>
using namespace clang;
using namespace CodeGen;

typedef llvm::PointerIntPair<llvm::Value*,1,bool> TryEmitResult;
static TryEmitResult
tryEmitARCRetainScalarExpr(CodeGenFunction &CGF, const Expr *e);
static RValue AdjustObjCObjectType(CodeGenFunction &CGF,
                                   QualType ET,
                                   RValue Result);

/// Given the address of a variable of pointer type, find the correct
/// null to store into it.
static llvm::Constant *getNullForVariable(Address addr) {
  llvm::Type *type = addr.getElementType();
  return llvm::ConstantPointerNull::get(cast<llvm::PointerType>(type));
}

/// Emits an instance of NSConstantString representing the object.
llvm::Value *CodeGenFunction::EmitObjCStringLiteral(const ObjCStringLiteral *E)
{
  llvm::Constant *C =
      CGM.getObjCRuntime().GenerateConstantString(E->getString()).getPointer();
  return C;
}

/// EmitObjCBoxedExpr - This routine generates code to call
/// the appropriate expression boxing method. This will either be
/// one of +[NSNumber numberWith<Type>:], or +[NSString stringWithUTF8String:],
/// or [NSValue valueWithBytes:objCType:].
///
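/// Illustrative examples (not emitted verbatim here): '@42' lowers to a
/// message send of +[NSNumber numberWithInt:], and '@(someStruct)' to
/// +[NSValue valueWithBytes:objCType:], unless the whole literal can be
/// folded to a compile-time constant below.
///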
llvm::Value *
CodeGenFunction::EmitObjCBoxedExpr(const ObjCBoxedExpr *E) {
  // Generate the correct selector for this literal's concrete type.
  // Get the method.
  const ObjCMethodDecl *BoxingMethod = E->getBoxingMethod();
  const Expr *SubExpr = E->getSubExpr();

  if (E->isExpressibleAsConstantInitializer()) {
    ConstantEmitter ConstEmitter(CGM);
    return ConstEmitter.tryEmitAbstract(E, E->getType());
  }

  assert(BoxingMethod->isClassMethod() && "BoxingMethod must be a class method");
  Selector Sel = BoxingMethod->getSelector();

  // Generate a reference to the class pointer, which will be the receiver.
  // Assumes that the method was introduced in the class that should be
  // messaged (avoids pulling it out of the result type).
  CGObjCRuntime &Runtime = CGM.getObjCRuntime();
  const ObjCInterfaceDecl *ClassDecl = BoxingMethod->getClassInterface();
  llvm::Value *Receiver = Runtime.GetClass(*this, ClassDecl);

  CallArgList Args;
  const ParmVarDecl *ArgDecl = *BoxingMethod->param_begin();
  QualType ArgQT = ArgDecl->getType().getUnqualifiedType();

  // ObjCBoxedExpr supports boxing of structs and unions
  // via [NSValue valueWithBytes:objCType:]
  const QualType ValueType(SubExpr->getType().getCanonicalType());
  if (ValueType->isObjCBoxableRecordType()) {
    // Emit CodeGen for first parameter
    // and cast value to correct type
    Address Temporary = CreateMemTemp(SubExpr->getType());
    EmitAnyExprToMem(SubExpr, Temporary, Qualifiers(), /*isInit*/ true);
    llvm::Value *BitCast = Builder.CreateBitCast(
        Temporary.emitRawPointer(*this), ConvertType(ArgQT));
    Args.add(RValue::get(BitCast), ArgQT);

    // Create char array to store type encoding
    std::string Str;
    getContext().getObjCEncodingForType(ValueType, Str);
    llvm::Constant *GV = CGM.GetAddrOfConstantCString(Str).getPointer();

    // Cast type encoding to correct type
    const ParmVarDecl *EncodingDecl = BoxingMethod->parameters()[1];
    QualType EncodingQT = EncodingDecl->getType().getUnqualifiedType();
    llvm::Value *Cast = Builder.CreateBitCast(GV, ConvertType(EncodingQT));

    Args.add(RValue::get(Cast), EncodingQT);
  } else {
    Args.add(EmitAnyExpr(SubExpr), ArgQT);
  }

  RValue result = Runtime.GenerateMessageSend(
      *this, ReturnValueSlot(), BoxingMethod->getReturnType(), Sel, Receiver,
      Args, ClassDecl, BoxingMethod);
  return Builder.CreateBitCast(result.getScalarVal(),
                               ConvertType(E->getType()));
}

llvm::Value *CodeGenFunction::EmitObjCCollectionLiteral(const Expr *E,
                                    const ObjCMethodDecl *MethodWithObjects) {
  ASTContext &Context = CGM.getContext();
  const ObjCDictionaryLiteral *DLE = nullptr;
  const ObjCArrayLiteral *ALE = dyn_cast<ObjCArrayLiteral>(E);
  if (!ALE)
    DLE = cast<ObjCDictionaryLiteral>(E);

  // Optimize empty collections by referencing constants, when available.
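  // For example (illustrative), on runtimes with empty-collection support,
  // '@[]' becomes a load of the shared __NSArray0__ singleton instead of a
  // +arrayWithObjects:count: message send.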
  uint64_t NumElements =
      ALE ? ALE->getNumElements() : DLE->getNumElements();
  if (NumElements == 0 && CGM.getLangOpts().ObjCRuntime.hasEmptyCollections()) {
    StringRef ConstantName = ALE ? "__NSArray0__" : "__NSDictionary0__";
    QualType IdTy(CGM.getContext().getObjCIdType());
    llvm::Constant *Constant =
        CGM.CreateRuntimeVariable(ConvertType(IdTy), ConstantName);
    LValue LV = MakeNaturalAlignAddrLValue(Constant, IdTy);
    llvm::Value *Ptr = EmitLoadOfScalar(LV, E->getBeginLoc());
    cast<llvm::LoadInst>(Ptr)->setMetadata(
        llvm::LLVMContext::MD_invariant_load,
        llvm::MDNode::get(getLLVMContext(), std::nullopt));
    return Builder.CreateBitCast(Ptr, ConvertType(E->getType()));
  }

  // Compute the type of the array we're initializing.
  llvm::APInt APNumElements(Context.getTypeSize(Context.getSizeType()),
                            NumElements);
  QualType ElementType = Context.getObjCIdType().withConst();
  QualType ElementArrayType = Context.getConstantArrayType(
      ElementType, APNumElements, nullptr, ArraySizeModifier::Normal,
      /*IndexTypeQuals=*/0);

  // Allocate the temporary array(s).
  Address Objects = CreateMemTemp(ElementArrayType, "objects");
  Address Keys = Address::invalid();
  if (DLE)
    Keys = CreateMemTemp(ElementArrayType, "keys");

  // In ARC, we may need to do extra work to keep all the keys and
  // values alive until after the call.
  SmallVector<llvm::Value *, 16> NeededObjects;
  bool TrackNeededObjects =
      (getLangOpts().ObjCAutoRefCount &&
       CGM.getCodeGenOpts().OptimizationLevel != 0);

  // Perform the actual initialization of the array(s).
  for (uint64_t i = 0; i < NumElements; i++) {
    if (ALE) {
      // Emit the element and store it to the appropriate array slot.
      const Expr *Rhs = ALE->getElement(i);
      LValue LV = MakeAddrLValue(Builder.CreateConstArrayGEP(Objects, i),
                                 ElementType, AlignmentSource::Decl);

      llvm::Value *value = EmitScalarExpr(Rhs);
      EmitStoreThroughLValue(RValue::get(value), LV, true);
      if (TrackNeededObjects) {
        NeededObjects.push_back(value);
      }
    } else {
      // Emit the key and store it to the appropriate array slot.
      const Expr *Key = DLE->getKeyValueElement(i).Key;
      LValue KeyLV = MakeAddrLValue(Builder.CreateConstArrayGEP(Keys, i),
                                    ElementType, AlignmentSource::Decl);
      llvm::Value *keyValue = EmitScalarExpr(Key);
      EmitStoreThroughLValue(RValue::get(keyValue), KeyLV, /*isInit=*/true);

      // Emit the value and store it to the appropriate array slot.
      const Expr *Value = DLE->getKeyValueElement(i).Value;
      LValue ValueLV = MakeAddrLValue(Builder.CreateConstArrayGEP(Objects, i),
                                      ElementType, AlignmentSource::Decl);
      llvm::Value *valueValue = EmitScalarExpr(Value);
      EmitStoreThroughLValue(RValue::get(valueValue), ValueLV, /*isInit=*/true);
      if (TrackNeededObjects) {
        NeededObjects.push_back(keyValue);
        NeededObjects.push_back(valueValue);
      }
    }
  }

  // Generate the argument list.
  CallArgList Args;
  ObjCMethodDecl::param_const_iterator PI = MethodWithObjects->param_begin();
  const ParmVarDecl *argDecl = *PI++;
  QualType ArgQT = argDecl->getType().getUnqualifiedType();
  Args.add(RValue::get(Objects, *this), ArgQT);
  if (DLE) {
    argDecl = *PI++;
    ArgQT = argDecl->getType().getUnqualifiedType();
    Args.add(RValue::get(Keys, *this), ArgQT);
  }
  argDecl = *PI;
  ArgQT = argDecl->getType().getUnqualifiedType();
  llvm::Value *Count =
      llvm::ConstantInt::get(CGM.getTypes().ConvertType(ArgQT), NumElements);
  Args.add(RValue::get(Count), ArgQT);

  // Generate a reference to the class pointer, which will be the receiver.
  Selector Sel = MethodWithObjects->getSelector();
  QualType ResultType = E->getType();
  const ObjCObjectPointerType *InterfacePointerType
    = ResultType->getAsObjCInterfacePointerType();
  assert(InterfacePointerType && "Unexpected InterfacePointerType - null");
  ObjCInterfaceDecl *Class
    = InterfacePointerType->getObjectType()->getInterface();
  CGObjCRuntime &Runtime = CGM.getObjCRuntime();
  llvm::Value *Receiver = Runtime.GetClass(*this, Class);

  // Generate the message send.
  RValue result = Runtime.GenerateMessageSend(
      *this, ReturnValueSlot(), MethodWithObjects->getReturnType(), Sel,
      Receiver, Args, Class, MethodWithObjects);

  // The above message send needs these objects, but in ARC they are
  // passed in a buffer that is essentially __unsafe_unretained.
  // Therefore we must prevent the optimizer from releasing them until
  // after the call.
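  // (EmitARCIntrinsicUse lowers to the llvm.objc.clang.arc.use marker
  // intrinsic, which the ARC optimizer treats as a use of each value and
  // later removes.)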
  if (TrackNeededObjects) {
    EmitARCIntrinsicUse(NeededObjects);
  }

  return Builder.CreateBitCast(result.getScalarVal(),
                               ConvertType(E->getType()));
}

llvm::Value *CodeGenFunction::EmitObjCArrayLiteral(const ObjCArrayLiteral *E) {
  return EmitObjCCollectionLiteral(E, E->getArrayWithObjectsMethod());
}

llvm::Value *CodeGenFunction::EmitObjCDictionaryLiteral(
    const ObjCDictionaryLiteral *E) {
  return EmitObjCCollectionLiteral(E, E->getDictWithObjectsMethod());
}

/// Emit a selector.
llvm::Value *CodeGenFunction::EmitObjCSelectorExpr(const ObjCSelectorExpr *E) {
  // Untyped selector.
  // Note that this implementation allows for non-constant strings to be passed
  // as arguments to @selector(). Currently, the only thing preventing this
  // behaviour is the type checking in the front end.
  return CGM.getObjCRuntime().GetSelector(*this, E->getSelector());
}

llvm::Value *CodeGenFunction::EmitObjCProtocolExpr(const ObjCProtocolExpr *E) {
  // FIXME: This should pass the Decl not the name.
  return CGM.getObjCRuntime().GenerateProtocolRef(*this, E->getProtocol());
}

/// Adjust the type of an Objective-C object that doesn't match up due
/// to type erasure at various points, e.g., related result types or the use
/// of parameterized classes.
static RValue AdjustObjCObjectType(CodeGenFunction &CGF, QualType ExpT,
                                   RValue Result) {
  if (!ExpT->isObjCRetainableType())
    return Result;

  // If the converted types are the same, we're done.
  llvm::Type *ExpLLVMTy = CGF.ConvertType(ExpT);
  if (ExpLLVMTy == Result.getScalarVal()->getType())
    return Result;

  // We have applied a substitution. Cast the rvalue appropriately.
  return RValue::get(CGF.Builder.CreateBitCast(Result.getScalarVal(),
                                               ExpLLVMTy));
}

/// Decide whether to extend the lifetime of the receiver of a
/// returns-inner-pointer message.
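///
/// Illustrative example (an assumption, not from this file): for
///   const char *bytes = [name UTF8String];
/// where 'name' is an ordinary __strong local, the receiver is emitted as
/// objc_retainAutorelease(name) so the inner pointer remains valid.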
static bool
shouldExtendReceiverForInnerPointerMessage(const ObjCMessageExpr *message) {
  switch (message->getReceiverKind()) {

  // For a normal instance message, we should extend unless the
  // receiver is loaded from a variable with precise lifetime.
  case ObjCMessageExpr::Instance: {
    const Expr *receiver = message->getInstanceReceiver();

    // Look through OVEs.
    if (auto opaque = dyn_cast<OpaqueValueExpr>(receiver)) {
      if (opaque->getSourceExpr())
        receiver = opaque->getSourceExpr()->IgnoreParens();
    }

    const ImplicitCastExpr *ice = dyn_cast<ImplicitCastExpr>(receiver);
    if (!ice || ice->getCastKind() != CK_LValueToRValue) return true;
    receiver = ice->getSubExpr()->IgnoreParens();

    // Look through OVEs.
    if (auto opaque = dyn_cast<OpaqueValueExpr>(receiver)) {
      if (opaque->getSourceExpr())
        receiver = opaque->getSourceExpr()->IgnoreParens();
    }

    // Only __strong variables.
    if (receiver->getType().getObjCLifetime() != Qualifiers::OCL_Strong)
      return true;

    // All ivars and fields have precise lifetime.
    if (isa<MemberExpr>(receiver) || isa<ObjCIvarRefExpr>(receiver))
      return false;

    // Otherwise, check for variables.
    const DeclRefExpr *declRef = dyn_cast<DeclRefExpr>(ice->getSubExpr());
    if (!declRef) return true;
    const VarDecl *var = dyn_cast<VarDecl>(declRef->getDecl());
    if (!var) return true;

    // All variables have precise lifetime except local variables with
    // automatic storage duration that aren't specially marked.
    return (var->hasLocalStorage() &&
            !var->hasAttr<ObjCPreciseLifetimeAttr>());
  }

  case ObjCMessageExpr::Class:
  case ObjCMessageExpr::SuperClass:
    // It's never necessary for class objects.
    return false;

  case ObjCMessageExpr::SuperInstance:
    // We generally assume that 'self' lives throughout a method call.
    return false;
  }

  llvm_unreachable("invalid receiver kind");
}

/// Given an expression of ObjC pointer type, check whether it was
/// immediately loaded from an ARC __weak l-value.
static const Expr *findWeakLValue(const Expr *E) {
  assert(E->getType()->isObjCRetainableType());
  E = E->IgnoreParens();
  if (auto CE = dyn_cast<CastExpr>(E)) {
    if (CE->getCastKind() == CK_LValueToRValue) {
      if (CE->getSubExpr()->getType().getObjCLifetime() == Qualifiers::OCL_Weak)
        return CE->getSubExpr();
    }
  }

  return nullptr;
}

/// The ObjC runtime may provide entrypoints that are likely to be faster
/// than an ordinary message send of the appropriate selector.
///
/// The entrypoints are guaranteed to be equivalent to just sending the
/// corresponding message. If the entrypoint is implemented naively as just a
/// message send, using it is a trade-off: it sacrifices a few cycles of
/// overhead to save a small amount of code. However, it's possible for
/// runtimes to detect and special-case classes that use "standard"
/// behavior; if that's dynamically a large proportion of all objects, using
/// the entrypoint will also be faster than using a message send.
///
/// If the runtime does support a required entrypoint, then this method will
/// generate a call and return the resulting value. Otherwise it will return
/// std::nullopt and the caller can generate a msgSend instead.
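///
/// Illustrative mapping (subject to the checks below):
///   [Foo alloc]       -> objc_alloc(Foo)
///   [obj retain]      -> objc_retain(obj)
///   [obj autorelease] -> objc_autorelease(obj)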
static std::optional<llvm::Value *> tryGenerateSpecializedMessageSend(
    CodeGenFunction &CGF, QualType ResultType, llvm::Value *Receiver,
    const CallArgList &Args, Selector Sel, const ObjCMethodDecl *method,
    bool isClassMessage) {
  auto &CGM = CGF.CGM;
  if (!CGM.getCodeGenOpts().ObjCConvertMessagesToRuntimeCalls)
    return std::nullopt;

  auto &Runtime = CGM.getLangOpts().ObjCRuntime;
  switch (Sel.getMethodFamily()) {
  case OMF_alloc:
    if (isClassMessage &&
        Runtime.shouldUseRuntimeFunctionsForAlloc() &&
        ResultType->isObjCObjectPointerType()) {
      // [Foo alloc] -> objc_alloc(Foo) or
      // [self alloc] -> objc_alloc(self)
      if (Sel.isUnarySelector() && Sel.getNameForSlot(0) == "alloc")
        return CGF.EmitObjCAlloc(Receiver, CGF.ConvertType(ResultType));
      // [Foo allocWithZone:nil] -> objc_allocWithZone(Foo) or
      // [self allocWithZone:nil] -> objc_allocWithZone(self)
      if (Sel.isKeywordSelector() && Sel.getNumArgs() == 1 &&
          Args.size() == 1 && Args.front().getType()->isPointerType() &&
          Sel.getNameForSlot(0) == "allocWithZone") {
        const llvm::Value* arg = Args.front().getKnownRValue().getScalarVal();
        if (isa<llvm::ConstantPointerNull>(arg))
          return CGF.EmitObjCAllocWithZone(Receiver,
                                           CGF.ConvertType(ResultType));
        return std::nullopt;
      }
    }
    break;

  case OMF_autorelease:
    if (ResultType->isObjCObjectPointerType() &&
        CGM.getLangOpts().getGC() == LangOptions::NonGC &&
        Runtime.shouldUseARCFunctionsForRetainRelease())
      return CGF.EmitObjCAutorelease(Receiver, CGF.ConvertType(ResultType));
    break;

  case OMF_retain:
    if (ResultType->isObjCObjectPointerType() &&
        CGM.getLangOpts().getGC() == LangOptions::NonGC &&
        Runtime.shouldUseARCFunctionsForRetainRelease())
      return CGF.EmitObjCRetainNonBlock(Receiver, CGF.ConvertType(ResultType));
    break;

  case OMF_release:
    if (ResultType->isVoidType() &&
        CGM.getLangOpts().getGC() == LangOptions::NonGC &&
        Runtime.shouldUseARCFunctionsForRetainRelease()) {
      CGF.EmitObjCRelease(Receiver, ARCPreciseLifetime);
      return nullptr;
    }
    break;

  default:
    break;
  }
  return std::nullopt;
}

CodeGen::RValue CGObjCRuntime::GeneratePossiblySpecializedMessageSend(
    CodeGenFunction &CGF, ReturnValueSlot Return, QualType ResultType,
    Selector Sel, llvm::Value *Receiver, const CallArgList &Args,
    const ObjCInterfaceDecl *OID, const ObjCMethodDecl *Method,
    bool isClassMessage) {
  if (std::optional<llvm::Value *> SpecializedResult =
          tryGenerateSpecializedMessageSend(CGF, ResultType, Receiver, Args,
                                            Sel, Method, isClassMessage)) {
    return RValue::get(*SpecializedResult);
  }
  return GenerateMessageSend(CGF, Return, ResultType, Sel, Receiver, Args, OID,
                             Method);
}

static void AppendFirstImpliedRuntimeProtocols(
    const ObjCProtocolDecl *PD,
    llvm::UniqueVector<const ObjCProtocolDecl *> &PDs) {
  if (!PD->isNonRuntimeProtocol()) {
    const auto *Can = PD->getCanonicalDecl();
    PDs.insert(Can);
    return;
  }

  for (const auto *ParentPD : PD->protocols())
    AppendFirstImpliedRuntimeProtocols(ParentPD, PDs);
}

std::vector<const ObjCProtocolDecl *>
CGObjCRuntime::GetRuntimeProtocolList(ObjCProtocolDecl::protocol_iterator begin,
                                      ObjCProtocolDecl::protocol_iterator end) {
  std::vector<const ObjCProtocolDecl *> RuntimePds;
  llvm::DenseSet<const ObjCProtocolDecl *> NonRuntimePDs;

  for (; begin != end; ++begin) {
    const auto *It = *begin;
    const auto *Can = It->getCanonicalDecl();
    if (Can->isNonRuntimeProtocol())
      NonRuntimePDs.insert(Can);
    else
      RuntimePds.push_back(Can);
  }

  // If there are no non-runtime protocols then we can just stop now.
  if (NonRuntimePDs.empty())
    return RuntimePds;

  // Else we have to search through the non-runtime protocol's inheritance
  // hierarchy DAG, stopping whenever a branch either finds a runtime protocol
  // or a non-runtime protocol without any parents. These are the
  // "first-implied" protocols from a non-runtime protocol.
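  //
  // Illustrative example (hypothetical protocols): if non-runtime protocol
  // C inherits from <A, B>, where A is a runtime protocol and B is a
  // non-runtime protocol with no parents, the first-implied set of C is {A}.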
  llvm::UniqueVector<const ObjCProtocolDecl *> FirstImpliedProtos;
  for (const auto *PD : NonRuntimePDs)
    AppendFirstImpliedRuntimeProtocols(PD, FirstImpliedProtos);

  // Walk the Runtime list to get all protocols implied via the inclusion of
  // this protocol, e.g. all protocols it inherits from including itself.
  llvm::DenseSet<const ObjCProtocolDecl *> AllImpliedProtocols;
  for (const auto *PD : RuntimePds) {
    const auto *Can = PD->getCanonicalDecl();
    AllImpliedProtocols.insert(Can);
    Can->getImpliedProtocols(AllImpliedProtocols);
  }

  // Similar to above, walk the list of first-implied protocols to find the
  // set of all the protocols implied, excluding the listed protocols
  // themselves since they are not yet a part of the `RuntimePds` list.
  for (const auto *PD : FirstImpliedProtos) {
    PD->getImpliedProtocols(AllImpliedProtocols);
  }

  // From the first-implied list we have to finish building the final protocol
  // list. If a protocol in the first-implied list was already implied via some
  // inheritance path through some other protocols then it would be redundant
  // to add it here and so we skip over it.
  for (const auto *PD : FirstImpliedProtos) {
    if (!AllImpliedProtocols.contains(PD)) {
      RuntimePds.push_back(PD);
    }
  }

  return RuntimePds;
}

/// Instead of '[[MyClass alloc] init]', try to generate
/// 'objc_alloc_init(MyClass)'. This provides a code size improvement on the
/// caller side, as well as the optimized objc_alloc.
static std::optional<llvm::Value *>
tryEmitSpecializedAllocInit(CodeGenFunction &CGF, const ObjCMessageExpr *OME) {
  auto &Runtime = CGF.getLangOpts().ObjCRuntime;
  if (!Runtime.shouldUseRuntimeFunctionForCombinedAllocInit())
    return std::nullopt;

  // Match the exact pattern '[[MyClass alloc] init]'.
  Selector Sel = OME->getSelector();
  if (OME->getReceiverKind() != ObjCMessageExpr::Instance ||
      !OME->getType()->isObjCObjectPointerType() || !Sel.isUnarySelector() ||
      Sel.getNameForSlot(0) != "init")
    return std::nullopt;

  // Okay, this is '[receiver init]', check if 'receiver' is '[cls alloc]'
  // with 'cls' a Class.
  auto *SubOME =
      dyn_cast<ObjCMessageExpr>(OME->getInstanceReceiver()->IgnoreParenCasts());
  if (!SubOME)
    return std::nullopt;
  Selector SubSel = SubOME->getSelector();

  if (!SubOME->getType()->isObjCObjectPointerType() ||
      !SubSel.isUnarySelector() || SubSel.getNameForSlot(0) != "alloc")
    return std::nullopt;

  llvm::Value *Receiver = nullptr;
  switch (SubOME->getReceiverKind()) {
  case ObjCMessageExpr::Instance:
    if (!SubOME->getInstanceReceiver()->getType()->isObjCClassType())
      return std::nullopt;
    Receiver = CGF.EmitScalarExpr(SubOME->getInstanceReceiver());
    break;

  case ObjCMessageExpr::Class: {
    QualType ReceiverType = SubOME->getClassReceiver();
    const ObjCObjectType *ObjTy = ReceiverType->castAs<ObjCObjectType>();
    const ObjCInterfaceDecl *ID = ObjTy->getInterface();
    assert(ID && "null interface should be impossible here");
    Receiver = CGF.CGM.getObjCRuntime().GetClass(CGF, ID);
    break;
  }
  case ObjCMessageExpr::SuperInstance:
  case ObjCMessageExpr::SuperClass:
    return std::nullopt;
  }

  return CGF.EmitObjCAllocInit(Receiver, CGF.ConvertType(OME->getType()));
}

RValue CodeGenFunction::EmitObjCMessageExpr(const ObjCMessageExpr *E,
                                            ReturnValueSlot Return) {
  // Only the lookup mechanism and first two arguments of the method
  // implementation vary between runtimes. We can get the receiver and
  // arguments in generic code.

  bool isDelegateInit = E->isDelegateInitCall();

  const ObjCMethodDecl *method = E->getMethodDecl();

  // If the method is -retain, and the receiver's being loaded from
  // a __weak variable, peephole the entire operation to objc_loadWeakRetained.
  if (method && E->getReceiverKind() == ObjCMessageExpr::Instance &&
      method->getMethodFamily() == OMF_retain) {
    if (auto lvalueExpr = findWeakLValue(E->getInstanceReceiver())) {
      LValue lvalue = EmitLValue(lvalueExpr);
      llvm::Value *result = EmitARCLoadWeakRetained(lvalue.getAddress(*this));
      return AdjustObjCObjectType(*this, E->getType(), RValue::get(result));
    }
  }

  if (std::optional<llvm::Value *> Val = tryEmitSpecializedAllocInit(*this, E))
    return AdjustObjCObjectType(*this, E->getType(), RValue::get(*Val));

  // We don't retain the receiver in delegate init calls, and this is
  // safe because the receiver value is always loaded from 'self',
  // which we zero out. We don't want to Block_copy block receivers,
  // though.
  bool retainSelf =
      (!isDelegateInit &&
       CGM.getLangOpts().ObjCAutoRefCount &&
       method &&
       method->hasAttr<NSConsumesSelfAttr>());

  CGObjCRuntime &Runtime = CGM.getObjCRuntime();
  bool isSuperMessage = false;
  bool isClassMessage = false;
  ObjCInterfaceDecl *OID = nullptr;
  // Find the receiver
  QualType ReceiverType;
  llvm::Value *Receiver = nullptr;
  switch (E->getReceiverKind()) {
  case ObjCMessageExpr::Instance:
    ReceiverType = E->getInstanceReceiver()->getType();
    isClassMessage = ReceiverType->isObjCClassType();
    if (retainSelf) {
      TryEmitResult ter = tryEmitARCRetainScalarExpr(*this,
                                                     E->getInstanceReceiver());
      Receiver = ter.getPointer();
      if (ter.getInt()) retainSelf = false;
    } else
      Receiver = EmitScalarExpr(E->getInstanceReceiver());
    break;

  case ObjCMessageExpr::Class: {
    ReceiverType = E->getClassReceiver();
    OID = ReceiverType->castAs<ObjCObjectType>()->getInterface();
    assert(OID && "Invalid Objective-C class message send");
    Receiver = Runtime.GetClass(*this, OID);
    isClassMessage = true;
    break;
  }

  case ObjCMessageExpr::SuperInstance:
    ReceiverType = E->getSuperType();
    Receiver = LoadObjCSelf();
    isSuperMessage = true;
    break;

  case ObjCMessageExpr::SuperClass:
    ReceiverType = E->getSuperType();
    Receiver = LoadObjCSelf();
    isSuperMessage = true;
    isClassMessage = true;
    break;
  }

  if (retainSelf)
    Receiver = EmitARCRetainNonBlock(Receiver);

  // In ARC, we sometimes want to "extend the lifetime"
  // (i.e. retain+autorelease) of receivers of returns-inner-pointer
  // messages.
  if (getLangOpts().ObjCAutoRefCount && method &&
      method->hasAttr<ObjCReturnsInnerPointerAttr>() &&
      shouldExtendReceiverForInnerPointerMessage(E))
    Receiver = EmitARCRetainAutorelease(ReceiverType, Receiver);

  QualType ResultType = method ? method->getReturnType() : E->getType();

  CallArgList Args;
  EmitCallArgs(Args, method, E->arguments(), /*AC*/AbstractCallee(method));

  // For delegate init calls in ARC, do an unsafe store of null into
  // self. This represents the call taking direct ownership of that
  // value. We have to do this after emitting the other call
  // arguments because they might also reference self, but we don't
  // have to worry about any of them modifying self because that would
  // be an undefined read and write of an object in unordered
  // expressions.
  if (isDelegateInit) {
    assert(getLangOpts().ObjCAutoRefCount &&
           "delegate init calls should only be marked in ARC");

    // Do an unsafe store of null into self.
    Address selfAddr =
        GetAddrOfLocalVar(cast<ObjCMethodDecl>(CurCodeDecl)->getSelfDecl());
    Builder.CreateStore(getNullForVariable(selfAddr), selfAddr);
  }

  RValue result;
  if (isSuperMessage) {
    // super is only valid in an Objective-C method
    const ObjCMethodDecl *OMD = cast<ObjCMethodDecl>(CurFuncDecl);
    bool isCategoryImpl = isa<ObjCCategoryImplDecl>(OMD->getDeclContext());
    result = Runtime.GenerateMessageSendSuper(*this, Return, ResultType,
                                              E->getSelector(),
                                              OMD->getClassInterface(),
                                              isCategoryImpl,
                                              Receiver,
                                              isClassMessage,
                                              Args,
                                              method);
  } else {
    // Call runtime methods directly if we can.
    result = Runtime.GeneratePossiblySpecializedMessageSend(
        *this, Return, ResultType, E->getSelector(), Receiver, Args, OID,
        method, isClassMessage);
  }

  // For delegate init calls in ARC, implicitly store the result of
  // the call back into self. This takes ownership of the value.
  if (isDelegateInit) {
    Address selfAddr =
        GetAddrOfLocalVar(cast<ObjCMethodDecl>(CurCodeDecl)->getSelfDecl());
    llvm::Value *newSelf = result.getScalarVal();

    // The delegate return type isn't necessarily a matching type; in
    // fact, it's quite likely to be 'id'.
    llvm::Type *selfTy = selfAddr.getElementType();
    newSelf = Builder.CreateBitCast(newSelf, selfTy);

    Builder.CreateStore(newSelf, selfAddr);
  }

  return AdjustObjCObjectType(*this, E->getType(), result);
}

namespace {
struct FinishARCDealloc final : EHScopeStack::Cleanup {
  void Emit(CodeGenFunction &CGF, Flags flags) override {
    const ObjCMethodDecl *method = cast<ObjCMethodDecl>(CGF.CurCodeDecl);

    const ObjCImplDecl *impl = cast<ObjCImplDecl>(method->getDeclContext());
    const ObjCInterfaceDecl *iface = impl->getClassInterface();
    if (!iface->getSuperClass()) return;

    bool isCategory = isa<ObjCCategoryImplDecl>(impl);

    // Call [super dealloc] if we have a superclass.
    llvm::Value *self = CGF.LoadObjCSelf();

    CallArgList args;
    CGF.CGM.getObjCRuntime().GenerateMessageSendSuper(CGF, ReturnValueSlot(),
                                                      CGF.getContext().VoidTy,
                                                      method->getSelector(),
                                                      iface,
                                                      isCategory,
                                                      self,
                                                      /*is class msg*/ false,
                                                      args,
                                                      method);
  }
};
}

/// StartObjCMethod - Begin emission of an ObjCMethod. This generates
/// the LLVM function and sets the other context used by
/// CodeGenFunction.
void CodeGenFunction::StartObjCMethod(const ObjCMethodDecl *OMD,
                                      const ObjCContainerDecl *CD) {
  SourceLocation StartLoc = OMD->getBeginLoc();
  FunctionArgList args;
  // Check if we should generate debug info for this method.
  if (OMD->hasAttr<NoDebugAttr>())
    DebugInfo = nullptr; // disable debug info indefinitely for this function

  llvm::Function *Fn = CGM.getObjCRuntime().GenerateMethod(OMD, CD);

  const CGFunctionInfo &FI = CGM.getTypes().arrangeObjCMethodDeclaration(OMD);
  if (OMD->isDirectMethod()) {
    Fn->setVisibility(llvm::Function::HiddenVisibility);
    CGM.SetLLVMFunctionAttributes(OMD, FI, Fn, /*IsThunk=*/false);
    CGM.SetLLVMFunctionAttributesForDefinition(OMD, Fn);
  } else {
    CGM.SetInternalFunctionAttributes(OMD, Fn, FI);
  }

  args.push_back(OMD->getSelfDecl());
  if (!OMD->isDirectMethod())
    args.push_back(OMD->getCmdDecl());

  args.append(OMD->param_begin(), OMD->param_end());

  CurGD = OMD;
  CurEHLocation = OMD->getEndLoc();

  StartFunction(OMD, OMD->getReturnType(), Fn, FI, args,
                OMD->getLocation(), StartLoc);

  if (OMD->isDirectMethod()) {
    // This function is a direct call, it has to implement a nil check
    // on entry.
    //
    // TODO: possibly have several entry points to elide the check
    CGM.getObjCRuntime().GenerateDirectMethodPrologue(*this, Fn, OMD, CD);
  }

  // In ARC, certain methods get an extra cleanup.
  if (CGM.getLangOpts().ObjCAutoRefCount &&
      OMD->isInstanceMethod() &&
      OMD->getSelector().isUnarySelector()) {
    const IdentifierInfo *ident =
        OMD->getSelector().getIdentifierInfoForSlot(0);
    if (ident->isStr("dealloc"))
      EHStack.pushCleanup<FinishARCDealloc>(getARCCleanupKind());
  }
}

static llvm::Value *emitARCRetainLoadOfScalar(CodeGenFunction &CGF,
                                              LValue lvalue, QualType type);

/// Generate an Objective-C method. An Objective-C method is a C function with
/// its pointer, name, and types registered in the class structure.
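///
/// Illustrative lowering (name mangling is runtime-specific): a non-direct
/// method '- (void)setX:(int)x' on MyClass becomes an LLVM function like
///   define internal void @"\01-[MyClass setX:]"(ptr %self, ptr %_cmd, i32 %x)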
void CodeGenFunction::GenerateObjCMethod(const ObjCMethodDecl *OMD) {
  StartObjCMethod(OMD, OMD->getClassInterface());
  PGO.assignRegionCounters(GlobalDecl(OMD), CurFn);
  assert(isa<CompoundStmt>(OMD->getBody()));
  incrementProfileCounter(OMD->getBody());
  EmitCompoundStmtWithoutScope(*cast<CompoundStmt>(OMD->getBody()));
  FinishFunction(OMD->getBodyRBrace());
}

/// emitStructGetterCall - Call the runtime function to load a property
/// into the return value slot.
static void emitStructGetterCall(CodeGenFunction &CGF, ObjCIvarDecl *ivar,
                                 bool isAtomic, bool hasStrong) {
  ASTContext &Context = CGF.getContext();

  llvm::Value *src =
      CGF.EmitLValueForIvar(CGF.TypeOfSelfObject(), CGF.LoadObjCSelf(), ivar, 0)
          .getPointer(CGF);

  // objc_copyStruct (ReturnValue, &structIvar,
  //                  sizeof (Type of Ivar), isAtomic, false);
  CallArgList args;

  llvm::Value *dest = CGF.ReturnValue.emitRawPointer(CGF);
  args.add(RValue::get(dest), Context.VoidPtrTy);
  args.add(RValue::get(src), Context.VoidPtrTy);

  CharUnits size = CGF.getContext().getTypeSizeInChars(ivar->getType());
  args.add(RValue::get(CGF.CGM.getSize(size)), Context.getSizeType());
  args.add(RValue::get(CGF.Builder.getInt1(isAtomic)), Context.BoolTy);
  args.add(RValue::get(CGF.Builder.getInt1(hasStrong)), Context.BoolTy);

  llvm::FunctionCallee fn = CGF.CGM.getObjCRuntime().GetGetStructFunction();
  CGCallee callee = CGCallee::forDirect(fn);
  CGF.EmitCall(CGF.getTypes().arrangeBuiltinFunctionCall(Context.VoidTy, args),
               callee, ReturnValueSlot(), args);
}

/// Determine whether the given architecture supports unaligned atomic
/// accesses. They don't have to be fast, just faster than a function
/// call and a mutex.
static bool hasUnalignedAtomics(llvm::Triple::ArchType arch) {
  // FIXME: Allow unaligned atomic load/store on x86. (It is not
  // currently supported by the backend.)
  return false;
}

/// Return the maximum size that permits atomic accesses for the given
/// architecture.
static CharUnits getMaxAtomicAccessSize(CodeGenModule &CGM,
                                        llvm::Triple::ArchType arch) {
  // ARM has 8-byte atomic accesses, but it's not clear whether we
  // want to rely on them here.

  // In the default case, just assume that any size up to a pointer is
  // fine given adequate alignment.
  return CharUnits::fromQuantity(CGM.PointerSizeInBytes);
}

namespace {
  class PropertyImplStrategy {
  public:
    enum StrategyKind {
      /// The 'native' strategy is to use the architecture's provided
      /// reads and writes.
      Native,

      /// Use objc_setProperty and objc_getProperty.
      GetSetProperty,

      /// Use objc_setProperty for the setter, but use expression
      /// evaluation for the getter.
      SetPropertyAndExpressionGet,

      /// Use objc_copyStruct.
      CopyStruct,

      /// The 'expression' strategy is to emit normal assignment or
      /// lvalue-to-rvalue expressions.
      Expression
    };

    StrategyKind getKind() const { return StrategyKind(Kind); }

    bool hasStrongMember() const { return HasStrong; }
    bool isAtomic() const { return IsAtomic; }
    bool isCopy() const { return IsCopy; }

    CharUnits getIvarSize() const { return IvarSize; }
    CharUnits getIvarAlignment() const { return IvarAlignment; }

    PropertyImplStrategy(CodeGenModule &CGM,
                         const ObjCPropertyImplDecl *propImpl);

  private:
    LLVM_PREFERRED_TYPE(StrategyKind)
    unsigned Kind : 8;
    LLVM_PREFERRED_TYPE(bool)
    unsigned IsAtomic : 1;
    LLVM_PREFERRED_TYPE(bool)
    unsigned IsCopy : 1;
    LLVM_PREFERRED_TYPE(bool)
    unsigned HasStrong : 1;

    CharUnits IvarSize;
    CharUnits IvarAlignment;
  };
}

/// Pick an implementation strategy for the given property synthesis.
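///
/// Illustrative mapping under ARC on a typical 64-bit target (assumptions,
/// not exhaustive):
///   @property (nonatomic, strong) id x;  -> Expression
///   @property (copy) NSString *s;        -> GetSetProperty (atomic copy)
///   @property int n;                     -> Native (small, aligned, trivial)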
PropertyImplStrategy::PropertyImplStrategy(CodeGenModule &CGM,
                                     const ObjCPropertyImplDecl *propImpl) {
  const ObjCPropertyDecl *prop = propImpl->getPropertyDecl();
  ObjCPropertyDecl::SetterKind setterKind = prop->getSetterKind();

  IsCopy = (setterKind == ObjCPropertyDecl::Copy);
  IsAtomic = prop->isAtomic();
  HasStrong = false; // doesn't matter here.

  // Evaluate the ivar's size and alignment.
  ObjCIvarDecl *ivar = propImpl->getPropertyIvarDecl();
  QualType ivarType = ivar->getType();
  auto TInfo = CGM.getContext().getTypeInfoInChars(ivarType);
  IvarSize = TInfo.Width;
  IvarAlignment = TInfo.Align;

  // If we have a copy property, we always have to use setProperty.
  // If the property is atomic we need to use getProperty, but in
  // the nonatomic case we can just use expression.
  if (IsCopy) {
    Kind = IsAtomic ? GetSetProperty : SetPropertyAndExpressionGet;
    return;
  }

  // Handle retain.
  if (setterKind == ObjCPropertyDecl::Retain) {
    // In GC-only, there's nothing special that needs to be done.
    if (CGM.getLangOpts().getGC() == LangOptions::GCOnly) {
      // fallthrough

    // In ARC, if the property is non-atomic, use expression emission,
    // which translates to objc_storeStrong. This isn't required, but
    // it's slightly nicer.
    } else if (CGM.getLangOpts().ObjCAutoRefCount && !IsAtomic) {
      // Using standard expression emission for the setter is only
      // acceptable if the ivar is __strong, which won't be true if
      // the property is annotated with __attribute__((NSObject)).
      // TODO: falling all the way back to objc_setProperty here is
      // just laziness, though; we could still use objc_storeStrong
      // if we hacked it right.
      if (ivarType.getObjCLifetime() == Qualifiers::OCL_Strong)
        Kind = Expression;
      else
        Kind = SetPropertyAndExpressionGet;
      return;

    // Otherwise, we need to at least use setProperty. However, if
    // the property isn't atomic, we can use normal expression
    // emission for the getter.
    } else if (!IsAtomic) {
      Kind = SetPropertyAndExpressionGet;
      return;

    // Otherwise, we have to use both setProperty and getProperty.
    } else {
      Kind = GetSetProperty;
      return;
    }
  }

  // If we're not atomic, just use expression accesses.
  if (!IsAtomic) {
    Kind = Expression;
    return;
  }

  // Properties on bitfield ivars need to be emitted using expression
  // accesses even if they're nominally atomic.
  if (ivar->isBitField()) {
    Kind = Expression;
    return;
  }

  // GC-qualified or ARC-qualified ivars need to be emitted as
  // expressions. This actually works out to being atomic anyway,
  // except for ARC __strong, but that should trigger the above code.
  if (ivarType.hasNonTrivialObjCLifetime() ||
      (CGM.getLangOpts().getGC() &&
       CGM.getContext().getObjCGCAttrKind(ivarType))) {
    Kind = Expression;
    return;
  }

  // Compute whether the ivar has strong members.
  if (CGM.getLangOpts().getGC())
    if (const RecordType *recordType = ivarType->getAs<RecordType>())
      HasStrong = recordType->getDecl()->hasObjectMember();

  // We can never access structs with object members with a native
  // access, because we need to use write barriers. This is what
  // objc_copyStruct is for.
  if (HasStrong) {
    Kind = CopyStruct;
    return;
  }

  // Otherwise, this is target-dependent and based on the size and
  // alignment of the ivar.

  // If the size of the ivar is not a power of two, give up. We don't
  // want to get into the business of doing compare-and-swaps.
  if (!IvarSize.isPowerOfTwo()) {
    Kind = CopyStruct;
    return;
  }

  llvm::Triple::ArchType arch =
    CGM.getTarget().getTriple().getArch();

  // Most architectures require memory to fit within a single cache
  // line, so the alignment has to be at least the size of the access.
  // Otherwise we have to grab a lock.
  if (IvarAlignment < IvarSize && !hasUnalignedAtomics(arch)) {
    Kind = CopyStruct;
    return;
  }

  // If the ivar's size exceeds the architecture's maximum atomic
  // access size, we have to use CopyStruct.
  if (IvarSize > getMaxAtomicAccessSize(CGM, arch)) {
    Kind = CopyStruct;
    return;
  }

  // Otherwise, we can use native loads and stores.
  Kind = Native;
}

/// Generate an Objective-C property getter function.
///
/// The given Decl must be an ObjCImplementationDecl. \@synthesize
/// is illegal within a category.
void CodeGenFunction::GenerateObjCGetter(ObjCImplementationDecl *IMP,
                                         const ObjCPropertyImplDecl *PID) {
  llvm::Constant *AtomicHelperFn =
      CodeGenFunction(CGM).GenerateObjCAtomicGetterCopyHelperFunction(PID);
  ObjCMethodDecl *OMD = PID->getGetterMethodDecl();
  assert(OMD && "Invalid call to generate getter (empty method)");
  StartObjCMethod(OMD, IMP->getClassInterface());

  generateObjCGetterBody(IMP, PID, OMD, AtomicHelperFn);

  FinishFunction(OMD->getEndLoc());
}

static bool hasTrivialGetExpr(const ObjCPropertyImplDecl *propImpl) {
  const Expr *getter = propImpl->getGetterCXXConstructor();
  if (!getter) return true;

  // Sema only makes one of these when the ivar has a C++ class type,
  // so the form is pretty constrained.

  // If the property has a reference type, we might just be binding a
  // reference, in which case the result will be a gl-value. We should
  // treat this as a non-trivial operation.
  if (getter->isGLValue())
    return false;

  // If we selected a trivial copy-constructor, we're okay.
  if (const CXXConstructExpr *construct = dyn_cast<CXXConstructExpr>(getter))
    return (construct->getConstructor()->isTrivial());

  // The constructor might require cleanups (in which case it's never
  // trivial).
  assert(isa<ExprWithCleanups>(getter));
  return false;
}

/// emitCPPObjectAtomicGetterCall - Call the runtime function to
/// copy the ivar into the return slot.
static void emitCPPObjectAtomicGetterCall(CodeGenFunction &CGF,
                                          llvm::Value *returnAddr,
                                          ObjCIvarDecl *ivar,
                                          llvm::Constant *AtomicHelperFn) {
  // objc_copyCppObjectAtomic (&returnSlot, &CppObjectIvar,
  //                           AtomicHelperFn);
  CallArgList args;

  // The 1st argument is the return Slot.
  args.add(RValue::get(returnAddr), CGF.getContext().VoidPtrTy);

  // The 2nd argument is the address of the ivar.
  llvm::Value *ivarAddr =
      CGF.EmitLValueForIvar(CGF.TypeOfSelfObject(), CGF.LoadObjCSelf(), ivar, 0)
          .getPointer(CGF);
  args.add(RValue::get(ivarAddr), CGF.getContext().VoidPtrTy);

  // Third argument is the helper function.
  args.add(RValue::get(AtomicHelperFn), CGF.getContext().VoidPtrTy);

  llvm::FunctionCallee copyCppAtomicObjectFn =
      CGF.CGM.getObjCRuntime().GetCppAtomicObjectGetFunction();
  CGCallee callee = CGCallee::forDirect(copyCppAtomicObjectFn);
  CGF.EmitCall(
      CGF.getTypes().arrangeBuiltinFunctionCall(CGF.getContext().VoidTy, args),
      callee, ReturnValueSlot(), args);
}

// emitCmdValueForGetterSetterBody - Handle emitting the load necessary for
// the `_cmd` selector argument for getter/setter bodies. For direct methods,
// this returns an undefined/poison value; this matches behavior prior to
// `_cmd` being removed from the direct method ABI as the getter/setter caller
// would never load one. For non-direct methods, this emits a load of the
// implicit `_cmd` storage.
static llvm::Value *emitCmdValueForGetterSetterBody(CodeGenFunction &CGF,
                                                    ObjCMethodDecl *MD) {
  if (MD->isDirectMethod()) {
    // Direct methods do not have a `_cmd` argument. Emit an undefined/poison
    // value. This will be passed to objc_getProperty/objc_setProperty, which
    // has not appeared bothered by the `_cmd` argument being undefined before.
    llvm::Type *selType = CGF.ConvertType(CGF.getContext().getObjCSelType());
    return llvm::PoisonValue::get(selType);
  }

  return CGF.Builder.CreateLoad(CGF.GetAddrOfLocalVar(MD->getCmdDecl()), "cmd");
}

void
CodeGenFunction::generateObjCGetterBody(const ObjCImplementationDecl *classImpl,
                                        const ObjCPropertyImplDecl *propImpl,
                                        const ObjCMethodDecl *GetterMethodDecl,
                                        llvm::Constant *AtomicHelperFn) {

  ObjCIvarDecl *ivar = propImpl->getPropertyIvarDecl();

  if (ivar->getType().isNonTrivialToPrimitiveCopy() == QualType::PCK_Struct) {
    if (!AtomicHelperFn) {
      LValue Src =
          EmitLValueForIvar(TypeOfSelfObject(), LoadObjCSelf(), ivar, 0);
      LValue Dst = MakeAddrLValue(ReturnValue, ivar->getType());
      callCStructCopyConstructor(Dst, Src);
    } else {
      ObjCIvarDecl *ivar = propImpl->getPropertyIvarDecl();
      emitCPPObjectAtomicGetterCall(*this, ReturnValue.emitRawPointer(*this),
                                    ivar, AtomicHelperFn);
    }
    return;
  }

  // If there's a non-trivial 'get' expression, we just have to emit that.
  if (!hasTrivialGetExpr(propImpl)) {
    if (!AtomicHelperFn) {
      auto *ret = ReturnStmt::Create(getContext(), SourceLocation(),
                                     propImpl->getGetterCXXConstructor(),
                                     /* NRVOCandidate=*/nullptr);
      EmitReturnStmt(*ret);
    }
    else {
      ObjCIvarDecl *ivar = propImpl->getPropertyIvarDecl();
      emitCPPObjectAtomicGetterCall(*this, ReturnValue.emitRawPointer(*this),
                                    ivar, AtomicHelperFn);
    }
    return;
  }

  const ObjCPropertyDecl *prop = propImpl->getPropertyDecl();
  QualType propType = prop->getType();
  ObjCMethodDecl *getterMethod = propImpl->getGetterMethodDecl();

  // Pick an implementation strategy.
  PropertyImplStrategy strategy(CGM, propImpl);
  switch (strategy.getKind()) {
  case PropertyImplStrategy::Native: {
    // We don't need to do anything for a zero-size struct.
    if (strategy.getIvarSize().isZero())
      return;

    LValue LV = EmitLValueForIvar(TypeOfSelfObject(), LoadObjCSelf(), ivar, 0);

    // Currently, all atomic accesses have to be through integer
    // types, so there's no point in trying to pick a prettier type.
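    // For example (illustrative): an 8-byte ivar is read as a single
    // unordered-atomic i64 load and then stored to the return slot through
    // an i64-typed view of it.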
1188 | uint64_t ivarSize = getContext().toBits(CharSize: strategy.getIvarSize()); |
1189 | llvm::Type *bitcastType = llvm::Type::getIntNTy(C&: getLLVMContext(), N: ivarSize); |
1190 | |
1191 | // Perform an atomic load. This does not impose ordering constraints. |
1192 | Address ivarAddr = LV.getAddress(CGF&: *this); |
1193 | ivarAddr = ivarAddr.withElementType(ElemTy: bitcastType); |
1194 | llvm::LoadInst *load = Builder.CreateLoad(Addr: ivarAddr, Name: "load" ); |
1195 | load->setAtomic(Ordering: llvm::AtomicOrdering::Unordered); |
1196 | |
1197 | // Store that value into the return address. Doing this with a |
1198 | // bitcast is likely to produce some pretty ugly IR, but it's not |
1199 | // the *most* terrible thing in the world. |
1200 | llvm::Type *retTy = ConvertType(T: getterMethod->getReturnType()); |
1201 | uint64_t retTySize = CGM.getDataLayout().getTypeSizeInBits(Ty: retTy); |
1202 | llvm::Value *ivarVal = load; |
1203 | if (ivarSize > retTySize) { |
1204 | bitcastType = llvm::Type::getIntNTy(C&: getLLVMContext(), N: retTySize); |
1205 | ivarVal = Builder.CreateTrunc(V: load, DestTy: bitcastType); |
1206 | } |
1207 | Builder.CreateStore(Val: ivarVal, Addr: ReturnValue.withElementType(ElemTy: bitcastType)); |
1208 | |
1209 | // Make sure we don't do an autorelease. |
1210 | AutoreleaseResult = false; |
1211 | return; |
1212 | } |
1213 | |
1214 | case PropertyImplStrategy::GetSetProperty: { |
1215 | llvm::FunctionCallee getPropertyFn = |
1216 | CGM.getObjCRuntime().GetPropertyGetFunction(); |
1217 | if (!getPropertyFn) { |
1218 | CGM.ErrorUnsupported(propImpl, "Obj-C getter requiring atomic copy" ); |
1219 | return; |
1220 | } |
1221 | CGCallee callee = CGCallee::forDirect(functionPtr: getPropertyFn); |
1222 | |
1223 | // Return (ivar-type) objc_getProperty((id) self, _cmd, offset, true). |
1224 | // FIXME: Can't this be simpler? This might even be worse than the |
1225 | // corresponding gcc code. |
1226 | llvm::Value *cmd = emitCmdValueForGetterSetterBody(CGF&: *this, MD: getterMethod); |
1227 | llvm::Value *self = Builder.CreateBitCast(V: LoadObjCSelf(), DestTy: VoidPtrTy); |
1228 | llvm::Value *ivarOffset = |
1229 | EmitIvarOffsetAsPointerDiff(Interface: classImpl->getClassInterface(), Ivar: ivar); |
1230 | |
1231 | CallArgList args; |
1232 | args.add(rvalue: RValue::get(V: self), type: getContext().getObjCIdType()); |
1233 | args.add(rvalue: RValue::get(V: cmd), type: getContext().getObjCSelType()); |
1234 | args.add(rvalue: RValue::get(V: ivarOffset), type: getContext().getPointerDiffType()); |
1235 | args.add(rvalue: RValue::get(V: Builder.getInt1(V: strategy.isAtomic())), |
1236 | type: getContext().BoolTy); |
1237 | |
1238 | // FIXME: We shouldn't need to get the function info here, the |
1239 | // runtime already should have computed it to build the function. |
1240 | llvm::CallBase *CallInstruction; |
1241 | RValue RV = EmitCall(CallInfo: getTypes().arrangeBuiltinFunctionCall( |
1242 | resultType: getContext().getObjCIdType(), args), |
1243 | Callee: callee, ReturnValue: ReturnValueSlot(), Args: args, callOrInvoke: &CallInstruction); |
1244 | if (llvm::CallInst *call = dyn_cast<llvm::CallInst>(Val: CallInstruction)) |
1245 | call->setTailCall(); |
1246 | |
1247 | // We need to fix the type here. Ivars with copy & retain are |
1248 | // always objects so we don't need to worry about complex or |
1249 | // aggregates. |
1250 | RV = RValue::get(V: Builder.CreateBitCast( |
1251 | V: RV.getScalarVal(), |
1252 | DestTy: getTypes().ConvertType(T: getterMethod->getReturnType()))); |
1253 | |
1254 | EmitReturnOfRValue(RV, Ty: propType); |
1255 | |
1256 | // objc_getProperty does an autorelease, so we should suppress ours. |
1257 | AutoreleaseResult = false; |
1258 | |
1259 | return; |
1260 | } |
1261 | |
1262 | case PropertyImplStrategy::CopyStruct: |
1263 | emitStructGetterCall(CGF&: *this, ivar, isAtomic: strategy.isAtomic(), |
1264 | hasStrong: strategy.hasStrongMember()); |
1265 | return; |
1266 | |
1267 | case PropertyImplStrategy::Expression: |
1268 | case PropertyImplStrategy::SetPropertyAndExpressionGet: { |
1269 | LValue LV = EmitLValueForIvar(ObjectTy: TypeOfSelfObject(), Base: LoadObjCSelf(), Ivar: ivar, CVRQualifiers: 0); |
1270 | |
1271 | QualType ivarType = ivar->getType(); |
1272 | switch (getEvaluationKind(T: ivarType)) { |
1273 | case TEK_Complex: { |
1274 | ComplexPairTy pair = EmitLoadOfComplex(src: LV, loc: SourceLocation()); |
1275 | EmitStoreOfComplex(V: pair, dest: MakeAddrLValue(Addr: ReturnValue, T: ivarType), |
1276 | /*init*/ isInit: true); |
1277 | return; |
1278 | } |
1279 | case TEK_Aggregate: { |
1280 | // The return value slot is guaranteed to not be aliased, but |
1281 | // that's not necessarily the same as "on the stack", so |
1282 | // we still potentially need objc_memmove_collectable. |
1283 | EmitAggregateCopy(/* Dest= */ MakeAddrLValue(Addr: ReturnValue, T: ivarType), |
1284 | /* Src= */ LV, EltTy: ivarType, MayOverlap: getOverlapForReturnValue()); |
1285 | return; |
1286 | } |
1287 | case TEK_Scalar: { |
1288 | llvm::Value *value; |
1289 | if (propType->isReferenceType()) { |
1290 | value = LV.getAddress(CGF&: *this).emitRawPointer(CGF&: *this); |
1291 | } else { |
// For ARC __weak ivars, we want to load the value and
// autoreleaseReturnValue it.
1293 | if (LV.getQuals().getObjCLifetime() == Qualifiers::OCL_Weak) { |
1294 | if (getLangOpts().ObjCAutoRefCount) { |
1295 | value = emitARCRetainLoadOfScalar(CGF&: *this, lvalue: LV, type: ivarType); |
1296 | } else { |
1297 | value = EmitARCLoadWeak(addr: LV.getAddress(CGF&: *this)); |
1298 | } |
1299 | |
1300 | // Otherwise we want to do a simple load, suppressing the |
1301 | // final autorelease. |
1302 | } else { |
1303 | value = EmitLoadOfLValue(V: LV, Loc: SourceLocation()).getScalarVal(); |
1304 | AutoreleaseResult = false; |
1305 | } |
1306 | |
1307 | value = Builder.CreateBitCast( |
1308 | V: value, DestTy: ConvertType(T: GetterMethodDecl->getReturnType())); |
1309 | } |
1310 | |
1311 | EmitReturnOfRValue(RV: RValue::get(V: value), Ty: propType); |
1312 | return; |
1313 | } |
1314 | } |
1315 | llvm_unreachable("bad evaluation kind" ); |
1316 | } |
1317 | |
1318 | } |
1319 | llvm_unreachable("bad @property implementation strategy!" ); |
1320 | } |
1321 | |
1322 | /// emitStructSetterCall - Call the runtime function to store the value |
1323 | /// from the first formal parameter into the given ivar. |
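///
/// Illustrative expansion (hypothetical property, not from this file): for
///   @property struct Rect bounds;
/// the synthesized setter body amounts to
///   objc_copyStruct(&self->_bounds, &bounds, sizeof(struct Rect),
///                   /*isAtomic*/ true, /*hasStrong*/ false);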
1324 | static void emitStructSetterCall(CodeGenFunction &CGF, ObjCMethodDecl *OMD, |
1325 | ObjCIvarDecl *ivar) { |
1326 | // objc_copyStruct (&structIvar, &Arg, |
1327 | // sizeof (struct something), true, false); |
1328 | CallArgList args; |
1329 | |
1330 | // The first argument is the address of the ivar. |
1331 | llvm::Value *ivarAddr = |
1332 | CGF.EmitLValueForIvar(ObjectTy: CGF.TypeOfSelfObject(), Base: CGF.LoadObjCSelf(), Ivar: ivar, CVRQualifiers: 0) |
1333 | .getPointer(CGF); |
1334 | ivarAddr = CGF.Builder.CreateBitCast(V: ivarAddr, DestTy: CGF.Int8PtrTy); |
1335 | args.add(rvalue: RValue::get(V: ivarAddr), type: CGF.getContext().VoidPtrTy); |
1336 | |
1337 | // The second argument is the address of the parameter variable. |
1338 | ParmVarDecl *argVar = *OMD->param_begin(); |
1339 | DeclRefExpr argRef(CGF.getContext(), argVar, false, |
1340 | argVar->getType().getNonReferenceType(), VK_LValue, |
1341 | SourceLocation()); |
1342 | llvm::Value *argAddr = CGF.EmitLValue(&argRef).getPointer(CGF); |
1343 | args.add(rvalue: RValue::get(V: argAddr), type: CGF.getContext().VoidPtrTy); |
1344 | |
1345 | // The third argument is the sizeof the type. |
1346 | llvm::Value *size = |
1347 | CGF.CGM.getSize(numChars: CGF.getContext().getTypeSizeInChars(ivar->getType())); |
1348 | args.add(rvalue: RValue::get(V: size), type: CGF.getContext().getSizeType()); |
1349 | |
1350 | // The fourth argument is the 'isAtomic' flag. |
1351 | args.add(rvalue: RValue::get(V: CGF.Builder.getTrue()), type: CGF.getContext().BoolTy); |
1352 | |
1353 | // The fifth argument is the 'hasStrong' flag. |
1354 | // FIXME: should this really always be false? |
1355 | args.add(rvalue: RValue::get(V: CGF.Builder.getFalse()), type: CGF.getContext().BoolTy); |
1356 | |
1357 | llvm::FunctionCallee fn = CGF.CGM.getObjCRuntime().GetSetStructFunction(); |
1358 | CGCallee callee = CGCallee::forDirect(functionPtr: fn); |
1359 | CGF.EmitCall( |
1360 | CGF.getTypes().arrangeBuiltinFunctionCall(resultType: CGF.getContext().VoidTy, args), |
1361 | callee, ReturnValueSlot(), args); |
1362 | } |
1363 | |
/// emitCPPObjectAtomicSetterCall - Call the runtime function to store
/// the value from the first formal parameter into the given ivar, using
/// the C++ API for atomic C++ objects with non-trivial copy assignment.
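///
/// A sketch of the emitted call, assuming a non-trivial C++ ivar _obj and
/// an atomic copy helper emitted elsewhere:
///   objc_copyCppObjectAtomic(&self->_obj, &arg, copyHelperFn);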
1367 | static void emitCPPObjectAtomicSetterCall(CodeGenFunction &CGF, |
1368 | ObjCMethodDecl *OMD, |
1369 | ObjCIvarDecl *ivar, |
1370 | llvm::Constant *AtomicHelperFn) { |
1371 | // objc_copyCppObjectAtomic (&CppObjectIvar, &Arg, |
1372 | // AtomicHelperFn); |
1373 | CallArgList args; |
1374 | |
1375 | // The first argument is the address of the ivar. |
1376 | llvm::Value *ivarAddr = |
1377 | CGF.EmitLValueForIvar(ObjectTy: CGF.TypeOfSelfObject(), Base: CGF.LoadObjCSelf(), Ivar: ivar, CVRQualifiers: 0) |
1378 | .getPointer(CGF); |
1379 | args.add(rvalue: RValue::get(V: ivarAddr), type: CGF.getContext().VoidPtrTy); |
1380 | |
1381 | // The second argument is the address of the parameter variable. |
1382 | ParmVarDecl *argVar = *OMD->param_begin(); |
1383 | DeclRefExpr argRef(CGF.getContext(), argVar, false, |
1384 | argVar->getType().getNonReferenceType(), VK_LValue, |
1385 | SourceLocation()); |
1386 | llvm::Value *argAddr = CGF.EmitLValue(&argRef).getPointer(CGF); |
1387 | args.add(rvalue: RValue::get(V: argAddr), type: CGF.getContext().VoidPtrTy); |
1388 | |
1389 | // Third argument is the helper function. |
1390 | args.add(rvalue: RValue::get(V: AtomicHelperFn), type: CGF.getContext().VoidPtrTy); |
1391 | |
1392 | llvm::FunctionCallee fn = |
1393 | CGF.CGM.getObjCRuntime().GetCppAtomicObjectSetFunction(); |
1394 | CGCallee callee = CGCallee::forDirect(functionPtr: fn); |
1395 | CGF.EmitCall( |
1396 | CGF.getTypes().arrangeBuiltinFunctionCall(resultType: CGF.getContext().VoidTy, args), |
1397 | callee, ReturnValueSlot(), args); |
1398 | } |
1399 | |
1401 | static bool hasTrivialSetExpr(const ObjCPropertyImplDecl *PID) { |
1402 | Expr *setter = PID->getSetterCXXAssignment(); |
1403 | if (!setter) return true; |
1404 | |
// Sema only makes one of these when the ivar has a C++ class type,
// so the form is pretty constrained.
1407 | |
1408 | // An operator call is trivial if the function it calls is trivial. |
1409 | // This also implies that there's nothing non-trivial going on with |
1410 | // the arguments, because operator= can only be trivial if it's a |
1411 | // synthesized assignment operator and therefore both parameters are |
1412 | // references. |
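// For example (hypothetical type): with `struct S { int x; };` the
// synthesized S::operator= is trivial, so we return true and the setter
// falls through to the normal strategy-based emission; a user-provided
// operator= makes this return false and forces the C++ assignment path.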
1413 | if (CallExpr *call = dyn_cast<CallExpr>(Val: setter)) { |
1414 | if (const FunctionDecl *callee |
1415 | = dyn_cast_or_null<FunctionDecl>(Val: call->getCalleeDecl())) |
1416 | if (callee->isTrivial()) |
1417 | return true; |
1418 | return false; |
1419 | } |
1420 | |
1421 | assert(isa<ExprWithCleanups>(setter)); |
1422 | return false; |
1423 | } |
1424 | |
1425 | static bool UseOptimizedSetter(CodeGenModule &CGM) { |
1426 | if (CGM.getLangOpts().getGC() != LangOptions::NonGC) |
1427 | return false; |
1428 | return CGM.getLangOpts().ObjCRuntime.hasOptimizedSetter(); |
1429 | } |
1430 | |
1431 | void |
1432 | CodeGenFunction::generateObjCSetterBody(const ObjCImplementationDecl *classImpl, |
1433 | const ObjCPropertyImplDecl *propImpl, |
1434 | llvm::Constant *AtomicHelperFn) { |
1435 | ObjCIvarDecl *ivar = propImpl->getPropertyIvarDecl(); |
1436 | ObjCMethodDecl *setterMethod = propImpl->getSetterMethodDecl(); |
1437 | |
1438 | if (ivar->getType().isNonTrivialToPrimitiveCopy() == QualType::PCK_Struct) { |
1439 | ParmVarDecl *PVD = *setterMethod->param_begin(); |
1440 | if (!AtomicHelperFn) { |
1441 | // Call the move assignment operator instead of calling the copy |
1442 | // assignment operator and destructor. |
1443 | LValue Dst = EmitLValueForIvar(ObjectTy: TypeOfSelfObject(), Base: LoadObjCSelf(), Ivar: ivar, |
1444 | /*quals*/ CVRQualifiers: 0); |
1445 | LValue Src = MakeAddrLValue(GetAddrOfLocalVar(PVD), ivar->getType()); |
1446 | callCStructMoveAssignmentOperator(Dst, Src); |
1447 | } else { |
// If atomic, assignment is called via a locking API.
1449 | emitCPPObjectAtomicSetterCall(CGF&: *this, OMD: setterMethod, ivar, AtomicHelperFn); |
1450 | } |
// Deactivate the destructor for the setter parameter.
1452 | DeactivateCleanupBlock(Cleanup: CalleeDestructedParamCleanups[PVD], DominatingIP: AllocaInsertPt); |
1453 | return; |
1454 | } |
1455 | |
1456 | // Just use the setter expression if Sema gave us one and it's |
1457 | // non-trivial. |
1458 | if (!hasTrivialSetExpr(PID: propImpl)) { |
1459 | if (!AtomicHelperFn) |
1460 | // If non-atomic, assignment is called directly. |
1461 | EmitStmt(propImpl->getSetterCXXAssignment()); |
1462 | else |
// If atomic, assignment is called via a locking API.
1464 | emitCPPObjectAtomicSetterCall(CGF&: *this, OMD: setterMethod, ivar, |
1465 | AtomicHelperFn); |
1466 | return; |
1467 | } |
1468 | |
1469 | PropertyImplStrategy strategy(CGM, propImpl); |
1470 | switch (strategy.getKind()) { |
1471 | case PropertyImplStrategy::Native: { |
1472 | // We don't need to do anything for a zero-size struct. |
1473 | if (strategy.getIvarSize().isZero()) |
1474 | return; |
1475 | |
1476 | Address argAddr = GetAddrOfLocalVar(*setterMethod->param_begin()); |
1477 | |
1478 | LValue ivarLValue = |
1479 | EmitLValueForIvar(ObjectTy: TypeOfSelfObject(), Base: LoadObjCSelf(), Ivar: ivar, /*quals*/ CVRQualifiers: 0); |
1480 | Address ivarAddr = ivarLValue.getAddress(CGF&: *this); |
1481 | |
1482 | // Currently, all atomic accesses have to be through integer |
1483 | // types, so there's no point in trying to pick a prettier type. |
1484 | llvm::Type *castType = llvm::Type::getIntNTy( |
1485 | C&: getLLVMContext(), N: getContext().toBits(CharSize: strategy.getIvarSize())); |
1486 | |
1487 | // Cast both arguments to the chosen operation type. |
1488 | argAddr = argAddr.withElementType(ElemTy: castType); |
1489 | ivarAddr = ivarAddr.withElementType(ElemTy: castType); |
1490 | |
1491 | llvm::Value *load = Builder.CreateLoad(Addr: argAddr); |
1492 | |
1493 | // Perform an atomic store. There are no memory ordering requirements. |
1494 | llvm::StoreInst *store = Builder.CreateStore(Val: load, Addr: ivarAddr); |
1495 | store->setAtomic(Ordering: llvm::AtomicOrdering::Unordered); |
1496 | return; |
1497 | } |
1498 | |
1499 | case PropertyImplStrategy::GetSetProperty: |
1500 | case PropertyImplStrategy::SetPropertyAndExpressionGet: { |
1501 | |
1502 | llvm::FunctionCallee setOptimizedPropertyFn = nullptr; |
1503 | llvm::FunctionCallee setPropertyFn = nullptr; |
1504 | if (UseOptimizedSetter(CGM)) { |
// Targeting at least OS X 10.8 / iOS 6.0, and GC is off.
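// The optimized entry points fold the atomic/copy flags into the symbol
// name, e.g. (illustrative) objc_setProperty_atomic_copy, so no flag
// arguments are passed below.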
1506 | setOptimizedPropertyFn = |
1507 | CGM.getObjCRuntime().GetOptimizedPropertySetFunction( |
1508 | atomic: strategy.isAtomic(), copy: strategy.isCopy()); |
1509 | if (!setOptimizedPropertyFn) { |
1510 | CGM.ErrorUnsupported(propImpl, "Obj-C optimized setter - NYI" ); |
1511 | return; |
1512 | } |
1513 | } |
1514 | else { |
1515 | setPropertyFn = CGM.getObjCRuntime().GetPropertySetFunction(); |
1516 | if (!setPropertyFn) { |
1517 | CGM.ErrorUnsupported(propImpl, "Obj-C setter requiring atomic copy" ); |
1518 | return; |
1519 | } |
1520 | } |
1521 | |
1522 | // Emit objc_setProperty((id) self, _cmd, offset, arg, |
1523 | // <is-atomic>, <is-copy>). |
1524 | llvm::Value *cmd = emitCmdValueForGetterSetterBody(CGF&: *this, MD: setterMethod); |
1525 | llvm::Value *self = |
1526 | Builder.CreateBitCast(V: LoadObjCSelf(), DestTy: VoidPtrTy); |
1527 | llvm::Value *ivarOffset = |
1528 | EmitIvarOffsetAsPointerDiff(Interface: classImpl->getClassInterface(), Ivar: ivar); |
1529 | Address argAddr = GetAddrOfLocalVar(*setterMethod->param_begin()); |
1530 | llvm::Value *arg = Builder.CreateLoad(Addr: argAddr, Name: "arg" ); |
1531 | arg = Builder.CreateBitCast(V: arg, DestTy: VoidPtrTy); |
1532 | |
1533 | CallArgList args; |
1534 | args.add(rvalue: RValue::get(V: self), type: getContext().getObjCIdType()); |
1535 | args.add(rvalue: RValue::get(V: cmd), type: getContext().getObjCSelType()); |
1536 | if (setOptimizedPropertyFn) { |
1537 | args.add(rvalue: RValue::get(V: arg), type: getContext().getObjCIdType()); |
1538 | args.add(rvalue: RValue::get(V: ivarOffset), type: getContext().getPointerDiffType()); |
1539 | CGCallee callee = CGCallee::forDirect(functionPtr: setOptimizedPropertyFn); |
1540 | EmitCall(getTypes().arrangeBuiltinFunctionCall(resultType: getContext().VoidTy, args), |
1541 | callee, ReturnValueSlot(), args); |
1542 | } else { |
1543 | args.add(rvalue: RValue::get(V: ivarOffset), type: getContext().getPointerDiffType()); |
1544 | args.add(rvalue: RValue::get(V: arg), type: getContext().getObjCIdType()); |
1545 | args.add(rvalue: RValue::get(V: Builder.getInt1(V: strategy.isAtomic())), |
1546 | type: getContext().BoolTy); |
1547 | args.add(rvalue: RValue::get(V: Builder.getInt1(V: strategy.isCopy())), |
1548 | type: getContext().BoolTy); |
1549 | // FIXME: We shouldn't need to get the function info here, the runtime |
1550 | // already should have computed it to build the function. |
1551 | CGCallee callee = CGCallee::forDirect(functionPtr: setPropertyFn); |
1552 | EmitCall(getTypes().arrangeBuiltinFunctionCall(resultType: getContext().VoidTy, args), |
1553 | callee, ReturnValueSlot(), args); |
1554 | } |
1555 | |
1556 | return; |
1557 | } |
1558 | |
1559 | case PropertyImplStrategy::CopyStruct: |
1560 | emitStructSetterCall(CGF&: *this, OMD: setterMethod, ivar); |
1561 | return; |
1562 | |
1563 | case PropertyImplStrategy::Expression: |
1564 | break; |
1565 | } |
1566 | |
1567 | // Otherwise, fake up some ASTs and emit a normal assignment. |
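// Conceptually we synthesize and emit (sketch):
//   self->ivar = (CastKind)arg;
// using stack-allocated AST nodes so the normal assignment path does
// the work.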
1568 | ValueDecl *selfDecl = setterMethod->getSelfDecl(); |
1569 | DeclRefExpr self(getContext(), selfDecl, false, selfDecl->getType(), |
1570 | VK_LValue, SourceLocation()); |
1571 | ImplicitCastExpr selfLoad(ImplicitCastExpr::OnStack, selfDecl->getType(), |
1572 | CK_LValueToRValue, &self, VK_PRValue, |
1573 | FPOptionsOverride()); |
1574 | ObjCIvarRefExpr ivarRef(ivar, ivar->getType().getNonReferenceType(), |
1575 | SourceLocation(), SourceLocation(), |
1576 | &selfLoad, true, true); |
1577 | |
1578 | ParmVarDecl *argDecl = *setterMethod->param_begin(); |
1579 | QualType argType = argDecl->getType().getNonReferenceType(); |
1580 | DeclRefExpr arg(getContext(), argDecl, false, argType, VK_LValue, |
1581 | SourceLocation()); |
1582 | ImplicitCastExpr argLoad(ImplicitCastExpr::OnStack, |
1583 | argType.getUnqualifiedType(), CK_LValueToRValue, |
1584 | &arg, VK_PRValue, FPOptionsOverride()); |
1585 | |
// The property type can differ from the ivar type in some situations with
// Objective-C pointer types; we can always bit-cast the RHS in these cases.
// The following absurdity is just to ensure well-formed IR.
1589 | CastKind argCK = CK_NoOp; |
1590 | if (ivarRef.getType()->isObjCObjectPointerType()) { |
1591 | if (argLoad.getType()->isObjCObjectPointerType()) |
1592 | argCK = CK_BitCast; |
1593 | else if (argLoad.getType()->isBlockPointerType()) |
1594 | argCK = CK_BlockPointerToObjCPointerCast; |
1595 | else |
1596 | argCK = CK_CPointerToObjCPointerCast; |
1597 | } else if (ivarRef.getType()->isBlockPointerType()) { |
1598 | if (argLoad.getType()->isBlockPointerType()) |
1599 | argCK = CK_BitCast; |
1600 | else |
1601 | argCK = CK_AnyPointerToBlockPointerCast; |
1602 | } else if (ivarRef.getType()->isPointerType()) { |
1603 | argCK = CK_BitCast; |
1604 | } else if (argLoad.getType()->isAtomicType() && |
1605 | !ivarRef.getType()->isAtomicType()) { |
1606 | argCK = CK_AtomicToNonAtomic; |
1607 | } else if (!argLoad.getType()->isAtomicType() && |
1608 | ivarRef.getType()->isAtomicType()) { |
1609 | argCK = CK_NonAtomicToAtomic; |
1610 | } |
1611 | ImplicitCastExpr argCast(ImplicitCastExpr::OnStack, ivarRef.getType(), argCK, |
1612 | &argLoad, VK_PRValue, FPOptionsOverride()); |
1613 | Expr *finalArg = &argLoad; |
1614 | if (!getContext().hasSameUnqualifiedType(T1: ivarRef.getType(), |
1615 | T2: argLoad.getType())) |
1616 | finalArg = &argCast; |
1617 | |
1618 | BinaryOperator *assign = BinaryOperator::Create( |
1619 | C: getContext(), lhs: &ivarRef, rhs: finalArg, opc: BO_Assign, ResTy: ivarRef.getType(), |
1620 | VK: VK_PRValue, OK: OK_Ordinary, opLoc: SourceLocation(), FPFeatures: FPOptionsOverride()); |
1621 | EmitStmt(assign); |
1622 | } |
1623 | |
1624 | /// Generate an Objective-C property setter function. |
1625 | /// |
1626 | /// The given Decl must be an ObjCImplementationDecl. \@synthesize |
1627 | /// is illegal within a category. |
1628 | void CodeGenFunction::GenerateObjCSetter(ObjCImplementationDecl *IMP, |
1629 | const ObjCPropertyImplDecl *PID) { |
1630 | llvm::Constant *AtomicHelperFn = |
1631 | CodeGenFunction(CGM).GenerateObjCAtomicSetterCopyHelperFunction(PID); |
1632 | ObjCMethodDecl *OMD = PID->getSetterMethodDecl(); |
1633 | assert(OMD && "Invalid call to generate setter (empty method)" ); |
1634 | StartObjCMethod(OMD, CD: IMP->getClassInterface()); |
1635 | |
1636 | generateObjCSetterBody(classImpl: IMP, propImpl: PID, AtomicHelperFn); |
1637 | |
1638 | FinishFunction(EndLoc: OMD->getEndLoc()); |
1639 | } |
1640 | |
1641 | namespace { |
1642 | struct DestroyIvar final : EHScopeStack::Cleanup { |
1643 | private: |
1644 | llvm::Value *addr; |
1645 | const ObjCIvarDecl *ivar; |
1646 | CodeGenFunction::Destroyer *destroyer; |
1647 | bool useEHCleanupForArray; |
1648 | public: |
1649 | DestroyIvar(llvm::Value *addr, const ObjCIvarDecl *ivar, |
1650 | CodeGenFunction::Destroyer *destroyer, |
1651 | bool useEHCleanupForArray) |
1652 | : addr(addr), ivar(ivar), destroyer(destroyer), |
1653 | useEHCleanupForArray(useEHCleanupForArray) {} |
1654 | |
1655 | void Emit(CodeGenFunction &CGF, Flags flags) override { |
1656 | LValue lvalue |
1657 | = CGF.EmitLValueForIvar(ObjectTy: CGF.TypeOfSelfObject(), Base: addr, Ivar: ivar, /*CVR*/ CVRQualifiers: 0); |
1658 | CGF.emitDestroy(addr: lvalue.getAddress(CGF), type: ivar->getType(), destroyer, |
1659 | useEHCleanupForArray: flags.isForNormalCleanup() && useEHCleanupForArray); |
1660 | } |
1661 | }; |
1662 | } |
1663 | |
1664 | /// Like CodeGenFunction::destroyARCStrong, but do it with a call. |
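/// Emits, in sketch, objc_storeStrong(&ivar, null), which releases the
/// old value as a side effect.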
1665 | static void destroyARCStrongWithStore(CodeGenFunction &CGF, |
1666 | Address addr, |
1667 | QualType type) { |
1668 | llvm::Value *null = getNullForVariable(addr); |
1669 | CGF.EmitARCStoreStrongCall(addr, value: null, /*ignored*/ resultIgnored: true); |
1670 | } |
1671 | |
1672 | static void emitCXXDestructMethod(CodeGenFunction &CGF, |
1673 | ObjCImplementationDecl *impl) { |
1674 | CodeGenFunction::RunCleanupsScope scope(CGF); |
1675 | |
1676 | llvm::Value *self = CGF.LoadObjCSelf(); |
1677 | |
1678 | const ObjCInterfaceDecl *iface = impl->getClassInterface(); |
1679 | for (const ObjCIvarDecl *ivar = iface->all_declared_ivar_begin(); |
1680 | ivar; ivar = ivar->getNextIvar()) { |
1681 | QualType type = ivar->getType(); |
1682 | |
1683 | // Check whether the ivar is a destructible type. |
1684 | QualType::DestructionKind dtorKind = type.isDestructedType(); |
1685 | if (!dtorKind) continue; |
1686 | |
1687 | CodeGenFunction::Destroyer *destroyer = nullptr; |
1688 | |
1689 | // Use a call to objc_storeStrong to destroy strong ivars, for the |
1690 | // general benefit of the tools. |
1691 | if (dtorKind == QualType::DK_objc_strong_lifetime) { |
1692 | destroyer = destroyARCStrongWithStore; |
1693 | |
1694 | // Otherwise use the default for the destruction kind. |
1695 | } else { |
1696 | destroyer = CGF.getDestroyer(destructionKind: dtorKind); |
1697 | } |
1698 | |
1699 | CleanupKind cleanupKind = CGF.getCleanupKind(kind: dtorKind); |
1700 | |
1701 | CGF.EHStack.pushCleanup<DestroyIvar>(Kind: cleanupKind, A: self, A: ivar, A: destroyer, |
1702 | A: cleanupKind & EHCleanup); |
1703 | } |
1704 | |
1705 | assert(scope.requiresCleanups() && "nothing to do in .cxx_destruct?" ); |
1706 | } |
1707 | |
1708 | void CodeGenFunction::GenerateObjCCtorDtorMethod(ObjCImplementationDecl *IMP, |
1709 | ObjCMethodDecl *MD, |
1710 | bool ctor) { |
1711 | MD->createImplicitParams(Context&: CGM.getContext(), ID: IMP->getClassInterface()); |
1712 | StartObjCMethod(OMD: MD, CD: IMP->getClassInterface()); |
1713 | |
1714 | // Emit .cxx_construct. |
1715 | if (ctor) { |
1716 | // Suppress the final autorelease in ARC. |
1717 | AutoreleaseResult = false; |
1718 | |
1719 | for (const auto *IvarInit : IMP->inits()) { |
1720 | FieldDecl *Field = IvarInit->getAnyMember(); |
1721 | ObjCIvarDecl *Ivar = cast<ObjCIvarDecl>(Val: Field); |
1722 | LValue LV = EmitLValueForIvar(ObjectTy: TypeOfSelfObject(), |
1723 | Base: LoadObjCSelf(), Ivar, CVRQualifiers: 0); |
1724 | EmitAggExpr(E: IvarInit->getInit(), |
1725 | AS: AggValueSlot::forLValue(LV, CGF&: *this, isDestructed: AggValueSlot::IsDestructed, |
1726 | needsGC: AggValueSlot::DoesNotNeedGCBarriers, |
1727 | isAliased: AggValueSlot::IsNotAliased, |
1728 | mayOverlap: AggValueSlot::DoesNotOverlap)); |
1729 | } |
1730 | // constructor returns 'self'. |
1731 | CodeGenTypes &Types = CGM.getTypes(); |
1732 | QualType IdTy(CGM.getContext().getObjCIdType()); |
1733 | llvm::Value *SelfAsId = |
1734 | Builder.CreateBitCast(V: LoadObjCSelf(), DestTy: Types.ConvertType(T: IdTy)); |
1735 | EmitReturnOfRValue(RV: RValue::get(V: SelfAsId), Ty: IdTy); |
1736 | |
1737 | // Emit .cxx_destruct. |
1738 | } else { |
1739 | emitCXXDestructMethod(CGF&: *this, impl: IMP); |
1740 | } |
1741 | FinishFunction(); |
1742 | } |
1743 | |
1744 | llvm::Value *CodeGenFunction::LoadObjCSelf() { |
1745 | VarDecl *Self = cast<ObjCMethodDecl>(Val: CurFuncDecl)->getSelfDecl(); |
1746 | DeclRefExpr DRE(getContext(), Self, |
1747 | /*is enclosing local*/ (CurFuncDecl != CurCodeDecl), |
1748 | Self->getType(), VK_LValue, SourceLocation()); |
1749 | return EmitLoadOfScalar(lvalue: EmitDeclRefLValue(E: &DRE), Loc: SourceLocation()); |
1750 | } |
1751 | |
1752 | QualType CodeGenFunction::TypeOfSelfObject() { |
1753 | const ObjCMethodDecl *OMD = cast<ObjCMethodDecl>(Val: CurFuncDecl); |
1754 | ImplicitParamDecl *selfDecl = OMD->getSelfDecl(); |
1755 | const ObjCObjectPointerType *PTy = cast<ObjCObjectPointerType>( |
1756 | getContext().getCanonicalType(selfDecl->getType())); |
1757 | return PTy->getPointeeType(); |
1758 | } |
1759 | |
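/// Emit a fast-enumeration (for...in) loop. A rough sketch of the emitted
/// control flow, using the basic-block names created below:
///
///   count = [collection countByEnumeratingWithState:&state
///                                            objects:items count:16];
///   if (count == 0) goto forcoll.empty;
///   forcoll.loopinit: remember *state.mutationsPtr
///   forcoll.loopbody: check for mutation; load items[index]; run the body
///   forcoll.next:     if (++index < count) goto forcoll.loopbody
///   forcoll.refetch:  re-send the message; goto forcoll.loopbody unless
///                     the refreshed count is zero
///   forcoll.empty:    null out the element unless it was a declaration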
1760 | void CodeGenFunction::EmitObjCForCollectionStmt(const ObjCForCollectionStmt &S){ |
1761 | llvm::FunctionCallee EnumerationMutationFnPtr = |
1762 | CGM.getObjCRuntime().EnumerationMutationFunction(); |
1763 | if (!EnumerationMutationFnPtr) { |
1764 | CGM.ErrorUnsupported(S: &S, Type: "Obj-C fast enumeration for this runtime" ); |
1765 | return; |
1766 | } |
1767 | CGCallee EnumerationMutationFn = |
1768 | CGCallee::forDirect(functionPtr: EnumerationMutationFnPtr); |
1769 | |
1770 | CGDebugInfo *DI = getDebugInfo(); |
1771 | if (DI) |
1772 | DI->EmitLexicalBlockStart(Builder, Loc: S.getSourceRange().getBegin()); |
1773 | |
1774 | RunCleanupsScope ForScope(*this); |
1775 | |
1776 | // The local variable comes into scope immediately. |
1777 | AutoVarEmission variable = AutoVarEmission::invalid(); |
1778 | if (const DeclStmt *SD = dyn_cast<DeclStmt>(Val: S.getElement())) |
1779 | variable = EmitAutoVarAlloca(var: *cast<VarDecl>(Val: SD->getSingleDecl())); |
1780 | |
1781 | JumpDest LoopEnd = getJumpDestInCurrentScope(Name: "forcoll.end" ); |
1782 | |
1783 | // Fast enumeration state. |
1784 | QualType StateTy = CGM.getObjCFastEnumerationStateType(); |
1785 | Address StatePtr = CreateMemTemp(T: StateTy, Name: "state.ptr" ); |
1786 | EmitNullInitialization(DestPtr: StatePtr, Ty: StateTy); |
1787 | |
1788 | // Number of elements in the items array. |
1789 | static const unsigned NumItems = 16; |
1790 | |
1791 | // Fetch the countByEnumeratingWithState:objects:count: selector. |
1792 | const IdentifierInfo *II[] = { |
1793 | &CGM.getContext().Idents.get(Name: "countByEnumeratingWithState" ), |
1794 | &CGM.getContext().Idents.get(Name: "objects" ), |
1795 | &CGM.getContext().Idents.get(Name: "count" )}; |
1796 | Selector FastEnumSel = |
1797 | CGM.getContext().Selectors.getSelector(NumArgs: std::size(II), IIV: &II[0]); |
1798 | |
1799 | QualType ItemsTy = getContext().getConstantArrayType( |
1800 | EltTy: getContext().getObjCIdType(), ArySize: llvm::APInt(32, NumItems), SizeExpr: nullptr, |
1801 | ASM: ArraySizeModifier::Normal, IndexTypeQuals: 0); |
1802 | Address ItemsPtr = CreateMemTemp(T: ItemsTy, Name: "items.ptr" ); |
1803 | |
1804 | // Emit the collection pointer. In ARC, we do a retain. |
1805 | llvm::Value *Collection; |
1806 | if (getLangOpts().ObjCAutoRefCount) { |
1807 | Collection = EmitARCRetainScalarExpr(expr: S.getCollection()); |
1808 | |
1809 | // Enter a cleanup to do the release. |
1810 | EmitObjCConsumeObject(T: S.getCollection()->getType(), Ptr: Collection); |
1811 | } else { |
1812 | Collection = EmitScalarExpr(E: S.getCollection()); |
1813 | } |
1814 | |
1815 | // The 'continue' label needs to appear within the cleanup for the |
1816 | // collection object. |
1817 | JumpDest AfterBody = getJumpDestInCurrentScope(Name: "forcoll.next" ); |
1818 | |
1819 | // Send it our message: |
1820 | CallArgList Args; |
1821 | |
1822 | // The first argument is a temporary of the enumeration-state type. |
1823 | Args.add(rvalue: RValue::get(Addr: StatePtr, CGF&: *this), type: getContext().getPointerType(T: StateTy)); |
1824 | |
1825 | // The second argument is a temporary array with space for NumItems |
1826 | // pointers. We'll actually be loading elements from the array |
1827 | // pointer written into the control state; this buffer is so that |
1828 | // collections that *aren't* backed by arrays can still queue up |
1829 | // batches of elements. |
1830 | Args.add(rvalue: RValue::get(Addr: ItemsPtr, CGF&: *this), type: getContext().getPointerType(T: ItemsTy)); |
1831 | |
1832 | // The third argument is the capacity of that temporary array. |
1833 | llvm::Type *NSUIntegerTy = ConvertType(T: getContext().getNSUIntegerType()); |
1834 | llvm::Constant *Count = llvm::ConstantInt::get(Ty: NSUIntegerTy, V: NumItems); |
1835 | Args.add(rvalue: RValue::get(V: Count), type: getContext().getNSUIntegerType()); |
1836 | |
1837 | // Start the enumeration. |
1838 | RValue CountRV = |
1839 | CGM.getObjCRuntime().GenerateMessageSend(CGF&: *this, ReturnSlot: ReturnValueSlot(), |
1840 | ResultType: getContext().getNSUIntegerType(), |
1841 | Sel: FastEnumSel, Receiver: Collection, CallArgs: Args); |
1842 | |
1843 | // The initial number of objects that were returned in the buffer. |
1844 | llvm::Value *initialBufferLimit = CountRV.getScalarVal(); |
1845 | |
1846 | llvm::BasicBlock *EmptyBB = createBasicBlock(name: "forcoll.empty" ); |
1847 | llvm::BasicBlock *LoopInitBB = createBasicBlock(name: "forcoll.loopinit" ); |
1848 | |
1849 | llvm::Value *zero = llvm::Constant::getNullValue(Ty: NSUIntegerTy); |
1850 | |
1851 | // If the limit pointer was zero to begin with, the collection is |
1852 | // empty; skip all this. Set the branch weight assuming this has the same |
1853 | // probability of exiting the loop as any other loop exit. |
1854 | uint64_t EntryCount = getCurrentProfileCount(); |
1855 | Builder.CreateCondBr( |
1856 | Cond: Builder.CreateICmpEQ(LHS: initialBufferLimit, RHS: zero, Name: "iszero" ), True: EmptyBB, |
1857 | False: LoopInitBB, |
1858 | BranchWeights: createProfileWeights(TrueCount: EntryCount, FalseCount: getProfileCount(S: S.getBody()))); |
1859 | |
1860 | // Otherwise, initialize the loop. |
1861 | EmitBlock(BB: LoopInitBB); |
1862 | |
1863 | // Save the initial mutations value. This is the value at an |
1864 | // address that was written into the state object by |
1865 | // countByEnumeratingWithState:objects:count:. |
1866 | Address StateMutationsPtrPtr = |
1867 | Builder.CreateStructGEP(Addr: StatePtr, Index: 2, Name: "mutationsptr.ptr" ); |
1868 | llvm::Value *StateMutationsPtr |
1869 | = Builder.CreateLoad(Addr: StateMutationsPtrPtr, Name: "mutationsptr" ); |
1870 | |
1871 | llvm::Type *UnsignedLongTy = ConvertType(getContext().UnsignedLongTy); |
1872 | llvm::Value *initialMutations = |
1873 | Builder.CreateAlignedLoad(UnsignedLongTy, StateMutationsPtr, |
1874 | getPointerAlign(), "forcoll.initial-mutations" ); |
1875 | |
1876 | // Start looping. This is the point we return to whenever we have a |
1877 | // fresh, non-empty batch of objects. |
1878 | llvm::BasicBlock *LoopBodyBB = createBasicBlock(name: "forcoll.loopbody" ); |
1879 | EmitBlock(BB: LoopBodyBB); |
1880 | |
1881 | // The current index into the buffer. |
1882 | llvm::PHINode *index = Builder.CreatePHI(Ty: NSUIntegerTy, NumReservedValues: 3, Name: "forcoll.index" ); |
1883 | index->addIncoming(V: zero, BB: LoopInitBB); |
1884 | |
1885 | // The current buffer size. |
1886 | llvm::PHINode *count = Builder.CreatePHI(Ty: NSUIntegerTy, NumReservedValues: 3, Name: "forcoll.count" ); |
1887 | count->addIncoming(V: initialBufferLimit, BB: LoopInitBB); |
1888 | |
1889 | incrementProfileCounter(S: &S); |
1890 | |
1891 | // Check whether the mutations value has changed from where it was |
1892 | // at start. StateMutationsPtr should actually be invariant between |
1893 | // refreshes. |
1894 | StateMutationsPtr = Builder.CreateLoad(Addr: StateMutationsPtrPtr, Name: "mutationsptr" ); |
1895 | llvm::Value *currentMutations |
1896 | = Builder.CreateAlignedLoad(UnsignedLongTy, StateMutationsPtr, |
1897 | getPointerAlign(), "statemutations" ); |
1898 | |
1899 | llvm::BasicBlock *WasMutatedBB = createBasicBlock(name: "forcoll.mutated" ); |
1900 | llvm::BasicBlock *WasNotMutatedBB = createBasicBlock(name: "forcoll.notmutated" ); |
1901 | |
1902 | Builder.CreateCondBr(Cond: Builder.CreateICmpEQ(LHS: currentMutations, RHS: initialMutations), |
1903 | True: WasNotMutatedBB, False: WasMutatedBB); |
1904 | |
1905 | // If so, call the enumeration-mutation function. |
1906 | EmitBlock(BB: WasMutatedBB); |
1907 | llvm::Type *ObjCIdType = ConvertType(T: getContext().getObjCIdType()); |
1908 | llvm::Value *V = |
1909 | Builder.CreateBitCast(V: Collection, DestTy: ObjCIdType); |
1910 | CallArgList Args2; |
1911 | Args2.add(rvalue: RValue::get(V), type: getContext().getObjCIdType()); |
1912 | // FIXME: We shouldn't need to get the function info here, the runtime already |
1913 | // should have computed it to build the function. |
1914 | EmitCall( |
1915 | CGM.getTypes().arrangeBuiltinFunctionCall(resultType: getContext().VoidTy, args: Args2), |
1916 | EnumerationMutationFn, ReturnValueSlot(), Args2); |
1917 | |
1918 | // Otherwise, or if the mutation function returns, just continue. |
1919 | EmitBlock(BB: WasNotMutatedBB); |
1920 | |
1921 | // Initialize the element variable. |
1922 | RunCleanupsScope elementVariableScope(*this); |
1923 | bool elementIsVariable; |
1924 | LValue elementLValue; |
1925 | QualType elementType; |
1926 | if (const DeclStmt *SD = dyn_cast<DeclStmt>(Val: S.getElement())) { |
1927 | // Initialize the variable, in case it's a __block variable or something. |
1928 | EmitAutoVarInit(emission: variable); |
1929 | |
1930 | const VarDecl *D = cast<VarDecl>(Val: SD->getSingleDecl()); |
1931 | DeclRefExpr tempDRE(getContext(), const_cast<VarDecl *>(D), false, |
1932 | D->getType(), VK_LValue, SourceLocation()); |
1933 | elementLValue = EmitLValue(&tempDRE); |
1934 | elementType = D->getType(); |
1935 | elementIsVariable = true; |
1936 | |
1937 | if (D->isARCPseudoStrong()) |
1938 | elementLValue.getQuals().setObjCLifetime(Qualifiers::OCL_ExplicitNone); |
1939 | } else { |
1940 | elementLValue = LValue(); // suppress warning |
1941 | elementType = cast<Expr>(Val: S.getElement())->getType(); |
1942 | elementIsVariable = false; |
1943 | } |
1944 | llvm::Type *convertedElementType = ConvertType(T: elementType); |
1945 | |
1946 | // Fetch the buffer out of the enumeration state. |
1947 | // TODO: this pointer should actually be invariant between |
1948 | // refreshes, which would help us do certain loop optimizations. |
1949 | Address StateItemsPtr = |
1950 | Builder.CreateStructGEP(Addr: StatePtr, Index: 1, Name: "stateitems.ptr" ); |
1951 | llvm::Value *EnumStateItems = |
1952 | Builder.CreateLoad(Addr: StateItemsPtr, Name: "stateitems" ); |
1953 | |
1954 | // Fetch the value at the current index from the buffer. |
1955 | llvm::Value *CurrentItemPtr = Builder.CreateGEP( |
1956 | Ty: ObjCIdType, Ptr: EnumStateItems, IdxList: index, Name: "currentitem.ptr" ); |
1957 | llvm::Value *CurrentItem = |
1958 | Builder.CreateAlignedLoad(ObjCIdType, CurrentItemPtr, getPointerAlign()); |
1959 | |
1960 | if (SanOpts.has(K: SanitizerKind::ObjCCast)) { |
1961 | // Before using an item from the collection, check that the implicit cast |
1962 | // from id to the element type is valid. This is done with instrumentation |
1963 | // roughly corresponding to: |
1964 | // |
1965 | // if (![item isKindOfClass:expectedCls]) { /* emit diagnostic */ } |
1966 | const ObjCObjectPointerType *ObjPtrTy = |
1967 | elementType->getAsObjCInterfacePointerType(); |
1968 | const ObjCInterfaceType *InterfaceTy = |
1969 | ObjPtrTy ? ObjPtrTy->getInterfaceType() : nullptr; |
1970 | if (InterfaceTy) { |
1971 | SanitizerScope SanScope(this); |
1972 | auto &C = CGM.getContext(); |
1973 | assert(InterfaceTy->getDecl() && "No decl for ObjC interface type" ); |
1974 | Selector IsKindOfClassSel = GetUnarySelector(name: "isKindOfClass" , Ctx&: C); |
1975 | CallArgList IsKindOfClassArgs; |
1976 | llvm::Value *Cls = |
1977 | CGM.getObjCRuntime().GetClass(CGF&: *this, OID: InterfaceTy->getDecl()); |
1978 | IsKindOfClassArgs.add(rvalue: RValue::get(V: Cls), type: C.getObjCClassType()); |
1979 | llvm::Value *IsClass = |
1980 | CGM.getObjCRuntime() |
1981 | .GenerateMessageSend(CGF&: *this, ReturnSlot: ReturnValueSlot(), ResultType: C.BoolTy, |
1982 | Sel: IsKindOfClassSel, Receiver: CurrentItem, |
1983 | CallArgs: IsKindOfClassArgs) |
1984 | .getScalarVal(); |
1985 | llvm::Constant *StaticData[] = { |
1986 | EmitCheckSourceLocation(Loc: S.getBeginLoc()), |
1987 | EmitCheckTypeDescriptor(T: QualType(InterfaceTy, 0))}; |
1988 | EmitCheck(Checked: {{IsClass, SanitizerKind::ObjCCast}}, |
1989 | Check: SanitizerHandler::InvalidObjCCast, |
1990 | StaticArgs: ArrayRef<llvm::Constant *>(StaticData), DynamicArgs: CurrentItem); |
1991 | } |
1992 | } |
1993 | |
1994 | // Cast that value to the right type. |
1995 | CurrentItem = Builder.CreateBitCast(V: CurrentItem, DestTy: convertedElementType, |
1996 | Name: "currentitem" ); |
1997 | |
1998 | // Make sure we have an l-value. Yes, this gets evaluated every |
1999 | // time through the loop. |
2000 | if (!elementIsVariable) { |
2001 | elementLValue = EmitLValue(E: cast<Expr>(Val: S.getElement())); |
2002 | EmitStoreThroughLValue(Src: RValue::get(V: CurrentItem), Dst: elementLValue); |
2003 | } else { |
2004 | EmitStoreThroughLValue(Src: RValue::get(V: CurrentItem), Dst: elementLValue, |
2005 | /*isInit*/ true); |
2006 | } |
2007 | |
2008 | // If we do have an element variable, this assignment is the end of |
2009 | // its initialization. |
2010 | if (elementIsVariable) |
2011 | EmitAutoVarCleanups(emission: variable); |
2012 | |
2013 | // Perform the loop body, setting up break and continue labels. |
2014 | BreakContinueStack.push_back(Elt: BreakContinue(LoopEnd, AfterBody)); |
2015 | { |
2016 | RunCleanupsScope Scope(*this); |
2017 | EmitStmt(S: S.getBody()); |
2018 | } |
2019 | BreakContinueStack.pop_back(); |
2020 | |
2021 | // Destroy the element variable now. |
2022 | elementVariableScope.ForceCleanup(); |
2023 | |
2024 | // Check whether there are more elements. |
2025 | EmitBlock(BB: AfterBody.getBlock()); |
2026 | |
2027 | llvm::BasicBlock *FetchMoreBB = createBasicBlock(name: "forcoll.refetch" ); |
2028 | |
2029 | // First we check in the local buffer. |
2030 | llvm::Value *indexPlusOne = |
2031 | Builder.CreateAdd(LHS: index, RHS: llvm::ConstantInt::get(Ty: NSUIntegerTy, V: 1)); |
2032 | |
2033 | // If we haven't overrun the buffer yet, we can continue. |
2034 | // Set the branch weights based on the simplifying assumption that this is |
2035 | // like a while-loop, i.e., ignoring that the false branch fetches more |
2036 | // elements and then returns to the loop. |
2037 | Builder.CreateCondBr( |
2038 | Cond: Builder.CreateICmpULT(LHS: indexPlusOne, RHS: count), True: LoopBodyBB, False: FetchMoreBB, |
2039 | BranchWeights: createProfileWeights(TrueCount: getProfileCount(S: S.getBody()), FalseCount: EntryCount)); |
2040 | |
2041 | index->addIncoming(V: indexPlusOne, BB: AfterBody.getBlock()); |
2042 | count->addIncoming(V: count, BB: AfterBody.getBlock()); |
2043 | |
2044 | // Otherwise, we have to fetch more elements. |
2045 | EmitBlock(BB: FetchMoreBB); |
2046 | |
2047 | CountRV = |
2048 | CGM.getObjCRuntime().GenerateMessageSend(CGF&: *this, ReturnSlot: ReturnValueSlot(), |
2049 | ResultType: getContext().getNSUIntegerType(), |
2050 | Sel: FastEnumSel, Receiver: Collection, CallArgs: Args); |
2051 | |
2052 | // If we got a zero count, we're done. |
2053 | llvm::Value *refetchCount = CountRV.getScalarVal(); |
2054 | |
2055 | // (note that the message send might split FetchMoreBB) |
2056 | index->addIncoming(V: zero, BB: Builder.GetInsertBlock()); |
2057 | count->addIncoming(V: refetchCount, BB: Builder.GetInsertBlock()); |
2058 | |
2059 | Builder.CreateCondBr(Cond: Builder.CreateICmpEQ(LHS: refetchCount, RHS: zero), |
2060 | True: EmptyBB, False: LoopBodyBB); |
2061 | |
2062 | // No more elements. |
2063 | EmitBlock(BB: EmptyBB); |
2064 | |
2065 | if (!elementIsVariable) { |
2066 | // If the element was not a declaration, set it to be null. |
2067 | |
2068 | llvm::Value *null = llvm::Constant::getNullValue(Ty: convertedElementType); |
2069 | elementLValue = EmitLValue(E: cast<Expr>(Val: S.getElement())); |
2070 | EmitStoreThroughLValue(Src: RValue::get(V: null), Dst: elementLValue); |
2071 | } |
2072 | |
2073 | if (DI) |
2074 | DI->EmitLexicalBlockEnd(Builder, Loc: S.getSourceRange().getEnd()); |
2075 | |
2076 | ForScope.ForceCleanup(); |
2077 | EmitBlock(BB: LoopEnd.getBlock()); |
2078 | } |
2079 | |
2080 | void CodeGenFunction::EmitObjCAtTryStmt(const ObjCAtTryStmt &S) { |
2081 | CGM.getObjCRuntime().EmitTryStmt(CGF&: *this, S); |
2082 | } |
2083 | |
2084 | void CodeGenFunction::EmitObjCAtThrowStmt(const ObjCAtThrowStmt &S) { |
2085 | CGM.getObjCRuntime().EmitThrowStmt(CGF&: *this, S); |
2086 | } |
2087 | |
2088 | void CodeGenFunction::EmitObjCAtSynchronizedStmt( |
2089 | const ObjCAtSynchronizedStmt &S) { |
2090 | CGM.getObjCRuntime().EmitSynchronizedStmt(CGF&: *this, S); |
2091 | } |
2092 | |
2093 | namespace { |
2094 | struct CallObjCRelease final : EHScopeStack::Cleanup { |
2095 | CallObjCRelease(llvm::Value *object) : object(object) {} |
2096 | llvm::Value *object; |
2097 | |
2098 | void Emit(CodeGenFunction &CGF, Flags flags) override { |
2099 | // Releases at the end of the full-expression are imprecise. |
2100 | CGF.EmitARCRelease(value: object, precise: ARCImpreciseLifetime); |
2101 | } |
2102 | }; |
2103 | } |
2104 | |
2105 | /// Produce the code for a CK_ARCConsumeObject. Does a primitive |
2106 | /// release at the end of the full-expression. |
2107 | llvm::Value *CodeGenFunction::EmitObjCConsumeObject(QualType type, |
2108 | llvm::Value *object) { |
2109 | // If we're in a conditional branch, we need to make the cleanup |
2110 | // conditional. |
2111 | pushFullExprCleanup<CallObjCRelease>(kind: getARCCleanupKind(), A: object); |
2112 | return object; |
2113 | } |
2114 | |
2115 | llvm::Value *CodeGenFunction::EmitObjCExtendObjectLifetime(QualType type, |
2116 | llvm::Value *value) { |
2117 | return EmitARCRetainAutorelease(type, value); |
2118 | } |
2119 | |
2120 | /// Given a number of pointers, inform the optimizer that they're |
2121 | /// being intrinsically used up until this point in the program. |
2122 | void CodeGenFunction::EmitARCIntrinsicUse(ArrayRef<llvm::Value*> values) { |
2123 | llvm::Function *&fn = CGM.getObjCEntrypoints().clang_arc_use; |
2124 | if (!fn) |
2125 | fn = CGM.getIntrinsic(llvm::Intrinsic::objc_clang_arc_use); |
2126 | |
2127 | // This isn't really a "runtime" function, but as an intrinsic it |
2128 | // doesn't really matter as long as we align things up. |
2129 | EmitNounwindRuntimeCall(callee: fn, args: values); |
2130 | } |
2131 | |
2132 | /// Emit a call to "clang.arc.noop.use", which consumes the result of a call |
2133 | /// that has operand bundle "clang.arc.attachedcall". |
2134 | void CodeGenFunction::EmitARCNoopIntrinsicUse(ArrayRef<llvm::Value *> values) { |
2135 | llvm::Function *&fn = CGM.getObjCEntrypoints().clang_arc_noop_use; |
2136 | if (!fn) |
2137 | fn = CGM.getIntrinsic(llvm::Intrinsic::objc_clang_arc_noop_use); |
2138 | EmitNounwindRuntimeCall(callee: fn, args: values); |
2139 | } |
2140 | |
2141 | static void setARCRuntimeFunctionLinkage(CodeGenModule &CGM, llvm::Value *RTF) { |
2142 | if (auto *F = dyn_cast<llvm::Function>(Val: RTF)) { |
2143 | // If the target runtime doesn't naturally support ARC, emit weak |
2144 | // references to the runtime support library. We don't really |
2145 | // permit this to fail, but we need a particular relocation style. |
2146 | if (!CGM.getLangOpts().ObjCRuntime.hasNativeARC() && |
2147 | !CGM.getTriple().isOSBinFormatCOFF()) { |
2148 | F->setLinkage(llvm::Function::ExternalWeakLinkage); |
2149 | } |
2150 | } |
2151 | } |
2152 | |
2153 | static void setARCRuntimeFunctionLinkage(CodeGenModule &CGM, |
2154 | llvm::FunctionCallee RTF) { |
2155 | setARCRuntimeFunctionLinkage(CGM, RTF: RTF.getCallee()); |
2156 | } |
2157 | |
2158 | static llvm::Function *getARCIntrinsic(llvm::Intrinsic::ID IntID, |
2159 | CodeGenModule &CGM) { |
2160 | llvm::Function *fn = CGM.getIntrinsic(IID: IntID); |
2161 | setARCRuntimeFunctionLinkage(CGM, RTF: fn); |
2162 | return fn; |
2163 | } |
2164 | |
2165 | /// Perform an operation having the signature |
2166 | /// i8* (i8*) |
2167 | /// where a null input causes a no-op and returns null. |
2168 | static llvm::Value *emitARCValueOperation( |
2169 | CodeGenFunction &CGF, llvm::Value *value, llvm::Type *returnType, |
2170 | llvm::Function *&fn, llvm::Intrinsic::ID IntID, |
2171 | llvm::CallInst::TailCallKind tailKind = llvm::CallInst::TCK_None) { |
2172 | if (isa<llvm::ConstantPointerNull>(Val: value)) |
2173 | return value; |
2174 | |
2175 | if (!fn) |
2176 | fn = getARCIntrinsic(IntID, CGM&: CGF.CGM); |
2177 | |
2178 | // Cast the argument to 'id'. |
2179 | llvm::Type *origType = returnType ? returnType : value->getType(); |
2180 | value = CGF.Builder.CreateBitCast(V: value, DestTy: CGF.Int8PtrTy); |
2181 | |
2182 | // Call the function. |
2183 | llvm::CallInst *call = CGF.EmitNounwindRuntimeCall(callee: fn, args: value); |
2184 | call->setTailCallKind(tailKind); |
2185 | |
2186 | // Cast the result back to the original type. |
2187 | return CGF.Builder.CreateBitCast(V: call, DestTy: origType); |
2188 | } |
2189 | |
2190 | /// Perform an operation having the following signature: |
2191 | /// i8* (i8**) |
2192 | static llvm::Value *emitARCLoadOperation(CodeGenFunction &CGF, Address addr, |
2193 | llvm::Function *&fn, |
2194 | llvm::Intrinsic::ID IntID) { |
2195 | if (!fn) |
2196 | fn = getARCIntrinsic(IntID, CGM&: CGF.CGM); |
2197 | |
2198 | return CGF.EmitNounwindRuntimeCall(callee: fn, args: addr.emitRawPointer(CGF)); |
2199 | } |
2200 | |
2201 | /// Perform an operation having the following signature: |
2202 | /// i8* (i8**, i8*) |
2203 | static llvm::Value *emitARCStoreOperation(CodeGenFunction &CGF, Address addr, |
2204 | llvm::Value *value, |
2205 | llvm::Function *&fn, |
2206 | llvm::Intrinsic::ID IntID, |
2207 | bool ignored) { |
2208 | assert(addr.getElementType() == value->getType()); |
2209 | |
2210 | if (!fn) |
2211 | fn = getARCIntrinsic(IntID, CGM&: CGF.CGM); |
2212 | |
2213 | llvm::Type *origType = value->getType(); |
2214 | |
2215 | llvm::Value *args[] = { |
2216 | CGF.Builder.CreateBitCast(V: addr.emitRawPointer(CGF), DestTy: CGF.Int8PtrPtrTy), |
2217 | CGF.Builder.CreateBitCast(V: value, DestTy: CGF.Int8PtrTy)}; |
2218 | llvm::CallInst *result = CGF.EmitNounwindRuntimeCall(callee: fn, args); |
2219 | |
2220 | if (ignored) return nullptr; |
2221 | |
2222 | return CGF.Builder.CreateBitCast(V: result, DestTy: origType); |
2223 | } |
2224 | |
2225 | /// Perform an operation having the following signature: |
2226 | /// void (i8**, i8**) |
2227 | static void emitARCCopyOperation(CodeGenFunction &CGF, Address dst, Address src, |
2228 | llvm::Function *&fn, |
2229 | llvm::Intrinsic::ID IntID) { |
2230 | assert(dst.getType() == src.getType()); |
2231 | |
2232 | if (!fn) |
2233 | fn = getARCIntrinsic(IntID, CGM&: CGF.CGM); |
2234 | |
2235 | llvm::Value *args[] = { |
2236 | CGF.Builder.CreateBitCast(V: dst.emitRawPointer(CGF), DestTy: CGF.Int8PtrPtrTy), |
2237 | CGF.Builder.CreateBitCast(V: src.emitRawPointer(CGF), DestTy: CGF.Int8PtrPtrTy)}; |
2238 | CGF.EmitNounwindRuntimeCall(callee: fn, args); |
2239 | } |
2240 | |
2241 | /// Perform an operation having the signature |
2242 | /// i8* (i8*) |
2243 | /// where a null input causes a no-op and returns null. |
2244 | static llvm::Value *emitObjCValueOperation(CodeGenFunction &CGF, |
2245 | llvm::Value *value, |
2246 | llvm::Type *returnType, |
2247 | llvm::FunctionCallee &fn, |
2248 | StringRef fnName) { |
2249 | if (isa<llvm::ConstantPointerNull>(Val: value)) |
2250 | return value; |
2251 | |
2252 | if (!fn) { |
2253 | llvm::FunctionType *fnType = |
2254 | llvm::FunctionType::get(Result: CGF.Int8PtrTy, Params: CGF.Int8PtrTy, isVarArg: false); |
2255 | fn = CGF.CGM.CreateRuntimeFunction(Ty: fnType, Name: fnName); |
2256 | |
// We have native ARC, so set the nonlazybind attribute for performance.
2258 | if (llvm::Function *f = dyn_cast<llvm::Function>(fn.getCallee())) |
2259 | if (fnName == "objc_retain" ) |
2260 | f->addFnAttr(llvm::Attribute::NonLazyBind); |
2261 | } |
2262 | |
2263 | // Cast the argument to 'id'. |
2264 | llvm::Type *origType = returnType ? returnType : value->getType(); |
2265 | value = CGF.Builder.CreateBitCast(V: value, DestTy: CGF.Int8PtrTy); |
2266 | |
2267 | // Call the function. |
2268 | llvm::CallBase *Inst = CGF.EmitCallOrInvoke(Callee: fn, Args: value); |
2269 | |
2270 | // Mark calls to objc_autorelease as tail on the assumption that methods |
2271 | // overriding autorelease do not touch anything on the stack. |
2272 | if (fnName == "objc_autorelease" ) |
2273 | if (auto *Call = dyn_cast<llvm::CallInst>(Val: Inst)) |
2274 | Call->setTailCall(); |
2275 | |
2276 | // Cast the result back to the original type. |
2277 | return CGF.Builder.CreateBitCast(V: Inst, DestTy: origType); |
2278 | } |
2279 | |
2280 | /// Produce the code to do a retain. Based on the type, calls one of: |
2281 | /// call i8* \@objc_retain(i8* %value) |
2282 | /// call i8* \@objc_retainBlock(i8* %value) |
2283 | llvm::Value *CodeGenFunction::EmitARCRetain(QualType type, llvm::Value *value) { |
2284 | if (type->isBlockPointerType()) |
2285 | return EmitARCRetainBlock(value, /*mandatory*/ false); |
2286 | else |
2287 | return EmitARCRetainNonBlock(value); |
2288 | } |
2289 | |
2290 | /// Retain the given object, with normal retain semantics. |
2291 | /// call i8* \@objc_retain(i8* %value) |
2292 | llvm::Value *CodeGenFunction::EmitARCRetainNonBlock(llvm::Value *value) { |
2293 | return emitARCValueOperation(*this, value, nullptr, |
2294 | CGM.getObjCEntrypoints().objc_retain, |
2295 | llvm::Intrinsic::objc_retain); |
2296 | } |
2297 | |
2298 | /// Retain the given block, with _Block_copy semantics. |
2299 | /// call i8* \@objc_retainBlock(i8* %value) |
2300 | /// |
2301 | /// \param mandatory - If false, emit the call with metadata |
2302 | /// indicating that it's okay for the optimizer to eliminate this call |
2303 | /// if it can prove that the block never escapes except down the stack. |
2304 | llvm::Value *CodeGenFunction::EmitARCRetainBlock(llvm::Value *value, |
2305 | bool mandatory) { |
2306 | llvm::Value *result |
2307 | = emitARCValueOperation(*this, value, nullptr, |
2308 | CGM.getObjCEntrypoints().objc_retainBlock, |
2309 | llvm::Intrinsic::objc_retainBlock); |
2310 | |
2311 | // If the copy isn't mandatory, add !clang.arc.copy_on_escape to |
2312 | // tell the optimizer that it doesn't need to do this copy if the |
2313 | // block doesn't escape, where being passed as an argument doesn't |
2314 | // count as escaping. |
2315 | if (!mandatory && isa<llvm::Instruction>(Val: result)) { |
2316 | llvm::CallInst *call |
2317 | = cast<llvm::CallInst>(Val: result->stripPointerCasts()); |
2318 | assert(call->getCalledOperand() == |
2319 | CGM.getObjCEntrypoints().objc_retainBlock); |
2320 | |
2321 | call->setMetadata(Kind: "clang.arc.copy_on_escape" , |
2322 | Node: llvm::MDNode::get(Context&: Builder.getContext(), MDs: std::nullopt)); |
2323 | } |
2324 | |
2325 | return result; |
2326 | } |
2327 | |
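/// Emit or record the target's "retain autoreleased return value" marker.
/// For illustration: on AArch64 the marker is the no-op "mov fp, fp" that
/// the runtime recognizes between the call and the retain.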
2328 | static void emitAutoreleasedReturnValueMarker(CodeGenFunction &CGF) { |
2329 | // Fetch the void(void) inline asm which marks that we're going to |
2330 | // do something with the autoreleased return value. |
2331 | llvm::InlineAsm *&marker |
2332 | = CGF.CGM.getObjCEntrypoints().retainAutoreleasedReturnValueMarker; |
2333 | if (!marker) { |
2334 | StringRef assembly |
2335 | = CGF.CGM.getTargetCodeGenInfo() |
2336 | .getARCRetainAutoreleasedReturnValueMarker(); |
2337 | |
2338 | // If we have an empty assembly string, there's nothing to do. |
2339 | if (assembly.empty()) { |
2340 | |
2341 | // Otherwise, at -O0, build an inline asm that we're going to call |
2342 | // in a moment. |
2343 | } else if (CGF.CGM.getCodeGenOpts().OptimizationLevel == 0) { |
2344 | llvm::FunctionType *type = |
2345 | llvm::FunctionType::get(Result: CGF.VoidTy, /*variadic*/isVarArg: false); |
2346 | |
2347 | marker = llvm::InlineAsm::get(Ty: type, AsmString: assembly, Constraints: "" , /*sideeffects*/ hasSideEffects: true); |
2348 | |
2349 | // If we're at -O1 and above, we don't want to litter the code |
2350 | // with this marker yet, so leave a breadcrumb for the ARC |
2351 | // optimizer to pick up. |
2352 | } else { |
2353 | const char *retainRVMarkerKey = llvm::objcarc::getRVMarkerModuleFlagStr(); |
2354 | if (!CGF.CGM.getModule().getModuleFlag(Key: retainRVMarkerKey)) { |
2355 | auto *str = llvm::MDString::get(Context&: CGF.getLLVMContext(), Str: assembly); |
2356 | CGF.CGM.getModule().addModuleFlag(Behavior: llvm::Module::Error, |
2357 | Key: retainRVMarkerKey, Val: str); |
2358 | } |
2359 | } |
2360 | } |
2361 | |
2362 | // Call the marker asm if we made one, which we do only at -O0. |
2363 | if (marker) |
2364 | CGF.Builder.CreateCall(Callee: marker, Args: std::nullopt, |
2365 | OpBundles: CGF.getBundlesForFunclet(Callee: marker)); |
2366 | } |
2367 | |
2368 | static llvm::Value *emitOptimizedARCReturnCall(llvm::Value *value, |
2369 | bool IsRetainRV, |
2370 | CodeGenFunction &CGF) { |
2371 | emitAutoreleasedReturnValueMarker(CGF); |
2372 | |
2373 | // Add operand bundle "clang.arc.attachedcall" to the call instead of emitting |
2374 | // retainRV or claimRV calls in the IR. We currently do this only when the |
2375 | // optimization level isn't -O0 since global-isel, which is currently run at |
2376 | // -O0, doesn't know about the operand bundle. |
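// Roughly, instead of emitting:
//   %call = call i8* @foo()
//   %ret  = call i8* @llvm.objc.retainAutoreleasedReturnValue(i8* %call)
// we emit (sketch):
//   %call = call i8* @foo() [ "clang.arc.attachedcall"(
//               i8* (i8*)* @llvm.objc.retainAutoreleasedReturnValue) ]
//   call void (...) @llvm.objc.clang.arc.noop.use(i8* %call)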
2377 | ObjCEntrypoints &EPs = CGF.CGM.getObjCEntrypoints(); |
2378 | llvm::Function *&EP = IsRetainRV |
2379 | ? EPs.objc_retainAutoreleasedReturnValue |
2380 | : EPs.objc_unsafeClaimAutoreleasedReturnValue; |
2381 | llvm::Intrinsic::ID IID = |
2382 | IsRetainRV ? llvm::Intrinsic::objc_retainAutoreleasedReturnValue |
2383 | : llvm::Intrinsic::objc_unsafeClaimAutoreleasedReturnValue; |
2384 | EP = getARCIntrinsic(IntID: IID, CGM&: CGF.CGM); |
2385 | |
2386 | llvm::Triple::ArchType Arch = CGF.CGM.getTriple().getArch(); |
2387 | |
2388 | // FIXME: Do this on all targets and at -O0 too. This can be enabled only if |
2389 | // the target backend knows how to handle the operand bundle. |
2390 | if (CGF.CGM.getCodeGenOpts().OptimizationLevel > 0 && |
2391 | (Arch == llvm::Triple::aarch64 || Arch == llvm::Triple::x86_64)) { |
2392 | llvm::Value *bundleArgs[] = {EP}; |
2393 | llvm::OperandBundleDef OB("clang.arc.attachedcall" , bundleArgs); |
2394 | auto *oldCall = cast<llvm::CallBase>(Val: value); |
2395 | llvm::CallBase *newCall = llvm::CallBase::addOperandBundle( |
2396 | CB: oldCall, ID: llvm::LLVMContext::OB_clang_arc_attachedcall, OB, InsertPt: oldCall); |
2397 | newCall->copyMetadata(SrcInst: *oldCall); |
2398 | oldCall->replaceAllUsesWith(V: newCall); |
2399 | oldCall->eraseFromParent(); |
2400 | CGF.EmitARCNoopIntrinsicUse(values: newCall); |
2401 | return newCall; |
2402 | } |
2403 | |
2404 | bool isNoTail = |
2405 | CGF.CGM.getTargetCodeGenInfo().markARCOptimizedReturnCallsAsNoTail(); |
2406 | llvm::CallInst::TailCallKind tailKind = |
2407 | isNoTail ? llvm::CallInst::TCK_NoTail : llvm::CallInst::TCK_None; |
2408 | return emitARCValueOperation(CGF, value, returnType: nullptr, fn&: EP, IntID: IID, tailKind); |
2409 | } |
2410 | |
2411 | /// Retain the given object which is the result of a function call. |
2412 | /// call i8* \@objc_retainAutoreleasedReturnValue(i8* %value) |
2413 | /// |
2414 | /// Yes, this function name is one character away from a different |
2415 | /// call with completely different semantics. |
2416 | llvm::Value * |
2417 | CodeGenFunction::EmitARCRetainAutoreleasedReturnValue(llvm::Value *value) { |
2418 | return emitOptimizedARCReturnCall(value, IsRetainRV: true, CGF&: *this); |
2419 | } |
2420 | |
2421 | /// Claim a possibly-autoreleased return value at +0. This is only |
2422 | /// valid to do in contexts which do not rely on the retain to keep |
2423 | /// the object valid for all of its uses; for example, when |
2424 | /// the value is ignored, or when it is being assigned to an |
2425 | /// __unsafe_unretained variable. |
2426 | /// |
2427 | /// call i8* \@objc_unsafeClaimAutoreleasedReturnValue(i8* %value) |
2428 | llvm::Value * |
2429 | CodeGenFunction::EmitARCUnsafeClaimAutoreleasedReturnValue(llvm::Value *value) { |
2430 | return emitOptimizedARCReturnCall(value, IsRetainRV: false, CGF&: *this); |
2431 | } |
2432 | |
2433 | /// Release the given object. |
2434 | /// call void \@objc_release(i8* %value) |
2435 | void CodeGenFunction::EmitARCRelease(llvm::Value *value, |
2436 | ARCPreciseLifetime_t precise) { |
2437 | if (isa<llvm::ConstantPointerNull>(Val: value)) return; |
2438 | |
2439 | llvm::Function *&fn = CGM.getObjCEntrypoints().objc_release; |
2440 | if (!fn) |
2441 | fn = getARCIntrinsic(llvm::Intrinsic::objc_release, CGM); |
2442 | |
2443 | // Cast the argument to 'id'. |
2444 | value = Builder.CreateBitCast(V: value, DestTy: Int8PtrTy); |
2445 | |
2446 | // Call objc_release. |
2447 | llvm::CallInst *call = EmitNounwindRuntimeCall(callee: fn, args: value); |
2448 | |
2449 | if (precise == ARCImpreciseLifetime) { |
2450 | call->setMetadata(Kind: "clang.imprecise_release" , |
2451 | Node: llvm::MDNode::get(Context&: Builder.getContext(), MDs: std::nullopt)); |
2452 | } |
2453 | } |
2454 | |
2455 | /// Destroy a __strong variable. |
2456 | /// |
2457 | /// At -O0, emit a call to store 'null' into the address; |
2458 | /// instrumenting tools prefer this because the address is exposed, |
2459 | /// but it's relatively cumbersome to optimize. |
2460 | /// |
2461 | /// At -O1 and above, just load and call objc_release. |
2462 | /// |
2463 | /// call void \@objc_storeStrong(i8** %addr, i8* null) |
void CodeGenFunction::EmitARCDestroyStrong(Address addr,
                                           ARCPreciseLifetime_t precise) {
  if (CGM.getCodeGenOpts().OptimizationLevel == 0) {
    llvm::Value *null = getNullForVariable(addr);
    EmitARCStoreStrongCall(addr, null, /*ignored*/ true);
    return;
  }

  llvm::Value *value = Builder.CreateLoad(addr);
  EmitARCRelease(value, precise);
}

/// Store into a strong object. Always calls this:
///   call void \@objc_storeStrong(i8** %addr, i8* %value)
llvm::Value *CodeGenFunction::EmitARCStoreStrongCall(Address addr,
                                                     llvm::Value *value,
                                                     bool ignored) {
  assert(addr.getElementType() == value->getType());

  llvm::Function *&fn = CGM.getObjCEntrypoints().objc_storeStrong;
  if (!fn)
    fn = getARCIntrinsic(llvm::Intrinsic::objc_storeStrong, CGM);

  llvm::Value *args[] = {
      Builder.CreateBitCast(addr.emitRawPointer(*this), Int8PtrPtrTy),
      Builder.CreateBitCast(value, Int8PtrTy)};
  EmitNounwindRuntimeCall(fn, args);

  if (ignored) return nullptr;
  return value;
}

/// Store into a strong object. Sometimes calls this:
///   call void \@objc_storeStrong(i8** %addr, i8* %value)
/// Other times, breaks it down into components.
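/// The component form is, roughly:
///   %new = call i8* \@objc_retain(i8* %value)
///   %old = load i8*, i8** %addr
///   store i8* %new, i8** %addr
///   call void \@objc_release(i8* %old)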
llvm::Value *CodeGenFunction::EmitARCStoreStrong(LValue dst,
                                                 llvm::Value *newValue,
                                                 bool ignored) {
  QualType type = dst.getType();
  bool isBlock = type->isBlockPointerType();

  // Use a store barrier at -O0 unless this is a block type or the
  // lvalue is inadequately aligned.
  if (shouldUseFusedARCCalls() &&
      !isBlock &&
      (dst.getAlignment().isZero() ||
       dst.getAlignment() >= CharUnits::fromQuantity(PointerAlignInBytes))) {
    return EmitARCStoreStrongCall(dst.getAddress(*this), newValue, ignored);
  }

  // Otherwise, split it out.

  // Retain the new value.
  newValue = EmitARCRetain(type, newValue);

  // Read the old value.
  llvm::Value *oldValue = EmitLoadOfScalar(dst, SourceLocation());

  // Store. We do this before the release so that any deallocs won't
  // see the old value.
  EmitStoreOfScalar(newValue, dst);

  // Finally, release the old value.
  EmitARCRelease(oldValue, dst.isARCPreciseLifetime());

  return newValue;
}

/// Autorelease the given object.
///   call i8* \@objc_autorelease(i8* %value)
llvm::Value *CodeGenFunction::EmitARCAutorelease(llvm::Value *value) {
  return emitARCValueOperation(*this, value, nullptr,
                               CGM.getObjCEntrypoints().objc_autorelease,
                               llvm::Intrinsic::objc_autorelease);
}

/// Autorelease the given object.
///   call i8* \@objc_autoreleaseReturnValue(i8* %value)
llvm::Value *
CodeGenFunction::EmitARCAutoreleaseReturnValue(llvm::Value *value) {
  return emitARCValueOperation(
      *this, value, nullptr,
      CGM.getObjCEntrypoints().objc_autoreleaseReturnValue,
      llvm::Intrinsic::objc_autoreleaseReturnValue, llvm::CallInst::TCK_Tail);
}

/// Do a fused retain/autorelease of the given object.
///   call i8* \@objc_retainAutoreleaseReturnValue(i8* %value)
llvm::Value *
CodeGenFunction::EmitARCRetainAutoreleaseReturnValue(llvm::Value *value) {
  return emitARCValueOperation(
      *this, value, nullptr,
      CGM.getObjCEntrypoints().objc_retainAutoreleaseReturnValue,
      llvm::Intrinsic::objc_retainAutoreleaseReturnValue,
      llvm::CallInst::TCK_Tail);
}

/// Do a fused retain/autorelease of the given object.
///   call i8* \@objc_retainAutorelease(i8* %value)
/// or
///   %retain = call i8* \@objc_retainBlock(i8* %value)
///   call i8* \@objc_autorelease(i8* %retain)
llvm::Value *CodeGenFunction::EmitARCRetainAutorelease(QualType type,
                                                       llvm::Value *value) {
  if (!type->isBlockPointerType())
    return EmitARCRetainAutoreleaseNonBlock(value);

  if (isa<llvm::ConstantPointerNull>(value)) return value;

  llvm::Type *origType = value->getType();
  value = Builder.CreateBitCast(value, Int8PtrTy);
  value = EmitARCRetainBlock(value, /*mandatory*/ true);
  value = EmitARCAutorelease(value);
  return Builder.CreateBitCast(value, origType);
}

/// Do a fused retain/autorelease of the given object.
///   call i8* \@objc_retainAutorelease(i8* %value)
llvm::Value *
CodeGenFunction::EmitARCRetainAutoreleaseNonBlock(llvm::Value *value) {
  return emitARCValueOperation(*this, value, nullptr,
                               CGM.getObjCEntrypoints().objc_retainAutorelease,
                               llvm::Intrinsic::objc_retainAutorelease);
}

/// i8* \@objc_loadWeak(i8** %addr)
/// Essentially objc_autorelease(objc_loadWeakRetained(addr)).
llvm::Value *CodeGenFunction::EmitARCLoadWeak(Address addr) {
  return emitARCLoadOperation(*this, addr,
                              CGM.getObjCEntrypoints().objc_loadWeak,
                              llvm::Intrinsic::objc_loadWeak);
}

/// i8* \@objc_loadWeakRetained(i8** %addr)
llvm::Value *CodeGenFunction::EmitARCLoadWeakRetained(Address addr) {
  return emitARCLoadOperation(*this, addr,
                              CGM.getObjCEntrypoints().objc_loadWeakRetained,
                              llvm::Intrinsic::objc_loadWeakRetained);
}

/// i8* \@objc_storeWeak(i8** %addr, i8* %value)
/// Returns %value.
llvm::Value *CodeGenFunction::EmitARCStoreWeak(Address addr,
                                               llvm::Value *value,
                                               bool ignored) {
  return emitARCStoreOperation(*this, addr, value,
                               CGM.getObjCEntrypoints().objc_storeWeak,
                               llvm::Intrinsic::objc_storeWeak, ignored);
}

/// i8* \@objc_initWeak(i8** %addr, i8* %value)
/// Returns %value. %addr is known to not have a current weak entry.
/// Essentially equivalent to:
///   *addr = nil; objc_storeWeak(addr, value);
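/// For example, initializing a fresh __weak local:
///   __weak id x = y;   // lowers to objc_initWeak(&x, y)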
void CodeGenFunction::EmitARCInitWeak(Address addr, llvm::Value *value) {
  // If we're initializing to null, just write null to memory; no need
  // to get the runtime involved. But don't do this if optimization
  // is enabled, because accounting for this would make the optimizer
  // much more complicated.
  if (isa<llvm::ConstantPointerNull>(value) &&
      CGM.getCodeGenOpts().OptimizationLevel == 0) {
    Builder.CreateStore(value, addr);
    return;
  }

  emitARCStoreOperation(*this, addr, value,
                        CGM.getObjCEntrypoints().objc_initWeak,
                        llvm::Intrinsic::objc_initWeak, /*ignored*/ true);
}

/// void \@objc_destroyWeak(i8** %addr)
/// Essentially objc_storeWeak(addr, nil).
void CodeGenFunction::EmitARCDestroyWeak(Address addr) {
  llvm::Function *&fn = CGM.getObjCEntrypoints().objc_destroyWeak;
  if (!fn)
    fn = getARCIntrinsic(llvm::Intrinsic::objc_destroyWeak, CGM);

  EmitNounwindRuntimeCall(fn, addr.emitRawPointer(*this));
}

/// void \@objc_moveWeak(i8** %dest, i8** %src)
/// Disregards the current value in %dest. Leaves %src pointing to nothing.
/// Essentially (objc_copyWeak(dest, src), objc_destroyWeak(src)).
void CodeGenFunction::EmitARCMoveWeak(Address dst, Address src) {
  emitARCCopyOperation(*this, dst, src,
                       CGM.getObjCEntrypoints().objc_moveWeak,
                       llvm::Intrinsic::objc_moveWeak);
}

/// void \@objc_copyWeak(i8** %dest, i8** %src)
/// Disregards the current value in %dest. Essentially
///   objc_release(objc_initWeak(dest, objc_readWeakRetained(src)))
void CodeGenFunction::EmitARCCopyWeak(Address dst, Address src) {
  emitARCCopyOperation(*this, dst, src,
                       CGM.getObjCEntrypoints().objc_copyWeak,
                       llvm::Intrinsic::objc_copyWeak);
}

void CodeGenFunction::emitARCCopyAssignWeak(QualType Ty, Address DstAddr,
                                            Address SrcAddr) {
  llvm::Value *Object = EmitARCLoadWeakRetained(SrcAddr);
  Object = EmitObjCConsumeObject(Ty, Object);
  EmitARCStoreWeak(DstAddr, Object, false);
}

void CodeGenFunction::emitARCMoveAssignWeak(QualType Ty, Address DstAddr,
                                            Address SrcAddr) {
  llvm::Value *Object = EmitARCLoadWeakRetained(SrcAddr);
  Object = EmitObjCConsumeObject(Ty, Object);
  EmitARCStoreWeak(DstAddr, Object, false);
  EmitARCDestroyWeak(SrcAddr);
}
/// Produce the code to do an objc_autoreleasepool_push.
///   call i8* \@objc_autoreleasePoolPush(void)
llvm::Value *CodeGenFunction::EmitObjCAutoreleasePoolPush() {
  llvm::Function *&fn = CGM.getObjCEntrypoints().objc_autoreleasePoolPush;
  if (!fn)
    fn = getARCIntrinsic(llvm::Intrinsic::objc_autoreleasePoolPush, CGM);

  return EmitNounwindRuntimeCall(fn);
}

/// Produce the code to do a primitive release.
///   call void \@objc_autoreleasePoolPop(i8* %ptr)
void CodeGenFunction::EmitObjCAutoreleasePoolPop(llvm::Value *value) {
  assert(value->getType() == Int8PtrTy);

  if (getInvokeDest()) {
    // Call the runtime method, not the intrinsic, if we are handling
    // exceptions.
    llvm::FunctionCallee &fn =
        CGM.getObjCEntrypoints().objc_autoreleasePoolPopInvoke;
    if (!fn) {
      llvm::FunctionType *fnType =
          llvm::FunctionType::get(Builder.getVoidTy(), Int8PtrTy, false);
      fn = CGM.CreateRuntimeFunction(fnType, "objc_autoreleasePoolPop");
      setARCRuntimeFunctionLinkage(CGM, fn);
    }

    // objc_autoreleasePoolPop can throw.
    EmitRuntimeCallOrInvoke(fn, value);
  } else {
    llvm::FunctionCallee &fn = CGM.getObjCEntrypoints().objc_autoreleasePoolPop;
    if (!fn)
      fn = getARCIntrinsic(llvm::Intrinsic::objc_autoreleasePoolPop, CGM);

    EmitRuntimeCall(fn, value);
  }
}

/// Produce the code to do an MRR version of objc_autoreleasepool_push,
/// which is
///   [[NSAutoreleasePool alloc] init];
/// where alloc is declared as '+ (id) alloc;' in the NSAutoreleasePool
/// class, and init is declared as '- (id) init;' in its NSObject superclass.
///
llvm::Value *CodeGenFunction::EmitObjCMRRAutoreleasePoolPush() {
  CGObjCRuntime &Runtime = CGM.getObjCRuntime();
  llvm::Value *Receiver = Runtime.EmitNSAutoreleasePoolClassRef(*this);
  // [NSAutoreleasePool alloc]
  const IdentifierInfo *II = &CGM.getContext().Idents.get("alloc");
  Selector AllocSel = getContext().Selectors.getSelector(0, &II);
  CallArgList Args;
  RValue AllocRV =
      Runtime.GenerateMessageSend(*this, ReturnValueSlot(),
                                  getContext().getObjCIdType(),
                                  AllocSel, Receiver, Args);

  // [Receiver init]
  Receiver = AllocRV.getScalarVal();
  II = &CGM.getContext().Idents.get("init");
  Selector InitSel = getContext().Selectors.getSelector(0, &II);
  RValue InitRV =
      Runtime.GenerateMessageSend(*this, ReturnValueSlot(),
                                  getContext().getObjCIdType(),
                                  InitSel, Receiver, Args);
  return InitRV.getScalarVal();
}

/// Allocate the given objc object.
///   call i8* \@objc_alloc(i8* %value)
llvm::Value *CodeGenFunction::EmitObjCAlloc(llvm::Value *value,
                                            llvm::Type *resultType) {
  return emitObjCValueOperation(*this, value, resultType,
                                CGM.getObjCEntrypoints().objc_alloc,
                                "objc_alloc");
}

/// Allocate the given objc object.
///   call i8* \@objc_allocWithZone(i8* %value)
llvm::Value *CodeGenFunction::EmitObjCAllocWithZone(llvm::Value *value,
                                                    llvm::Type *resultType) {
  return emitObjCValueOperation(*this, value, resultType,
                                CGM.getObjCEntrypoints().objc_allocWithZone,
                                "objc_allocWithZone");
}

llvm::Value *CodeGenFunction::EmitObjCAllocInit(llvm::Value *value,
                                                llvm::Type *resultType) {
  return emitObjCValueOperation(*this, value, resultType,
                                CGM.getObjCEntrypoints().objc_alloc_init,
                                "objc_alloc_init");
}

/// Produce the code to do a primitive release.
///   [tmp drain];
void CodeGenFunction::EmitObjCMRRAutoreleasePoolPop(llvm::Value *Arg) {
  const IdentifierInfo *II = &CGM.getContext().Idents.get("drain");
  Selector DrainSel = getContext().Selectors.getSelector(0, &II);
  CallArgList Args;
  CGM.getObjCRuntime().GenerateMessageSend(*this, ReturnValueSlot(),
                                           getContext().VoidTy, DrainSel, Arg,
                                           Args);
}

void CodeGenFunction::destroyARCStrongPrecise(CodeGenFunction &CGF,
                                              Address addr,
                                              QualType type) {
  CGF.EmitARCDestroyStrong(addr, ARCPreciseLifetime);
}

void CodeGenFunction::destroyARCStrongImprecise(CodeGenFunction &CGF,
                                                Address addr,
                                                QualType type) {
  CGF.EmitARCDestroyStrong(addr, ARCImpreciseLifetime);
}

void CodeGenFunction::destroyARCWeak(CodeGenFunction &CGF,
                                     Address addr,
                                     QualType type) {
  CGF.EmitARCDestroyWeak(addr);
}

void CodeGenFunction::emitARCIntrinsicUse(CodeGenFunction &CGF, Address addr,
                                          QualType type) {
  llvm::Value *value = CGF.Builder.CreateLoad(addr);
  CGF.EmitARCIntrinsicUse(value);
}

/// Autorelease the given object.
///   call i8* \@objc_autorelease(i8* %value)
llvm::Value *CodeGenFunction::EmitObjCAutorelease(llvm::Value *value,
                                                  llvm::Type *returnType) {
  return emitObjCValueOperation(
      *this, value, returnType,
      CGM.getObjCEntrypoints().objc_autoreleaseRuntimeFunction,
      "objc_autorelease");
}

/// Retain the given object, with normal retain semantics.
///   call i8* \@objc_retain(i8* %value)
llvm::Value *CodeGenFunction::EmitObjCRetainNonBlock(llvm::Value *value,
                                                     llvm::Type *returnType) {
  return emitObjCValueOperation(
      *this, value, returnType,
      CGM.getObjCEntrypoints().objc_retainRuntimeFunction, "objc_retain");
}

/// Release the given object.
///   call void \@objc_release(i8* %value)
void CodeGenFunction::EmitObjCRelease(llvm::Value *value,
                                      ARCPreciseLifetime_t precise) {
  if (isa<llvm::ConstantPointerNull>(value)) return;

  llvm::FunctionCallee &fn =
      CGM.getObjCEntrypoints().objc_releaseRuntimeFunction;
  if (!fn) {
    llvm::FunctionType *fnType =
        llvm::FunctionType::get(Builder.getVoidTy(), Int8PtrTy, false);
    fn = CGM.CreateRuntimeFunction(fnType, "objc_release");
    setARCRuntimeFunctionLinkage(CGM, fn);
    // We have Native ARC, so set nonlazybind attribute for performance
    if (llvm::Function *f = dyn_cast<llvm::Function>(fn.getCallee()))
      f->addFnAttr(llvm::Attribute::NonLazyBind);
  }

  // Cast the argument to 'id'.
  value = Builder.CreateBitCast(value, Int8PtrTy);

  // Call objc_release.
  llvm::CallBase *call = EmitCallOrInvoke(fn, value);

  if (precise == ARCImpreciseLifetime) {
    call->setMetadata("clang.imprecise_release",
                      llvm::MDNode::get(Builder.getContext(), std::nullopt));
  }
}

namespace {
struct CallObjCAutoreleasePoolObject final : EHScopeStack::Cleanup {
  llvm::Value *Token;

  CallObjCAutoreleasePoolObject(llvm::Value *token) : Token(token) {}

  void Emit(CodeGenFunction &CGF, Flags flags) override {
    CGF.EmitObjCAutoreleasePoolPop(Token);
  }
};
struct CallObjCMRRAutoreleasePoolObject final : EHScopeStack::Cleanup {
  llvm::Value *Token;

  CallObjCMRRAutoreleasePoolObject(llvm::Value *token) : Token(token) {}

  void Emit(CodeGenFunction &CGF, Flags flags) override {
    CGF.EmitObjCMRRAutoreleasePoolPop(Token);
  }
};
}

void CodeGenFunction::EmitObjCAutoreleasePoolCleanup(llvm::Value *Ptr) {
  if (CGM.getLangOpts().ObjCAutoRefCount)
    EHStack.pushCleanup<CallObjCAutoreleasePoolObject>(NormalCleanup, Ptr);
  else
    EHStack.pushCleanup<CallObjCMRRAutoreleasePoolObject>(NormalCleanup, Ptr);
}

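/// Whether a +1 load of a value with the given lifetime needs an explicit
/// retain after the load. __weak is the exception: objc_loadWeakRetained
/// already returns its result at +1.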
static bool shouldRetainObjCLifetime(Qualifiers::ObjCLifetime lifetime) {
  switch (lifetime) {
  case Qualifiers::OCL_None:
  case Qualifiers::OCL_ExplicitNone:
  case Qualifiers::OCL_Strong:
  case Qualifiers::OCL_Autoreleasing:
    return true;

  case Qualifiers::OCL_Weak:
    return false;
  }

  llvm_unreachable("impossible lifetime!");
}

static TryEmitResult tryEmitARCRetainLoadOfScalar(CodeGenFunction &CGF,
                                                  LValue lvalue,
                                                  QualType type) {
  llvm::Value *result;
  bool shouldRetain = shouldRetainObjCLifetime(type.getObjCLifetime());
  if (shouldRetain) {
    result = CGF.EmitLoadOfLValue(lvalue, SourceLocation()).getScalarVal();
  } else {
    assert(type.getObjCLifetime() == Qualifiers::OCL_Weak);
    result = CGF.EmitARCLoadWeakRetained(lvalue.getAddress(CGF));
  }
  return TryEmitResult(result, !shouldRetain);
}

static TryEmitResult tryEmitARCRetainLoadOfScalar(CodeGenFunction &CGF,
                                                  const Expr *e) {
  e = e->IgnoreParens();
  QualType type = e->getType();

  // If we're loading retained from a __strong xvalue, we can avoid
  // an extra retain/release pair by zeroing out the source of this
  // "move" operation.
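  // For example, in ObjC++ (illustrative):
  //   NSString *s = std::move(strongLocal);
  // can load the value and store nil back instead of retain+release.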
  if (e->isXValue() &&
      !type.isConstQualified() &&
      type.getObjCLifetime() == Qualifiers::OCL_Strong) {
    // Emit the lvalue.
    LValue lv = CGF.EmitLValue(e);

    // Load the object pointer.
    llvm::Value *result = CGF.EmitLoadOfLValue(lv,
                                               SourceLocation()).getScalarVal();

    // Set the source pointer to NULL.
    CGF.EmitStoreOfScalar(getNullForVariable(lv.getAddress(CGF)), lv);

    return TryEmitResult(result, true);
  }

  // As a very special optimization, in ARC++, if the l-value is the
  // result of a non-volatile assignment, do a simple retain of the
  // result of the call to objc_storeWeak instead of reloading.
  if (CGF.getLangOpts().CPlusPlus &&
      !type.isVolatileQualified() &&
      type.getObjCLifetime() == Qualifiers::OCL_Weak &&
      isa<BinaryOperator>(e) &&
      cast<BinaryOperator>(e)->getOpcode() == BO_Assign)
    return TryEmitResult(CGF.EmitScalarExpr(e), false);

  // Try to emit code for a scalar constant instead of emitting an LValue
  // and loading it, because we are not guaranteed to have an l-value. One
  // such case is a DeclRefExpr referencing a non-odr-used,
  // constant-evaluated variable.
  if (const auto *decl_expr = dyn_cast<DeclRefExpr>(e)) {
    auto *DRE = const_cast<DeclRefExpr *>(decl_expr);
    if (CodeGenFunction::ConstantEmission constant = CGF.tryEmitAsConstant(DRE))
      return TryEmitResult(CGF.emitScalarConstant(constant, DRE),
                           !shouldRetainObjCLifetime(type.getObjCLifetime()));
  }

  return tryEmitARCRetainLoadOfScalar(CGF, CGF.EmitLValue(e), type);
}

typedef llvm::function_ref<llvm::Value *(CodeGenFunction &CGF,
                                         llvm::Value *value)>
    ValueTransform;

/// Insert code immediately after a call.

// FIXME: We should find a way to emit the runtime call immediately
// after the call is emitted to eliminate the need for this function.
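// Note: the "immediately after" placement matters. The runtime's
// autoreleased-return-value optimization is only recognized when the
// marker call (e.g. objc_retainAutoreleasedReturnValue) directly follows
// the producing call.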
static llvm::Value *emitARCOperationAfterCall(CodeGenFunction &CGF,
                                              llvm::Value *value,
                                              ValueTransform doAfterCall,
                                              ValueTransform doFallback) {
  CGBuilderTy::InsertPoint ip = CGF.Builder.saveIP();
  auto *callBase = dyn_cast<llvm::CallBase>(value);

  if (callBase && llvm::objcarc::hasAttachedCallOpBundle(callBase)) {
    // Fall back if the call base has operand bundle "clang.arc.attachedcall".
    value = doFallback(CGF, value);
  } else if (llvm::CallInst *call = dyn_cast<llvm::CallInst>(value)) {
    // Place the retain immediately following the call.
    CGF.Builder.SetInsertPoint(call->getParent(),
                               ++llvm::BasicBlock::iterator(call));
    value = doAfterCall(CGF, value);
  } else if (llvm::InvokeInst *invoke = dyn_cast<llvm::InvokeInst>(value)) {
    // Place the retain at the beginning of the normal destination block.
    llvm::BasicBlock *BB = invoke->getNormalDest();
    CGF.Builder.SetInsertPoint(BB, BB->begin());
    value = doAfterCall(CGF, value);

  // Bitcasts can arise because of related-result returns. Rewrite
  // the operand.
  } else if (llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(value)) {
    // Change the insert point to avoid emitting the fall-back call after the
    // bitcast.
    CGF.Builder.SetInsertPoint(bitcast->getParent(), bitcast->getIterator());
    llvm::Value *operand = bitcast->getOperand(0);
    operand = emitARCOperationAfterCall(CGF, operand, doAfterCall, doFallback);
    bitcast->setOperand(0, operand);
    value = bitcast;
  } else {
    auto *phi = dyn_cast<llvm::PHINode>(value);
    if (phi && phi->getNumIncomingValues() == 2 &&
        isa<llvm::ConstantPointerNull>(phi->getIncomingValue(1)) &&
        isa<llvm::CallBase>(phi->getIncomingValue(0))) {
      // Handle phi instructions that are generated when it's necessary to
      // check whether the receiver of a message is null.
      llvm::Value *inVal = phi->getIncomingValue(0);
      inVal = emitARCOperationAfterCall(CGF, inVal, doAfterCall, doFallback);
      phi->setIncomingValue(0, inVal);
      value = phi;
    } else {
      // Generic fall-back case.
      // Retain using the non-block variant: we never need to do a copy
      // of a block that's been returned to us.
      value = doFallback(CGF, value);
    }
  }

  CGF.Builder.restoreIP(ip);
  return value;
}

/// Given that the given expression is some sort of call (which does
/// not return retained), emit a retain following it.
static llvm::Value *emitARCRetainCallResult(CodeGenFunction &CGF,
                                            const Expr *e) {
  llvm::Value *value = CGF.EmitScalarExpr(e);
  return emitARCOperationAfterCall(CGF, value,
           [](CodeGenFunction &CGF, llvm::Value *value) {
             return CGF.EmitARCRetainAutoreleasedReturnValue(value);
           },
           [](CodeGenFunction &CGF, llvm::Value *value) {
             return CGF.EmitARCRetainNonBlock(value);
           });
}

/// Given that the given expression is some sort of call (which does
/// not return retained), perform an unsafeClaim following it.
static llvm::Value *emitARCUnsafeClaimCallResult(CodeGenFunction &CGF,
                                                 const Expr *e) {
  llvm::Value *value = CGF.EmitScalarExpr(e);
  return emitARCOperationAfterCall(CGF, value,
           [](CodeGenFunction &CGF, llvm::Value *value) {
             return CGF.EmitARCUnsafeClaimAutoreleasedReturnValue(value);
           },
           [](CodeGenFunction &CGF, llvm::Value *value) {
             return value;
           });
}

llvm::Value *CodeGenFunction::EmitARCReclaimReturnedObject(const Expr *E,
                                                      bool allowUnsafeClaim) {
  if (allowUnsafeClaim &&
      CGM.getLangOpts().ObjCRuntime.hasARCUnsafeClaimAutoreleasedReturnValue()) {
    return emitARCUnsafeClaimCallResult(*this, E);
  } else {
    llvm::Value *value = emitARCRetainCallResult(*this, E);
    return EmitObjCConsumeObject(E->getType(), value);
  }
}

/// Determine whether it might be important to emit a separate
/// objc_retain_block on the result of the given expression, or
/// whether it's okay to just emit it in a +1 context.
static bool shouldEmitSeparateBlockRetain(const Expr *e) {
  assert(e->getType()->isBlockPointerType());
  e = e->IgnoreParens();

  // For future goodness, emit block expressions directly in +1
  // contexts if we can.
  if (isa<BlockExpr>(e))
    return false;

  if (const CastExpr *cast = dyn_cast<CastExpr>(e)) {
    switch (cast->getCastKind()) {
    // Emitting these operations in +1 contexts is goodness.
    case CK_LValueToRValue:
    case CK_ARCReclaimReturnedObject:
    case CK_ARCConsumeObject:
    case CK_ARCProduceObject:
      return false;

    // These operations preserve a block type.
    case CK_NoOp:
    case CK_BitCast:
      return shouldEmitSeparateBlockRetain(cast->getSubExpr());

    // These operations are known to be bad (or haven't been considered).
    case CK_AnyPointerToBlockPointerCast:
    default:
      return true;
    }
  }

  return true;
}
3089 | |
3090 | namespace { |
3091 | /// A CRTP base class for emitting expressions of retainable object |
3092 | /// pointer type in ARC. |
3093 | template <typename Impl, typename Result> class ARCExprEmitter { |
3094 | protected: |
3095 | CodeGenFunction &CGF; |
3096 | Impl &asImpl() { return *static_cast<Impl*>(this); } |
3097 | |
3098 | ARCExprEmitter(CodeGenFunction &CGF) : CGF(CGF) {} |
3099 | |
3100 | public: |
3101 | Result visit(const Expr *e); |
3102 | Result visitCastExpr(const CastExpr *e); |
3103 | Result visitPseudoObjectExpr(const PseudoObjectExpr *e); |
3104 | Result visitBlockExpr(const BlockExpr *e); |
3105 | Result visitBinaryOperator(const BinaryOperator *e); |
3106 | Result visitBinAssign(const BinaryOperator *e); |
3107 | Result visitBinAssignUnsafeUnretained(const BinaryOperator *e); |
3108 | Result visitBinAssignAutoreleasing(const BinaryOperator *e); |
3109 | Result visitBinAssignWeak(const BinaryOperator *e); |
3110 | Result visitBinAssignStrong(const BinaryOperator *e); |
3111 | |
3112 | // Minimal implementation: |
3113 | // Result visitLValueToRValue(const Expr *e) |
3114 | // Result visitConsumeObject(const Expr *e) |
3115 | // Result visitExtendBlockObject(const Expr *e) |
3116 | // Result visitReclaimReturnedObject(const Expr *e) |
3117 | // Result visitCall(const Expr *e) |
3118 | // Result visitExpr(const Expr *e) |
3119 | // |
3120 | // Result emitBitCast(Result result, llvm::Type *resultType) |
3121 | // llvm::Value *getValueOfResult(Result result) |
3122 | }; |
3123 | } |

/// Try to emit a PseudoObjectExpr under special ARC rules.
///
/// This massively duplicates emitPseudoObjectRValue.
template <typename Impl, typename Result>
Result
ARCExprEmitter<Impl,Result>::visitPseudoObjectExpr(const PseudoObjectExpr *E) {
  SmallVector<CodeGenFunction::OpaqueValueMappingData, 4> opaques;

  // Find the result expression.
  const Expr *resultExpr = E->getResultExpr();
  assert(resultExpr);
  Result result;

  for (PseudoObjectExpr::const_semantics_iterator
         i = E->semantics_begin(), e = E->semantics_end(); i != e; ++i) {
    const Expr *semantic = *i;

    // If this semantic expression is an opaque value, bind it
    // to the result of its source expression.
    if (const OpaqueValueExpr *ov = dyn_cast<OpaqueValueExpr>(semantic)) {
      typedef CodeGenFunction::OpaqueValueMappingData OVMA;
      OVMA opaqueData;

      // If this semantic is the result of the pseudo-object
      // expression, try to evaluate the source as +1.
      if (ov == resultExpr) {
        assert(!OVMA::shouldBindAsLValue(ov));
        result = asImpl().visit(ov->getSourceExpr());
        opaqueData = OVMA::bind(CGF, ov,
                                RValue::get(asImpl().getValueOfResult(result)));

      // Otherwise, just bind it.
      } else {
        opaqueData = OVMA::bind(CGF, ov, ov->getSourceExpr());
      }
      opaques.push_back(opaqueData);

    // Otherwise, if the expression is the result, evaluate it
    // and remember the result.
    } else if (semantic == resultExpr) {
      result = asImpl().visit(semantic);

    // Otherwise, evaluate the expression in an ignored context.
    } else {
      CGF.EmitIgnoredExpr(semantic);
    }
  }

  // Unbind all the opaques now.
  for (unsigned i = 0, e = opaques.size(); i != e; ++i)
    opaques[i].unbind(CGF);

  return result;
}

template <typename Impl, typename Result>
Result ARCExprEmitter<Impl, Result>::visitBlockExpr(const BlockExpr *e) {
  // The default implementation just forwards the expression to visitExpr.
  return asImpl().visitExpr(e);
}

template <typename Impl, typename Result>
Result ARCExprEmitter<Impl,Result>::visitCastExpr(const CastExpr *e) {
  switch (e->getCastKind()) {

  // No-op casts don't change the type, so we just ignore them.
  case CK_NoOp:
    return asImpl().visit(e->getSubExpr());

  // These casts can change the type.
  case CK_CPointerToObjCPointerCast:
  case CK_BlockPointerToObjCPointerCast:
  case CK_AnyPointerToBlockPointerCast:
  case CK_BitCast: {
    llvm::Type *resultType = CGF.ConvertType(e->getType());
    assert(e->getSubExpr()->getType()->hasPointerRepresentation());
    Result result = asImpl().visit(e->getSubExpr());
    return asImpl().emitBitCast(result, resultType);
  }

  // Handle some casts specially.
  case CK_LValueToRValue:
    return asImpl().visitLValueToRValue(e->getSubExpr());
  case CK_ARCConsumeObject:
    return asImpl().visitConsumeObject(e->getSubExpr());
  case CK_ARCExtendBlockObject:
    return asImpl().visitExtendBlockObject(e->getSubExpr());
  case CK_ARCReclaimReturnedObject:
    return asImpl().visitReclaimReturnedObject(e->getSubExpr());

  // Otherwise, use the default logic.
  default:
    return asImpl().visitExpr(e);
  }
}

template <typename Impl, typename Result>
Result
ARCExprEmitter<Impl,Result>::visitBinaryOperator(const BinaryOperator *e) {
  switch (e->getOpcode()) {
  case BO_Comma:
    CGF.EmitIgnoredExpr(e->getLHS());
    CGF.EnsureInsertPoint();
    return asImpl().visit(e->getRHS());

  case BO_Assign:
    return asImpl().visitBinAssign(e);

  default:
    return asImpl().visitExpr(e);
  }
}

template <typename Impl, typename Result>
Result ARCExprEmitter<Impl,Result>::visitBinAssign(const BinaryOperator *e) {
  switch (e->getLHS()->getType().getObjCLifetime()) {
  case Qualifiers::OCL_ExplicitNone:
    return asImpl().visitBinAssignUnsafeUnretained(e);

  case Qualifiers::OCL_Weak:
    return asImpl().visitBinAssignWeak(e);

  case Qualifiers::OCL_Autoreleasing:
    return asImpl().visitBinAssignAutoreleasing(e);

  case Qualifiers::OCL_Strong:
    return asImpl().visitBinAssignStrong(e);

  case Qualifiers::OCL_None:
    return asImpl().visitExpr(e);
  }
  llvm_unreachable("bad ObjC ownership qualifier");
}

/// The default rule for __unsafe_unretained emits the RHS recursively,
/// stores into the unsafe variable, and propagates the result outward.
template <typename Impl, typename Result>
Result ARCExprEmitter<Impl,Result>::
         visitBinAssignUnsafeUnretained(const BinaryOperator *e) {
  // Recursively emit the RHS.
  // For __block safety, do this before emitting the LHS.
  Result result = asImpl().visit(e->getRHS());

  // Perform the store.
  LValue lvalue =
      CGF.EmitCheckedLValue(e->getLHS(), CodeGenFunction::TCK_Store);
  CGF.EmitStoreThroughLValue(RValue::get(asImpl().getValueOfResult(result)),
                             lvalue);

  return result;
}

template <typename Impl, typename Result>
Result
ARCExprEmitter<Impl,Result>::visitBinAssignAutoreleasing(const BinaryOperator *e) {
  return asImpl().visitExpr(e);
}

template <typename Impl, typename Result>
Result
ARCExprEmitter<Impl,Result>::visitBinAssignWeak(const BinaryOperator *e) {
  return asImpl().visitExpr(e);
}

template <typename Impl, typename Result>
Result
ARCExprEmitter<Impl,Result>::visitBinAssignStrong(const BinaryOperator *e) {
  return asImpl().visitExpr(e);
}

/// The general expression-emission logic.
template <typename Impl, typename Result>
Result ARCExprEmitter<Impl,Result>::visit(const Expr *e) {
  // We should *never* see a nested full-expression here, because if
  // we fail to emit at +1, our caller must not retain after we close
  // out the full-expression. This isn't as important in the unsafe
  // emitter.
  assert(!isa<ExprWithCleanups>(e));

  // Look through parens, __extension__, generic selection, etc.
  e = e->IgnoreParens();

  // Handle certain kinds of casts.
  if (const CastExpr *ce = dyn_cast<CastExpr>(e)) {
    return asImpl().visitCastExpr(ce);

  // Handle the comma operator.
  } else if (auto op = dyn_cast<BinaryOperator>(e)) {
    return asImpl().visitBinaryOperator(op);

  // TODO: handle conditional operators here

  // For calls and message sends, use the retained-call logic.
  // Delegate inits are a special case in that they're the only
  // returns-retained expression that *isn't* surrounded by
  // a consume.
  } else if (isa<CallExpr>(e) ||
             (isa<ObjCMessageExpr>(e) &&
              !cast<ObjCMessageExpr>(e)->isDelegateInitCall())) {
    return asImpl().visitCall(e);

  // Look through pseudo-object expressions.
  } else if (const PseudoObjectExpr *pseudo = dyn_cast<PseudoObjectExpr>(e)) {
    return asImpl().visitPseudoObjectExpr(pseudo);
  } else if (auto *be = dyn_cast<BlockExpr>(e))
    return asImpl().visitBlockExpr(be);

  return asImpl().visitExpr(e);
}

namespace {

/// An emitter for +1 results.
struct ARCRetainExprEmitter :
    public ARCExprEmitter<ARCRetainExprEmitter, TryEmitResult> {

  ARCRetainExprEmitter(CodeGenFunction &CGF) : ARCExprEmitter(CGF) {}

  llvm::Value *getValueOfResult(TryEmitResult result) {
    return result.getPointer();
  }

  TryEmitResult emitBitCast(TryEmitResult result, llvm::Type *resultType) {
    llvm::Value *value = result.getPointer();
    value = CGF.Builder.CreateBitCast(value, resultType);
    result.setPointer(value);
    return result;
  }

  TryEmitResult visitLValueToRValue(const Expr *e) {
    return tryEmitARCRetainLoadOfScalar(CGF, e);
  }

  /// For consumptions, just emit the subexpression and thus elide
  /// the retain/release pair.
  TryEmitResult visitConsumeObject(const Expr *e) {
    llvm::Value *result = CGF.EmitScalarExpr(e);
    return TryEmitResult(result, true);
  }

  TryEmitResult visitBlockExpr(const BlockExpr *e) {
    TryEmitResult result = visitExpr(e);
    // Avoid the block-retain if this is a block literal that doesn't need
    // to be copied to the heap.
    if (CGF.CGM.getCodeGenOpts().ObjCAvoidHeapifyLocalBlocks &&
        e->getBlockDecl()->canAvoidCopyToHeap())
      result.setInt(true);
    return result;
  }

  /// Block extends are net +0. Naively, we could just recurse on
  /// the subexpression, but actually we need to ensure that the
  /// value is copied as a block, so there's a little filter here.
  TryEmitResult visitExtendBlockObject(const Expr *e) {
    llvm::Value *result; // will be a +0 value

    // If we can't safely assume the sub-expression will produce a
    // block-copied value, emit the sub-expression at +0.
    if (shouldEmitSeparateBlockRetain(e)) {
      result = CGF.EmitScalarExpr(e);

    // Otherwise, try to emit the sub-expression at +1 recursively.
    } else {
      TryEmitResult subresult = asImpl().visit(e);

      // If that produced a retained value, just use that.
      if (subresult.getInt()) {
        return subresult;
      }

      // Otherwise it's +0.
      result = subresult.getPointer();
    }

    // Retain the object as a block.
    result = CGF.EmitARCRetainBlock(result, /*mandatory*/ true);
    return TryEmitResult(result, true);
  }

  /// For reclaims, emit the subexpression as a retained call and
  /// skip the consumption.
  TryEmitResult visitReclaimReturnedObject(const Expr *e) {
    llvm::Value *result = emitARCRetainCallResult(CGF, e);
    return TryEmitResult(result, true);
  }

  /// When we have an undecorated call, retroactively do a claim.
  TryEmitResult visitCall(const Expr *e) {
    llvm::Value *result = emitARCRetainCallResult(CGF, e);
    return TryEmitResult(result, true);
  }

  // TODO: maybe special-case visitBinAssignWeak?

  TryEmitResult visitExpr(const Expr *e) {
    // We didn't find an obvious production, so emit what we've got and
    // tell the caller that we didn't manage to retain.
    llvm::Value *result = CGF.EmitScalarExpr(e);
    return TryEmitResult(result, false);
  }
};
}

static TryEmitResult
tryEmitARCRetainScalarExpr(CodeGenFunction &CGF, const Expr *e) {
  return ARCRetainExprEmitter(CGF).visit(e);
}

static llvm::Value *emitARCRetainLoadOfScalar(CodeGenFunction &CGF,
                                              LValue lvalue,
                                              QualType type) {
  TryEmitResult result = tryEmitARCRetainLoadOfScalar(CGF, lvalue, type);
  llvm::Value *value = result.getPointer();
  if (!result.getInt())
    value = CGF.EmitARCRetain(type, value);
  return value;
}

/// EmitARCRetainScalarExpr - Semantically equivalent to
/// EmitARCRetainObject(e->getType(), EmitScalarExpr(e)), but making a
/// best-effort attempt to peephole expressions that naturally produce
/// retained objects.
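/// For example, a message send whose result initializes a __strong local
/// (illustrative; 'item' is a hypothetical method):
///   id x = [obj item];
/// has its result retained with \@objc_retainAutoreleasedReturnValue right
/// after the send, rather than via a separate objc_retain afterwards.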
llvm::Value *CodeGenFunction::EmitARCRetainScalarExpr(const Expr *e) {
  // The retain needs to happen within the full-expression.
  if (const ExprWithCleanups *cleanups = dyn_cast<ExprWithCleanups>(e)) {
    RunCleanupsScope scope(*this);
    return EmitARCRetainScalarExpr(cleanups->getSubExpr());
  }

  TryEmitResult result = tryEmitARCRetainScalarExpr(*this, e);
  llvm::Value *value = result.getPointer();
  if (!result.getInt())
    value = EmitARCRetain(e->getType(), value);
  return value;
}

llvm::Value *
CodeGenFunction::EmitARCRetainAutoreleaseScalarExpr(const Expr *e) {
  // The retain needs to happen within the full-expression.
  if (const ExprWithCleanups *cleanups = dyn_cast<ExprWithCleanups>(e)) {
    RunCleanupsScope scope(*this);
    return EmitARCRetainAutoreleaseScalarExpr(cleanups->getSubExpr());
  }

  TryEmitResult result = tryEmitARCRetainScalarExpr(*this, e);
  llvm::Value *value = result.getPointer();
  if (result.getInt())
    value = EmitARCAutorelease(value);
  else
    value = EmitARCRetainAutorelease(e->getType(), value);
  return value;
}

llvm::Value *CodeGenFunction::EmitARCExtendBlockObject(const Expr *e) {
  llvm::Value *result;
  bool doRetain;

  if (shouldEmitSeparateBlockRetain(e)) {
    result = EmitScalarExpr(e);
    doRetain = true;
  } else {
    TryEmitResult subresult = tryEmitARCRetainScalarExpr(*this, e);
    result = subresult.getPointer();
    doRetain = !subresult.getInt();
  }

  if (doRetain)
    result = EmitARCRetainBlock(result, /*mandatory*/ true);
  return EmitObjCConsumeObject(e->getType(), result);
}

llvm::Value *CodeGenFunction::EmitObjCThrowOperand(const Expr *expr) {
  // In ARC, retain and autorelease the expression.
  if (getLangOpts().ObjCAutoRefCount) {
    // Do so before running any cleanups for the full-expression.
    // EmitARCRetainAutoreleaseScalarExpr does this for us.
    return EmitARCRetainAutoreleaseScalarExpr(expr);
  }

  // Otherwise, use the normal scalar-expression emission. The
  // exception machinery doesn't do anything special with the
  // exception like retaining it, so there's no safety associated with
  // only running cleanups after the throw has started, and when it
  // matters it tends to be substantially inferior code.
  return EmitScalarExpr(expr);
}

namespace {

/// An emitter for assigning into an __unsafe_unretained context.
struct ARCUnsafeUnretainedExprEmitter :
    public ARCExprEmitter<ARCUnsafeUnretainedExprEmitter, llvm::Value*> {

  ARCUnsafeUnretainedExprEmitter(CodeGenFunction &CGF) : ARCExprEmitter(CGF) {}

  llvm::Value *getValueOfResult(llvm::Value *value) {
    return value;
  }

  llvm::Value *emitBitCast(llvm::Value *value, llvm::Type *resultType) {
    return CGF.Builder.CreateBitCast(value, resultType);
  }

  llvm::Value *visitLValueToRValue(const Expr *e) {
    return CGF.EmitScalarExpr(e);
  }

  /// For consumptions, just emit the subexpression and perform the
  /// consumption like normal.
  llvm::Value *visitConsumeObject(const Expr *e) {
    llvm::Value *value = CGF.EmitScalarExpr(e);
    return CGF.EmitObjCConsumeObject(e->getType(), value);
  }

  /// No special logic for block extensions. (This probably can't
  /// actually happen in this emitter, though.)
  llvm::Value *visitExtendBlockObject(const Expr *e) {
    return CGF.EmitARCExtendBlockObject(e);
  }

  /// For reclaims, perform an unsafeClaim if that's enabled.
  llvm::Value *visitReclaimReturnedObject(const Expr *e) {
    return CGF.EmitARCReclaimReturnedObject(e, /*unsafe*/ true);
  }

  /// When we have an undecorated call, just emit it without adding
  /// the unsafeClaim.
  llvm::Value *visitCall(const Expr *e) {
    return CGF.EmitScalarExpr(e);
  }

  /// Just do normal scalar emission in the default case.
  llvm::Value *visitExpr(const Expr *e) {
    return CGF.EmitScalarExpr(e);
  }
};
}

static llvm::Value *emitARCUnsafeUnretainedScalarExpr(CodeGenFunction &CGF,
                                                      const Expr *e) {
  return ARCUnsafeUnretainedExprEmitter(CGF).visit(e);
}

/// EmitARCUnsafeUnretainedScalarExpr - Semantically equivalent to
/// immediately releasing the result of EmitARCRetainScalarExpr, but
/// avoiding any spurious retains, including by performing reclaims
/// with objc_unsafeClaimAutoreleasedReturnValue.
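/// For example, assigning a call result into __unsafe_unretained storage
/// (illustrative; 'makeItem' is a hypothetical method):
///   unsafePtr = [obj makeItem];
/// emits no retain at all when the claim entrypoint is available.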
llvm::Value *CodeGenFunction::EmitARCUnsafeUnretainedScalarExpr(const Expr *e) {
  // Look through full-expressions.
  if (const ExprWithCleanups *cleanups = dyn_cast<ExprWithCleanups>(e)) {
    RunCleanupsScope scope(*this);
    return emitARCUnsafeUnretainedScalarExpr(*this, cleanups->getSubExpr());
  }

  return emitARCUnsafeUnretainedScalarExpr(*this, e);
}

std::pair<LValue,llvm::Value*>
CodeGenFunction::EmitARCStoreUnsafeUnretained(const BinaryOperator *e,
                                              bool ignored) {
  // Evaluate the RHS first. If we're ignoring the result, assume
  // that we can emit at an unsafe +0.
  llvm::Value *value;
  if (ignored) {
    value = EmitARCUnsafeUnretainedScalarExpr(e->getRHS());
  } else {
    value = EmitScalarExpr(e->getRHS());
  }

  // Emit the LHS and perform the store.
  LValue lvalue = EmitLValue(e->getLHS());
  EmitStoreOfScalar(value, lvalue);

  return std::pair<LValue,llvm::Value*>(std::move(lvalue), value);
}

std::pair<LValue,llvm::Value*>
CodeGenFunction::EmitARCStoreStrong(const BinaryOperator *e,
                                    bool ignored) {
  // Evaluate the RHS first.
  TryEmitResult result = tryEmitARCRetainScalarExpr(*this, e->getRHS());
  llvm::Value *value = result.getPointer();

  bool hasImmediateRetain = result.getInt();

  // If we didn't emit a retained object, and the l-value is of block
  // type, then we need to emit the block-retain immediately in case
  // it invalidates the l-value.
  if (!hasImmediateRetain && e->getType()->isBlockPointerType()) {
    value = EmitARCRetainBlock(value, /*mandatory*/ false);
    hasImmediateRetain = true;
  }

  LValue lvalue = EmitLValue(e->getLHS());

  // If the RHS was emitted retained, expand this.
  if (hasImmediateRetain) {
    llvm::Value *oldValue = EmitLoadOfScalar(lvalue, SourceLocation());
    EmitStoreOfScalar(value, lvalue);
    EmitARCRelease(oldValue, lvalue.isARCPreciseLifetime());
  } else {
    value = EmitARCStoreStrong(lvalue, value, ignored);
  }

  return std::pair<LValue,llvm::Value*>(lvalue, value);
}

std::pair<LValue,llvm::Value*>
CodeGenFunction::EmitARCStoreAutoreleasing(const BinaryOperator *e) {
  llvm::Value *value = EmitARCRetainAutoreleaseScalarExpr(e->getRHS());
  LValue lvalue = EmitLValue(e->getLHS());

  EmitStoreOfScalar(value, lvalue);

  return std::pair<LValue,llvm::Value*>(lvalue, value);
}

void CodeGenFunction::EmitObjCAutoreleasePoolStmt(
    const ObjCAutoreleasePoolStmt &ARPS) {
  const Stmt *subStmt = ARPS.getSubStmt();
  const CompoundStmt &S = cast<CompoundStmt>(*subStmt);

  CGDebugInfo *DI = getDebugInfo();
  if (DI)
    DI->EmitLexicalBlockStart(Builder, S.getLBracLoc());

  // Keep track of the current cleanup stack depth.
  RunCleanupsScope Scope(*this);
  if (CGM.getLangOpts().ObjCRuntime.hasNativeARC()) {
    llvm::Value *token = EmitObjCAutoreleasePoolPush();
    EHStack.pushCleanup<CallObjCAutoreleasePoolObject>(NormalCleanup, token);
  } else {
    llvm::Value *token = EmitObjCMRRAutoreleasePoolPush();
    EHStack.pushCleanup<CallObjCMRRAutoreleasePoolObject>(NormalCleanup, token);
  }

  for (const auto *I : S.body())
    EmitStmt(I);

  if (DI)
    DI->EmitLexicalBlockEnd(Builder, S.getRBracLoc());
}

/// EmitExtendGCLifetime - Given a pointer to an Objective-C object,
/// make sure it survives garbage collection until this point.
void CodeGenFunction::EmitExtendGCLifetime(llvm::Value *object) {
  // We just use an inline assembly.
  llvm::FunctionType *extenderType
    = llvm::FunctionType::get(VoidTy, VoidPtrTy, RequiredArgs::All);
  llvm::InlineAsm *extender = llvm::InlineAsm::get(extenderType,
                                                   /* assembly */ "",
                                                   /* constraints */ "r",
                                                   /* side effects */ true);

  EmitNounwindRuntimeCall(extender, object);
}

/// GenerateObjCAtomicSetterCopyHelperFunction - Given a C++ object type with
/// a non-trivial copy assignment function, produce the following helper
/// function:
///   static void copyHelper(Ty *dest, const Ty *source) { *dest = *source; }
llvm::Constant *
CodeGenFunction::GenerateObjCAtomicSetterCopyHelperFunction(
    const ObjCPropertyImplDecl *PID) {
  const ObjCPropertyDecl *PD = PID->getPropertyDecl();
  if (!(PD->getPropertyAttributes() & ObjCPropertyAttribute::kind_atomic))
    return nullptr;

  QualType Ty = PID->getPropertyIvarDecl()->getType();
  ASTContext &C = getContext();

  if (Ty.isNonTrivialToPrimitiveCopy() == QualType::PCK_Struct) {
    // Call the move assignment operator instead of calling the copy
    // assignment operator and destructor.
    CharUnits Alignment = C.getTypeAlignInChars(Ty);
    llvm::Constant *Fn = getNonTrivialCStructMoveAssignmentOperator(
        CGM, Alignment, Alignment, Ty.isVolatileQualified(), Ty);
    return Fn;
  }

  if (!getLangOpts().CPlusPlus ||
      !getLangOpts().ObjCRuntime.hasAtomicCopyHelper())
    return nullptr;
  if (!Ty->isRecordType())
    return nullptr;
  llvm::Constant *HelperFn = nullptr;
  if (hasTrivialSetExpr(PID))
    return nullptr;
  assert(PID->getSetterCXXAssignment() && "SetterCXXAssignment - null");
  if ((HelperFn = CGM.getAtomicSetterHelperFnMap(Ty)))
    return HelperFn;

  const IdentifierInfo *II =
      &CGM.getContext().Idents.get("__assign_helper_atomic_property_");

  QualType ReturnTy = C.VoidTy;
  QualType DestTy = C.getPointerType(Ty);
  QualType SrcTy = Ty;
  SrcTy.addConst();
  SrcTy = C.getPointerType(SrcTy);

  SmallVector<QualType, 2> ArgTys;
  ArgTys.push_back(DestTy);
  ArgTys.push_back(SrcTy);
  QualType FunctionTy = C.getFunctionType(ReturnTy, ArgTys, {});

  FunctionDecl *FD = FunctionDecl::Create(
      C, C.getTranslationUnitDecl(), SourceLocation(), SourceLocation(), II,
      FunctionTy, nullptr, SC_Static, false, false, false);

  FunctionArgList args;
  ParmVarDecl *Params[2];
  ParmVarDecl *DstDecl = ParmVarDecl::Create(
      C, FD, SourceLocation(), SourceLocation(), nullptr, DestTy,
      C.getTrivialTypeSourceInfo(DestTy, SourceLocation()), SC_None,
      /*DefArg=*/nullptr);
  args.push_back(Params[0] = DstDecl);
  ParmVarDecl *SrcDecl = ParmVarDecl::Create(
      C, FD, SourceLocation(), SourceLocation(), nullptr, SrcTy,
      C.getTrivialTypeSourceInfo(SrcTy, SourceLocation()), SC_None,
      /*DefArg=*/nullptr);
  args.push_back(Params[1] = SrcDecl);
  FD->setParams(Params);

  const CGFunctionInfo &FI =
      CGM.getTypes().arrangeBuiltinFunctionDeclaration(ReturnTy, args);

  llvm::FunctionType *LTy = CGM.getTypes().GetFunctionType(FI);

  llvm::Function *Fn =
      llvm::Function::Create(LTy, llvm::GlobalValue::InternalLinkage,
                             "__assign_helper_atomic_property_",
                             &CGM.getModule());

  CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, FI);

  StartFunction(FD, ReturnTy, Fn, FI, args);

  DeclRefExpr DstExpr(C, DstDecl, false, DestTy, VK_PRValue, SourceLocation());
  UnaryOperator *DST = UnaryOperator::Create(
      C, &DstExpr, UO_Deref, DestTy->getPointeeType(), VK_LValue, OK_Ordinary,
      SourceLocation(), false, FPOptionsOverride());

  DeclRefExpr SrcExpr(C, SrcDecl, false, SrcTy, VK_PRValue, SourceLocation());
  UnaryOperator *SRC = UnaryOperator::Create(
      C, &SrcExpr, UO_Deref, SrcTy->getPointeeType(), VK_LValue, OK_Ordinary,
      SourceLocation(), false, FPOptionsOverride());

  Expr *Args[2] = {DST, SRC};
  CallExpr *CalleeExp = cast<CallExpr>(PID->getSetterCXXAssignment());
  CXXOperatorCallExpr *TheCall = CXXOperatorCallExpr::Create(
      C, OO_Equal, CalleeExp->getCallee(), Args, DestTy->getPointeeType(),
      VK_LValue, SourceLocation(), FPOptionsOverride());

  EmitStmt(TheCall);

  FinishFunction();
  HelperFn = Fn;
  CGM.setAtomicSetterHelperFnMap(Ty, HelperFn);
  return HelperFn;
}
3786 | |
3787 | llvm::Constant *CodeGenFunction::GenerateObjCAtomicGetterCopyHelperFunction( |
3788 | const ObjCPropertyImplDecl *PID) { |
3789 | const ObjCPropertyDecl *PD = PID->getPropertyDecl(); |
3790 | if ((!(PD->getPropertyAttributes() & ObjCPropertyAttribute::kind_atomic))) |
3791 | return nullptr; |
3792 | |
3793 | QualType Ty = PD->getType(); |
3794 | ASTContext &C = getContext(); |
3795 | |
3796 | if (Ty.isNonTrivialToPrimitiveCopy() == QualType::PCK_Struct) { |
3797 | CharUnits Alignment = C.getTypeAlignInChars(T: Ty); |
3798 | llvm::Constant *Fn = getNonTrivialCStructCopyConstructor( |
3799 | CGM, DstAlignment: Alignment, SrcAlignment: Alignment, IsVolatile: Ty.isVolatileQualified(), QT: Ty); |
3800 | return Fn; |
3801 | } |
3802 | |
3803 | if (!getLangOpts().CPlusPlus || |
3804 | !getLangOpts().ObjCRuntime.hasAtomicCopyHelper()) |
3805 | return nullptr; |
3806 | if (!Ty->isRecordType()) |
3807 | return nullptr; |
3808 | llvm::Constant *HelperFn = nullptr; |
3809 | if (hasTrivialGetExpr(propImpl: PID)) |
3810 | return nullptr; |
3811 | assert(PID->getGetterCXXConstructor() && "getGetterCXXConstructor - null" ); |
3812 | if ((HelperFn = CGM.getAtomicGetterHelperFnMap(Ty))) |
3813 | return HelperFn; |
3814 | |
3815 | const IdentifierInfo *II = |
&CGM.getContext().Idents.get("__copy_helper_atomic_property_");
3817 | |
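// Build the helper's signature: void (Ty *dst, const Ty *src).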
3818 | QualType ReturnTy = C.VoidTy; |
QualType DestTy = C.getPointerType(Ty);
3820 | QualType SrcTy = Ty; |
3821 | SrcTy.addConst(); |
SrcTy = C.getPointerType(SrcTy);
3823 | |
3824 | SmallVector<QualType, 2> ArgTys; |
ArgTys.push_back(DestTy);
ArgTys.push_back(SrcTy);
QualType FunctionTy = C.getFunctionType(ReturnTy, ArgTys, {});
3828 | |
3829 | FunctionDecl *FD = FunctionDecl::Create( |
3830 | C, C.getTranslationUnitDecl(), SourceLocation(), SourceLocation(), II, |
3831 | FunctionTy, nullptr, SC_Static, false, false, false); |
3832 | |
3833 | FunctionArgList args; |
3834 | ParmVarDecl *Params[2]; |
3835 | ParmVarDecl *DstDecl = ParmVarDecl::Create( |
3836 | C, FD, SourceLocation(), SourceLocation(), nullptr, DestTy, |
C.getTrivialTypeSourceInfo(DestTy, SourceLocation()), SC_None,
3838 | /*DefArg=*/nullptr); |
3839 | args.push_back(Params[0] = DstDecl); |
3840 | ParmVarDecl *SrcDecl = ParmVarDecl::Create( |
3841 | C, FD, SourceLocation(), SourceLocation(), nullptr, SrcTy, |
C.getTrivialTypeSourceInfo(SrcTy, SourceLocation()), SC_None,
3843 | /*DefArg=*/nullptr); |
3844 | args.push_back(Params[1] = SrcDecl); |
3845 | FD->setParams(Params); |
3846 | |
3847 | const CGFunctionInfo &FI = |
CGM.getTypes().arrangeBuiltinFunctionDeclaration(ReturnTy, args);
3849 | |
llvm::FunctionType *LTy = CGM.getTypes().GetFunctionType(FI);
3851 | |
llvm::Function *Fn = llvm::Function::Create(
LTy, llvm::GlobalValue::InternalLinkage, "__copy_helper_atomic_property_",
&CGM.getModule());
3855 | |
CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, FI);
3857 | |
StartFunction(FD, ReturnTy, Fn, FI, args);
3859 | |
3860 | DeclRefExpr SrcExpr(getContext(), SrcDecl, false, SrcTy, VK_PRValue, |
3861 | SourceLocation()); |
3862 | |
3863 | UnaryOperator *SRC = UnaryOperator::Create( |
3864 | C, &SrcExpr, UO_Deref, SrcTy->getPointeeType(), VK_LValue, OK_Ordinary, |
3865 | SourceLocation(), false, FPOptionsOverride()); |
3866 | |
3867 | CXXConstructExpr *CXXConstExpr = |
cast<CXXConstructExpr>(PID->getGetterCXXConstructor());
3869 | |
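// Rebuild the getter's constructor arguments, substituting *src for the
// original first argument.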
3870 | SmallVector<Expr*, 4> ConstructorArgs; |
3871 | ConstructorArgs.push_back(SRC); |
3872 | ConstructorArgs.append(std::next(CXXConstExpr->arg_begin()), |
3873 | CXXConstExpr->arg_end()); |
3874 | |
3875 | CXXConstructExpr *TheCXXConstructExpr = |
CXXConstructExpr::Create(C, Ty, SourceLocation(),
CXXConstExpr->getConstructor(),
CXXConstExpr->isElidable(),
ConstructorArgs,
CXXConstExpr->hadMultipleCandidates(),
CXXConstExpr->isListInitialization(),
CXXConstExpr->isStdInitListInitialization(),
CXXConstExpr->requiresZeroInitialization(),
CXXConstExpr->getConstructionKind(),
SourceRange());
3886 | |
3887 | DeclRefExpr DstExpr(getContext(), DstDecl, false, DestTy, VK_PRValue, |
3888 | SourceLocation()); |
3889 | |
3890 | RValue DV = EmitAnyExpr(&DstExpr); |
3891 | CharUnits Alignment = |
3892 | getContext().getTypeAlignInChars(TheCXXConstructExpr->getType()); |
3893 | EmitAggExpr(TheCXXConstructExpr, |
3894 | AggValueSlot::forAddr( |
Address(DV.getScalarVal(), ConvertTypeForMem(Ty), Alignment),
Qualifiers(), AggValueSlot::IsDestructed,
AggValueSlot::DoesNotNeedGCBarriers,
AggValueSlot::IsNotAliased, AggValueSlot::DoesNotOverlap));
3899 | |
3900 | FinishFunction(); |
3901 | HelperFn = Fn; |
CGM.setAtomicGetterHelperFnMap(Ty, HelperFn);
3903 | return HelperFn; |
3904 | } |
3905 | |
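/// Emit a copy of the given block followed by an autorelease of the copy,
/// i.e. the moral equivalent of [[block copy] autorelease], as two runtime
/// message sends.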
3906 | llvm::Value * |
3907 | CodeGenFunction::EmitBlockCopyAndAutorelease(llvm::Value *Block, QualType Ty) { |
// Get selectors for copy/autorelease.
const IdentifierInfo *CopyID = &getContext().Idents.get("copy");
Selector CopySelector =
getContext().Selectors.getNullarySelector(CopyID);
const IdentifierInfo *AutoreleaseID = &getContext().Idents.get("autorelease");
Selector AutoreleaseSelector =
getContext().Selectors.getNullarySelector(AutoreleaseID);
3915 | |
// Emit calls to copy/autorelease.
3917 | CGObjCRuntime &Runtime = CGM.getObjCRuntime(); |
3918 | llvm::Value *Val = Block; |
3919 | RValue Result; |
Result = Runtime.GenerateMessageSend(*this, ReturnValueSlot(),
Ty, CopySelector,
Val, CallArgList(), nullptr, nullptr);
Val = Result.getScalarVal();
Result = Runtime.GenerateMessageSend(*this, ReturnValueSlot(),
Ty, AutoreleaseSelector,
Val, CallArgList(), nullptr, nullptr);
Val = Result.getScalarVal();
3928 | return Val; |
3929 | } |
3930 | |
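/// Map the OS of the given target triple to the Mach-O platform ID that the
/// __isPlatformVersionAtLeast runtime function expects.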
3931 | static unsigned getBaseMachOPlatformID(const llvm::Triple &TT) { |
3932 | switch (TT.getOS()) { |
3933 | case llvm::Triple::Darwin: |
3934 | case llvm::Triple::MacOSX: |
3935 | return llvm::MachO::PLATFORM_MACOS; |
3936 | case llvm::Triple::IOS: |
3937 | return llvm::MachO::PLATFORM_IOS; |
3938 | case llvm::Triple::TvOS: |
3939 | return llvm::MachO::PLATFORM_TVOS; |
3940 | case llvm::Triple::WatchOS: |
3941 | return llvm::MachO::PLATFORM_WATCHOS; |
3942 | case llvm::Triple::XROS: |
3943 | return llvm::MachO::PLATFORM_XROS; |
3944 | case llvm::Triple::DriverKit: |
3945 | return llvm::MachO::PLATFORM_DRIVERKIT; |
3946 | default: |
3947 | return llvm::MachO::PLATFORM_UNKNOWN; |
3948 | } |
3949 | } |
3950 | |
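/// Emit a call to __isPlatformVersionAtLeast(platform, major, minor,
/// subminor) and convert its result to an i1 by comparing it against zero.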
3951 | static llvm::Value *emitIsPlatformVersionAtLeast(CodeGenFunction &CGF, |
3952 | const VersionTuple &Version) { |
3953 | CodeGenModule &CGM = CGF.CGM; |
// Note: we intend to support multi-platform version checks, so reserve
// room for a second, dual-platform checking invocation to be implemented
// in the future.
3957 | llvm::SmallVector<llvm::Value *, 8> Args; |
3958 | |
3959 | auto EmitArgs = [&](const VersionTuple &Version, const llvm::Triple &TT) { |
3960 | std::optional<unsigned> Min = Version.getMinor(), |
3961 | SMin = Version.getSubminor(); |
Args.push_back(
llvm::ConstantInt::get(CGM.Int32Ty, getBaseMachOPlatformID(TT)));
Args.push_back(llvm::ConstantInt::get(CGM.Int32Ty, Version.getMajor()));
Args.push_back(llvm::ConstantInt::get(CGM.Int32Ty, Min.value_or(0)));
Args.push_back(llvm::ConstantInt::get(CGM.Int32Ty, SMin.value_or(0)));
3967 | }; |
3968 | |
assert(!Version.empty() && "unexpected empty version");
3970 | EmitArgs(Version, CGM.getTarget().getTriple()); |
3971 | |
3972 | if (!CGM.IsPlatformVersionAtLeastFn) { |
llvm::FunctionType *FTy = llvm::FunctionType::get(
CGM.Int32Ty, {CGM.Int32Ty, CGM.Int32Ty, CGM.Int32Ty, CGM.Int32Ty},
false);
CGM.IsPlatformVersionAtLeastFn =
CGM.CreateRuntimeFunction(FTy, "__isPlatformVersionAtLeast");
3978 | } |
3979 | |
3980 | llvm::Value *Check = |
CGF.EmitNounwindRuntimeCall(CGM.IsPlatformVersionAtLeastFn, Args);
return CGF.Builder.CreateICmpNE(Check,
llvm::Constant::getNullValue(CGM.Int32Ty));
3984 | } |
3985 | |
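/// Emit the runtime check behind @available / __builtin_available: Darwin
/// targets use __isPlatformVersionAtLeast, everything else falls back to
/// __isOSVersionAtLeast.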
3986 | llvm::Value * |
3987 | CodeGenFunction::EmitBuiltinAvailable(const VersionTuple &Version) { |
3988 | // Darwin uses the new __isPlatformVersionAtLeast family of routines. |
3989 | if (CGM.getTarget().getTriple().isOSDarwin()) |
return emitIsPlatformVersionAtLeast(*this, Version);
3991 | |
3992 | if (!CGM.IsOSVersionAtLeastFn) { |
llvm::FunctionType *FTy =
llvm::FunctionType::get(Int32Ty, {Int32Ty, Int32Ty, Int32Ty}, false);
CGM.IsOSVersionAtLeastFn =
CGM.CreateRuntimeFunction(FTy, "__isOSVersionAtLeast");
3997 | } |
3998 | |
3999 | std::optional<unsigned> Min = Version.getMinor(), |
4000 | SMin = Version.getSubminor(); |
4001 | llvm::Value *Args[] = { |
llvm::ConstantInt::get(CGM.Int32Ty, Version.getMajor()),
llvm::ConstantInt::get(CGM.Int32Ty, Min.value_or(0)),
llvm::ConstantInt::get(CGM.Int32Ty, SMin.value_or(0))};
4005 | |
4006 | llvm::Value *CallRes = |
EmitNounwindRuntimeCall(CGM.IsOSVersionAtLeastFn, Args);
4008 | |
return Builder.CreateICmpNE(CallRes, llvm::Constant::getNullValue(Int32Ty));
4010 | } |
4011 | |
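/// Returns true if @available checks still require linking Foundation on
/// this target; newer OS versions (macOS 10.15, iOS/tvOS 13, watchOS 6)
/// dropped that dependency.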
4012 | static bool isFoundationNeededForDarwinAvailabilityCheck( |
4013 | const llvm::Triple &TT, const VersionTuple &TargetVersion) { |
4014 | VersionTuple FoundationDroppedInVersion; |
4015 | switch (TT.getOS()) { |
4016 | case llvm::Triple::IOS: |
4017 | case llvm::Triple::TvOS: |
4018 | FoundationDroppedInVersion = VersionTuple(/*Major=*/13); |
4019 | break; |
4020 | case llvm::Triple::WatchOS: |
4021 | FoundationDroppedInVersion = VersionTuple(/*Major=*/6); |
4022 | break; |
4023 | case llvm::Triple::Darwin: |
4024 | case llvm::Triple::MacOSX: |
4025 | FoundationDroppedInVersion = VersionTuple(/*Major=*/10, /*Minor=*/15); |
4026 | break; |
4027 | case llvm::Triple::XROS: |
4028 | // XROS doesn't need Foundation. |
4029 | return false; |
4030 | case llvm::Triple::DriverKit: |
4031 | // DriverKit doesn't need Foundation. |
4032 | return false; |
4033 | default: |
4034 | llvm_unreachable("Unexpected OS" ); |
4035 | } |
4036 | return TargetVersion < FoundationDroppedInVersion; |
4037 | } |
4038 | |
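/// If any @available checks were emitted on Darwin, ensure CoreFoundation is
/// actually linked: add a "-framework CoreFoundation" linker option and emit
/// a hidden helper that references CFBundleGetVersionNumber.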
4039 | void CodeGenModule::emitAtAvailableLinkGuard() { |
4040 | if (!IsPlatformVersionAtLeastFn) |
4041 | return; |
4042 | // @available requires CoreFoundation only on Darwin. |
4043 | if (!Target.getTriple().isOSDarwin()) |
4044 | return; |
4045 | // @available doesn't need Foundation on macOS 10.15+, iOS/tvOS 13+, or |
4046 | // watchOS 6+. |
4047 | if (!isFoundationNeededForDarwinAvailabilityCheck( |
Target.getTriple(), Target.getPlatformMinVersion()))
4049 | return; |
// Add -framework CoreFoundation to the linker commands. We still want to
// emit the CoreFoundation reference below as well: if CoreFoundation were
// otherwise unused in the code, the linker would not actually link the
// framework.
4054 | auto &Context = getLLVMContext(); |
llvm::Metadata *Args[2] = {llvm::MDString::get(Context, "-framework"),
llvm::MDString::get(Context, "CoreFoundation")};
LinkerOptionsMetadata.push_back(llvm::MDNode::get(Context, Args));
4058 | // Emit a reference to a symbol from CoreFoundation to ensure that |
4059 | // CoreFoundation is linked into the final binary. |
llvm::FunctionType *FTy =
llvm::FunctionType::get(Int32Ty, {VoidPtrTy}, false);
llvm::FunctionCallee CFFunc =
CreateRuntimeFunction(FTy, "CFBundleGetVersionNumber");
4064 | |
llvm::FunctionType *CheckFTy = llvm::FunctionType::get(VoidTy, {}, false);
llvm::FunctionCallee CFLinkCheckFuncRef = CreateRuntimeFunction(
CheckFTy, "__clang_at_available_requires_core_foundation_framework",
llvm::AttributeList(), /*Local=*/true);
4069 | llvm::Function *CFLinkCheckFunc = |
cast<llvm::Function>(CFLinkCheckFuncRef.getCallee()->stripPointerCasts());
4071 | if (CFLinkCheckFunc->empty()) { |
4072 | CFLinkCheckFunc->setLinkage(llvm::GlobalValue::LinkOnceAnyLinkage); |
4073 | CFLinkCheckFunc->setVisibility(llvm::GlobalValue::HiddenVisibility); |
4074 | CodeGenFunction CGF(*this); |
CGF.Builder.SetInsertPoint(CGF.createBasicBlock("", CFLinkCheckFunc));
CGF.EmitNounwindRuntimeCall(CFFunc,
llvm::Constant::getNullValue(VoidPtrTy));
4078 | CGF.Builder.CreateUnreachable(); |
addCompilerUsedGlobal(CFLinkCheckFunc);
4080 | } |
4081 | } |
4082 | |
4083 | CGObjCRuntime::~CGObjCRuntime() {} |
4084 | |