1 | //===- llvm/IR/Metadata.h - Metadata definitions ----------------*- C++ -*-===// |
2 | // |
3 | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. |
4 | // See https://llvm.org/LICENSE.txt for license information. |
5 | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception |
6 | // |
7 | //===----------------------------------------------------------------------===// |
8 | // |
9 | /// @file |
10 | /// This file contains the declarations for metadata subclasses. |
11 | /// They represent the different flavors of metadata that live in LLVM. |
12 | // |
13 | //===----------------------------------------------------------------------===// |
14 | |
15 | #ifndef LLVM_IR_METADATA_H |
16 | #define LLVM_IR_METADATA_H |
17 | |
18 | #include "llvm/ADT/ArrayRef.h" |
19 | #include "llvm/ADT/DenseMap.h" |
20 | #include "llvm/ADT/DenseMapInfo.h" |
21 | #include "llvm/ADT/PointerUnion.h" |
22 | #include "llvm/ADT/SmallVector.h" |
23 | #include "llvm/ADT/StringRef.h" |
24 | #include "llvm/ADT/ilist_node.h" |
25 | #include "llvm/ADT/iterator_range.h" |
26 | #include "llvm/IR/Constant.h" |
27 | #include "llvm/IR/LLVMContext.h" |
28 | #include "llvm/IR/Value.h" |
29 | #include "llvm/Support/CBindingWrapping.h" |
30 | #include "llvm/Support/Casting.h" |
31 | #include "llvm/Support/ErrorHandling.h" |
32 | #include <cassert> |
33 | #include <cstddef> |
34 | #include <cstdint> |
35 | #include <iterator> |
36 | #include <memory> |
37 | #include <string> |
38 | #include <type_traits> |
39 | #include <utility> |
40 | |
41 | namespace llvm { |
42 | |
43 | class Module; |
44 | class ModuleSlotTracker; |
45 | class raw_ostream; |
46 | class DPValue; |
47 | template <typename T> class StringMapEntry; |
48 | template <typename ValueTy> class StringMapEntryStorage; |
49 | class Type; |
50 | |
/// Well-known constants shared with the debug-info machinery.
enum LLVMConstants : uint32_t {
  DEBUG_METADATA_VERSION = 3 // Current debug info version number.
};

/// Magic number in the value profile metadata showing a target has been
/// promoted for the instruction and shouldn't be promoted again.
const uint64_t NOMORE_ICP_MAGICNUM = -1;
58 | |
59 | /// Root of the metadata hierarchy. |
60 | /// |
61 | /// This is a root class for typeless data in the IR. |
class Metadata {
  friend class ReplaceableMetadataImpl;

  /// RTTI.
  const unsigned char SubclassID;

protected:
  /// Active type of storage.
  enum StorageType { Uniqued, Distinct, Temporary };

  /// Storage flag for non-uniqued, otherwise unowned, metadata.
  unsigned char Storage : 7;

  // Spare storage handed to subclasses; packed into the bit-field/tail bytes
  // so the whole object stays within the 8 bytes asserted in the constructor.
  unsigned char SubclassData1 : 1;
  unsigned short SubclassData16 = 0;
  unsigned SubclassData32 = 0;

public:
  /// One enumerator per leaf metadata class, generated from Metadata.def.
  /// Used as the RTTI tag stored in \a SubclassID.
  enum MetadataKind {
#define HANDLE_METADATA_LEAF(CLASS) CLASS##Kind,
#include "llvm/IR/Metadata.def"
  };

protected:
  Metadata(unsigned ID, StorageType Storage)
      : SubclassID(ID), Storage(Storage), SubclassData1(false) {
    static_assert(sizeof(*this) == 8, "Metadata fields poorly packed");
  }

  // Non-virtual: deletion always happens through the concrete subclass.
  ~Metadata() = default;

  /// Default handling of a changed operand, which asserts.
  ///
  /// If subclasses pass themselves in as owners to a tracking node reference,
  /// they must provide an implementation of this method.
  void handleChangedOperand(void *, Metadata *) {
    llvm_unreachable("Unimplemented in Metadata subclass");
  }

public:
  /// Return the RTTI tag (a \a MetadataKind value) for this object.
  unsigned getMetadataID() const { return SubclassID; }

  /// User-friendly dump.
  ///
  /// If \c M is provided, metadata nodes will be numbered canonically;
  /// otherwise, pointer addresses are substituted.
  ///
  /// Note: this uses an explicit overload instead of default arguments so that
  /// the nullptr version is easy to call from a debugger.
  ///
  /// @{
  void dump() const;
  void dump(const Module *M) const;
  /// @}

  /// Print.
  ///
  /// Prints definition of \c this.
  ///
  /// If \c M is provided, metadata nodes will be numbered canonically;
  /// otherwise, pointer addresses are substituted.
  /// @{
  void print(raw_ostream &OS, const Module *M = nullptr,
             bool IsForDebug = false) const;
  void print(raw_ostream &OS, ModuleSlotTracker &MST, const Module *M = nullptr,
             bool IsForDebug = false) const;
  /// @}

  /// Print as operand.
  ///
  /// Prints reference of \c this.
  ///
  /// If \c M is provided, metadata nodes will be numbered canonically;
  /// otherwise, pointer addresses are substituted.
  /// @{
  void printAsOperand(raw_ostream &OS, const Module *M = nullptr) const;
  void printAsOperand(raw_ostream &OS, ModuleSlotTracker &MST,
                      const Module *M = nullptr) const;
  /// @}
};
142 | |
143 | // Create wrappers for C Binding types (see CBindingWrapping.h). |
144 | DEFINE_ISA_CONVERSION_FUNCTIONS(Metadata, LLVMMetadataRef) |
145 | |
146 | // Specialized opaque metadata conversions. |
147 | inline Metadata **unwrap(LLVMMetadataRef *MDs) { |
148 | return reinterpret_cast<Metadata**>(MDs); |
149 | } |
150 | |
// Forward-declare every metadata class listed in Metadata.def.
#define HANDLE_METADATA(CLASS) class CLASS;
#include "llvm/IR/Metadata.def"

// Provide specializations of isa so that we don't need definitions of
// subclasses to see if the metadata is a subclass.
#define HANDLE_METADATA_LEAF(CLASS)                                            \
  template <> struct isa_impl<CLASS, Metadata> {                               \
    static inline bool doit(const Metadata &MD) {                              \
      return MD.getMetadataID() == Metadata::CLASS##Kind;                      \
    }                                                                          \
  };
#include "llvm/IR/Metadata.def"
163 | |
/// Stream a definition of \p MD to \p OS via Metadata::print, returning the
/// stream for chaining.
inline raw_ostream &operator<<(raw_ostream &OS, const Metadata &MD) {
  MD.print(OS);
  return OS;
}
168 | |
169 | /// Metadata wrapper in the Value hierarchy. |
170 | /// |
171 | /// A member of the \a Value hierarchy to represent a reference to metadata. |
172 | /// This allows, e.g., intrinsics to have metadata as operands. |
173 | /// |
174 | /// Notably, this is the only thing in either hierarchy that is allowed to |
175 | /// reference \a LocalAsMetadata. |
class MetadataAsValue : public Value {
  friend class ReplaceableMetadataImpl;
  friend class LLVMContextImpl;

  // The wrapped metadata; null only during context teardown (see dropUse()).
  Metadata *MD;

  MetadataAsValue(Type *Ty, Metadata *MD);

  /// Drop use of metadata (during teardown).
  void dropUse() { MD = nullptr; }

public:
  ~MetadataAsValue();

  /// Get (or create) the uniqued wrapper for \p MD in \p Context.
  static MetadataAsValue *get(LLVMContext &Context, Metadata *MD);
  /// Like get(), but returns null instead of creating a new wrapper.
  static MetadataAsValue *getIfExists(LLVMContext &Context, Metadata *MD);

  Metadata *getMetadata() const { return MD; }

  static bool classof(const Value *V) {
    return V->getValueID() == MetadataAsValueVal;
  }

private:
  /// Callback invoked when the wrapped metadata is RAUW'ed or deleted.
  void handleChangedMetadata(Metadata *MD);
  // Register/unregister this wrapper with the metadata tracking machinery.
  void track();
  void untrack();
};
204 | |
205 | /// Base class for tracking ValueAsMetadata/DIArgLists with user lookups and |
206 | /// Owner callbacks outside of ValueAsMetadata. |
207 | /// |
208 | /// Currently only inherited by DPValue; if other classes need to use it, then |
209 | /// a SubclassID will need to be added (either as a new field or by making |
210 | /// DebugValue into a PointerIntUnion) to discriminate between the subclasses in |
211 | /// lookup and callback handling. |
212 | class DebugValueUser { |
213 | protected: |
214 | // Capacity to store 3 debug values. |
215 | // TODO: Not all DebugValueUser instances need all 3 elements, if we |
216 | // restructure the DPValue class then we can template parameterize this array |
217 | // size. |
218 | std::array<Metadata *, 3> DebugValues; |
219 | |
220 | ArrayRef<Metadata *> getDebugValues() const { return DebugValues; } |
221 | |
222 | public: |
223 | DPValue *getUser(); |
224 | const DPValue *getUser() const; |
225 | /// To be called by ReplaceableMetadataImpl::replaceAllUsesWith, where `Old` |
226 | /// is a pointer to one of the pointers in `DebugValues` (so should be type |
227 | /// Metadata**), and `NewDebugValue` is the new Metadata* that is replacing |
228 | /// *Old. |
229 | /// For manually replacing elements of DebugValues, |
230 | /// `resetDebugValue(Idx, NewDebugValue)` should be used instead. |
231 | void handleChangedValue(void *Old, Metadata *NewDebugValue); |
232 | DebugValueUser() = default; |
233 | explicit DebugValueUser(std::array<Metadata *, 3> DebugValues) |
234 | : DebugValues(DebugValues) { |
235 | trackDebugValues(); |
236 | } |
237 | DebugValueUser(DebugValueUser &&X) { |
238 | DebugValues = X.DebugValues; |
239 | retrackDebugValues(X); |
240 | } |
241 | DebugValueUser(const DebugValueUser &X) { |
242 | DebugValues = X.DebugValues; |
243 | trackDebugValues(); |
244 | } |
245 | |
246 | DebugValueUser &operator=(DebugValueUser &&X) { |
247 | if (&X == this) |
248 | return *this; |
249 | |
250 | untrackDebugValues(); |
251 | DebugValues = X.DebugValues; |
252 | retrackDebugValues(X); |
253 | return *this; |
254 | } |
255 | |
256 | DebugValueUser &operator=(const DebugValueUser &X) { |
257 | if (&X == this) |
258 | return *this; |
259 | |
260 | untrackDebugValues(); |
261 | DebugValues = X.DebugValues; |
262 | trackDebugValues(); |
263 | return *this; |
264 | } |
265 | |
266 | ~DebugValueUser() { untrackDebugValues(); } |
267 | |
268 | void resetDebugValues() { |
269 | untrackDebugValues(); |
270 | DebugValues.fill(u: nullptr); |
271 | } |
272 | |
273 | void resetDebugValue(size_t Idx, Metadata *DebugValue) { |
274 | assert(Idx < 3 && "Invalid debug value index." ); |
275 | untrackDebugValue(Idx); |
276 | DebugValues[Idx] = DebugValue; |
277 | trackDebugValue(Idx); |
278 | } |
279 | |
280 | bool operator==(const DebugValueUser &X) const { |
281 | return DebugValues == X.DebugValues; |
282 | } |
283 | bool operator!=(const DebugValueUser &X) const { |
284 | return DebugValues != X.DebugValues; |
285 | } |
286 | |
287 | private: |
288 | void trackDebugValue(size_t Idx); |
289 | void trackDebugValues(); |
290 | |
291 | void untrackDebugValue(size_t Idx); |
292 | void untrackDebugValues(); |
293 | |
294 | void retrackDebugValues(DebugValueUser &X); |
295 | }; |
296 | |
297 | /// API for tracking metadata references through RAUW and deletion. |
298 | /// |
299 | /// Shared API for updating \a Metadata pointers in subclasses that support |
300 | /// RAUW. |
301 | /// |
302 | /// This API is not meant to be used directly. See \a TrackingMDRef for a |
303 | /// user-friendly tracking reference. |
304 | class MetadataTracking { |
305 | public: |
306 | /// Track the reference to metadata. |
307 | /// |
308 | /// Register \c MD with \c *MD, if the subclass supports tracking. If \c *MD |
309 | /// gets RAUW'ed, \c MD will be updated to the new address. If \c *MD gets |
310 | /// deleted, \c MD will be set to \c nullptr. |
311 | /// |
312 | /// If tracking isn't supported, \c *MD will not change. |
313 | /// |
314 | /// \return true iff tracking is supported by \c MD. |
315 | static bool track(Metadata *&MD) { |
316 | return track(Ref: &MD, MD&: *MD, Owner: static_cast<Metadata *>(nullptr)); |
317 | } |
318 | |
319 | /// Track the reference to metadata for \a Metadata. |
320 | /// |
321 | /// As \a track(Metadata*&), but with support for calling back to \c Owner to |
322 | /// tell it that its operand changed. This could trigger \c Owner being |
323 | /// re-uniqued. |
324 | static bool track(void *Ref, Metadata &MD, Metadata &Owner) { |
325 | return track(Ref, MD, Owner: &Owner); |
326 | } |
327 | |
328 | /// Track the reference to metadata for \a MetadataAsValue. |
329 | /// |
330 | /// As \a track(Metadata*&), but with support for calling back to \c Owner to |
331 | /// tell it that its operand changed. This could trigger \c Owner being |
332 | /// re-uniqued. |
333 | static bool track(void *Ref, Metadata &MD, MetadataAsValue &Owner) { |
334 | return track(Ref, MD, Owner: &Owner); |
335 | } |
336 | |
337 | /// Track the reference to metadata for \a DebugValueUser. |
338 | /// |
339 | /// As \a track(Metadata*&), but with support for calling back to \c Owner to |
340 | /// tell it that its operand changed. This could trigger \c Owner being |
341 | /// re-uniqued. |
342 | static bool track(void *Ref, Metadata &MD, DebugValueUser &Owner) { |
343 | return track(Ref, MD, Owner: &Owner); |
344 | } |
345 | |
346 | /// Stop tracking a reference to metadata. |
347 | /// |
348 | /// Stops \c *MD from tracking \c MD. |
349 | static void untrack(Metadata *&MD) { untrack(Ref: &MD, MD&: *MD); } |
350 | static void untrack(void *Ref, Metadata &MD); |
351 | |
352 | /// Move tracking from one reference to another. |
353 | /// |
354 | /// Semantically equivalent to \c untrack(MD) followed by \c track(New), |
355 | /// except that ownership callbacks are maintained. |
356 | /// |
357 | /// Note: it is an error if \c *MD does not equal \c New. |
358 | /// |
359 | /// \return true iff tracking is supported by \c MD. |
360 | static bool retrack(Metadata *&MD, Metadata *&New) { |
361 | return retrack(Ref: &MD, MD&: *MD, New: &New); |
362 | } |
363 | static bool retrack(void *Ref, Metadata &MD, void *New); |
364 | |
365 | /// Check whether metadata is replaceable. |
366 | static bool isReplaceable(const Metadata &MD); |
367 | |
368 | using OwnerTy = PointerUnion<MetadataAsValue *, Metadata *, DebugValueUser *>; |
369 | |
370 | private: |
371 | /// Track a reference to metadata for an owner. |
372 | /// |
373 | /// Generalized version of tracking. |
374 | static bool track(void *Ref, Metadata &MD, OwnerTy Owner); |
375 | }; |
376 | |
377 | /// Shared implementation of use-lists for replaceable metadata. |
378 | /// |
379 | /// Most metadata cannot be RAUW'ed. This is a shared implementation of |
380 | /// use-lists and associated API for the three that support it ( |
381 | /// \a ValueAsMetadata, \a TempMDNode, and \a DIArgList). |
class ReplaceableMetadataImpl {
  friend class MetadataTracking;

public:
  using OwnerTy = MetadataTracking::OwnerTy;

private:
  LLVMContext &Context;
  // Monotonically increasing insertion index so uses can be visited in a
  // deterministic order.
  uint64_t NextIndex = 0;
  // Maps each tracking reference (a Metadata** or equivalent) to its owner
  // and insertion index.
  SmallDenseMap<void *, std::pair<OwnerTy, uint64_t>, 4> UseMap;

public:
  ReplaceableMetadataImpl(LLVMContext &Context) : Context(Context) {}

  ~ReplaceableMetadataImpl() {
    assert(UseMap.empty() && "Cannot destroy in-use replaceable metadata");
  }

  LLVMContext &getContext() const { return Context; }

  /// Replace all uses of this with MD.
  ///
  /// Replace all uses of this with \c MD, which is allowed to be null.
  void replaceAllUsesWith(Metadata *MD);
  /// Replace all uses of the constant with Undef in debug info metadata
  static void SalvageDebugInfo(const Constant &C);
  /// Returns the list of all DIArgList users of this.
  SmallVector<Metadata *> getAllArgListUsers();
  /// Returns the list of all DPValue users of this.
  SmallVector<DPValue *> getAllDPValueUsers();

  /// Resolve all uses of this.
  ///
  /// Resolve all uses of this, turning off RAUW permanently. If \c
  /// ResolveUsers, call \a MDNode::resolve() on any users whose last operand
  /// is resolved.
  void resolveAllUses(bool ResolveUsers = true);

  unsigned getNumUses() const { return UseMap.size(); }

private:
  // Add/remove/move an entry in UseMap on behalf of MetadataTracking.
  void addRef(void *Ref, OwnerTy Owner);
  void dropRef(void *Ref);
  void moveRef(void *Ref, void *New, const Metadata &MD);

  /// Lazily construct RAUW support on MD.
  ///
  /// If this is an unresolved MDNode, RAUW support will be created on-demand.
  /// ValueAsMetadata always has RAUW support.
  static ReplaceableMetadataImpl *getOrCreate(Metadata &MD);

  /// Get RAUW support on MD, if it exists.
  static ReplaceableMetadataImpl *getIfExists(Metadata &MD);

  /// Check whether this node will support RAUW.
  ///
  /// Returns \c true unless getOrCreate() would return null.
  static bool isReplaceable(const Metadata &MD);
};
441 | |
442 | /// Value wrapper in the Metadata hierarchy. |
443 | /// |
444 | /// This is a custom value handle that allows other metadata to refer to |
445 | /// classes in the Value hierarchy. |
446 | /// |
447 | /// Because of full uniquing support, each value is only wrapped by a single \a |
448 | /// ValueAsMetadata object, so the lookup maps are far more efficient than |
449 | /// those using ValueHandleBase. |
450 | class ValueAsMetadata : public Metadata, ReplaceableMetadataImpl { |
451 | friend class ReplaceableMetadataImpl; |
452 | friend class LLVMContextImpl; |
453 | |
454 | Value *V; |
455 | |
456 | /// Drop users without RAUW (during teardown). |
457 | void dropUsers() { |
458 | ReplaceableMetadataImpl::resolveAllUses(/* ResolveUsers */ ResolveUsers: false); |
459 | } |
460 | |
461 | protected: |
462 | ValueAsMetadata(unsigned ID, Value *V) |
463 | : Metadata(ID, Uniqued), ReplaceableMetadataImpl(V->getContext()), V(V) { |
464 | assert(V && "Expected valid value" ); |
465 | } |
466 | |
467 | ~ValueAsMetadata() = default; |
468 | |
469 | public: |
470 | static ValueAsMetadata *get(Value *V); |
471 | |
472 | static ConstantAsMetadata *getConstant(Value *C) { |
473 | return cast<ConstantAsMetadata>(Val: get(V: C)); |
474 | } |
475 | |
476 | static LocalAsMetadata *getLocal(Value *Local) { |
477 | return cast<LocalAsMetadata>(Val: get(V: Local)); |
478 | } |
479 | |
480 | static ValueAsMetadata *getIfExists(Value *V); |
481 | |
482 | static ConstantAsMetadata *getConstantIfExists(Value *C) { |
483 | return cast_or_null<ConstantAsMetadata>(Val: getIfExists(V: C)); |
484 | } |
485 | |
486 | static LocalAsMetadata *getLocalIfExists(Value *Local) { |
487 | return cast_or_null<LocalAsMetadata>(Val: getIfExists(V: Local)); |
488 | } |
489 | |
490 | Value *getValue() const { return V; } |
491 | Type *getType() const { return V->getType(); } |
492 | LLVMContext &getContext() const { return V->getContext(); } |
493 | |
494 | SmallVector<Metadata *> getAllArgListUsers() { |
495 | return ReplaceableMetadataImpl::getAllArgListUsers(); |
496 | } |
497 | SmallVector<DPValue *> getAllDPValueUsers() { |
498 | return ReplaceableMetadataImpl::getAllDPValueUsers(); |
499 | } |
500 | |
501 | static void handleDeletion(Value *V); |
502 | static void handleRAUW(Value *From, Value *To); |
503 | |
504 | protected: |
505 | /// Handle collisions after \a Value::replaceAllUsesWith(). |
506 | /// |
507 | /// RAUW isn't supported directly for \a ValueAsMetadata, but if the wrapped |
508 | /// \a Value gets RAUW'ed and the target already exists, this is used to |
509 | /// merge the two metadata nodes. |
510 | void replaceAllUsesWith(Metadata *MD) { |
511 | ReplaceableMetadataImpl::replaceAllUsesWith(MD); |
512 | } |
513 | |
514 | public: |
515 | static bool classof(const Metadata *MD) { |
516 | return MD->getMetadataID() == LocalAsMetadataKind || |
517 | MD->getMetadataID() == ConstantAsMetadataKind; |
518 | } |
519 | }; |
520 | |
521 | class ConstantAsMetadata : public ValueAsMetadata { |
522 | friend class ValueAsMetadata; |
523 | |
524 | ConstantAsMetadata(Constant *C) |
525 | : ValueAsMetadata(ConstantAsMetadataKind, C) {} |
526 | |
527 | public: |
528 | static ConstantAsMetadata *get(Constant *C) { |
529 | return ValueAsMetadata::getConstant(C); |
530 | } |
531 | |
532 | static ConstantAsMetadata *getIfExists(Constant *C) { |
533 | return ValueAsMetadata::getConstantIfExists(C); |
534 | } |
535 | |
536 | Constant *getValue() const { |
537 | return cast<Constant>(Val: ValueAsMetadata::getValue()); |
538 | } |
539 | |
540 | static bool classof(const Metadata *MD) { |
541 | return MD->getMetadataID() == ConstantAsMetadataKind; |
542 | } |
543 | }; |
544 | |
/// Metadata wrapper around a non-constant (function-local) \a Value.
class LocalAsMetadata : public ValueAsMetadata {
  friend class ValueAsMetadata;

  LocalAsMetadata(Value *Local)
      : ValueAsMetadata(LocalAsMetadataKind, Local) {
    assert(!isa<Constant>(Local) && "Expected local value");
  }

public:
  static LocalAsMetadata *get(Value *Local) {
    return ValueAsMetadata::getLocal(Local);
  }

  static LocalAsMetadata *getIfExists(Value *Local) {
    return ValueAsMetadata::getLocalIfExists(Local);
  }

  static bool classof(const Metadata *MD) {
    return MD->getMetadataID() == LocalAsMetadataKind;
  }
};
566 | |
567 | /// Transitional API for extracting constants from Metadata. |
568 | /// |
569 | /// This namespace contains transitional functions for metadata that points to |
570 | /// \a Constants. |
571 | /// |
572 | /// In prehistory -- when metadata was a subclass of \a Value -- \a MDNode |
573 | /// operands could refer to any \a Value. There's was a lot of code like this: |
574 | /// |
575 | /// \code |
576 | /// MDNode *N = ...; |
577 | /// auto *CI = dyn_cast<ConstantInt>(N->getOperand(2)); |
578 | /// \endcode |
579 | /// |
580 | /// Now that \a Value and \a Metadata are in separate hierarchies, maintaining |
581 | /// the semantics for \a isa(), \a cast(), \a dyn_cast() (etc.) requires three |
582 | /// steps: cast in the \a Metadata hierarchy, extraction of the \a Value, and |
583 | /// cast in the \a Value hierarchy. Besides creating boiler-plate, this |
584 | /// requires subtle control flow changes. |
585 | /// |
586 | /// The end-goal is to create a new type of metadata, called (e.g.) \a MDInt, |
587 | /// so that metadata can refer to numbers without traversing a bridge to the \a |
588 | /// Value hierarchy. In this final state, the code above would look like this: |
589 | /// |
590 | /// \code |
591 | /// MDNode *N = ...; |
592 | /// auto *MI = dyn_cast<MDInt>(N->getOperand(2)); |
593 | /// \endcode |
594 | /// |
595 | /// The API in this namespace supports the transition. \a MDInt doesn't exist |
596 | /// yet, and even once it does, changing each metadata schema to use it is its |
597 | /// own mini-project. In the meantime this API prevents us from introducing |
598 | /// complex and bug-prone control flow that will disappear in the end. In |
599 | /// particular, the above code looks like this: |
600 | /// |
601 | /// \code |
602 | /// MDNode *N = ...; |
603 | /// auto *CI = mdconst::dyn_extract<ConstantInt>(N->getOperand(2)); |
604 | /// \endcode |
605 | /// |
606 | /// The full set of provided functions includes: |
607 | /// |
608 | /// mdconst::hasa <=> isa |
609 | /// mdconst::extract <=> cast |
610 | /// mdconst::extract_or_null <=> cast_or_null |
611 | /// mdconst::dyn_extract <=> dyn_cast |
612 | /// mdconst::dyn_extract_or_null <=> dyn_cast_or_null |
613 | /// |
614 | /// The target of the cast must be a subclass of \a Constant. |
615 | namespace mdconst { |
616 | |
617 | namespace detail { |
618 | |
// Declared-only helper used inside unevaluated contexts (sizeof) below.
template <class T> T &make();
// SFINAE trait: true iff `*m` (for an M) converts to Result via static_cast.
template <class T, class Result> struct HasDereference {
  using Yes = char[1];
  using No = char[2];
  template <size_t N> struct SFINAE {};

  template <class U, class V>
  static Yes &hasDereference(SFINAE<sizeof(static_cast<V>(*make<U>()))> * = 0);
  template <class U, class V> static No &hasDereference(...);

  static const bool value =
      sizeof(hasDereference<T, Result>(nullptr)) == sizeof(Yes);
};
// Valid pointer-like argument: V derives from Constant and M dereferences to
// a Metadata.
template <class V, class M> struct IsValidPointer {
  static const bool value = std::is_base_of<Constant, V>::value &&
                            HasDereference<M, const Metadata &>::value;
};
// Valid reference argument: V derives from Constant and M converts to a
// const Metadata&.
template <class V, class M> struct IsValidReference {
  static const bool value = std::is_base_of<Constant, V>::value &&
                            std::is_convertible<M, const Metadata &>::value;
};
640 | |
641 | } // end namespace detail |
642 | |
643 | /// Check whether Metadata has a Value. |
644 | /// |
645 | /// As an analogue to \a isa(), check whether \c MD has an \a Value inside of |
646 | /// type \c X. |
647 | template <class X, class Y> |
648 | inline std::enable_if_t<detail::IsValidPointer<X, Y>::value, bool> |
649 | hasa(Y &&MD) { |
650 | assert(MD && "Null pointer sent into hasa" ); |
651 | if (auto *V = dyn_cast<ConstantAsMetadata>(MD)) |
652 | return isa<X>(V->getValue()); |
653 | return false; |
654 | } |
655 | template <class X, class Y> |
656 | inline std::enable_if_t<detail::IsValidReference<X, Y &>::value, bool> |
657 | hasa(Y &MD) { |
658 | return hasa(&MD); |
659 | } |
660 | |
661 | /// Extract a Value from Metadata. |
662 | /// |
663 | /// As an analogue to \a cast(), extract the \a Value subclass \c X from \c MD. |
664 | template <class X, class Y> |
665 | inline std::enable_if_t<detail::IsValidPointer<X, Y>::value, X *> |
666 | (Y &&MD) { |
667 | return cast<X>(cast<ConstantAsMetadata>(MD)->getValue()); |
668 | } |
669 | template <class X, class Y> |
670 | inline std::enable_if_t<detail::IsValidReference<X, Y &>::value, X *> |
671 | (Y &MD) { |
672 | return extract(&MD); |
673 | } |
674 | |
675 | /// Extract a Value from Metadata, allowing null. |
676 | /// |
677 | /// As an analogue to \a cast_or_null(), extract the \a Value subclass \c X |
678 | /// from \c MD, allowing \c MD to be null. |
679 | template <class X, class Y> |
680 | inline std::enable_if_t<detail::IsValidPointer<X, Y>::value, X *> |
681 | (Y &&MD) { |
682 | if (auto *V = cast_or_null<ConstantAsMetadata>(MD)) |
683 | return cast<X>(V->getValue()); |
684 | return nullptr; |
685 | } |
686 | |
687 | /// Extract a Value from Metadata, if any. |
688 | /// |
689 | /// As an analogue to \a dyn_cast_or_null(), extract the \a Value subclass \c X |
690 | /// from \c MD, return null if \c MD doesn't contain a \a Value or if the \a |
691 | /// Value it does contain is of the wrong subclass. |
692 | template <class X, class Y> |
693 | inline std::enable_if_t<detail::IsValidPointer<X, Y>::value, X *> |
694 | (Y &&MD) { |
695 | if (auto *V = dyn_cast<ConstantAsMetadata>(MD)) |
696 | return dyn_cast<X>(V->getValue()); |
697 | return nullptr; |
698 | } |
699 | |
700 | /// Extract a Value from Metadata, if any, allowing null. |
701 | /// |
702 | /// As an analogue to \a dyn_cast_or_null(), extract the \a Value subclass \c X |
703 | /// from \c MD, return null if \c MD doesn't contain a \a Value or if the \a |
704 | /// Value it does contain is of the wrong subclass, allowing \c MD to be null. |
705 | template <class X, class Y> |
706 | inline std::enable_if_t<detail::IsValidPointer<X, Y>::value, X *> |
707 | (Y &&MD) { |
708 | if (auto *V = dyn_cast_or_null<ConstantAsMetadata>(MD)) |
709 | return dyn_cast<X>(V->getValue()); |
710 | return nullptr; |
711 | } |
712 | |
713 | } // end namespace mdconst |
714 | |
715 | //===----------------------------------------------------------------------===// |
716 | /// A single uniqued string. |
717 | /// |
718 | /// These are used to efficiently contain a byte sequence for metadata. |
719 | /// MDString is always unnamed. |
720 | class MDString : public Metadata { |
721 | friend class StringMapEntryStorage<MDString>; |
722 | |
723 | StringMapEntry<MDString> *Entry = nullptr; |
724 | |
725 | MDString() : Metadata(MDStringKind, Uniqued) {} |
726 | |
727 | public: |
728 | MDString(const MDString &) = delete; |
729 | MDString &operator=(MDString &&) = delete; |
730 | MDString &operator=(const MDString &) = delete; |
731 | |
732 | static MDString *get(LLVMContext &Context, StringRef Str); |
733 | static MDString *get(LLVMContext &Context, const char *Str) { |
734 | return get(Context, Str: Str ? StringRef(Str) : StringRef()); |
735 | } |
736 | |
737 | StringRef getString() const; |
738 | |
739 | unsigned getLength() const { return (unsigned)getString().size(); } |
740 | |
741 | using iterator = StringRef::iterator; |
742 | |
743 | /// Pointer to the first byte of the string. |
744 | iterator begin() const { return getString().begin(); } |
745 | |
746 | /// Pointer to one byte past the end of the string. |
747 | iterator end() const { return getString().end(); } |
748 | |
749 | const unsigned char *bytes_begin() const { return getString().bytes_begin(); } |
750 | const unsigned char *bytes_end() const { return getString().bytes_end(); } |
751 | |
752 | /// Methods for support type inquiry through isa, cast, and dyn_cast. |
753 | static bool classof(const Metadata *MD) { |
754 | return MD->getMetadataID() == MDStringKind; |
755 | } |
756 | }; |
757 | |
758 | /// A collection of metadata nodes that might be associated with a |
759 | /// memory access used by the alias-analysis infrastructure. |
760 | struct AAMDNodes { |
761 | explicit AAMDNodes() = default; |
762 | explicit AAMDNodes(MDNode *T, MDNode *TS, MDNode *S, MDNode *N) |
763 | : TBAA(T), TBAAStruct(TS), Scope(S), NoAlias(N) {} |
764 | |
765 | bool operator==(const AAMDNodes &A) const { |
766 | return TBAA == A.TBAA && TBAAStruct == A.TBAAStruct && Scope == A.Scope && |
767 | NoAlias == A.NoAlias; |
768 | } |
769 | |
770 | bool operator!=(const AAMDNodes &A) const { return !(*this == A); } |
771 | |
772 | explicit operator bool() const { |
773 | return TBAA || TBAAStruct || Scope || NoAlias; |
774 | } |
775 | |
776 | /// The tag for type-based alias analysis. |
777 | MDNode *TBAA = nullptr; |
778 | |
779 | /// The tag for type-based alias analysis (tbaa struct). |
780 | MDNode *TBAAStruct = nullptr; |
781 | |
782 | /// The tag for alias scope specification (used with noalias). |
783 | MDNode *Scope = nullptr; |
784 | |
785 | /// The tag specifying the noalias scope. |
786 | MDNode *NoAlias = nullptr; |
787 | |
788 | // Shift tbaa Metadata node to start off bytes later |
789 | static MDNode *shiftTBAA(MDNode *M, size_t off); |
790 | |
791 | // Shift tbaa.struct Metadata node to start off bytes later |
792 | static MDNode *shiftTBAAStruct(MDNode *M, size_t off); |
793 | |
794 | // Extend tbaa Metadata node to apply to a series of bytes of length len. |
795 | // A size of -1 denotes an unknown size. |
796 | static MDNode *extendToTBAA(MDNode *TBAA, ssize_t len); |
797 | |
798 | /// Given two sets of AAMDNodes that apply to the same pointer, |
799 | /// give the best AAMDNodes that are compatible with both (i.e. a set of |
800 | /// nodes whose allowable aliasing conclusions are a subset of those |
801 | /// allowable by both of the inputs). However, for efficiency |
802 | /// reasons, do not create any new MDNodes. |
803 | AAMDNodes intersect(const AAMDNodes &Other) const { |
804 | AAMDNodes Result; |
805 | Result.TBAA = Other.TBAA == TBAA ? TBAA : nullptr; |
806 | Result.TBAAStruct = Other.TBAAStruct == TBAAStruct ? TBAAStruct : nullptr; |
807 | Result.Scope = Other.Scope == Scope ? Scope : nullptr; |
808 | Result.NoAlias = Other.NoAlias == NoAlias ? NoAlias : nullptr; |
809 | return Result; |
810 | } |
811 | |
812 | /// Create a new AAMDNode that describes this AAMDNode after applying a |
813 | /// constant offset to the start of the pointer. |
814 | AAMDNodes shift(size_t Offset) const { |
815 | AAMDNodes Result; |
816 | Result.TBAA = TBAA ? shiftTBAA(M: TBAA, off: Offset) : nullptr; |
817 | Result.TBAAStruct = |
818 | TBAAStruct ? shiftTBAAStruct(M: TBAAStruct, off: Offset) : nullptr; |
819 | Result.Scope = Scope; |
820 | Result.NoAlias = NoAlias; |
821 | return Result; |
822 | } |
823 | |
824 | /// Create a new AAMDNode that describes this AAMDNode after extending it to |
825 | /// apply to a series of bytes of length Len. A size of -1 denotes an unknown |
826 | /// size. |
827 | AAMDNodes extendTo(ssize_t Len) const { |
828 | AAMDNodes Result; |
829 | Result.TBAA = TBAA ? extendToTBAA(TBAA, len: Len) : nullptr; |
830 | // tbaa.struct contains (offset, size, type) triples. Extending the length |
831 | // of the tbaa.struct doesn't require changing this (though more information |
832 | // could be provided by adding more triples at subsequent lengths). |
833 | Result.TBAAStruct = TBAAStruct; |
834 | Result.Scope = Scope; |
835 | Result.NoAlias = NoAlias; |
836 | return Result; |
837 | } |
838 | |
  /// Given two sets of AAMDNodes applying to potentially different locations,
  /// determine the best AAMDNodes that apply to both.
  AAMDNodes merge(const AAMDNodes &Other) const;

  /// Determine the best AAMDNodes after concatenating two different locations
  /// together. Different from `merge`, where different locations should
  /// overlap each other, `concat` puts non-overlapping locations together.
  AAMDNodes concat(const AAMDNodes &Other) const;

  /// Create a new AAMDNode for accessing \p AccessSize bytes of this AAMDNode.
  /// If this AAMDNode has !tbaa.struct and \p AccessSize matches the size of
  /// the field at offset 0, get the TBAA tag describing the accessed field.
  AAMDNodes adjustForAccess(unsigned AccessSize);
};
853 | |
854 | // Specialize DenseMapInfo for AAMDNodes. |
855 | template<> |
856 | struct DenseMapInfo<AAMDNodes> { |
857 | static inline AAMDNodes getEmptyKey() { |
858 | return AAMDNodes(DenseMapInfo<MDNode *>::getEmptyKey(), |
859 | nullptr, nullptr, nullptr); |
860 | } |
861 | |
862 | static inline AAMDNodes getTombstoneKey() { |
863 | return AAMDNodes(DenseMapInfo<MDNode *>::getTombstoneKey(), |
864 | nullptr, nullptr, nullptr); |
865 | } |
866 | |
867 | static unsigned getHashValue(const AAMDNodes &Val) { |
868 | return DenseMapInfo<MDNode *>::getHashValue(PtrVal: Val.TBAA) ^ |
869 | DenseMapInfo<MDNode *>::getHashValue(PtrVal: Val.TBAAStruct) ^ |
870 | DenseMapInfo<MDNode *>::getHashValue(PtrVal: Val.Scope) ^ |
871 | DenseMapInfo<MDNode *>::getHashValue(PtrVal: Val.NoAlias); |
872 | } |
873 | |
874 | static bool isEqual(const AAMDNodes &LHS, const AAMDNodes &RHS) { |
875 | return LHS == RHS; |
876 | } |
877 | }; |
878 | |
879 | /// Tracking metadata reference owned by Metadata. |
880 | /// |
881 | /// Similar to \a TrackingMDRef, but it's expected to be owned by an instance |
882 | /// of \a Metadata, which has the option of registering itself for callbacks to |
883 | /// re-unique itself. |
884 | /// |
885 | /// In particular, this is used by \a MDNode. |
886 | class MDOperand { |
887 | Metadata *MD = nullptr; |
888 | |
889 | public: |
890 | MDOperand() = default; |
891 | MDOperand(const MDOperand &) = delete; |
892 | MDOperand(MDOperand &&Op) { |
893 | MD = Op.MD; |
894 | if (MD) |
895 | (void)MetadataTracking::retrack(MD&: Op.MD, New&: MD); |
896 | Op.MD = nullptr; |
897 | } |
898 | MDOperand &operator=(const MDOperand &) = delete; |
899 | MDOperand &operator=(MDOperand &&Op) { |
900 | MD = Op.MD; |
901 | if (MD) |
902 | (void)MetadataTracking::retrack(MD&: Op.MD, New&: MD); |
903 | Op.MD = nullptr; |
904 | return *this; |
905 | } |
906 | |
907 | // Check if MDOperand is of type MDString and equals `Str`. |
908 | bool equalsStr(StringRef Str) const { |
909 | return isa<MDString>(Val: this->get()) && |
910 | cast<MDString>(Val: this->get())->getString() == Str; |
911 | } |
912 | |
913 | ~MDOperand() { untrack(); } |
914 | |
915 | Metadata *get() const { return MD; } |
916 | operator Metadata *() const { return get(); } |
917 | Metadata *operator->() const { return get(); } |
918 | Metadata &operator*() const { return *get(); } |
919 | |
920 | void reset() { |
921 | untrack(); |
922 | MD = nullptr; |
923 | } |
924 | void reset(Metadata *MD, Metadata *Owner) { |
925 | untrack(); |
926 | this->MD = MD; |
927 | track(Owner); |
928 | } |
929 | |
930 | private: |
931 | void track(Metadata *Owner) { |
932 | if (MD) { |
933 | if (Owner) |
934 | MetadataTracking::track(Ref: this, MD&: *MD, Owner&: *Owner); |
935 | else |
936 | MetadataTracking::track(MD); |
937 | } |
938 | } |
939 | |
940 | void untrack() { |
941 | assert(static_cast<void *>(this) == &MD && "Expected same address" ); |
942 | if (MD) |
943 | MetadataTracking::untrack(MD); |
944 | } |
945 | }; |
946 | |
// Let MDOperand participate transparently in isa/cast/dyn_cast by degrading
// to the wrapped `Metadata *` via the simplify_type machinery.
template <> struct simplify_type<MDOperand> {
  using SimpleType = Metadata *;

  static SimpleType getSimplifiedValue(MDOperand &MD) { return MD.get(); }
};

template <> struct simplify_type<const MDOperand> {
  using SimpleType = Metadata *;

  static SimpleType getSimplifiedValue(const MDOperand &MD) { return MD.get(); }
};
958 | |
959 | /// Pointer to the context, with optional RAUW support. |
960 | /// |
961 | /// Either a raw (non-null) pointer to the \a LLVMContext, or an owned pointer |
962 | /// to \a ReplaceableMetadataImpl (which has a reference to \a LLVMContext). |
963 | class ContextAndReplaceableUses { |
964 | PointerUnion<LLVMContext *, ReplaceableMetadataImpl *> Ptr; |
965 | |
966 | public: |
967 | ContextAndReplaceableUses(LLVMContext &Context) : Ptr(&Context) {} |
968 | ContextAndReplaceableUses( |
969 | std::unique_ptr<ReplaceableMetadataImpl> ReplaceableUses) |
970 | : Ptr(ReplaceableUses.release()) { |
971 | assert(getReplaceableUses() && "Expected non-null replaceable uses" ); |
972 | } |
973 | ContextAndReplaceableUses() = delete; |
974 | ContextAndReplaceableUses(ContextAndReplaceableUses &&) = delete; |
975 | ContextAndReplaceableUses(const ContextAndReplaceableUses &) = delete; |
976 | ContextAndReplaceableUses &operator=(ContextAndReplaceableUses &&) = delete; |
977 | ContextAndReplaceableUses & |
978 | operator=(const ContextAndReplaceableUses &) = delete; |
979 | ~ContextAndReplaceableUses() { delete getReplaceableUses(); } |
980 | |
981 | operator LLVMContext &() { return getContext(); } |
982 | |
983 | /// Whether this contains RAUW support. |
984 | bool hasReplaceableUses() const { |
985 | return isa<ReplaceableMetadataImpl *>(Val: Ptr); |
986 | } |
987 | |
988 | LLVMContext &getContext() const { |
989 | if (hasReplaceableUses()) |
990 | return getReplaceableUses()->getContext(); |
991 | return *cast<LLVMContext *>(Val: Ptr); |
992 | } |
993 | |
994 | ReplaceableMetadataImpl *getReplaceableUses() const { |
995 | if (hasReplaceableUses()) |
996 | return cast<ReplaceableMetadataImpl *>(Val: Ptr); |
997 | return nullptr; |
998 | } |
999 | |
1000 | /// Ensure that this has RAUW support, and then return it. |
1001 | ReplaceableMetadataImpl *getOrCreateReplaceableUses() { |
1002 | if (!hasReplaceableUses()) |
1003 | makeReplaceable(ReplaceableUses: std::make_unique<ReplaceableMetadataImpl>(args&: getContext())); |
1004 | return getReplaceableUses(); |
1005 | } |
1006 | |
1007 | /// Assign RAUW support to this. |
1008 | /// |
1009 | /// Make this replaceable, taking ownership of \c ReplaceableUses (which must |
1010 | /// not be null). |
1011 | void |
1012 | makeReplaceable(std::unique_ptr<ReplaceableMetadataImpl> ReplaceableUses) { |
1013 | assert(ReplaceableUses && "Expected non-null replaceable uses" ); |
1014 | assert(&ReplaceableUses->getContext() == &getContext() && |
1015 | "Expected same context" ); |
1016 | delete getReplaceableUses(); |
1017 | Ptr = ReplaceableUses.release(); |
1018 | } |
1019 | |
1020 | /// Drop RAUW support. |
1021 | /// |
1022 | /// Cede ownership of RAUW support, returning it. |
1023 | std::unique_ptr<ReplaceableMetadataImpl> takeReplaceableUses() { |
1024 | assert(hasReplaceableUses() && "Expected to own replaceable uses" ); |
1025 | std::unique_ptr<ReplaceableMetadataImpl> ReplaceableUses( |
1026 | getReplaceableUses()); |
1027 | Ptr = &ReplaceableUses->getContext(); |
1028 | return ReplaceableUses; |
1029 | } |
1030 | }; |
1031 | |
/// Deleter used by the Temp* unique_ptr typedefs below so that temporary
/// MDNodes are destroyed via MDNode::deleteTemporary().
struct TempMDNodeDeleter {
  inline void operator()(MDNode *Node) const;
};
1035 | |
1036 | #define HANDLE_MDNODE_LEAF(CLASS) \ |
1037 | using Temp##CLASS = std::unique_ptr<CLASS, TempMDNodeDeleter>; |
1038 | #define HANDLE_MDNODE_BRANCH(CLASS) HANDLE_MDNODE_LEAF(CLASS) |
1039 | #include "llvm/IR/Metadata.def" |
1040 | |
1041 | /// Metadata node. |
1042 | /// |
1043 | /// Metadata nodes can be uniqued, like constants, or distinct. Temporary |
1044 | /// metadata nodes (with full support for RAUW) can be used to delay uniquing |
1045 | /// until forward references are known. The basic metadata node is an \a |
1046 | /// MDTuple. |
1047 | /// |
1048 | /// There is limited support for RAUW at construction time. At construction |
1049 | /// time, if any operand is a temporary node (or an unresolved uniqued node, |
1050 | /// which indicates a transitive temporary operand), the node itself will be |
1051 | /// unresolved. As soon as all operands become resolved, it will drop RAUW |
1052 | /// support permanently. |
1053 | /// |
1054 | /// If an unresolved node is part of a cycle, \a resolveCycles() needs |
1055 | /// to be called on some member of the cycle once all temporary nodes have been |
1056 | /// replaced. |
1057 | /// |
1058 | /// MDNodes can be large or small, as well as resizable or non-resizable. |
1059 | /// Large MDNodes' operands are allocated in a separate storage vector, |
1060 | /// whereas small MDNodes' operands are co-allocated. Distinct and temporary |
1061 | /// MDnodes are resizable, but only MDTuples support this capability. |
1062 | /// |
1063 | /// Clients can add operands to resizable MDNodes using push_back(). |
1064 | class MDNode : public Metadata { |
1065 | friend class ReplaceableMetadataImpl; |
1066 | friend class LLVMContextImpl; |
1067 | friend class DIAssignID; |
1068 | |
1069 | /// The header that is coallocated with an MDNode along with its "small" |
1070 | /// operands. It is located immediately before the main body of the node. |
1071 | /// The operands are in turn located immediately before the header. |
1072 | /// For resizable MDNodes, the space for the storage vector is also allocated |
1073 | /// immediately before the header, overlapping with the operands. |
1074 | /// Explicity set alignment because bitfields by default have an |
1075 | /// alignment of 1 on z/OS. |
1076 | struct alignas(alignof(size_t)) { |
1077 | bool : 1; |
1078 | bool : 1; |
1079 | size_t : 4; |
1080 | size_t : 4; |
1081 | size_t : sizeof(size_t) * CHAR_BIT - 10; |
1082 | |
1083 | unsigned = 0; |
1084 | using = SmallVector<MDOperand, 0>; |
1085 | |
1086 | static constexpr size_t = |
1087 | sizeof(LargeStorageVector) / sizeof(MDOperand); |
1088 | static_assert( |
1089 | NumOpsFitInVector * sizeof(MDOperand) == sizeof(LargeStorageVector), |
1090 | "sizeof(LargeStorageVector) must be a multiple of sizeof(MDOperand)" ); |
1091 | |
1092 | static constexpr size_t = 15; |
1093 | |
1094 | static constexpr size_t (unsigned NumOps) { |
1095 | return sizeof(MDOperand) * NumOps; |
1096 | } |
1097 | /// Returns the number of operands the node has space for based on its |
1098 | /// allocation characteristics. |
1099 | static size_t (size_t NumOps, bool IsResizable, bool IsLarge) { |
1100 | return IsLarge ? NumOpsFitInVector |
1101 | : std::max(a: NumOps, b: NumOpsFitInVector * IsResizable); |
1102 | } |
1103 | /// Returns the number of bytes allocated for operands and header. |
1104 | static size_t (StorageType Storage, size_t NumOps) { |
1105 | return getOpSize( |
1106 | NumOps: getSmallSize(NumOps, IsResizable: isResizable(Storage), IsLarge: isLarge(NumOps))) + |
1107 | sizeof(Header); |
1108 | } |
1109 | |
1110 | /// Only temporary and distinct nodes are resizable. |
1111 | static bool (StorageType Storage) { return Storage != Uniqued; } |
1112 | static bool (size_t NumOps) { return NumOps > MaxSmallSize; } |
1113 | |
1114 | size_t () const { |
1115 | return getOpSize(NumOps: SmallSize) + sizeof(Header); |
1116 | } |
1117 | void *() { |
1118 | return reinterpret_cast<char *>(this + 1) - |
1119 | alignTo(Value: getAllocSize(), Align: alignof(uint64_t)); |
1120 | } |
1121 | |
1122 | void *() const { |
1123 | static_assert(alignof(LargeStorageVector) <= alignof(Header), |
1124 | "LargeStorageVector too strongly aligned" ); |
1125 | return reinterpret_cast<char *>(const_cast<Header *>(this)) - |
1126 | sizeof(LargeStorageVector); |
1127 | } |
1128 | |
1129 | void *(); |
1130 | |
1131 | LargeStorageVector &() { |
1132 | assert(IsLarge); |
1133 | return *reinterpret_cast<LargeStorageVector *>(getLargePtr()); |
1134 | } |
1135 | |
1136 | const LargeStorageVector &() const { |
1137 | assert(IsLarge); |
1138 | return *reinterpret_cast<const LargeStorageVector *>(getLargePtr()); |
1139 | } |
1140 | |
1141 | void (size_t NumOps); |
1142 | void (size_t NumOps); |
1143 | void (size_t NumOps); |
1144 | |
1145 | explicit (size_t NumOps, StorageType Storage); |
1146 | (); |
1147 | |
1148 | MutableArrayRef<MDOperand> operands() { |
1149 | if (IsLarge) |
1150 | return getLarge(); |
1151 | return MutableArrayRef( |
1152 | reinterpret_cast<MDOperand *>(this) - SmallSize, SmallNumOps); |
1153 | } |
1154 | |
1155 | ArrayRef<MDOperand> operands() const { |
1156 | if (IsLarge) |
1157 | return getLarge(); |
1158 | return ArrayRef(reinterpret_cast<const MDOperand *>(this) - SmallSize, |
1159 | SmallNumOps); |
1160 | } |
1161 | |
1162 | unsigned getNumOperands() const { |
1163 | if (!IsLarge) |
1164 | return SmallNumOps; |
1165 | return getLarge().size(); |
1166 | } |
1167 | }; |
1168 | |
1169 | Header &() { return *(reinterpret_cast<Header *>(this) - 1); } |
1170 | |
1171 | const Header &() const { |
1172 | return *(reinterpret_cast<const Header *>(this) - 1); |
1173 | } |
1174 | |
1175 | ContextAndReplaceableUses Context; |
1176 | |
1177 | protected: |
1178 | MDNode(LLVMContext &Context, unsigned ID, StorageType Storage, |
1179 | ArrayRef<Metadata *> Ops1, ArrayRef<Metadata *> Ops2 = std::nullopt); |
1180 | ~MDNode() = default; |
1181 | |
1182 | void *operator new(size_t Size, size_t NumOps, StorageType Storage); |
1183 | void operator delete(void *Mem); |
1184 | |
1185 | /// Required by std, but never called. |
1186 | void operator delete(void *, unsigned) { |
1187 | llvm_unreachable("Constructor throws?" ); |
1188 | } |
1189 | |
1190 | /// Required by std, but never called. |
1191 | void operator delete(void *, unsigned, bool) { |
1192 | llvm_unreachable("Constructor throws?" ); |
1193 | } |
1194 | |
1195 | void dropAllReferences(); |
1196 | |
1197 | MDOperand *mutable_begin() { return getHeader().operands().begin(); } |
1198 | MDOperand *mutable_end() { return getHeader().operands().end(); } |
1199 | |
1200 | using mutable_op_range = iterator_range<MDOperand *>; |
1201 | |
1202 | mutable_op_range mutable_operands() { |
1203 | return mutable_op_range(mutable_begin(), mutable_end()); |
1204 | } |
1205 | |
1206 | public: |
1207 | MDNode(const MDNode &) = delete; |
1208 | void operator=(const MDNode &) = delete; |
1209 | void *operator new(size_t) = delete; |
1210 | |
1211 | static inline MDTuple *get(LLVMContext &Context, ArrayRef<Metadata *> MDs); |
1212 | static inline MDTuple *getIfExists(LLVMContext &Context, |
1213 | ArrayRef<Metadata *> MDs); |
1214 | static inline MDTuple *getDistinct(LLVMContext &Context, |
1215 | ArrayRef<Metadata *> MDs); |
1216 | static inline TempMDTuple getTemporary(LLVMContext &Context, |
1217 | ArrayRef<Metadata *> MDs); |
1218 | |
1219 | /// Create a (temporary) clone of this. |
1220 | TempMDNode clone() const; |
1221 | |
1222 | /// Deallocate a node created by getTemporary. |
1223 | /// |
1224 | /// Calls \c replaceAllUsesWith(nullptr) before deleting, so any remaining |
1225 | /// references will be reset. |
1226 | static void deleteTemporary(MDNode *N); |
1227 | |
1228 | LLVMContext &getContext() const { return Context.getContext(); } |
1229 | |
1230 | /// Replace a specific operand. |
1231 | void replaceOperandWith(unsigned I, Metadata *New); |
1232 | |
1233 | /// Check if node is fully resolved. |
1234 | /// |
1235 | /// If \a isTemporary(), this always returns \c false; if \a isDistinct(), |
1236 | /// this always returns \c true. |
1237 | /// |
1238 | /// If \a isUniqued(), returns \c true if this has already dropped RAUW |
1239 | /// support (because all operands are resolved). |
1240 | /// |
1241 | /// As forward declarations are resolved, their containers should get |
1242 | /// resolved automatically. However, if this (or one of its operands) is |
1243 | /// involved in a cycle, \a resolveCycles() needs to be called explicitly. |
1244 | bool isResolved() const { return !isTemporary() && !getNumUnresolved(); } |
1245 | |
1246 | bool isUniqued() const { return Storage == Uniqued; } |
1247 | bool isDistinct() const { return Storage == Distinct; } |
1248 | bool isTemporary() const { return Storage == Temporary; } |
1249 | |
1250 | bool isReplaceable() const { return isTemporary() || isAlwaysReplaceable(); } |
1251 | bool isAlwaysReplaceable() const { return getMetadataID() == DIAssignIDKind; } |
1252 | |
1253 | unsigned getNumTemporaryUses() const { |
1254 | assert(isTemporary() && "Only for temporaries" ); |
1255 | return Context.getReplaceableUses()->getNumUses(); |
1256 | } |
1257 | |
1258 | /// RAUW a temporary. |
1259 | /// |
1260 | /// \pre \a isTemporary() must be \c true. |
1261 | void replaceAllUsesWith(Metadata *MD) { |
1262 | assert(isReplaceable() && "Expected temporary/replaceable node" ); |
1263 | if (Context.hasReplaceableUses()) |
1264 | Context.getReplaceableUses()->replaceAllUsesWith(MD); |
1265 | } |
1266 | |
1267 | /// Resolve cycles. |
1268 | /// |
1269 | /// Once all forward declarations have been resolved, force cycles to be |
1270 | /// resolved. |
1271 | /// |
1272 | /// \pre No operands (or operands' operands, etc.) have \a isTemporary(). |
1273 | void resolveCycles(); |
1274 | |
1275 | /// Resolve a unique, unresolved node. |
1276 | void resolve(); |
1277 | |
1278 | /// Replace a temporary node with a permanent one. |
1279 | /// |
1280 | /// Try to create a uniqued version of \c N -- in place, if possible -- and |
1281 | /// return it. If \c N cannot be uniqued, return a distinct node instead. |
1282 | template <class T> |
1283 | static std::enable_if_t<std::is_base_of<MDNode, T>::value, T *> |
1284 | replaceWithPermanent(std::unique_ptr<T, TempMDNodeDeleter> N) { |
1285 | return cast<T>(N.release()->replaceWithPermanentImpl()); |
1286 | } |
1287 | |
1288 | /// Replace a temporary node with a uniqued one. |
1289 | /// |
1290 | /// Create a uniqued version of \c N -- in place, if possible -- and return |
1291 | /// it. Takes ownership of the temporary node. |
1292 | /// |
1293 | /// \pre N does not self-reference. |
1294 | template <class T> |
1295 | static std::enable_if_t<std::is_base_of<MDNode, T>::value, T *> |
1296 | replaceWithUniqued(std::unique_ptr<T, TempMDNodeDeleter> N) { |
1297 | return cast<T>(N.release()->replaceWithUniquedImpl()); |
1298 | } |
1299 | |
1300 | /// Replace a temporary node with a distinct one. |
1301 | /// |
1302 | /// Create a distinct version of \c N -- in place, if possible -- and return |
1303 | /// it. Takes ownership of the temporary node. |
1304 | template <class T> |
1305 | static std::enable_if_t<std::is_base_of<MDNode, T>::value, T *> |
1306 | replaceWithDistinct(std::unique_ptr<T, TempMDNodeDeleter> N) { |
1307 | return cast<T>(N.release()->replaceWithDistinctImpl()); |
1308 | } |
1309 | |
1310 | /// Print in tree shape. |
1311 | /// |
1312 | /// Prints definition of \c this in tree shape. |
1313 | /// |
1314 | /// If \c M is provided, metadata nodes will be numbered canonically; |
1315 | /// otherwise, pointer addresses are substituted. |
1316 | /// @{ |
1317 | void printTree(raw_ostream &OS, const Module *M = nullptr) const; |
1318 | void printTree(raw_ostream &OS, ModuleSlotTracker &MST, |
1319 | const Module *M = nullptr) const; |
1320 | /// @} |
1321 | |
1322 | /// User-friendly dump in tree shape. |
1323 | /// |
1324 | /// If \c M is provided, metadata nodes will be numbered canonically; |
1325 | /// otherwise, pointer addresses are substituted. |
1326 | /// |
1327 | /// Note: this uses an explicit overload instead of default arguments so that |
1328 | /// the nullptr version is easy to call from a debugger. |
1329 | /// |
1330 | /// @{ |
1331 | void dumpTree() const; |
1332 | void dumpTree(const Module *M) const; |
1333 | /// @} |
1334 | |
1335 | private: |
1336 | MDNode *replaceWithPermanentImpl(); |
1337 | MDNode *replaceWithUniquedImpl(); |
1338 | MDNode *replaceWithDistinctImpl(); |
1339 | |
1340 | protected: |
1341 | /// Set an operand. |
1342 | /// |
1343 | /// Sets the operand directly, without worrying about uniquing. |
1344 | void setOperand(unsigned I, Metadata *New); |
1345 | |
1346 | unsigned getNumUnresolved() const { return getHeader().NumUnresolved; } |
1347 | |
1348 | void setNumUnresolved(unsigned N) { getHeader().NumUnresolved = N; } |
1349 | void storeDistinctInContext(); |
1350 | template <class T, class StoreT> |
1351 | static T *storeImpl(T *N, StorageType Storage, StoreT &Store); |
1352 | template <class T> static T *storeImpl(T *N, StorageType Storage); |
1353 | |
1354 | /// Resize the node to hold \a NumOps operands. |
1355 | /// |
1356 | /// \pre \a isTemporary() or \a isDistinct() |
1357 | /// \pre MetadataID == MDTupleKind |
1358 | void resize(size_t NumOps) { |
1359 | assert(!isUniqued() && "Resizing is not supported for uniqued nodes" ); |
1360 | assert(getMetadataID() == MDTupleKind && |
1361 | "Resizing is not supported for this node kind" ); |
1362 | getHeader().resize(NumOps); |
1363 | } |
1364 | |
1365 | private: |
1366 | void handleChangedOperand(void *Ref, Metadata *New); |
1367 | |
1368 | /// Drop RAUW support, if any. |
1369 | void dropReplaceableUses(); |
1370 | |
1371 | void resolveAfterOperandChange(Metadata *Old, Metadata *New); |
1372 | void decrementUnresolvedOperandCount(); |
1373 | void countUnresolvedOperands(); |
1374 | |
1375 | /// Mutate this to be "uniqued". |
1376 | /// |
1377 | /// Mutate this so that \a isUniqued(). |
1378 | /// \pre \a isTemporary(). |
1379 | /// \pre already added to uniquing set. |
1380 | void makeUniqued(); |
1381 | |
1382 | /// Mutate this to be "distinct". |
1383 | /// |
1384 | /// Mutate this so that \a isDistinct(). |
1385 | /// \pre \a isTemporary(). |
1386 | void makeDistinct(); |
1387 | |
1388 | void deleteAsSubclass(); |
1389 | MDNode *uniquify(); |
1390 | void eraseFromStore(); |
1391 | |
1392 | template <class NodeTy> struct HasCachedHash; |
1393 | template <class NodeTy> |
1394 | static void dispatchRecalculateHash(NodeTy *N, std::true_type) { |
1395 | N->recalculateHash(); |
1396 | } |
1397 | template <class NodeTy> |
1398 | static void dispatchRecalculateHash(NodeTy *, std::false_type) {} |
1399 | template <class NodeTy> |
1400 | static void dispatchResetHash(NodeTy *N, std::true_type) { |
1401 | N->setHash(0); |
1402 | } |
1403 | template <class NodeTy> |
1404 | static void dispatchResetHash(NodeTy *, std::false_type) {} |
1405 | |
1406 | /// Merge branch weights from two direct callsites. |
1407 | static MDNode *mergeDirectCallProfMetadata(MDNode *A, MDNode *B, |
1408 | const Instruction *AInstr, |
1409 | const Instruction *BInstr); |
1410 | |
1411 | public: |
1412 | using op_iterator = const MDOperand *; |
1413 | using op_range = iterator_range<op_iterator>; |
1414 | |
1415 | op_iterator op_begin() const { |
1416 | return const_cast<MDNode *>(this)->mutable_begin(); |
1417 | } |
1418 | |
1419 | op_iterator op_end() const { |
1420 | return const_cast<MDNode *>(this)->mutable_end(); |
1421 | } |
1422 | |
1423 | ArrayRef<MDOperand> operands() const { return getHeader().operands(); } |
1424 | |
1425 | const MDOperand &getOperand(unsigned I) const { |
1426 | assert(I < getNumOperands() && "Out of range" ); |
1427 | return getHeader().operands()[I]; |
1428 | } |
1429 | |
1430 | /// Return number of MDNode operands. |
1431 | unsigned getNumOperands() const { return getHeader().getNumOperands(); } |
1432 | |
1433 | /// Methods for support type inquiry through isa, cast, and dyn_cast: |
1434 | static bool classof(const Metadata *MD) { |
1435 | switch (MD->getMetadataID()) { |
1436 | default: |
1437 | return false; |
1438 | #define HANDLE_MDNODE_LEAF(CLASS) \ |
1439 | case CLASS##Kind: \ |
1440 | return true; |
1441 | #include "llvm/IR/Metadata.def" |
1442 | } |
1443 | } |
1444 | |
1445 | /// Check whether MDNode is a vtable access. |
1446 | bool isTBAAVtableAccess() const; |
1447 | |
1448 | /// Methods for metadata merging. |
1449 | static MDNode *concatenate(MDNode *A, MDNode *B); |
1450 | static MDNode *intersect(MDNode *A, MDNode *B); |
1451 | static MDNode *getMostGenericTBAA(MDNode *A, MDNode *B); |
1452 | static MDNode *getMostGenericFPMath(MDNode *A, MDNode *B); |
1453 | static MDNode *getMostGenericRange(MDNode *A, MDNode *B); |
1454 | static MDNode *getMostGenericAliasScope(MDNode *A, MDNode *B); |
1455 | static MDNode *getMostGenericAlignmentOrDereferenceable(MDNode *A, MDNode *B); |
1456 | /// Merge !prof metadata from two instructions. |
1457 | /// Currently only implemented with direct callsites with branch weights. |
1458 | static MDNode *getMergedProfMetadata(MDNode *A, MDNode *B, |
1459 | const Instruction *AInstr, |
1460 | const Instruction *BInstr); |
1461 | }; |
1462 | |
1463 | /// Tuple of metadata. |
1464 | /// |
1465 | /// This is the simple \a MDNode arbitrary tuple. Nodes are uniqued by |
1466 | /// default based on their operands. |
1467 | class MDTuple : public MDNode { |
1468 | friend class LLVMContextImpl; |
1469 | friend class MDNode; |
1470 | |
1471 | MDTuple(LLVMContext &C, StorageType Storage, unsigned Hash, |
1472 | ArrayRef<Metadata *> Vals) |
1473 | : MDNode(C, MDTupleKind, Storage, Vals) { |
1474 | setHash(Hash); |
1475 | } |
1476 | |
1477 | ~MDTuple() { dropAllReferences(); } |
1478 | |
1479 | void setHash(unsigned Hash) { SubclassData32 = Hash; } |
1480 | void recalculateHash(); |
1481 | |
1482 | static MDTuple *getImpl(LLVMContext &Context, ArrayRef<Metadata *> MDs, |
1483 | StorageType Storage, bool ShouldCreate = true); |
1484 | |
1485 | TempMDTuple cloneImpl() const { |
1486 | ArrayRef<MDOperand> Operands = operands(); |
1487 | return getTemporary(Context&: getContext(), MDs: SmallVector<Metadata *, 4>( |
1488 | Operands.begin(), Operands.end())); |
1489 | } |
1490 | |
1491 | public: |
1492 | /// Get the hash, if any. |
1493 | unsigned getHash() const { return SubclassData32; } |
1494 | |
1495 | static MDTuple *get(LLVMContext &Context, ArrayRef<Metadata *> MDs) { |
1496 | return getImpl(Context, MDs, Storage: Uniqued); |
1497 | } |
1498 | |
1499 | static MDTuple *getIfExists(LLVMContext &Context, ArrayRef<Metadata *> MDs) { |
1500 | return getImpl(Context, MDs, Storage: Uniqued, /* ShouldCreate */ ShouldCreate: false); |
1501 | } |
1502 | |
1503 | /// Return a distinct node. |
1504 | /// |
1505 | /// Return a distinct node -- i.e., a node that is not uniqued. |
1506 | static MDTuple *getDistinct(LLVMContext &Context, ArrayRef<Metadata *> MDs) { |
1507 | return getImpl(Context, MDs, Storage: Distinct); |
1508 | } |
1509 | |
1510 | /// Return a temporary node. |
1511 | /// |
1512 | /// For use in constructing cyclic MDNode structures. A temporary MDNode is |
1513 | /// not uniqued, may be RAUW'd, and must be manually deleted with |
1514 | /// deleteTemporary. |
1515 | static TempMDTuple getTemporary(LLVMContext &Context, |
1516 | ArrayRef<Metadata *> MDs) { |
1517 | return TempMDTuple(getImpl(Context, MDs, Storage: Temporary)); |
1518 | } |
1519 | |
1520 | /// Return a (temporary) clone of this. |
1521 | TempMDTuple clone() const { return cloneImpl(); } |
1522 | |
1523 | /// Append an element to the tuple. This will resize the node. |
1524 | void push_back(Metadata *MD) { |
1525 | size_t NumOps = getNumOperands(); |
1526 | resize(NumOps: NumOps + 1); |
1527 | setOperand(I: NumOps, New: MD); |
1528 | } |
1529 | |
1530 | /// Shrink the operands by 1. |
1531 | void pop_back() { resize(NumOps: getNumOperands() - 1); } |
1532 | |
1533 | static bool classof(const Metadata *MD) { |
1534 | return MD->getMetadataID() == MDTupleKind; |
1535 | } |
1536 | }; |
1537 | |
// Out-of-line definitions of MDNode's convenience factories; they simply
// forward to the corresponding MDTuple factory, which must be complete here.
MDTuple *MDNode::get(LLVMContext &Context, ArrayRef<Metadata *> MDs) {
  return MDTuple::get(Context, MDs);
}

MDTuple *MDNode::getIfExists(LLVMContext &Context, ArrayRef<Metadata *> MDs) {
  return MDTuple::getIfExists(Context, MDs);
}

MDTuple *MDNode::getDistinct(LLVMContext &Context, ArrayRef<Metadata *> MDs) {
  return MDTuple::getDistinct(Context, MDs);
}

TempMDTuple MDNode::getTemporary(LLVMContext &Context,
                                 ArrayRef<Metadata *> MDs) {
  return MDTuple::getTemporary(Context, MDs);
}
1554 | |
1555 | void TempMDNodeDeleter::operator()(MDNode *Node) const { |
1556 | MDNode::deleteTemporary(N: Node); |
1557 | } |
1558 | |
1559 | /// This is a simple wrapper around an MDNode which provides a higher-level |
1560 | /// interface by hiding the details of how alias analysis information is encoded |
1561 | /// in its operands. |
1562 | class AliasScopeNode { |
1563 | const MDNode *Node = nullptr; |
1564 | |
1565 | public: |
1566 | AliasScopeNode() = default; |
1567 | explicit AliasScopeNode(const MDNode *N) : Node(N) {} |
1568 | |
1569 | /// Get the MDNode for this AliasScopeNode. |
1570 | const MDNode *getNode() const { return Node; } |
1571 | |
1572 | /// Get the MDNode for this AliasScopeNode's domain. |
1573 | const MDNode *getDomain() const { |
1574 | if (Node->getNumOperands() < 2) |
1575 | return nullptr; |
1576 | return dyn_cast_or_null<MDNode>(Val: Node->getOperand(I: 1)); |
1577 | } |
1578 | StringRef getName() const { |
1579 | if (Node->getNumOperands() > 2) |
1580 | if (MDString *N = dyn_cast_or_null<MDString>(Val: Node->getOperand(I: 2))) |
1581 | return N->getString(); |
1582 | return StringRef(); |
1583 | } |
1584 | }; |
1585 | |
1586 | /// Typed iterator through MDNode operands. |
1587 | /// |
1588 | /// An iterator that transforms an \a MDNode::iterator into an iterator over a |
1589 | /// particular Metadata subclass. |
1590 | template <class T> class TypedMDOperandIterator { |
1591 | MDNode::op_iterator I = nullptr; |
1592 | |
1593 | public: |
1594 | using iterator_category = std::input_iterator_tag; |
1595 | using value_type = T *; |
1596 | using difference_type = std::ptrdiff_t; |
1597 | using pointer = void; |
1598 | using reference = T *; |
1599 | |
1600 | TypedMDOperandIterator() = default; |
1601 | explicit TypedMDOperandIterator(MDNode::op_iterator I) : I(I) {} |
1602 | |
1603 | T *operator*() const { return cast_or_null<T>(*I); } |
1604 | |
1605 | TypedMDOperandIterator &operator++() { |
1606 | ++I; |
1607 | return *this; |
1608 | } |
1609 | |
1610 | TypedMDOperandIterator operator++(int) { |
1611 | TypedMDOperandIterator Temp(*this); |
1612 | ++I; |
1613 | return Temp; |
1614 | } |
1615 | |
1616 | bool operator==(const TypedMDOperandIterator &X) const { return I == X.I; } |
1617 | bool operator!=(const TypedMDOperandIterator &X) const { return I != X.I; } |
1618 | }; |
1619 | |
1620 | /// Typed, array-like tuple of metadata. |
1621 | /// |
1622 | /// This is a wrapper for \a MDTuple that makes it act like an array holding a |
1623 | /// particular type of metadata. |
1624 | template <class T> class MDTupleTypedArrayWrapper { |
1625 | const MDTuple *N = nullptr; |
1626 | |
1627 | public: |
1628 | MDTupleTypedArrayWrapper() = default; |
1629 | MDTupleTypedArrayWrapper(const MDTuple *N) : N(N) {} |
1630 | |
1631 | template <class U> |
1632 | MDTupleTypedArrayWrapper( |
1633 | const MDTupleTypedArrayWrapper<U> &Other, |
1634 | std::enable_if_t<std::is_convertible<U *, T *>::value> * = nullptr) |
1635 | : N(Other.get()) {} |
1636 | |
1637 | template <class U> |
1638 | explicit MDTupleTypedArrayWrapper( |
1639 | const MDTupleTypedArrayWrapper<U> &Other, |
1640 | std::enable_if_t<!std::is_convertible<U *, T *>::value> * = nullptr) |
1641 | : N(Other.get()) {} |
1642 | |
1643 | explicit operator bool() const { return get(); } |
1644 | explicit operator MDTuple *() const { return get(); } |
1645 | |
1646 | MDTuple *get() const { return const_cast<MDTuple *>(N); } |
1647 | MDTuple *operator->() const { return get(); } |
1648 | MDTuple &operator*() const { return *get(); } |
1649 | |
1650 | // FIXME: Fix callers and remove condition on N. |
1651 | unsigned size() const { return N ? N->getNumOperands() : 0u; } |
1652 | bool empty() const { return N ? N->getNumOperands() == 0 : true; } |
1653 | T *operator[](unsigned I) const { return cast_or_null<T>(N->getOperand(I)); } |
1654 | |
1655 | // FIXME: Fix callers and remove condition on N. |
1656 | using iterator = TypedMDOperandIterator<T>; |
1657 | |
1658 | iterator begin() const { return N ? iterator(N->op_begin()) : iterator(); } |
1659 | iterator end() const { return N ? iterator(N->op_end()) : iterator(); } |
1660 | }; |
1661 | |
// For each metadata class listed in Metadata.def, declare a `<Class>Array`
// alias: a typed array-view over an MDTuple holding operands of that class.
#define HANDLE_METADATA(CLASS) \
using CLASS##Array = MDTupleTypedArrayWrapper<CLASS>;
#include "llvm/IR/Metadata.def"
1665 | |
1666 | /// Placeholder metadata for operands of distinct MDNodes. |
1667 | /// |
1668 | /// This is a lightweight placeholder for an operand of a distinct node. It's |
1669 | /// purpose is to help track forward references when creating a distinct node. |
1670 | /// This allows distinct nodes involved in a cycle to be constructed before |
1671 | /// their operands without requiring a heavyweight temporary node with |
1672 | /// full-blown RAUW support. |
1673 | /// |
1674 | /// Each placeholder supports only a single MDNode user. Clients should pass |
1675 | /// an ID, retrieved via \a getID(), to indicate the "real" operand that this |
1676 | /// should be replaced with. |
1677 | /// |
1678 | /// While it would be possible to implement move operators, they would be |
1679 | /// fairly expensive. Leave them unimplemented to discourage their use |
1680 | /// (clients can use std::deque, std::list, BumpPtrAllocator, etc.). |
1681 | class DistinctMDOperandPlaceholder : public Metadata { |
1682 | friend class MetadataTracking; |
1683 | |
1684 | Metadata **Use = nullptr; |
1685 | |
1686 | public: |
1687 | explicit DistinctMDOperandPlaceholder(unsigned ID) |
1688 | : Metadata(DistinctMDOperandPlaceholderKind, Distinct) { |
1689 | SubclassData32 = ID; |
1690 | } |
1691 | |
1692 | DistinctMDOperandPlaceholder() = delete; |
1693 | DistinctMDOperandPlaceholder(DistinctMDOperandPlaceholder &&) = delete; |
1694 | DistinctMDOperandPlaceholder(const DistinctMDOperandPlaceholder &) = delete; |
1695 | |
1696 | ~DistinctMDOperandPlaceholder() { |
1697 | if (Use) |
1698 | *Use = nullptr; |
1699 | } |
1700 | |
1701 | unsigned getID() const { return SubclassData32; } |
1702 | |
1703 | /// Replace the use of this with MD. |
1704 | void replaceUseWith(Metadata *MD) { |
1705 | if (!Use) |
1706 | return; |
1707 | *Use = MD; |
1708 | |
1709 | if (*Use) |
1710 | MetadataTracking::track(MD&: *Use); |
1711 | |
1712 | Metadata *T = cast<Metadata>(Val: this); |
1713 | MetadataTracking::untrack(MD&: T); |
1714 | assert(!Use && "Use is still being tracked despite being untracked!" ); |
1715 | } |
1716 | }; |
1717 | |
1718 | //===----------------------------------------------------------------------===// |
1719 | /// A tuple of MDNodes. |
1720 | /// |
1721 | /// Despite its name, a NamedMDNode isn't itself an MDNode. |
1722 | /// |
1723 | /// NamedMDNodes are named module-level entities that contain lists of MDNodes. |
1724 | /// |
1725 | /// It is illegal for a NamedMDNode to appear as an operand of an MDNode. |
1726 | class NamedMDNode : public ilist_node<NamedMDNode> { |
1727 | friend class LLVMContextImpl; |
1728 | friend class Module; |
1729 | |
1730 | std::string Name; |
1731 | Module *Parent = nullptr; |
1732 | void *Operands; // SmallVector<TrackingMDRef, 4> |
1733 | |
1734 | void setParent(Module *M) { Parent = M; } |
1735 | |
1736 | explicit NamedMDNode(const Twine &N); |
1737 | |
1738 | template <class T1> class op_iterator_impl { |
1739 | friend class NamedMDNode; |
1740 | |
1741 | const NamedMDNode *Node = nullptr; |
1742 | unsigned Idx = 0; |
1743 | |
1744 | op_iterator_impl(const NamedMDNode *N, unsigned i) : Node(N), Idx(i) {} |
1745 | |
1746 | public: |
1747 | using iterator_category = std::bidirectional_iterator_tag; |
1748 | using value_type = T1; |
1749 | using difference_type = std::ptrdiff_t; |
1750 | using pointer = value_type *; |
1751 | using reference = value_type; |
1752 | |
1753 | op_iterator_impl() = default; |
1754 | |
1755 | bool operator==(const op_iterator_impl &o) const { return Idx == o.Idx; } |
1756 | bool operator!=(const op_iterator_impl &o) const { return Idx != o.Idx; } |
1757 | |
1758 | op_iterator_impl &operator++() { |
1759 | ++Idx; |
1760 | return *this; |
1761 | } |
1762 | |
1763 | op_iterator_impl operator++(int) { |
1764 | op_iterator_impl tmp(*this); |
1765 | operator++(); |
1766 | return tmp; |
1767 | } |
1768 | |
1769 | op_iterator_impl &operator--() { |
1770 | --Idx; |
1771 | return *this; |
1772 | } |
1773 | |
1774 | op_iterator_impl operator--(int) { |
1775 | op_iterator_impl tmp(*this); |
1776 | operator--(); |
1777 | return tmp; |
1778 | } |
1779 | |
1780 | T1 operator*() const { return Node->getOperand(i: Idx); } |
1781 | }; |
1782 | |
1783 | public: |
1784 | NamedMDNode(const NamedMDNode &) = delete; |
1785 | ~NamedMDNode(); |
1786 | |
1787 | /// Drop all references and remove the node from parent module. |
1788 | void eraseFromParent(); |
1789 | |
1790 | /// Remove all uses and clear node vector. |
1791 | void dropAllReferences() { clearOperands(); } |
1792 | /// Drop all references to this node's operands. |
1793 | void clearOperands(); |
1794 | |
1795 | /// Get the module that holds this named metadata collection. |
1796 | inline Module *getParent() { return Parent; } |
1797 | inline const Module *getParent() const { return Parent; } |
1798 | |
1799 | MDNode *getOperand(unsigned i) const; |
1800 | unsigned getNumOperands() const; |
1801 | void addOperand(MDNode *M); |
1802 | void setOperand(unsigned I, MDNode *New); |
1803 | StringRef getName() const; |
1804 | void print(raw_ostream &ROS, bool IsForDebug = false) const; |
1805 | void print(raw_ostream &ROS, ModuleSlotTracker &MST, |
1806 | bool IsForDebug = false) const; |
1807 | void dump() const; |
1808 | |
1809 | // --------------------------------------------------------------------------- |
1810 | // Operand Iterator interface... |
1811 | // |
1812 | using op_iterator = op_iterator_impl<MDNode *>; |
1813 | |
1814 | op_iterator op_begin() { return op_iterator(this, 0); } |
1815 | op_iterator op_end() { return op_iterator(this, getNumOperands()); } |
1816 | |
1817 | using const_op_iterator = op_iterator_impl<const MDNode *>; |
1818 | |
1819 | const_op_iterator op_begin() const { return const_op_iterator(this, 0); } |
1820 | const_op_iterator op_end() const { return const_op_iterator(this, getNumOperands()); } |
1821 | |
1822 | inline iterator_range<op_iterator> operands() { |
1823 | return make_range(x: op_begin(), y: op_end()); |
1824 | } |
1825 | inline iterator_range<const_op_iterator> operands() const { |
1826 | return make_range(x: op_begin(), y: op_end()); |
1827 | } |
1828 | }; |
1829 | |
// Create wrappers for C Binding types (see CBindingWrapping.h).
// NOTE(review): presumably expands to wrap()/unwrap() conversions between
// NamedMDNode* and the C API's opaque LLVMNamedMDNodeRef — confirm against
// the macro definition in CBindingWrapping.h.
DEFINE_ISA_CONVERSION_FUNCTIONS(NamedMDNode, LLVMNamedMDNodeRef)
1832 | |
1833 | } // end namespace llvm |
1834 | |
1835 | #endif // LLVM_IR_METADATA_H |
1836 | |