//===- SyntheticSections.cpp ---------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "SyntheticSections.h"
#include "ConcatOutputSection.h"
#include "Config.h"
#include "ExportTrie.h"
#include "InputFiles.h"
#include "MachOStructs.h"
#include "OutputSegment.h"
#include "SymbolTable.h"
#include "Symbols.h"

#include "lld/Common/CommonLinkerContext.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/Config/llvm-config.h"
#include "llvm/Support/EndianStream.h"
#include "llvm/Support/FileSystem.h"
#include "llvm/Support/LEB128.h"
#include "llvm/Support/Parallel.h"
#include "llvm/Support/Path.h"
#include "llvm/Support/xxhash.h"

#if defined(__APPLE__)
#include <sys/mman.h>

#define COMMON_DIGEST_FOR_OPENSSL
#include <CommonCrypto/CommonDigest.h>
#else
#include "llvm/Support/SHA256.h"
#endif

using namespace llvm;
using namespace llvm::MachO;
using namespace llvm::support;
using namespace llvm::support::endian;
using namespace lld;
using namespace lld::macho;

// Reads `len` bytes at `data` and writes the 32-byte SHA256 checksum to
// `output`.
static void sha256(const uint8_t *data, size_t len, uint8_t *output) {
#if defined(__APPLE__)
  // FIXME: Make LLVM's SHA256 faster and use it unconditionally. See PR56121
  // for some notes on this.
  CC_SHA256(data, len, output);
#else
  ArrayRef<uint8_t> block(data, len);
  std::array<uint8_t, 32> hash = SHA256::hash(block);
  static_assert(hash.size() == CodeSignatureSection::hashSize);
  memcpy(output, hash.data(), hash.size());
#endif
}

InStruct macho::in;
std::vector<SyntheticSection *> macho::syntheticSections;

SyntheticSection::SyntheticSection(const char *segname, const char *name)
    : OutputSection(SyntheticKind, name) {
  std::tie(this->segname, this->name) = maybeRenameSection({segname, name});
  isec = makeSyntheticInputSection(segname, name);
  isec->parent = this;
  syntheticSections.push_back(this);
}

// dyld3's MachOLoaded::getSlide() assumes that the __TEXT segment starts
// from the beginning of the file (i.e. the header).
MachHeaderSection::MachHeaderSection()
    : SyntheticSection(segment_names::text, section_names::header) {
  // XXX: This is a hack. (See D97007)
  // Setting the index to 1 to pretend that this section is the text
  // section.
  index = 1;
  isec->isFinal = true;
}

void MachHeaderSection::addLoadCommand(LoadCommand *lc) {
  loadCommands.push_back(lc);
  sizeOfCmds += lc->getSize();
}

uint64_t MachHeaderSection::getSize() const {
  uint64_t size = target->headerSize + sizeOfCmds + config->headerPad;
  // If we are emitting an encryptable binary, our load commands must have a
  // separate (non-encrypted) page to themselves.
  if (config->emitEncryptionInfo)
    size = alignToPowerOf2(size, target->getPageSize());
  return size;
}

static uint32_t cpuSubtype() {
  uint32_t subtype = target->cpuSubtype;

  if (config->outputType == MH_EXECUTE && !config->staticLink &&
      target->cpuSubtype == CPU_SUBTYPE_X86_64_ALL &&
      config->platform() == PLATFORM_MACOS &&
      config->platformInfo.target.MinDeployment >= VersionTuple(10, 5))
    subtype |= CPU_SUBTYPE_LIB64;

  return subtype;
}

static bool hasWeakBinding() {
  return config->emitChainedFixups ? in.chainedFixups->hasWeakBinding()
                                   : in.weakBinding->hasEntry();
}

static bool hasNonWeakDefinition() {
  return config->emitChainedFixups ? in.chainedFixups->hasNonWeakDefinition()
                                   : in.weakBinding->hasNonWeakDefinition();
}

void MachHeaderSection::writeTo(uint8_t *buf) const {
  auto *hdr = reinterpret_cast<mach_header *>(buf);
  hdr->magic = target->magic;
  hdr->cputype = target->cpuType;
  hdr->cpusubtype = cpuSubtype();
  hdr->filetype = config->outputType;
  hdr->ncmds = loadCommands.size();
  hdr->sizeofcmds = sizeOfCmds;
  hdr->flags = MH_DYLDLINK;

  if (config->namespaceKind == NamespaceKind::twolevel)
    hdr->flags |= MH_NOUNDEFS | MH_TWOLEVEL;

  if (config->outputType == MH_DYLIB && !config->hasReexports)
    hdr->flags |= MH_NO_REEXPORTED_DYLIBS;

  if (config->markDeadStrippableDylib)
    hdr->flags |= MH_DEAD_STRIPPABLE_DYLIB;

  if (config->outputType == MH_EXECUTE && config->isPic)
    hdr->flags |= MH_PIE;

  if (config->outputType == MH_DYLIB && config->applicationExtension)
    hdr->flags |= MH_APP_EXTENSION_SAFE;

  if (in.exports->hasWeakSymbol || hasNonWeakDefinition())
    hdr->flags |= MH_WEAK_DEFINES;

  if (in.exports->hasWeakSymbol || hasWeakBinding())
    hdr->flags |= MH_BINDS_TO_WEAK;

  for (const OutputSegment *seg : outputSegments) {
    for (const OutputSection *osec : seg->getSections()) {
      if (isThreadLocalVariables(osec->flags)) {
        hdr->flags |= MH_HAS_TLV_DESCRIPTORS;
        break;
      }
    }
  }

  uint8_t *p = reinterpret_cast<uint8_t *>(hdr) + target->headerSize;
  for (const LoadCommand *lc : loadCommands) {
    lc->writeTo(p);
    p += lc->getSize();
  }
}

PageZeroSection::PageZeroSection()
    : SyntheticSection(segment_names::pageZero, section_names::pageZero) {}

RebaseSection::RebaseSection()
    : LinkEditSection(segment_names::linkEdit, section_names::rebase) {}

namespace {
struct RebaseState {
  uint64_t sequenceLength;
  uint64_t skipLength;
};
} // namespace

static void emitIncrement(uint64_t incr, raw_svector_ostream &os) {
  assert(incr != 0);

  if ((incr >> target->p2WordSize) <= REBASE_IMMEDIATE_MASK &&
      (incr % target->wordSize) == 0) {
    os << static_cast<uint8_t>(REBASE_OPCODE_ADD_ADDR_IMM_SCALED |
                               (incr >> target->p2WordSize));
  } else {
    os << static_cast<uint8_t>(REBASE_OPCODE_ADD_ADDR_ULEB);
    encodeULEB128(incr, os);
  }
}

static void flushRebase(const RebaseState &state, raw_svector_ostream &os) {
  assert(state.sequenceLength > 0);

  if (state.skipLength == target->wordSize) {
    if (state.sequenceLength <= REBASE_IMMEDIATE_MASK) {
      os << static_cast<uint8_t>(REBASE_OPCODE_DO_REBASE_IMM_TIMES |
                                 state.sequenceLength);
    } else {
      os << static_cast<uint8_t>(REBASE_OPCODE_DO_REBASE_ULEB_TIMES);
      encodeULEB128(state.sequenceLength, os);
    }
  } else if (state.sequenceLength == 1) {
    os << static_cast<uint8_t>(REBASE_OPCODE_DO_REBASE_ADD_ADDR_ULEB);
    encodeULEB128(state.skipLength - target->wordSize, os);
  } else {
    os << static_cast<uint8_t>(
        REBASE_OPCODE_DO_REBASE_ULEB_TIMES_SKIPPING_ULEB);
    encodeULEB128(state.sequenceLength, os);
    encodeULEB128(state.skipLength - target->wordSize, os);
  }
}

// Rebases are communicated to dyld using a bytecode, whose opcodes cause the
// memory location at a specific address to be rebased and/or the address to be
// incremented.
//
// Opcode REBASE_OPCODE_DO_REBASE_ULEB_TIMES_SKIPPING_ULEB is the most generic
// one, encoding a series of evenly spaced addresses. This algorithm works by
// splitting up the sorted list of addresses into such chunks. If the locations
// are consecutive or the sequence consists of a single location, flushRebase
// will use a smaller, more specialized encoding.
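//
// For illustration, a hypothetical 64-bit example: rebase locations at segment
// offsets 0x10, 0x18, 0x20, and 0x40. The first three form a sequence with an
// 8-byte skip, so flushRebase emits REBASE_OPCODE_DO_REBASE_IMM_TIMES with a
// count of 3 (leaving the rebase pointer at 0x28); emitIncrement then advances
// it by 0x18 to reach 0x40, and a final one-element sequence rebases that last
// location.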
static void encodeRebases(const OutputSegment *seg,
                          MutableArrayRef<Location> locations,
                          raw_svector_ostream &os) {
  // dyld operates on segments. Translate section offsets into segment offsets.
  for (Location &loc : locations)
    loc.offset =
        loc.isec->parent->getSegmentOffset() + loc.isec->getOffset(loc.offset);
  // The algorithm assumes that locations are unique.
  Location *end =
      llvm::unique(locations, [](const Location &a, const Location &b) {
        return a.offset == b.offset;
      });
  size_t count = end - locations.begin();

  os << static_cast<uint8_t>(REBASE_OPCODE_SET_SEGMENT_AND_OFFSET_ULEB |
                             seg->index);
  assert(!locations.empty());
  uint64_t offset = locations[0].offset;
  encodeULEB128(offset, os);

  RebaseState state{1, target->wordSize};

  for (size_t i = 1; i < count; ++i) {
    offset = locations[i].offset;

    uint64_t skip = offset - locations[i - 1].offset;
    assert(skip != 0 && "duplicate locations should have been weeded out");

    if (skip == state.skipLength) {
      ++state.sequenceLength;
    } else if (state.sequenceLength == 1) {
      ++state.sequenceLength;
      state.skipLength = skip;
    } else if (skip < state.skipLength) {
      // The address is lower than what the rebase pointer would be if the last
      // location were part of a sequence. We start a new sequence from the
      // previous location.
      --state.sequenceLength;
      flushRebase(state, os);

      state.sequenceLength = 2;
      state.skipLength = skip;
    } else {
      // The address is at some positive offset from the rebase pointer. We
      // start a new sequence which begins with the current location.
      flushRebase(state, os);
      emitIncrement(skip - state.skipLength, os);
      state.sequenceLength = 1;
      state.skipLength = target->wordSize;
    }
  }
  flushRebase(state, os);
}

void RebaseSection::finalizeContents() {
  if (locations.empty())
    return;

  raw_svector_ostream os{contents};
  os << static_cast<uint8_t>(REBASE_OPCODE_SET_TYPE_IMM | REBASE_TYPE_POINTER);

  llvm::sort(locations, [](const Location &a, const Location &b) {
    return a.isec->getVA(a.offset) < b.isec->getVA(b.offset);
  });

  for (size_t i = 0, count = locations.size(); i < count;) {
    const OutputSegment *seg = locations[i].isec->parent->parent;
    size_t j = i + 1;
    while (j < count && locations[j].isec->parent->parent == seg)
      ++j;
    encodeRebases(seg, {locations.data() + i, locations.data() + j}, os);
    i = j;
  }
  os << static_cast<uint8_t>(REBASE_OPCODE_DONE);
}

void RebaseSection::writeTo(uint8_t *buf) const {
  memcpy(buf, contents.data(), contents.size());
}

NonLazyPointerSectionBase::NonLazyPointerSectionBase(const char *segname,
                                                     const char *name)
    : SyntheticSection(segname, name) {
  align = target->wordSize;
}

void macho::addNonLazyBindingEntries(const Symbol *sym,
                                     const InputSection *isec, uint64_t offset,
                                     int64_t addend) {
  if (config->emitChainedFixups) {
    if (needsBinding(sym))
      in.chainedFixups->addBinding(sym, isec, offset, addend);
    else if (isa<Defined>(sym))
      in.chainedFixups->addRebase(isec, offset);
    else
      llvm_unreachable("cannot bind to an undefined symbol");
    return;
  }

  if (const auto *dysym = dyn_cast<DylibSymbol>(sym)) {
    in.binding->addEntry(dysym, isec, offset, addend);
    if (dysym->isWeakDef())
      in.weakBinding->addEntry(sym, isec, offset, addend);
  } else if (const auto *defined = dyn_cast<Defined>(sym)) {
    in.rebase->addEntry(isec, offset);
    if (defined->isExternalWeakDef())
      in.weakBinding->addEntry(sym, isec, offset, addend);
    else if (defined->interposable)
      in.binding->addEntry(sym, isec, offset, addend);
  } else {
    // Undefined symbols are filtered out in scanRelocations(); we should never
    // get here.
    llvm_unreachable("cannot bind to an undefined symbol");
  }
}

void NonLazyPointerSectionBase::addEntry(Symbol *sym) {
  if (entries.insert(sym)) {
    assert(!sym->isInGot());
    sym->gotIndex = entries.size() - 1;

    addNonLazyBindingEntries(sym, isec, sym->gotIndex * target->wordSize);
  }
}

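// On-disk layout of a chained rebase (hypothetical example): the
// dyld_chained_ptr_64_rebase format packs the low 36 bits of the target VA
// into `target` and the top 8 bits into `high8`. E.g. a targetVA of
// 0xFF00000012345678 would be stored as target = 0x012345678 and
// high8 = 0xFF; an address with nonzero bits in between cannot be encoded and
// is diagnosed below.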
void macho::writeChainedRebase(uint8_t *buf, uint64_t targetVA) {
  assert(config->emitChainedFixups);
  assert(target->wordSize == 8 && "Only 64-bit platforms are supported");
  auto *rebase = reinterpret_cast<dyld_chained_ptr_64_rebase *>(buf);
  rebase->target = targetVA & 0xf'ffff'ffff;
  rebase->high8 = (targetVA >> 56);
  rebase->reserved = 0;
  rebase->next = 0;
  rebase->bind = 0;

  // The fixup format places a 64 GiB limit on the output's size.
  // Should we handle this gracefully?
  uint64_t encodedVA = rebase->target | ((uint64_t)rebase->high8 << 56);
  if (encodedVA != targetVA)
    error("rebase target address 0x" + Twine::utohexstr(targetVA) +
          " does not fit into chained fixup. Re-link with -no_fixup_chains");
}

static void writeChainedBind(uint8_t *buf, const Symbol *sym, int64_t addend) {
  assert(config->emitChainedFixups);
  assert(target->wordSize == 8 && "Only 64-bit platforms are supported");
  auto *bind = reinterpret_cast<dyld_chained_ptr_64_bind *>(buf);
  auto [ordinal, inlineAddend] = in.chainedFixups->getBinding(sym, addend);
  bind->ordinal = ordinal;
  bind->addend = inlineAddend;
  bind->reserved = 0;
  bind->next = 0;
  bind->bind = 1;
}

void macho::writeChainedFixup(uint8_t *buf, const Symbol *sym, int64_t addend) {
  if (needsBinding(sym))
    writeChainedBind(buf, sym, addend);
  else
    writeChainedRebase(buf, sym->getVA() + addend);
}

void NonLazyPointerSectionBase::writeTo(uint8_t *buf) const {
  if (config->emitChainedFixups) {
    for (const auto &[i, entry] : llvm::enumerate(entries))
      writeChainedFixup(&buf[i * target->wordSize], entry, 0);
  } else {
    for (const auto &[i, entry] : llvm::enumerate(entries))
      if (auto *defined = dyn_cast<Defined>(entry))
        write64le(&buf[i * target->wordSize], defined->getVA());
  }
}

GotSection::GotSection()
    : NonLazyPointerSectionBase(segment_names::data, section_names::got) {
  flags = S_NON_LAZY_SYMBOL_POINTERS;
}

TlvPointerSection::TlvPointerSection()
    : NonLazyPointerSectionBase(segment_names::data,
                                section_names::threadPtrs) {
  flags = S_THREAD_LOCAL_VARIABLE_POINTERS;
}

BindingSection::BindingSection()
    : LinkEditSection(segment_names::linkEdit, section_names::binding) {}

namespace {
struct Binding {
  OutputSegment *segment = nullptr;
  uint64_t offset = 0;
  int64_t addend = 0;
};
struct BindIR {
  // The default value of 0xF0 is not a valid opcode; it should make the
  // program scream instead of accidentally writing "valid" values.
  uint8_t opcode = 0xF0;
  uint64_t data = 0;
  uint64_t consecutiveCount = 0;
};
} // namespace

// Encode a sequence of opcodes that tell dyld to write the address of symbol +
// addend at osec->addr + outSecOff.
//
// The bind opcode "interpreter" remembers the values of each binding field, so
// we only need to encode the differences between bindings. Hence the use of
// lastBinding.
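//
// For illustration, a hypothetical example: two bindings in the same segment
// at offsets 0x0 and 0x10 with addend 0 emit SET_SEGMENT_AND_OFFSET_ULEB(seg,
// 0x0), DO_BIND, ADD_ADDR_ULEB(0x8), DO_BIND -- the second delta is 0x8 rather
// than 0x10 because DO_BIND itself advances the offset by one word.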
static void encodeBinding(const OutputSection *osec, uint64_t outSecOff,
                          int64_t addend, Binding &lastBinding,
                          std::vector<BindIR> &opcodes) {
  OutputSegment *seg = osec->parent;
  uint64_t offset = osec->getSegmentOffset() + outSecOff;
  if (lastBinding.segment != seg) {
    opcodes.push_back(
        {static_cast<uint8_t>(BIND_OPCODE_SET_SEGMENT_AND_OFFSET_ULEB |
                              seg->index),
         offset});
    lastBinding.segment = seg;
    lastBinding.offset = offset;
  } else if (lastBinding.offset != offset) {
    opcodes.push_back({BIND_OPCODE_ADD_ADDR_ULEB, offset - lastBinding.offset});
    lastBinding.offset = offset;
  }

  if (lastBinding.addend != addend) {
    opcodes.push_back(
        {BIND_OPCODE_SET_ADDEND_SLEB, static_cast<uint64_t>(addend)});
    lastBinding.addend = addend;
  }

  opcodes.push_back({BIND_OPCODE_DO_BIND, 0});
  // DO_BIND causes dyld to both perform the binding and increment the offset.
  lastBinding.offset += target->wordSize;
}

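// Simplify the opcode stream in place. As a hypothetical illustration, the
// stream DO_BIND, ADD_ADDR_ULEB(8), DO_BIND, ADD_ADDR_ULEB(8), DO_BIND
// becomes DO_BIND_ADD_ADDR_ULEB(8) x2, DO_BIND after pass 1, and then
// DO_BIND_ULEB_TIMES_SKIPPING_ULEB(count=2, skip=8), DO_BIND after pass 2.
// Pass 3 rewrites any remaining DO_BIND_ADD_ADDR_ULEB whose delta is a small
// multiple of the word size into the one-byte DO_BIND_ADD_ADDR_IMM_SCALED
// form.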
static void optimizeOpcodes(std::vector<BindIR> &opcodes) {
  // Pass 1: Combine bind/add pairs
  size_t i;
  int pWrite = 0;
  for (i = 1; i < opcodes.size(); ++i, ++pWrite) {
    if ((opcodes[i].opcode == BIND_OPCODE_ADD_ADDR_ULEB) &&
        (opcodes[i - 1].opcode == BIND_OPCODE_DO_BIND)) {
      opcodes[pWrite].opcode = BIND_OPCODE_DO_BIND_ADD_ADDR_ULEB;
      opcodes[pWrite].data = opcodes[i].data;
      ++i;
    } else {
      opcodes[pWrite] = opcodes[i - 1];
    }
  }
  if (i == opcodes.size())
    opcodes[pWrite] = opcodes[i - 1];
  opcodes.resize(pWrite + 1);

  // Pass 2: Compress two or more bind_add opcodes
  pWrite = 0;
  for (i = 1; i < opcodes.size(); ++i, ++pWrite) {
    if ((opcodes[i].opcode == BIND_OPCODE_DO_BIND_ADD_ADDR_ULEB) &&
        (opcodes[i - 1].opcode == BIND_OPCODE_DO_BIND_ADD_ADDR_ULEB) &&
        (opcodes[i].data == opcodes[i - 1].data)) {
      opcodes[pWrite].opcode = BIND_OPCODE_DO_BIND_ULEB_TIMES_SKIPPING_ULEB;
      opcodes[pWrite].consecutiveCount = 2;
      opcodes[pWrite].data = opcodes[i].data;
      ++i;
      while (i < opcodes.size() &&
             (opcodes[i].opcode == BIND_OPCODE_DO_BIND_ADD_ADDR_ULEB) &&
             (opcodes[i].data == opcodes[i - 1].data)) {
        opcodes[pWrite].consecutiveCount++;
        ++i;
      }
    } else {
      opcodes[pWrite] = opcodes[i - 1];
    }
  }
  if (i == opcodes.size())
    opcodes[pWrite] = opcodes[i - 1];
  opcodes.resize(pWrite + 1);

  // Pass 3: Use immediate encodings
  // Every binding is the size of one pointer. If the next binding is a
  // multiple of wordSize away that is within BIND_IMMEDIATE_MASK, the
  // opcode can be scaled by wordSize into a single byte and dyld will
  // expand it to the correct address.
  for (auto &p : opcodes) {
    // It's unclear why the check needs to be less than BIND_IMMEDIATE_MASK,
    // but ld64 currently does this. This could be a potential bug, but
    // for now, perform the same behavior to prevent mysterious bugs.
    if ((p.opcode == BIND_OPCODE_DO_BIND_ADD_ADDR_ULEB) &&
        ((p.data / target->wordSize) < BIND_IMMEDIATE_MASK) &&
        ((p.data % target->wordSize) == 0)) {
      p.opcode = BIND_OPCODE_DO_BIND_ADD_ADDR_IMM_SCALED;
      p.data /= target->wordSize;
    }
  }
}

static void flushOpcodes(const BindIR &op, raw_svector_ostream &os) {
  uint8_t opcode = op.opcode & BIND_OPCODE_MASK;
  switch (opcode) {
  case BIND_OPCODE_SET_SEGMENT_AND_OFFSET_ULEB:
  case BIND_OPCODE_ADD_ADDR_ULEB:
  case BIND_OPCODE_DO_BIND_ADD_ADDR_ULEB:
    os << op.opcode;
    encodeULEB128(op.data, os);
    break;
  case BIND_OPCODE_SET_ADDEND_SLEB:
    os << op.opcode;
    encodeSLEB128(static_cast<int64_t>(op.data), os);
    break;
  case BIND_OPCODE_DO_BIND:
    os << op.opcode;
    break;
  case BIND_OPCODE_DO_BIND_ULEB_TIMES_SKIPPING_ULEB:
    os << op.opcode;
    encodeULEB128(op.consecutiveCount, os);
    encodeULEB128(op.data, os);
    break;
  case BIND_OPCODE_DO_BIND_ADD_ADDR_IMM_SCALED:
    os << static_cast<uint8_t>(op.opcode | op.data);
    break;
  default:
    llvm_unreachable("cannot bind to an unrecognized symbol");
  }
}

// Non-weak bindings need to have their dylib ordinal encoded as well.
static int16_t ordinalForDylibSymbol(const DylibSymbol &dysym) {
  if (config->namespaceKind == NamespaceKind::flat || dysym.isDynamicLookup())
    return static_cast<int16_t>(BIND_SPECIAL_DYLIB_FLAT_LOOKUP);
  assert(dysym.getFile()->isReferenced());
  return dysym.getFile()->ordinal;
}

static int16_t ordinalForSymbol(const Symbol &sym) {
  if (const auto *dysym = dyn_cast<DylibSymbol>(&sym))
    return ordinalForDylibSymbol(*dysym);
  assert(cast<Defined>(&sym)->interposable);
  return BIND_SPECIAL_DYLIB_FLAT_LOOKUP;
}

static void encodeDylibOrdinal(int16_t ordinal, raw_svector_ostream &os) {
  if (ordinal <= 0) {
    os << static_cast<uint8_t>(BIND_OPCODE_SET_DYLIB_SPECIAL_IMM |
                               (ordinal & BIND_IMMEDIATE_MASK));
  } else if (ordinal <= BIND_IMMEDIATE_MASK) {
    os << static_cast<uint8_t>(BIND_OPCODE_SET_DYLIB_ORDINAL_IMM | ordinal);
  } else {
    os << static_cast<uint8_t>(BIND_OPCODE_SET_DYLIB_ORDINAL_ULEB);
    encodeULEB128(ordinal, os);
  }
}

static void encodeWeakOverride(const Defined *defined,
                               raw_svector_ostream &os) {
  os << static_cast<uint8_t>(BIND_OPCODE_SET_SYMBOL_TRAILING_FLAGS_IMM |
                             BIND_SYMBOL_FLAGS_NON_WEAK_DEFINITION)
     << defined->getName() << '\0';
}

// Organize the bindings so we can encode them with fewer opcodes.
//
// First, all bindings for a given symbol should be grouped together.
// BIND_OPCODE_SET_SYMBOL_TRAILING_FLAGS_IMM is the largest opcode (since it
// has an associated symbol string), so we only want to emit it once per
// symbol.
//
// Within each group, we sort the bindings by address. Since bindings are
// delta-encoded, sorting them allows for a more compact result. Note that
// sorting by address alone ensures that bindings for the same segment /
// section are located together, minimizing the number of times we have to
// emit BIND_OPCODE_SET_SEGMENT_AND_OFFSET_ULEB.
//
// Finally, we sort the symbols by the address of their first binding, again
// to facilitate the delta-encoding process.
template <class Sym>
std::vector<std::pair<const Sym *, std::vector<BindingEntry>>>
sortBindings(const BindingsMap<const Sym *> &bindingsMap) {
  std::vector<std::pair<const Sym *, std::vector<BindingEntry>>> bindingsVec(
      bindingsMap.begin(), bindingsMap.end());
  for (auto &p : bindingsVec) {
    std::vector<BindingEntry> &bindings = p.second;
    llvm::sort(bindings, [](const BindingEntry &a, const BindingEntry &b) {
      return a.target.getVA() < b.target.getVA();
    });
  }
  llvm::sort(bindingsVec, [](const auto &a, const auto &b) {
    return a.second[0].target.getVA() < b.second[0].target.getVA();
  });
  return bindingsVec;
}

// Emit bind opcodes, which are a stream of byte-sized opcodes that dyld
// interprets to update a record with the following fields:
//  * segment index (of the segment to write the symbol addresses to, typically
//    the __DATA_CONST segment which contains the GOT)
//  * offset within the segment, indicating the next location to write a
//    binding
//  * symbol type
//  * symbol library ordinal (the index of its library's LC_LOAD_DYLIB command)
//  * symbol name
//  * addend
// When dyld sees BIND_OPCODE_DO_BIND, it uses the current record state to bind
// a symbol in the GOT, and increments the segment offset to point to the next
// entry. It does *not* clear the record state after doing the bind, so
// subsequent opcodes only need to encode the differences between bindings.
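//
// As a hypothetical illustration, binding _foo from dylib ordinal 1 at two
// consecutive GOT slots would emit SET_SYMBOL_TRAILING_FLAGS_IMM("_foo"),
// SET_TYPE_IMM(POINTER), SET_DYLIB_ORDINAL_IMM(1),
// SET_SEGMENT_AND_OFFSET_ULEB(seg, off), then DO_BIND twice -- the second
// DO_BIND needs no further opcodes because the implicit increment already
// points at the next slot.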
void BindingSection::finalizeContents() {
  raw_svector_ostream os{contents};
  Binding lastBinding;
  int16_t lastOrdinal = 0;

  for (auto &p : sortBindings(bindingsMap)) {
    const Symbol *sym = p.first;
    std::vector<BindingEntry> &bindings = p.second;
    uint8_t flags = BIND_OPCODE_SET_SYMBOL_TRAILING_FLAGS_IMM;
    if (sym->isWeakRef())
      flags |= BIND_SYMBOL_FLAGS_WEAK_IMPORT;
    os << flags << sym->getName() << '\0'
       << static_cast<uint8_t>(BIND_OPCODE_SET_TYPE_IMM | BIND_TYPE_POINTER);
    int16_t ordinal = ordinalForSymbol(*sym);
    if (ordinal != lastOrdinal) {
      encodeDylibOrdinal(ordinal, os);
      lastOrdinal = ordinal;
    }
    std::vector<BindIR> opcodes;
    for (const BindingEntry &b : bindings)
      encodeBinding(b.target.isec->parent,
                    b.target.isec->getOffset(b.target.offset), b.addend,
                    lastBinding, opcodes);
    if (config->optimize > 1)
      optimizeOpcodes(opcodes);
    for (const auto &op : opcodes)
      flushOpcodes(op, os);
  }
  if (!bindingsMap.empty())
    os << static_cast<uint8_t>(BIND_OPCODE_DONE);
}

void BindingSection::writeTo(uint8_t *buf) const {
  memcpy(buf, contents.data(), contents.size());
}

WeakBindingSection::WeakBindingSection()
    : LinkEditSection(segment_names::linkEdit, section_names::weakBinding) {}

void WeakBindingSection::finalizeContents() {
  raw_svector_ostream os{contents};
  Binding lastBinding;

  for (const Defined *defined : definitions)
    encodeWeakOverride(defined, os);

  for (auto &p : sortBindings(bindingsMap)) {
    const Symbol *sym = p.first;
    std::vector<BindingEntry> &bindings = p.second;
    os << static_cast<uint8_t>(BIND_OPCODE_SET_SYMBOL_TRAILING_FLAGS_IMM)
       << sym->getName() << '\0'
       << static_cast<uint8_t>(BIND_OPCODE_SET_TYPE_IMM | BIND_TYPE_POINTER);
    std::vector<BindIR> opcodes;
    for (const BindingEntry &b : bindings)
      encodeBinding(b.target.isec->parent,
                    b.target.isec->getOffset(b.target.offset), b.addend,
                    lastBinding, opcodes);
    if (config->optimize > 1)
      optimizeOpcodes(opcodes);
    for (const auto &op : opcodes)
      flushOpcodes(op, os);
  }
  if (!bindingsMap.empty() || !definitions.empty())
    os << static_cast<uint8_t>(BIND_OPCODE_DONE);
}

void WeakBindingSection::writeTo(uint8_t *buf) const {
  memcpy(buf, contents.data(), contents.size());
}

StubsSection::StubsSection()
    : SyntheticSection(segment_names::text, section_names::stubs) {
  flags = S_SYMBOL_STUBS | S_ATTR_SOME_INSTRUCTIONS | S_ATTR_PURE_INSTRUCTIONS;
  // The stubs section comprises machine instructions, which are aligned to
  // 4 bytes on the archs we care about.
  align = 4;
  reserved2 = target->stubSize;
}

uint64_t StubsSection::getSize() const {
  return entries.size() * target->stubSize;
}

void StubsSection::writeTo(uint8_t *buf) const {
  size_t off = 0;
  for (const Symbol *sym : entries) {
    uint64_t pointerVA =
        config->emitChainedFixups ? sym->getGotVA() : sym->getLazyPtrVA();
    target->writeStub(buf + off, *sym, pointerVA);
    off += target->stubSize;
  }
}

void StubsSection::finalize() { isFinal = true; }

static void addBindingsForStub(Symbol *sym) {
  assert(!config->emitChainedFixups);
  if (auto *dysym = dyn_cast<DylibSymbol>(sym)) {
    if (sym->isWeakDef()) {
      in.binding->addEntry(dysym, in.lazyPointers->isec,
                           sym->stubsIndex * target->wordSize);
      in.weakBinding->addEntry(sym, in.lazyPointers->isec,
                               sym->stubsIndex * target->wordSize);
    } else {
      in.lazyBinding->addEntry(dysym);
    }
  } else if (auto *defined = dyn_cast<Defined>(sym)) {
    if (defined->isExternalWeakDef()) {
      in.rebase->addEntry(in.lazyPointers->isec,
                          sym->stubsIndex * target->wordSize);
      in.weakBinding->addEntry(sym, in.lazyPointers->isec,
                               sym->stubsIndex * target->wordSize);
    } else if (defined->interposable) {
      in.lazyBinding->addEntry(sym);
    } else {
      llvm_unreachable("invalid stub target");
    }
  } else {
    llvm_unreachable("invalid stub target symbol type");
  }
}

void StubsSection::addEntry(Symbol *sym) {
  bool inserted = entries.insert(sym);
  if (inserted) {
    sym->stubsIndex = entries.size() - 1;

    if (config->emitChainedFixups)
      in.got->addEntry(sym);
    else
      addBindingsForStub(sym);
  }
}

StubHelperSection::StubHelperSection()
    : SyntheticSection(segment_names::text, section_names::stubHelper) {
  flags = S_ATTR_SOME_INSTRUCTIONS | S_ATTR_PURE_INSTRUCTIONS;
  align = 4; // This section comprises machine instructions
}

uint64_t StubHelperSection::getSize() const {
  return target->stubHelperHeaderSize +
         in.lazyBinding->getEntries().size() * target->stubHelperEntrySize;
}

bool StubHelperSection::isNeeded() const { return in.lazyBinding->isNeeded(); }

void StubHelperSection::writeTo(uint8_t *buf) const {
  target->writeStubHelperHeader(buf);
  size_t off = target->stubHelperHeaderSize;
  for (const Symbol *sym : in.lazyBinding->getEntries()) {
    target->writeStubHelperEntry(buf + off, *sym, addr + off);
    off += target->stubHelperEntrySize;
  }
}

void StubHelperSection::setUp() {
  Symbol *binder = symtab->addUndefined("dyld_stub_binder", /*file=*/nullptr,
                                        /*isWeakRef=*/false);
  if (auto *undefined = dyn_cast<Undefined>(binder))
    treatUndefinedSymbol(*undefined,
                         "lazy binding (normally in libSystem.dylib)");

  // treatUndefinedSymbol() can replace binder with a DylibSymbol; re-check.
  stubBinder = dyn_cast_or_null<DylibSymbol>(binder);
  if (stubBinder == nullptr)
    return;

  in.got->addEntry(stubBinder);

  in.imageLoaderCache->parent =
      ConcatOutputSection::getOrCreateForInput(in.imageLoaderCache);
  inputSections.push_back(in.imageLoaderCache);
  // Since this isn't in the symbol table or in any input file, the noDeadStrip
  // argument doesn't matter.
  dyldPrivate =
      make<Defined>("__dyld_private", nullptr, in.imageLoaderCache, 0, 0,
                    /*isWeakDef=*/false,
                    /*isExternal=*/false, /*isPrivateExtern=*/false,
                    /*includeInSymtab=*/true,
                    /*isReferencedDynamically=*/false,
                    /*noDeadStrip=*/false);
  dyldPrivate->used = true;
}

ObjCStubsSection::ObjCStubsSection()
    : SyntheticSection(segment_names::text, section_names::objcStubs) {
  flags = S_ATTR_SOME_INSTRUCTIONS | S_ATTR_PURE_INSTRUCTIONS;
  align = config->objcStubsMode == ObjCStubsMode::fast
              ? target->objcStubsFastAlignment
              : target->objcStubsSmallAlignment;
}

bool ObjCStubsSection::isObjCStubSymbol(Symbol *sym) {
  return sym->getName().starts_with(symbolPrefix);
}

StringRef ObjCStubsSection::getMethname(Symbol *sym) {
  assert(isObjCStubSymbol(sym) && "not an objc stub");
  auto name = sym->getName();
  StringRef methname = name.drop_front(symbolPrefix.size());
  return methname;
}

void ObjCStubsSection::initialize() {
  // Do not fold selrefs without ICF.
  if (config->icfLevel == ICFLevel::none)
    return;

  // Search for methnames already referenced in __objc_selrefs, and map each
  // name to the corresponding selref entry, which we will reuse when creating
  // objc stubs.
  for (ConcatInputSection *isec : inputSections) {
    if (isec->shouldOmitFromOutput())
      continue;
    if (isec->getName() != section_names::objcSelrefs)
      continue;
    // We expect a single relocation per selref entry to __objc_methname that
    // might be aggregated.
    assert(isec->relocs.size() == 1);
    auto Reloc = isec->relocs[0];
    if (const auto *sym = Reloc.referent.dyn_cast<Symbol *>()) {
      if (const auto *d = dyn_cast<Defined>(sym)) {
        auto *cisec = cast<CStringInputSection>(d->isec);
        auto methname = cisec->getStringRefAtOffset(d->value);
        methnameToSelref[CachedHashStringRef(methname)] = isec;
      }
    }
  }
}

void ObjCStubsSection::addEntry(Symbol *sym) {
  StringRef methname = getMethname(sym);
  // We create a selref entry for each unique methname.
  if (!methnameToSelref.count(CachedHashStringRef(methname))) {
    auto methnameOffset =
        in.objcMethnameSection->getStringOffset(methname).outSecOff;

    size_t wordSize = target->wordSize;
    uint8_t *selrefData = bAlloc().Allocate<uint8_t>(wordSize);
    write64le(selrefData, methnameOffset);
    auto *objcSelref = makeSyntheticInputSection(
        segment_names::data, section_names::objcSelrefs,
        S_LITERAL_POINTERS | S_ATTR_NO_DEAD_STRIP,
        ArrayRef<uint8_t>{selrefData, wordSize},
        /*align=*/wordSize);
    objcSelref->live = true;
    objcSelref->relocs.push_back(
        {/*type=*/target->unsignedRelocType,
         /*pcrel=*/false, /*length=*/3,
         /*offset=*/0,
         /*addend=*/static_cast<int64_t>(methnameOffset),
         /*referent=*/in.objcMethnameSection->isec});
    objcSelref->parent = ConcatOutputSection::getOrCreateForInput(objcSelref);
    inputSections.push_back(objcSelref);
    objcSelref->isFinal = true;
    methnameToSelref[CachedHashStringRef(methname)] = objcSelref;
  }

  auto stubSize = config->objcStubsMode == ObjCStubsMode::fast
                      ? target->objcStubsFastSize
                      : target->objcStubsSmallSize;
  Defined *newSym = replaceSymbol<Defined>(
      sym, sym->getName(), nullptr, isec,
      /*value=*/symbols.size() * stubSize,
      /*size=*/stubSize,
      /*isWeakDef=*/false, /*isExternal=*/true, /*isPrivateExtern=*/true,
      /*includeInSymtab=*/true, /*isReferencedDynamically=*/false,
      /*noDeadStrip=*/false);
  symbols.push_back(newSym);
}

void ObjCStubsSection::setUp() {
  objcMsgSend = symtab->addUndefined("_objc_msgSend", /*file=*/nullptr,
                                     /*isWeakRef=*/false);
  if (auto *undefined = dyn_cast<Undefined>(objcMsgSend))
    treatUndefinedSymbol(*undefined,
                         "lazy binding (normally in libobjc.dylib)");
  objcMsgSend->used = true;
  if (config->objcStubsMode == ObjCStubsMode::fast) {
    in.got->addEntry(objcMsgSend);
    assert(objcMsgSend->isInGot());
  } else {
    assert(config->objcStubsMode == ObjCStubsMode::small);
    // In line with ld64's behavior, when objc_msgSend is a direct symbol,
    // we directly reference it.
    // In other cases, typically when binding in libobjc.dylib,
    // we generate a stub to invoke objc_msgSend.
    if (!isa<Defined>(objcMsgSend))
      in.stubs->addEntry(objcMsgSend);
  }
}

uint64_t ObjCStubsSection::getSize() const {
  auto stubSize = config->objcStubsMode == ObjCStubsMode::fast
                      ? target->objcStubsFastSize
                      : target->objcStubsSmallSize;
  return stubSize * symbols.size();
}

void ObjCStubsSection::writeTo(uint8_t *buf) const {
  uint64_t stubOffset = 0;
  for (size_t i = 0, n = symbols.size(); i < n; ++i) {
    Defined *sym = symbols[i];

    auto methname = getMethname(sym);
    auto j = methnameToSelref.find(CachedHashStringRef(methname));
    assert(j != methnameToSelref.end());
    auto selrefAddr = j->second->getVA(0);
    target->writeObjCMsgSendStub(buf + stubOffset, sym, in.objcStubs->addr,
                                 stubOffset, selrefAddr, objcMsgSend);
  }
}

LazyPointerSection::LazyPointerSection()
    : SyntheticSection(segment_names::data, section_names::lazySymbolPtr) {
  align = target->wordSize;
  flags = S_LAZY_SYMBOL_POINTERS;
}

uint64_t LazyPointerSection::getSize() const {
  return in.stubs->getEntries().size() * target->wordSize;
}

bool LazyPointerSection::isNeeded() const {
  return !in.stubs->getEntries().empty();
}

void LazyPointerSection::writeTo(uint8_t *buf) const {
  size_t off = 0;
  for (const Symbol *sym : in.stubs->getEntries()) {
    if (const auto *dysym = dyn_cast<DylibSymbol>(sym)) {
      if (dysym->hasStubsHelper()) {
        uint64_t stubHelperOffset =
            target->stubHelperHeaderSize +
            dysym->stubsHelperIndex * target->stubHelperEntrySize;
        write64le(buf + off, in.stubHelper->addr + stubHelperOffset);
      }
    } else {
      write64le(buf + off, sym->getVA());
    }
    off += target->wordSize;
  }
}

LazyBindingSection::LazyBindingSection()
    : LinkEditSection(segment_names::linkEdit, section_names::lazyBinding) {}

void LazyBindingSection::finalizeContents() {
  // TODO: Just precompute the output size here instead of writing to a
  // temporary buffer.
  for (Symbol *sym : entries)
    sym->lazyBindOffset = encode(*sym);
}

void LazyBindingSection::writeTo(uint8_t *buf) const {
  memcpy(buf, contents.data(), contents.size());
}

void LazyBindingSection::addEntry(Symbol *sym) {
  assert(!config->emitChainedFixups && "Chained fixups always bind eagerly");
  if (entries.insert(sym)) {
    sym->stubsHelperIndex = entries.size() - 1;
    in.rebase->addEntry(in.lazyPointers->isec,
                        sym->stubsIndex * target->wordSize);
  }
}

// Unlike the non-lazy binding section, the bind opcodes in this section aren't
// interpreted all at once. Rather, dyld will start interpreting opcodes at a
// given offset, typically only binding a single symbol before it finds a
// BIND_OPCODE_DONE terminator. As such, unlike in the non-lazy-binding case,
// we cannot encode just the differences between symbols; we have to emit the
// complete bind information for each symbol.
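//
// For a hypothetical symbol _bar, the self-contained record would be
// SET_SEGMENT_AND_OFFSET_ULEB(data segment, lazy-pointer slot),
// SET_DYLIB_ORDINAL_IMM(ordinal), SET_SYMBOL_TRAILING_FLAGS_IMM("_bar"),
// DO_BIND, DONE; the offset of this record within the section is what gets
// stored in sym.lazyBindOffset and baked into the stub helper entry.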
uint32_t LazyBindingSection::encode(const Symbol &sym) {
  uint32_t opstreamOffset = contents.size();
  OutputSegment *dataSeg = in.lazyPointers->parent;
  os << static_cast<uint8_t>(BIND_OPCODE_SET_SEGMENT_AND_OFFSET_ULEB |
                             dataSeg->index);
  uint64_t offset =
      in.lazyPointers->addr - dataSeg->addr + sym.stubsIndex * target->wordSize;
  encodeULEB128(offset, os);
  encodeDylibOrdinal(ordinalForSymbol(sym), os);

  uint8_t flags = BIND_OPCODE_SET_SYMBOL_TRAILING_FLAGS_IMM;
  if (sym.isWeakRef())
    flags |= BIND_SYMBOL_FLAGS_WEAK_IMPORT;

  os << flags << sym.getName() << '\0'
     << static_cast<uint8_t>(BIND_OPCODE_DO_BIND)
     << static_cast<uint8_t>(BIND_OPCODE_DONE);
  return opstreamOffset;
}

ExportSection::ExportSection()
    : LinkEditSection(segment_names::linkEdit, section_names::export_) {}

void ExportSection::finalizeContents() {
  trieBuilder.setImageBase(in.header->addr);
  for (const Symbol *sym : symtab->getSymbols()) {
    if (const auto *defined = dyn_cast<Defined>(sym)) {
      if (defined->privateExtern || !defined->isLive())
        continue;
      trieBuilder.addSymbol(*defined);
      hasWeakSymbol = hasWeakSymbol || sym->isWeakDef();
    } else if (auto *dysym = dyn_cast<DylibSymbol>(sym)) {
      if (dysym->shouldReexport)
        trieBuilder.addSymbol(*dysym);
    }
  }
  size = trieBuilder.build();
}

void ExportSection::writeTo(uint8_t *buf) const { trieBuilder.writeTo(buf); }

DataInCodeSection::DataInCodeSection()
    : LinkEditSection(segment_names::linkEdit, section_names::dataInCode) {}

template <class LP>
static std::vector<MachO::data_in_code_entry> collectDataInCodeEntries() {
  std::vector<MachO::data_in_code_entry> dataInCodeEntries;
  for (const InputFile *inputFile : inputFiles) {
    if (!isa<ObjFile>(inputFile))
      continue;
    const ObjFile *objFile = cast<ObjFile>(inputFile);
    ArrayRef<MachO::data_in_code_entry> entries = objFile->getDataInCode();
    if (entries.empty())
      continue;

    assert(is_sorted(entries, [](const data_in_code_entry &lhs,
                                 const data_in_code_entry &rhs) {
      return lhs.offset < rhs.offset;
    }));
    // For each code subsection find 'data in code' entries residing in it.
    // Compute the new offset values as
    // <offset within subsection> + <subsection address> - <__TEXT address>.
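    // E.g. (hypothetically) an entry at offset 0x20 within a subsection
    // placed at VA 0x100001000, with __TEXT at 0x100000000, is emitted with
    // offset 0x1020.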
    for (const Section *section : objFile->sections) {
      for (const Subsection &subsec : section->subsections) {
        const InputSection *isec = subsec.isec;
        if (!isCodeSection(isec))
          continue;
        if (cast<ConcatInputSection>(isec)->shouldOmitFromOutput())
          continue;
        const uint64_t beginAddr = section->addr + subsec.offset;
        auto it = llvm::lower_bound(
            entries, beginAddr,
            [](const MachO::data_in_code_entry &entry, uint64_t addr) {
              return entry.offset < addr;
            });
        const uint64_t endAddr = beginAddr + isec->getSize();
        for (const auto end = entries.end();
             it != end && it->offset + it->length <= endAddr; ++it)
          dataInCodeEntries.push_back(
              {static_cast<uint32_t>(isec->getVA(it->offset - beginAddr) -
                                     in.header->addr),
               it->length, it->kind});
      }
    }
  }

  // ld64 emits the table in sorted order too.
  llvm::sort(dataInCodeEntries,
             [](const data_in_code_entry &lhs, const data_in_code_entry &rhs) {
               return lhs.offset < rhs.offset;
             });
  return dataInCodeEntries;
}

void DataInCodeSection::finalizeContents() {
  entries = target->wordSize == 8 ? collectDataInCodeEntries<LP64>()
                                  : collectDataInCodeEntries<ILP32>();
}

void DataInCodeSection::writeTo(uint8_t *buf) const {
  if (!entries.empty())
    memcpy(buf, entries.data(), getRawSize());
}

FunctionStartsSection::FunctionStartsSection()
    : LinkEditSection(segment_names::linkEdit, section_names::functionStarts) {}

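// The function starts section is a sequence of ULEB128-encoded deltas: the
// first is relative to the image base, each subsequent one to the previous
// function's address. As a hypothetical example, functions at base + 0x100
// and base + 0x180 are encoded as ULEB(0x100), ULEB(0x80), followed by a
// terminating zero byte.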
void FunctionStartsSection::finalizeContents() {
  raw_svector_ostream os{contents};
  std::vector<uint64_t> addrs;
  for (const InputFile *file : inputFiles) {
    if (auto *objFile = dyn_cast<ObjFile>(file)) {
      for (const Symbol *sym : objFile->symbols) {
        if (const auto *defined = dyn_cast_or_null<Defined>(sym)) {
          if (!defined->isec || !isCodeSection(defined->isec) ||
              !defined->isLive())
            continue;
          addrs.push_back(defined->getVA());
        }
      }
    }
  }
  llvm::sort(addrs);
  uint64_t addr = in.header->addr;
  for (uint64_t nextAddr : addrs) {
    uint64_t delta = nextAddr - addr;
    if (delta == 0)
      continue;
    encodeULEB128(delta, os);
    addr = nextAddr;
  }
  os << '\0';
}

void FunctionStartsSection::writeTo(uint8_t *buf) const {
  memcpy(buf, contents.data(), contents.size());
}

SymtabSection::SymtabSection(StringTableSection &stringTableSection)
    : LinkEditSection(segment_names::linkEdit, section_names::symbolTable),
      stringTableSection(stringTableSection) {}

void SymtabSection::emitBeginSourceStab(StringRef sourceFile) {
  StabsEntry stab(N_SO);
  stab.strx = stringTableSection.addString(saver().save(sourceFile));
  stabs.emplace_back(std::move(stab));
}

void SymtabSection::emitEndSourceStab() {
  StabsEntry stab(N_SO);
  stab.sect = 1;
  stabs.emplace_back(std::move(stab));
}

void SymtabSection::emitObjectFileStab(ObjFile *file) {
  StabsEntry stab(N_OSO);
  stab.sect = target->cpuSubtype;
  SmallString<261> path(!file->archiveName.empty() ? file->archiveName
                                                   : file->getName());
  std::error_code ec = sys::fs::make_absolute(path);
  if (ec)
    fatal("failed to get absolute path for " + path);

  if (!file->archiveName.empty())
    path.append({"(", file->getName(), ")"});

  StringRef adjustedPath = saver().save(path.str());
  adjustedPath.consume_front(config->osoPrefix);

  stab.strx = stringTableSection.addString(adjustedPath);
  stab.desc = 1;
  stab.value = file->modTime;
  stabs.emplace_back(std::move(stab));
}

void SymtabSection::emitEndFunStab(Defined *defined) {
  StabsEntry stab(N_FUN);
  stab.value = defined->size;
  stabs.emplace_back(std::move(stab));
}

void SymtabSection::emitStabs() {
  if (config->omitDebugInfo)
    return;

  for (const std::string &s : config->astPaths) {
    StabsEntry astStab(N_AST);
    astStab.strx = stringTableSection.addString(s);
    stabs.emplace_back(std::move(astStab));
  }

  // Cache the file ID for each symbol in an std::pair for faster sorting.
  using SortingPair = std::pair<Defined *, int>;
  std::vector<SortingPair> symbolsNeedingStabs;
  for (const SymtabEntry &entry :
       concat<SymtabEntry>(localSymbols, externalSymbols)) {
    Symbol *sym = entry.sym;
    assert(sym->isLive() &&
           "dead symbols should not be in localSymbols, externalSymbols");
    if (auto *defined = dyn_cast<Defined>(sym)) {
      // Excluded symbols should have been filtered out in finalizeContents().
      assert(defined->includeInSymtab);

      if (defined->isAbsolute())
        continue;

      // Constant-folded symbols go in the executable's symbol table, but don't
      // get a stabs entry.
      if (defined->wasIdenticalCodeFolded)
        continue;

      ObjFile *file = defined->getObjectFile();
      if (!file || !file->compileUnit)
        continue;

      symbolsNeedingStabs.emplace_back(defined, defined->isec->getFile()->id);
    }
  }

  llvm::stable_sort(symbolsNeedingStabs,
                    [&](const SortingPair &a, const SortingPair &b) {
                      return a.second < b.second;
                    });

  // Emit STABS symbols so that dsymutil and/or the debugger can map address
  // regions in the final binary to the source and object files from which they
  // originated.
  InputFile *lastFile = nullptr;
  for (SortingPair &pair : symbolsNeedingStabs) {
    Defined *defined = pair.first;
    InputSection *isec = defined->isec;
    ObjFile *file = cast<ObjFile>(isec->getFile());

    if (lastFile == nullptr || lastFile != file) {
      if (lastFile != nullptr)
        emitEndSourceStab();
      lastFile = file;

      emitBeginSourceStab(file->sourceFile());
      emitObjectFileStab(file);
    }

    StabsEntry symStab;
    symStab.sect = defined->isec->parent->index;
    symStab.strx = stringTableSection.addString(defined->getName());
    symStab.value = defined->getVA();

    if (isCodeSection(isec)) {
      symStab.type = N_FUN;
      stabs.emplace_back(std::move(symStab));
      emitEndFunStab(defined);
    } else {
      symStab.type = defined->isExternal() ? N_GSYM : N_STSYM;
      stabs.emplace_back(std::move(symStab));
    }
  }

  if (!stabs.empty())
    emitEndSourceStab();
}

void SymtabSection::finalizeContents() {
  auto addSymbol = [&](std::vector<SymtabEntry> &symbols, Symbol *sym) {
    uint32_t strx = stringTableSection.addString(sym->getName());
    symbols.push_back({sym, strx});
  };

  std::function<void(Symbol *)> localSymbolsHandler;
  switch (config->localSymbolsPresence) {
  case SymtabPresence::All:
    localSymbolsHandler = [&](Symbol *sym) { addSymbol(localSymbols, sym); };
    break;
  case SymtabPresence::None:
    localSymbolsHandler = [&](Symbol *) { /* Do nothing */ };
    break;
  case SymtabPresence::SelectivelyIncluded:
    localSymbolsHandler = [&](Symbol *sym) {
      if (config->localSymbolPatterns.match(sym->getName()))
        addSymbol(localSymbols, sym);
    };
    break;
  case SymtabPresence::SelectivelyExcluded:
    localSymbolsHandler = [&](Symbol *sym) {
      if (!config->localSymbolPatterns.match(sym->getName()))
        addSymbol(localSymbols, sym);
    };
    break;
  }

  // Local symbols aren't in the SymbolTable, so we walk the list of object
  // files to gather them.
  // But if `-x` is set, then we don't need to. localSymbolsHandler() will do
  // the right thing regardless, but this check is a perf optimization because
  // iterating through all the input files and their symbols is expensive.
  if (config->localSymbolsPresence != SymtabPresence::None) {
    for (const InputFile *file : inputFiles) {
      if (auto *objFile = dyn_cast<ObjFile>(file)) {
        for (Symbol *sym : objFile->symbols) {
          if (auto *defined = dyn_cast_or_null<Defined>(sym)) {
            if (defined->isExternal() || !defined->isLive() ||
                !defined->includeInSymtab)
              continue;
            localSymbolsHandler(sym);
          }
        }
      }
    }
  }

  // __dyld_private is a local symbol too. It's linker-created and doesn't
  // exist in any object file.
  if (in.stubHelper && in.stubHelper->dyldPrivate)
    localSymbolsHandler(in.stubHelper->dyldPrivate);

  for (Symbol *sym : symtab->getSymbols()) {
    if (!sym->isLive())
      continue;
    if (auto *defined = dyn_cast<Defined>(sym)) {
      if (!defined->includeInSymtab)
        continue;
      assert(defined->isExternal());
      if (defined->privateExtern)
        localSymbolsHandler(defined);
      else
        addSymbol(externalSymbols, defined);
    } else if (auto *dysym = dyn_cast<DylibSymbol>(sym)) {
      if (dysym->isReferenced())
        addSymbol(undefinedSymbols, sym);
    }
  }

  emitStabs();
  uint32_t symtabIndex = stabs.size();
  for (const SymtabEntry &entry :
       concat<SymtabEntry>(localSymbols, externalSymbols, undefinedSymbols)) {
    entry.sym->symtabIndex = symtabIndex++;
  }
}

uint32_t SymtabSection::getNumSymbols() const {
  return stabs.size() + localSymbols.size() + externalSymbols.size() +
         undefinedSymbols.size();
}

// This serves to hide (type-erase) the template parameter from SymtabSection.
template <class LP> class SymtabSectionImpl final : public SymtabSection {
public:
  SymtabSectionImpl(StringTableSection &stringTableSection)
      : SymtabSection(stringTableSection) {}
  uint64_t getRawSize() const override;
  void writeTo(uint8_t *buf) const override;
};

template <class LP> uint64_t SymtabSectionImpl<LP>::getRawSize() const {
  return getNumSymbols() * sizeof(typename LP::nlist);
}

template <class LP> void SymtabSectionImpl<LP>::writeTo(uint8_t *buf) const {
  auto *nList = reinterpret_cast<typename LP::nlist *>(buf);
  // Emit the stabs entries before the "real" symbols. We cannot emit them
  // after as that would render Symbol::symtabIndex inaccurate.
  for (const StabsEntry &entry : stabs) {
    nList->n_strx = entry.strx;
    nList->n_type = entry.type;
    nList->n_sect = entry.sect;
    nList->n_desc = entry.desc;
    nList->n_value = entry.value;
    ++nList;
  }

  for (const SymtabEntry &entry : concat<const SymtabEntry>(
           localSymbols, externalSymbols, undefinedSymbols)) {
    nList->n_strx = entry.strx;
    // TODO populate n_desc with more flags
    if (auto *defined = dyn_cast<Defined>(entry.sym)) {
      uint8_t scope = 0;
      if (defined->privateExtern) {
        // Private external -- dylib-scoped symbol.
        // Promoted to non-external at link time.
        scope = N_PEXT;
      } else if (defined->isExternal()) {
        // Normal global symbol.
        scope = N_EXT;
      } else {
        // TU-local symbol from localSymbols.
        scope = 0;
      }

      if (defined->isAbsolute()) {
        nList->n_type = scope | N_ABS;
        nList->n_sect = NO_SECT;
        nList->n_value = defined->value;
      } else {
        nList->n_type = scope | N_SECT;
        nList->n_sect = defined->isec->parent->index;
        // For the N_SECT symbol type, n_value is the address of the symbol.
        nList->n_value = defined->getVA();
      }
      nList->n_desc |= defined->isExternalWeakDef() ? N_WEAK_DEF : 0;
      nList->n_desc |=
          defined->referencedDynamically ? REFERENCED_DYNAMICALLY : 0;
    } else if (auto *dysym = dyn_cast<DylibSymbol>(entry.sym)) {
      uint16_t n_desc = nList->n_desc;
      int16_t ordinal = ordinalForDylibSymbol(*dysym);
      if (ordinal == BIND_SPECIAL_DYLIB_FLAT_LOOKUP)
        SET_LIBRARY_ORDINAL(n_desc, DYNAMIC_LOOKUP_ORDINAL);
      else if (ordinal == BIND_SPECIAL_DYLIB_MAIN_EXECUTABLE)
        SET_LIBRARY_ORDINAL(n_desc, EXECUTABLE_ORDINAL);
      else {
        assert(ordinal > 0);
        SET_LIBRARY_ORDINAL(n_desc, static_cast<uint8_t>(ordinal));
      }

      nList->n_type = N_EXT;
      n_desc |= dysym->isWeakDef() ? N_WEAK_DEF : 0;
      n_desc |= dysym->isWeakRef() ? N_WEAK_REF : 0;
      nList->n_desc = n_desc;
    }
    ++nList;
  }
}

template <class LP>
SymtabSection *
macho::makeSymtabSection(StringTableSection &stringTableSection) {
  return make<SymtabSectionImpl<LP>>(stringTableSection);
}

IndirectSymtabSection::IndirectSymtabSection()
    : LinkEditSection(segment_names::linkEdit,
                      section_names::indirectSymbolTable) {}

uint32_t IndirectSymtabSection::getNumSymbols() const {
  uint32_t size = in.got->getEntries().size() +
                  in.tlvPointers->getEntries().size() +
                  in.stubs->getEntries().size();
  if (!config->emitChainedFixups)
    size += in.stubs->getEntries().size();
  return size;
}

bool IndirectSymtabSection::isNeeded() const {
  return in.got->isNeeded() || in.tlvPointers->isNeeded() ||
         in.stubs->isNeeded();
}

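// Each pointer section's reserved1 field records where its entries begin in
// the indirect symbol table. As a hypothetical example, with 2 GOT entries,
// 1 TLV pointer, and 3 stubs, the reserved1 values would be 0, 2, and 3
// respectively, with the lazy-pointer entries (if present) starting at 6.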
void IndirectSymtabSection::finalizeContents() {
  uint32_t off = 0;
  in.got->reserved1 = off;
  off += in.got->getEntries().size();
  in.tlvPointers->reserved1 = off;
  off += in.tlvPointers->getEntries().size();
  in.stubs->reserved1 = off;
  if (in.lazyPointers) {
    off += in.stubs->getEntries().size();
    in.lazyPointers->reserved1 = off;
  }
}

static uint32_t indirectValue(const Symbol *sym) {
  if (sym->symtabIndex == UINT32_MAX)
    return INDIRECT_SYMBOL_LOCAL;
  if (auto *defined = dyn_cast<Defined>(sym))
    if (defined->privateExtern)
      return INDIRECT_SYMBOL_LOCAL;
  return sym->symtabIndex;
}

void IndirectSymtabSection::writeTo(uint8_t *buf) const {
  uint32_t off = 0;
  for (const Symbol *sym : in.got->getEntries()) {
    write32le(buf + off * sizeof(uint32_t), indirectValue(sym));
    ++off;
  }
  for (const Symbol *sym : in.tlvPointers->getEntries()) {
    write32le(buf + off * sizeof(uint32_t), indirectValue(sym));
    ++off;
  }
  for (const Symbol *sym : in.stubs->getEntries()) {
    write32le(buf + off * sizeof(uint32_t), indirectValue(sym));
    ++off;
  }

  if (in.lazyPointers) {
    // There is a 1:1 correspondence between stubs and LazyPointerSection
    // entries. But giving __stubs and __la_symbol_ptr the same reserved1
    // (the offset into the indirect symbol table) so that they both refer
    // to the same range of offsets confuses `strip`, so write the stubs
    // symbol table offsets a second time.
    for (const Symbol *sym : in.stubs->getEntries()) {
      write32le(buf + off * sizeof(uint32_t), indirectValue(sym));
      ++off;
    }
  }
}

StringTableSection::StringTableSection()
    : LinkEditSection(segment_names::linkEdit, section_names::stringTable) {}

uint32_t StringTableSection::addString(StringRef str) {
  uint32_t strx = size;
  strings.push_back(str); // TODO: consider deduplicating strings
  size += str.size() + 1; // account for null terminator
  return strx;
}
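
// Strings are laid out back to back with NUL separators: hypothetically,
// adding "foo" when `size` is S returns offset S and bumps `size` to S + 4
// (three characters plus the terminator).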

void StringTableSection::writeTo(uint8_t *buf) const {
  uint32_t off = 0;
  for (StringRef str : strings) {
    memcpy(buf + off, str.data(), str.size());
    off += str.size() + 1; // account for null terminator
  }
}

static_assert((CodeSignatureSection::blobHeadersSize % 8) == 0);
static_assert((CodeSignatureSection::fixedHeadersSize % 8) == 0);

CodeSignatureSection::CodeSignatureSection()
    : LinkEditSection(segment_names::linkEdit, section_names::codeSignature) {
  align = 16; // required by libstuff

  // XXX: This mimics ld64, which uses the install name as the codesign
  // identifier, if available.
  if (!config->installName.empty())
    fileName = config->installName;
  else
    // FIXME: Consider using finalOutput instead of outputFile.
    fileName = config->outputFile;

  size_t slashIndex = fileName.rfind("/");
  if (slashIndex != std::string::npos)
    fileName = fileName.drop_front(slashIndex + 1);

  // NOTE: Any changes to these calculations should be repeated
  // in llvm-objcopy's MachOLayoutBuilder::layoutTail.
  allHeadersSize = alignTo<16>(fixedHeadersSize + fileName.size() + 1);
  fileNamePad = allHeadersSize - fixedHeadersSize - fileName.size();
}

uint32_t CodeSignatureSection::getBlockCount() const {
  return (fileOff + blockSize - 1) / blockSize;
}

uint64_t CodeSignatureSection::getRawSize() const {
  return allHeadersSize + getBlockCount() * hashSize;
}

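// The signature consists of the headers followed by one SHA256 hash per
// blockSize-sized chunk of the file up to the signature itself. As a
// hypothetical example, a fileOff of 10000 bytes with 4 KiB blocks yields
// ceil(10000 / 4096) = 3 hashes.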
1545void CodeSignatureSection::writeHashes(uint8_t *buf) const {
1546 // NOTE: Changes to this functionality should be repeated in llvm-objcopy's
1547 // MachOWriter::writeSignatureData.
1548 uint8_t *hashes = buf + fileOff + allHeadersSize;
1549 parallelFor(Begin: 0, End: getBlockCount(), Fn: [&](size_t i) {
1550 sha256(data: buf + i * blockSize,
1551 len: std::min(a: static_cast<size_t>(fileOff - i * blockSize), b: blockSize),
1552 output: hashes + i * hashSize);
1553 });
#if defined(__APPLE__)
  // This is a macOS-specific workaround and makes no sense for any other
  // host OS. See https://openradar.appspot.com/FB8914231
  //
  // The macOS kernel maintains a signature-verification cache to
  // quickly validate applications at time of execve(2). The trouble
  // is that the kernel creates the cache entry at the time of the
  // mmap(2) call, before we have a chance to write either the code to
  // sign or the signature header+hashes. The fix is to invalidate
  // all cached data associated with the output file, thus discarding
  // the bogus prematurely-cached signature.
  msync(buf, fileOff + getSize(), MS_INVALIDATE);
#endif
}

void CodeSignatureSection::writeTo(uint8_t *buf) const {
  // NOTE: Changes to this functionality should be repeated in llvm-objcopy's
  // MachOWriter::writeSignatureData.
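  // On-disk layout, as a rough sketch:
  //   CS_SuperBlob | CS_BlobIndex | CS_CodeDirectory | identifier + padding
  //   ^ offset 0                   ^ blobHeadersSize
  // followed (at allHeadersSize) by the hash array from writeHashes().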
  uint32_t signatureSize = static_cast<uint32_t>(getSize());
  auto *superBlob = reinterpret_cast<CS_SuperBlob *>(buf);
  write32be(&superBlob->magic, CSMAGIC_EMBEDDED_SIGNATURE);
  write32be(&superBlob->length, signatureSize);
  write32be(&superBlob->count, 1);
  auto *blobIndex = reinterpret_cast<CS_BlobIndex *>(&superBlob[1]);
  write32be(&blobIndex->type, CSSLOT_CODEDIRECTORY);
  write32be(&blobIndex->offset, blobHeadersSize);
  auto *codeDirectory =
      reinterpret_cast<CS_CodeDirectory *>(buf + blobHeadersSize);
  write32be(&codeDirectory->magic, CSMAGIC_CODEDIRECTORY);
  write32be(&codeDirectory->length, signatureSize - blobHeadersSize);
  write32be(&codeDirectory->version, CS_SUPPORTSEXECSEG);
  write32be(&codeDirectory->flags, CS_ADHOC | CS_LINKER_SIGNED);
  write32be(&codeDirectory->hashOffset,
            sizeof(CS_CodeDirectory) + fileName.size() + fileNamePad);
  write32be(&codeDirectory->identOffset, sizeof(CS_CodeDirectory));
  codeDirectory->nSpecialSlots = 0;
  write32be(&codeDirectory->nCodeSlots, getBlockCount());
  write32be(&codeDirectory->codeLimit, fileOff);
  codeDirectory->hashSize = static_cast<uint8_t>(hashSize);
  codeDirectory->hashType = kSecCodeSignatureHashSHA256;
  codeDirectory->platform = 0;
  codeDirectory->pageSize = blockSizeShift;
  codeDirectory->spare2 = 0;
  codeDirectory->scatterOffset = 0;
  codeDirectory->teamOffset = 0;
  codeDirectory->spare3 = 0;
  codeDirectory->codeLimit64 = 0;
  OutputSegment *textSeg = getOrCreateOutputSegment(segment_names::text);
  write64be(&codeDirectory->execSegBase, textSeg->fileOff);
  write64be(&codeDirectory->execSegLimit, textSeg->fileSize);
  write64be(&codeDirectory->execSegFlags,
            config->outputType == MH_EXECUTE ? CS_EXECSEG_MAIN_BINARY : 0);
  auto *id = reinterpret_cast<char *>(&codeDirectory[1]);
  memcpy(id, fileName.begin(), fileName.size());
  memset(id + fileName.size(), 0, fileNamePad);
}

CStringSection::CStringSection(const char *name)
    : SyntheticSection(segment_names::text, name) {
  flags = S_CSTRING_LITERALS;
}

void CStringSection::addInput(CStringInputSection *isec) {
  isec->parent = this;
  inputs.push_back(isec);
  if (isec->align > align)
    align = isec->align;
}

void CStringSection::writeTo(uint8_t *buf) const {
  for (const CStringInputSection *isec : inputs) {
    for (const auto &[i, piece] : llvm::enumerate(isec->pieces)) {
      if (!piece.live)
        continue;
      StringRef string = isec->getStringRef(i);
      memcpy(buf + piece.outSecOff, string.data(), string.size());
    }
  }
}

void CStringSection::finalizeContents() {
  uint64_t offset = 0;
  for (CStringInputSection *isec : inputs) {
    for (const auto &[i, piece] : llvm::enumerate(isec->pieces)) {
      if (!piece.live)
        continue;
      // See comment above DeduplicatedCStringSection for how alignment is
      // handled.
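      // E.g. with isec->align == 16 and piece.inSecOff == 18, the OR is
      // 0b10010, countr_zero() gives 1, and pieceAlign is 2, preserving the
      // trailing-zero count of the piece's address in the input.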
      uint32_t pieceAlign =
          1 << llvm::countr_zero(isec->align | piece.inSecOff);
      offset = alignToPowerOf2(offset, pieceAlign);
      piece.outSecOff = offset;
      isec->isFinal = true;
      StringRef string = isec->getStringRef(i);
      offset += string.size() + 1; // account for null terminator
    }
  }
  size = offset;
}

// Mergeable cstring literals are found under the __TEXT,__cstring section. In
// contrast to ELF, which puts strings that need different alignments into
// different sections, clang's Mach-O backend puts them all in one section.
// Strings that need to be aligned have the .p2align directive emitted before
// them, which simply translates into zero padding in the object file. In
// other words, we have to infer the desired alignment of these cstrings from
// their addresses.
//
// We differ slightly from ld64 in how we've chosen to align these cstrings.
// Both LLD and ld64 preserve the number of trailing zeros in each cstring's
// address in the input object files. When deduplicating identical cstrings,
// both linkers pick the cstring whose address has more trailing zeros, and
// preserve the alignment of that address in the final binary. However, ld64
// goes a step further and also preserves the offset of the cstring from the
// last section-aligned address. I.e. if a cstring is at offset 18 in the
// input, with a section alignment of 16, then both LLD and ld64 will ensure
// the final address is 2-byte aligned (since 18 == 16 + 2). But ld64 will
// also ensure that the final address is of the form 16 * k + 2 for some k.
//
// Note that ld64's heuristic means that a dedup'ed cstring's final address is
// dependent on the order of the input object files. E.g. if in addition to
// the cstring at offset 18 above, we have a duplicate one in another file
// with a `.cstring` section alignment of 2 and an offset of zero, then ld64
// will pick the cstring from the object file earlier on the command line
// (since both have the same number of trailing zeros in their address). So
// the final cstring may either be at some address `16 * k + 2` or at some
// address `2 * k`.
//
// I've opted not to follow this behavior primarily for implementation
// simplicity, and secondarily to save a few more bytes. It's not clear to me
// that preserving the section alignment + offset is ever necessary, and there
// are many cases that are clearly redundant. In particular, if an x86_64
// object file contains some strings that are accessed via SIMD instructions,
// then the .cstring section in the object file will be 16-byte-aligned (since
// SIMD requires its operand addresses to be 16-byte aligned). However, there
// will typically also be other cstrings in the same file that aren't used via
// SIMD and don't need this alignment. They will be emitted at some arbitrary
// address `A`, but ld64 will treat them as being 16-byte aligned with an
// offset of `A % 16`.
void DeduplicatedCStringSection::finalizeContents() {
  // Find the largest alignment required for each string.
  for (const CStringInputSection *isec : inputs) {
    for (const auto &[i, piece] : llvm::enumerate(isec->pieces)) {
      if (!piece.live)
        continue;
      auto s = isec->getCachedHashStringRef(i);
      assert(isec->align != 0);
      uint8_t trailingZeros = llvm::countr_zero(isec->align | piece.inSecOff);
      auto it = stringOffsetMap.insert(
          std::make_pair(s, StringOffset(trailingZeros)));
      if (!it.second && it.first->second.trailingZeros < trailingZeros)
        it.first->second.trailingZeros = trailingZeros;
    }
  }

  // Assign an offset for each string and save it to the corresponding
  // StringPieces for easy access.
  for (CStringInputSection *isec : inputs) {
    for (const auto &[i, piece] : llvm::enumerate(isec->pieces)) {
      if (!piece.live)
        continue;
      auto s = isec->getCachedHashStringRef(i);
      auto it = stringOffsetMap.find(s);
      assert(it != stringOffsetMap.end());
      StringOffset &offsetInfo = it->second;
      if (offsetInfo.outSecOff == UINT64_MAX) {
        offsetInfo.outSecOff =
            alignToPowerOf2(size, 1ULL << offsetInfo.trailingZeros);
        size =
            offsetInfo.outSecOff + s.size() + 1; // account for null terminator
      }
      piece.outSecOff = offsetInfo.outSecOff;
    }
    isec->isFinal = true;
  }
}

void DeduplicatedCStringSection::writeTo(uint8_t *buf) const {
  for (const auto &p : stringOffsetMap) {
    StringRef data = p.first.val();
    uint64_t off = p.second.outSecOff;
    if (!data.empty())
      memcpy(buf + off, data.data(), data.size());
  }
}

DeduplicatedCStringSection::StringOffset
DeduplicatedCStringSection::getStringOffset(StringRef str) const {
  // StringPiece uses 31 bits to store the hashes, so we replicate that.
  uint32_t hash = xxh3_64bits(str) & 0x7fffffff;
  auto offset = stringOffsetMap.find(CachedHashStringRef(str, hash));
  assert(offset != stringOffsetMap.end() &&
         "Looked-up strings should always exist in section");
  return offset->second;
}

// This section is actually emitted as __TEXT,__const by ld64, but clang may
// emit input sections of that name, and LLD doesn't currently support mixing
// synthetic and concat-type OutputSections. To work around this, I've given
// our merged-literals section a different name.
WordLiteralSection::WordLiteralSection()
    : SyntheticSection(segment_names::text, section_names::literals) {
  align = 16;
}

void WordLiteralSection::addInput(WordLiteralInputSection *isec) {
  isec->parent = this;
  inputs.push_back(isec);
}

void WordLiteralSection::finalizeContents() {
  for (WordLiteralInputSection *isec : inputs) {
    // We do all processing of the InputSection here, so it will be
    // effectively finalized.
    isec->isFinal = true;
    const uint8_t *buf = isec->data.data();
    switch (sectionType(isec->getFlags())) {
    case S_4BYTE_LITERALS: {
      for (size_t off = 0, e = isec->data.size(); off < e; off += 4) {
        if (!isec->isLive(off))
          continue;
        uint32_t value = *reinterpret_cast<const uint32_t *>(buf + off);
        literal4Map.emplace(value, literal4Map.size());
      }
      break;
    }
    case S_8BYTE_LITERALS: {
      for (size_t off = 0, e = isec->data.size(); off < e; off += 8) {
        if (!isec->isLive(off))
          continue;
        uint64_t value = *reinterpret_cast<const uint64_t *>(buf + off);
        literal8Map.emplace(value, literal8Map.size());
      }
      break;
    }
    case S_16BYTE_LITERALS: {
      for (size_t off = 0, e = isec->data.size(); off < e; off += 16) {
        if (!isec->isLive(off))
          continue;
        UInt128 value = *reinterpret_cast<const UInt128 *>(buf + off);
        literal16Map.emplace(value, literal16Map.size());
      }
      break;
    }
    default:
      llvm_unreachable("invalid literal section type");
    }
  }
}

void WordLiteralSection::writeTo(uint8_t *buf) const {
  // Note that we don't attempt to do any endianness conversion in addInput(),
  // so we don't do it here either -- just write out the original value,
  // byte-for-byte.
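  // Output layout: all 16-byte literals first, then the 8-byte ones, then
  // the 4-byte ones, each at the offset given by its map-insertion index.
  // E.g. two unique 8-byte literals land at offsets
  // literal16Map.size() * 16 and literal16Map.size() * 16 + 8.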
  for (const auto &p : literal16Map)
    memcpy(buf + p.second * 16, &p.first, 16);
  buf += literal16Map.size() * 16;

  for (const auto &p : literal8Map)
    memcpy(buf + p.second * 8, &p.first, 8);
  buf += literal8Map.size() * 8;

  for (const auto &p : literal4Map)
    memcpy(buf + p.second * 4, &p.first, 4);
}

ObjCImageInfoSection::ObjCImageInfoSection()
    : SyntheticSection(segment_names::data, section_names::objCImageInfo) {}

ObjCImageInfoSection::ImageInfo
ObjCImageInfoSection::parseImageInfo(const InputFile *file) {
  ImageInfo info;
  ArrayRef<uint8_t> data = file->objCImageInfo;
  // The image info struct has the following layout:
  // struct {
  //   uint32_t version;
  //   uint32_t flags;
  // };
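  // The flags word packs the Swift version into bits 8-15 and "has category
  // class properties" into bit 6 (0x40); e.g. flags == 0x0540 encodes Swift
  // version 5 with that bit set.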
  if (data.size() < 8) {
    warn(toString(file) + ": invalid __objc_imageinfo size");
    return info;
  }

  auto *buf = reinterpret_cast<const uint32_t *>(data.data());
  if (read32le(buf) != 0) {
    warn(toString(file) + ": invalid __objc_imageinfo version");
    return info;
  }

  uint32_t flags = read32le(buf + 1);
  info.swiftVersion = (flags >> 8) & 0xff;
  info.hasCategoryClassProperties = flags & 0x40;
  return info;
}

static std::string swiftVersionString(uint8_t version) {
  switch (version) {
  case 1:
    return "1.0";
  case 2:
    return "1.1";
  case 3:
    return "2.0";
  case 4:
    return "3.0";
  case 5:
    return "4.0";
  default:
    return ("0x" + Twine::utohexstr(version)).str();
  }
}

// Validate each object file's __objc_imageinfo and use them to generate the
// image info for the output binary. Only two pieces of info are relevant:
// 1. The Swift version (should be identical across inputs)
// 2. `bool hasCategoryClassProperties` (true only if true for all inputs)
void ObjCImageInfoSection::finalizeContents() {
  assert(files.size() != 0); // should have already been checked via isNeeded()

  info.hasCategoryClassProperties = true;
  const InputFile *firstFile;
  for (const InputFile *file : files) {
    ImageInfo inputInfo = parseImageInfo(file);
    info.hasCategoryClassProperties &= inputInfo.hasCategoryClassProperties;

    // swiftVersion 0 means no Swift is present, so no version checking
    // is required.
    if (inputInfo.swiftVersion == 0)
      continue;

    if (info.swiftVersion != 0 &&
        info.swiftVersion != inputInfo.swiftVersion) {
      error("Swift version mismatch: " + toString(firstFile) +
            " has version " + swiftVersionString(info.swiftVersion) +
            " but " + toString(file) + " has version " +
            swiftVersionString(inputInfo.swiftVersion));
    } else {
      info.swiftVersion = inputInfo.swiftVersion;
      firstFile = file;
    }
  }
}

void ObjCImageInfoSection::writeTo(uint8_t *buf) const {
  uint32_t flags = info.hasCategoryClassProperties ? 0x40 : 0x0;
  flags |= info.swiftVersion << 8;
  write32le(buf + 4, flags);
}

InitOffsetsSection::InitOffsetsSection()
    : SyntheticSection(segment_names::text, section_names::initOffsets) {
  flags = S_INIT_FUNC_OFFSETS;
  align = 4; // This section contains 32-bit integers.
}

uint64_t InitOffsetsSection::getSize() const {
  size_t count = 0;
  for (const ConcatInputSection *isec : sections)
    count += isec->relocs.size();
  return count * sizeof(uint32_t);
}

void InitOffsetsSection::writeTo(uint8_t *buf) const {
  // FIXME: Add function specified by -init when that argument is implemented.
  for (ConcatInputSection *isec : sections) {
    for (const Reloc &rel : isec->relocs) {
      const Symbol *referent = rel.referent.dyn_cast<Symbol *>();
      assert(referent && "section relocation should have been rejected");
      uint64_t offset = referent->getVA() - in.header->addr;
      // FIXME: Can we handle this gracefully?
      if (offset > UINT32_MAX)
        fatal(isec->getLocation(rel.offset) + ": offset to initializer " +
              referent->getName() + " (" + utohexstr(offset) +
              ") does not fit in 32 bits");

      // Entries need to be added in the order they appear in the section, but
      // relocations aren't guaranteed to be sorted.
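      // E.g. on a 64-bit target (p2WordSize == 3), the input pointer at
      // section offset 16 is entry 16 >> 3 == 2, i.e. the third 32-bit slot.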
      size_t index = rel.offset >> target->p2WordSize;
      write32le(&buf[index * sizeof(uint32_t)], offset);
    }
    buf += isec->relocs.size() * sizeof(uint32_t);
  }
}

// The inputs are __mod_init_func sections, which contain pointers to
// initializer functions; therefore, all relocations should be of the UNSIGNED
// type. InitOffsetsSection stores offsets, so if the initializer's address is
// not known at link time, stub-indirection has to be used.
void InitOffsetsSection::setUp() {
  for (const ConcatInputSection *isec : sections) {
    for (const Reloc &rel : isec->relocs) {
      RelocAttrs attrs = target->getRelocAttrs(rel.type);
      if (!attrs.hasAttr(RelocAttrBits::UNSIGNED))
        error(isec->getLocation(rel.offset) +
              ": unsupported relocation type: " + attrs.name);
      if (rel.addend != 0)
        error(isec->getLocation(rel.offset) +
              ": relocation addend is not representable in __init_offsets");
      if (rel.referent.is<InputSection *>())
        error(isec->getLocation(rel.offset) +
              ": unexpected section relocation");

      Symbol *sym = rel.referent.dyn_cast<Symbol *>();
      if (auto *undefined = dyn_cast<Undefined>(sym))
        treatUndefinedSymbol(*undefined, isec, rel.offset);
      if (needsBinding(sym))
        in.stubs->addEntry(sym);
    }
  }
}

void macho::createSyntheticSymbols() {
  auto addHeaderSymbol = [](const char *name) {
    symtab->addSynthetic(name, in.header->isec, /*value=*/0,
                         /*isPrivateExtern=*/true, /*includeInSymtab=*/false,
                         /*referencedDynamically=*/false);
  };

  switch (config->outputType) {
  // FIXME: Assign the right address value for these symbols
  // (rather than 0). But we need to do that after assignAddresses().
  case MH_EXECUTE:
    // If linking PIE, __mh_execute_header is a defined symbol in
    // __TEXT,__text. Otherwise, it's an absolute symbol.
    if (config->isPic)
      symtab->addSynthetic("__mh_execute_header", in.header->isec, /*value=*/0,
                           /*isPrivateExtern=*/false, /*includeInSymtab=*/true,
                           /*referencedDynamically=*/true);
    else
      symtab->addSynthetic("__mh_execute_header", /*isec=*/nullptr,
                           /*value=*/0, /*isPrivateExtern=*/false,
                           /*includeInSymtab=*/true,
                           /*referencedDynamically=*/true);
    break;

  // The following symbols are N_SECT symbols, even though the header is not
  // part of any section, and they are private to the bundle/dylib/object they
  // are part of.
  case MH_BUNDLE:
    addHeaderSymbol("__mh_bundle_header");
    break;
  case MH_DYLIB:
    addHeaderSymbol("__mh_dylib_header");
    break;
  case MH_DYLINKER:
    addHeaderSymbol("__mh_dylinker_header");
    break;
  case MH_OBJECT:
    addHeaderSymbol("__mh_object_header");
    break;
  default:
    llvm_unreachable("unexpected outputType");
    break;
  }

  // The Itanium C++ ABI requires dylibs to pass a pointer to __cxa_atexit
  // which does e.g. cleanup of static global variables. The ABI document
  // says that the pointer can point to any address in one of the dylib's
  // segments, but in practice ld64 seems to set it to point to the header,
  // so that's what's implemented here.
  addHeaderSymbol("___dso_handle");
}

ChainedFixupsSection::ChainedFixupsSection()
    : LinkEditSection(segment_names::linkEdit, section_names::chainFixups) {}

bool ChainedFixupsSection::isNeeded() const {
  assert(config->emitChainedFixups);
  // dyld always expects LC_DYLD_CHAINED_FIXUPS to point to a valid
  // dyld_chained_fixups_header, so we create this section even if there
  // aren't any fixups.
  return true;
}

static bool needsWeakBind(const Symbol &sym) {
  if (auto *dysym = dyn_cast<DylibSymbol>(&sym))
    return dysym->isWeakDef();
  if (auto *defined = dyn_cast<Defined>(&sym))
    return defined->isExternalWeakDef();
  return false;
}

void ChainedFixupsSection::addBinding(const Symbol *sym,
                                      const InputSection *isec,
                                      uint64_t offset, int64_t addend) {
  locations.emplace_back(isec, offset);
  int64_t outlineAddend = (addend < 0 || addend > 0xFF) ? addend : 0;
  auto [it, inserted] = bindings.insert(
      {{sym, outlineAddend}, static_cast<uint32_t>(bindings.size())});

  if (inserted) {
    symtabSize += sym->getName().size() + 1;
    hasWeakBind = hasWeakBind || needsWeakBind(*sym);
    if (!isInt<23>(outlineAddend))
      needsLargeAddend = true;
    else if (outlineAddend != 0)
      needsAddend = true;
  }
}

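// Addends in [0, 0xFF] are stored inline in the fixup chain itself; anything
// else is "outlined" into the imports table. E.g. a binding with addend 5
// returns (ordinal, 5) here, while one with addend 0x1234 was registered
// under outlineAddend 0x1234 and returns (ordinal, 0).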
std::pair<uint32_t, uint8_t>
ChainedFixupsSection::getBinding(const Symbol *sym, int64_t addend) const {
  int64_t outlineAddend = (addend < 0 || addend > 0xFF) ? addend : 0;
  auto it = bindings.find({sym, outlineAddend});
  assert(it != bindings.end() && "binding not found in the imports table");
  if (outlineAddend == 0)
    return {it->second, addend};
  return {it->second, 0};
}

static size_t writeImport(uint8_t *buf, int format, uint32_t libOrdinal,
                          bool weakRef, uint32_t nameOffset, int64_t addend) {
  switch (format) {
  case DYLD_CHAINED_IMPORT: {
    auto *import = reinterpret_cast<dyld_chained_import *>(buf);
    import->lib_ordinal = libOrdinal;
    import->weak_import = weakRef;
    import->name_offset = nameOffset;
    return sizeof(dyld_chained_import);
  }
  case DYLD_CHAINED_IMPORT_ADDEND: {
    auto *import = reinterpret_cast<dyld_chained_import_addend *>(buf);
    import->lib_ordinal = libOrdinal;
    import->weak_import = weakRef;
    import->name_offset = nameOffset;
    import->addend = addend;
    return sizeof(dyld_chained_import_addend);
  }
  case DYLD_CHAINED_IMPORT_ADDEND64: {
    auto *import = reinterpret_cast<dyld_chained_import_addend64 *>(buf);
    import->lib_ordinal = libOrdinal;
    import->weak_import = weakRef;
    import->name_offset = nameOffset;
    import->addend = addend;
    return sizeof(dyld_chained_import_addend64);
  }
  default:
    llvm_unreachable("Unknown import format");
  }
}

size_t ChainedFixupsSection::SegmentInfo::getSize() const {
  assert(pageStarts.size() > 0 && "SegmentInfo for segment with no fixups?");
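  // dyld_chained_starts_in_segment ends in a one-element page_start array, so
  // its sizeof() already covers one entry; the highest page index accounts
  // for the remaining page_count - 1 entries.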
  return alignTo<8>(sizeof(dyld_chained_starts_in_segment) +
                    pageStarts.back().first * sizeof(uint16_t));
}

size_t ChainedFixupsSection::SegmentInfo::writeTo(uint8_t *buf) const {
  auto *segInfo = reinterpret_cast<dyld_chained_starts_in_segment *>(buf);
  segInfo->size = getSize();
  segInfo->page_size = target->getPageSize();
  // FIXME: Use DYLD_CHAINED_PTR_64_OFFSET on newer OS versions.
  segInfo->pointer_format = DYLD_CHAINED_PTR_64;
  segInfo->segment_offset = oseg->addr - in.header->addr;
  segInfo->max_valid_pointer = 0; // not used on 64-bit
  segInfo->page_count = pageStarts.back().first + 1;

  uint16_t *starts = segInfo->page_start;
  for (size_t i = 0; i < segInfo->page_count; ++i)
    starts[i] = DYLD_CHAINED_PTR_START_NONE;

  for (auto [pageIdx, startAddr] : pageStarts)
    starts[pageIdx] = startAddr;
  return segInfo->size;
}

static size_t importEntrySize(int format) {
  switch (format) {
  case DYLD_CHAINED_IMPORT:
    return sizeof(dyld_chained_import);
  case DYLD_CHAINED_IMPORT_ADDEND:
    return sizeof(dyld_chained_import_addend);
  case DYLD_CHAINED_IMPORT_ADDEND64:
    return sizeof(dyld_chained_import_addend64);
  default:
    llvm_unreachable("Unknown import format");
  }
}

// This is step 3 of the algorithm described in the class comment of
// ChainedFixupsSection.
//
// LC_DYLD_CHAINED_FIXUPS data consists of (in this order):
// * A dyld_chained_fixups_header
// * A dyld_chained_starts_in_image
// * One dyld_chained_starts_in_segment per segment
// * List of all imports (dyld_chained_import, dyld_chained_import_addend, or
//   dyld_chained_import_addend64)
// * Names of imported symbols
void ChainedFixupsSection::writeTo(uint8_t *buf) const {
  auto *header = reinterpret_cast<dyld_chained_fixups_header *>(buf);
  header->fixups_version = 0;
  header->imports_count = bindings.size();
  header->imports_format = importFormat;
  header->symbols_format = 0;

  buf += alignTo<8>(sizeof(*header));

  auto curOffset = [&buf, &header]() -> uint32_t {
    return buf - reinterpret_cast<uint8_t *>(header);
  };

  header->starts_offset = curOffset();

  auto *imageInfo = reinterpret_cast<dyld_chained_starts_in_image *>(buf);
  imageInfo->seg_count = outputSegments.size();
  uint32_t *segStarts = imageInfo->seg_info_offset;

  // dyld_chained_starts_in_image ends in a flexible array member containing a
  // uint32_t for each segment. Leave room for it, and fill it via segStarts.
  buf += alignTo<8>(offsetof(dyld_chained_starts_in_image, seg_info_offset) +
                    outputSegments.size() * sizeof(uint32_t));

  // Initialize all offsets to 0, which indicates that the segment does not
  // have fixups. Those that do have them will be filled in below.
  for (size_t i = 0; i < outputSegments.size(); ++i)
    segStarts[i] = 0;

  for (const SegmentInfo &seg : fixupSegments) {
    segStarts[seg.oseg->index] = curOffset() - header->starts_offset;
    buf += seg.writeTo(buf);
  }

  // Write imports table.
  header->imports_offset = curOffset();
  uint64_t nameOffset = 0;
  for (auto [import, idx] : bindings) {
    const Symbol &sym = *import.first;
    int16_t libOrdinal = needsWeakBind(sym)
                             ? (int64_t)BIND_SPECIAL_DYLIB_WEAK_LOOKUP
                             : ordinalForSymbol(sym);
    buf += writeImport(buf, importFormat, libOrdinal, sym.isWeakRef(),
                       nameOffset, import.second);
    nameOffset += sym.getName().size() + 1;
  }

  // Write imported symbol names.
  header->symbols_offset = curOffset();
  for (auto [import, idx] : bindings) {
    StringRef name = import.first->getName();
    memcpy(buf, name.data(), name.size());
    buf += name.size() + 1; // account for null terminator
  }

  assert(curOffset() == getRawSize());
}

// This is step 2 of the algorithm described in the class comment of
// ChainedFixupsSection.
void ChainedFixupsSection::finalizeContents() {
  assert(target->wordSize == 8 && "Only 64-bit platforms are supported");
  assert(config->emitChainedFixups);

  if (!isUInt<32>(symtabSize))
    error("cannot encode chained fixups: imported symbols table size " +
          Twine(symtabSize) + " exceeds 4 GiB");
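  // DYLD_CHAINED_IMPORT packs the symbol name offset into a 23-bit bitfield,
  // hence the isUInt<23>(symtabSize) check; e.g. a 16 MiB (2^24-byte) string
  // table forces the 64-bit format even if every addend fits inline.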
  if (needsLargeAddend || !isUInt<23>(symtabSize))
    importFormat = DYLD_CHAINED_IMPORT_ADDEND64;
  else if (needsAddend)
    importFormat = DYLD_CHAINED_IMPORT_ADDEND;
  else
    importFormat = DYLD_CHAINED_IMPORT;

  for (Location &loc : locations)
    loc.offset =
        loc.isec->parent->getSegmentOffset() + loc.isec->getOffset(loc.offset);

  llvm::sort(locations, [](const Location &a, const Location &b) {
    const OutputSegment *segA = a.isec->parent->parent;
    const OutputSegment *segB = b.isec->parent->parent;
    if (segA == segB)
      return a.offset < b.offset;
    return segA->addr < segB->addr;
  });

  auto sameSegment = [](const Location &a, const Location &b) {
    return a.isec->parent->parent == b.isec->parent->parent;
  };

  const uint64_t pageSize = target->getPageSize();
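  // Record only the first fixup on each page; the rest of that page's fixups
  // are found by following the chain at runtime. E.g. fixups at segment
  // offsets 0x0008, 0x0ff0, and 0x1010 with 0x1000-byte pages produce
  // pageStarts == {{0, 0x008}, {1, 0x010}} for that segment.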
  for (size_t i = 0, count = locations.size(); i < count;) {
    const Location &firstLoc = locations[i];
    fixupSegments.emplace_back(firstLoc.isec->parent->parent);
    while (i < count && sameSegment(locations[i], firstLoc)) {
      uint32_t pageIdx = locations[i].offset / pageSize;
      fixupSegments.back().pageStarts.emplace_back(
          pageIdx, locations[i].offset % pageSize);
      ++i;
      while (i < count && sameSegment(locations[i], firstLoc) &&
             locations[i].offset / pageSize == pageIdx)
        ++i;
    }
  }

  // Compute expected encoded size.
  size = alignTo<8>(sizeof(dyld_chained_fixups_header));
  size += alignTo<8>(offsetof(dyld_chained_starts_in_image, seg_info_offset) +
                     outputSegments.size() * sizeof(uint32_t));
  for (const SegmentInfo &seg : fixupSegments)
    size += seg.getSize();
  size += importEntrySize(importFormat) * bindings.size();
  size += symtabSize;
}

template SymtabSection *macho::makeSymtabSection<LP64>(StringTableSection &);
template SymtabSection *macho::makeSymtabSection<ILP32>(StringTableSection &);
2257
