1//===- Writer.cpp ---------------------------------------------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8
9#include "Writer.h"
10#include "COFFLinkerContext.h"
11#include "CallGraphSort.h"
12#include "Config.h"
13#include "DLL.h"
14#include "InputFiles.h"
15#include "LLDMapFile.h"
16#include "MapFile.h"
17#include "PDB.h"
18#include "SymbolTable.h"
19#include "Symbols.h"
20#include "lld/Common/ErrorHandler.h"
21#include "lld/Common/Memory.h"
22#include "lld/Common/Timer.h"
23#include "llvm/ADT/DenseMap.h"
24#include "llvm/ADT/STLExtras.h"
25#include "llvm/ADT/StringSet.h"
26#include "llvm/BinaryFormat/COFF.h"
27#include "llvm/Support/Endian.h"
28#include "llvm/Support/FileOutputBuffer.h"
29#include "llvm/Support/Parallel.h"
30#include "llvm/Support/RandomNumberGenerator.h"
31#include "llvm/Support/TimeProfiler.h"
32#include "llvm/Support/xxhash.h"
33#include <algorithm>
34#include <cstdio>
35#include <map>
36#include <memory>
37#include <utility>
38
39using namespace llvm;
40using namespace llvm::COFF;
41using namespace llvm::object;
42using namespace llvm::support;
43using namespace llvm::support::endian;
44using namespace lld;
45using namespace lld::coff;
46
47/* To re-generate DOSProgram:
48$ cat > /tmp/DOSProgram.asm
49org 0
50 ; Copy cs to ds.
51 push cs
52 pop ds
53 ; Point ds:dx at the $-terminated string.
54 mov dx, str
55 ; Int 21/AH=09h: Write string to standard output.
56 mov ah, 0x9
57 int 0x21
58 ; Int 21/AH=4Ch: Exit with return code (in AL).
59 mov ax, 0x4C01
60 int 0x21
61str:
62 db 'This program cannot be run in DOS mode.$'
63align 8, db 0
64$ nasm -fbin /tmp/DOSProgram.asm -o /tmp/DOSProgram.bin
65$ xxd -i /tmp/DOSProgram.bin
66*/
67static unsigned char dosProgram[] = {
68 0x0e, 0x1f, 0xba, 0x0e, 0x00, 0xb4, 0x09, 0xcd, 0x21, 0xb8, 0x01, 0x4c,
69 0xcd, 0x21, 0x54, 0x68, 0x69, 0x73, 0x20, 0x70, 0x72, 0x6f, 0x67, 0x72,
70 0x61, 0x6d, 0x20, 0x63, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x20, 0x62, 0x65,
71 0x20, 0x72, 0x75, 0x6e, 0x20, 0x69, 0x6e, 0x20, 0x44, 0x4f, 0x53, 0x20,
72 0x6d, 0x6f, 0x64, 0x65, 0x2e, 0x24, 0x00, 0x00
73};
74static_assert(sizeof(dosProgram) % 8 == 0,
75 "DOSProgram size must be multiple of 8");
76static_assert((sizeof(dos_header) + sizeof(dosProgram)) % 8 == 0,
77 "DOSStub size must be multiple of 8");
78
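// Number of entries in the data directory array of the PE optional header
// (IMAGE_NUMBEROF_DIRECTORY_ENTRIES).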
79static const int numberOfDataDirectory = 16;
80
81namespace {
82
83class DebugDirectoryChunk : public NonSectionChunk {
84public:
85 DebugDirectoryChunk(const COFFLinkerContext &c,
86 const std::vector<std::pair<COFF::DebugType, Chunk *>> &r,
87 bool writeRepro)
88 : records(r), writeRepro(writeRepro), ctx(c) {}
89
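  // One debug_directory entry per record, plus one extra entry for the
  // reproducible-build (IMAGE_DEBUG_TYPE_REPRO) entry if requested.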
90 size_t getSize() const override {
91 return (records.size() + int(writeRepro)) * sizeof(debug_directory);
92 }
93
94 void writeTo(uint8_t *b) const override {
95 auto *d = reinterpret_cast<debug_directory *>(b);
96
    for (const std::pair<COFF::DebugType, Chunk *> &record : records) {
98 Chunk *c = record.second;
99 const OutputSection *os = ctx.getOutputSection(c);
100 uint64_t offs = os->getFileOff() + (c->getRVA() - os->getRVA());
      fillEntry(d, record.first, c->getSize(), c->getRVA(), offs);
102 ++d;
103 }
104
105 if (writeRepro) {
106 // FIXME: The COFF spec allows either a 0-sized entry to just say
107 // "the timestamp field is really a hash", or a 4-byte size field
108 // followed by that many bytes containing a longer hash (with the
109 // lowest 4 bytes usually being the timestamp in little-endian order).
110 // Consider storing the full 8 bytes computed by xxh3_64bits here.
      fillEntry(d, COFF::IMAGE_DEBUG_TYPE_REPRO, 0, 0, 0);
112 }
113 }
114
115 void setTimeDateStamp(uint32_t timeDateStamp) {
116 for (support::ulittle32_t *tds : timeDateStamps)
117 *tds = timeDateStamp;
118 }
119
120private:
121 void fillEntry(debug_directory *d, COFF::DebugType debugType, size_t size,
122 uint64_t rva, uint64_t offs) const {
123 d->Characteristics = 0;
124 d->TimeDateStamp = 0;
125 d->MajorVersion = 0;
126 d->MinorVersion = 0;
127 d->Type = debugType;
128 d->SizeOfData = size;
129 d->AddressOfRawData = rva;
130 d->PointerToRawData = offs;
131
    timeDateStamps.push_back(&d->TimeDateStamp);
133 }
134
135 mutable std::vector<support::ulittle32_t *> timeDateStamps;
136 const std::vector<std::pair<COFF::DebugType, Chunk *>> &records;
137 bool writeRepro;
138 const COFFLinkerContext &ctx;
139};
140
141class CVDebugRecordChunk : public NonSectionChunk {
142public:
143 CVDebugRecordChunk(const COFFLinkerContext &c) : ctx(c) {}
144
145 size_t getSize() const override {
146 return sizeof(codeview::DebugInfo) + ctx.config.pdbAltPath.size() + 1;
147 }
148
149 void writeTo(uint8_t *b) const override {
150 // Save off the DebugInfo entry to backfill the file signature (build id)
151 // in Writer::writeBuildId
152 buildId = reinterpret_cast<codeview::DebugInfo *>(b);
153
154 // variable sized field (PDB Path)
155 char *p = reinterpret_cast<char *>(b + sizeof(*buildId));
156 if (!ctx.config.pdbAltPath.empty())
      memcpy(p, ctx.config.pdbAltPath.data(), ctx.config.pdbAltPath.size());
158 p[ctx.config.pdbAltPath.size()] = '\0';
159 }
160
161 mutable codeview::DebugInfo *buildId = nullptr;
162
163private:
164 const COFFLinkerContext &ctx;
165};
166
167class ExtendedDllCharacteristicsChunk : public NonSectionChunk {
168public:
169 ExtendedDllCharacteristicsChunk(uint32_t c) : characteristics(c) {}
170
171 size_t getSize() const override { return 4; }
172
  void writeTo(uint8_t *buf) const override { write32le(buf, characteristics); }
174
175 uint32_t characteristics = 0;
176};
177
// PartialSection represents a group of chunks that contribute to an
// OutputSection. Collating a collection of PartialSections of the same name
// and characteristics constitutes the OutputSection.
181class PartialSectionKey {
182public:
183 StringRef name;
184 unsigned characteristics;
185
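  // Strict weak ordering: sort by section name first, then by
  // characteristics.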
186 bool operator<(const PartialSectionKey &other) const {
    int c = name.compare(other.name);
188 if (c > 0)
189 return false;
190 if (c == 0)
191 return characteristics < other.characteristics;
192 return true;
193 }
194};
195
196struct ChunkRange {
  Chunk *first = nullptr, *last = nullptr;
198};
199
200// The writer writes a SymbolTable result to a file.
201class Writer {
202public:
203 Writer(COFFLinkerContext &c)
204 : buffer(c.e.outputBuffer), delayIdata(c), ctx(c) {}
205 void run();
206
207private:
208 void calculateStubDependentSizes();
209 void createSections();
210 void createMiscChunks();
211 void createImportTables();
212 void appendImportThunks();
213 void locateImportTables();
214 void createExportTable();
215 void mergeSection(const std::map<StringRef, StringRef>::value_type &p);
216 void mergeSections();
217 void sortECChunks();
218 void appendECImportTables();
219 void removeUnusedSections();
220 void assignAddresses();
221 bool isInRange(uint16_t relType, uint64_t s, uint64_t p, int margin,
222 MachineTypes machine);
223 std::pair<Defined *, bool> getThunk(DenseMap<uint64_t, Defined *> &lastThunks,
224 Defined *target, uint64_t p,
225 uint16_t type, int margin,
226 MachineTypes machine);
227 bool createThunks(OutputSection *os, int margin);
228 bool verifyRanges(const std::vector<Chunk *> chunks);
229 void createECCodeMap();
230 void finalizeAddresses();
231 void removeEmptySections();
232 void assignOutputSectionIndices();
233 void createSymbolAndStringTable();
234 void openFile(StringRef outputPath);
235 template <typename PEHeaderTy> void writeHeader();
236 void createSEHTable();
237 void createRuntimePseudoRelocs();
238 void createECChunks();
239 void insertCtorDtorSymbols();
240 void insertBssDataStartEndSymbols();
241 void markSymbolsWithRelocations(ObjFile *file, SymbolRVASet &usedSymbols);
242 void createGuardCFTables();
243 void markSymbolsForRVATable(ObjFile *file,
244 ArrayRef<SectionChunk *> symIdxChunks,
245 SymbolRVASet &tableSymbols);
246 void getSymbolsFromSections(ObjFile *file,
247 ArrayRef<SectionChunk *> symIdxChunks,
248 std::vector<Symbol *> &symbols);
249 void maybeAddRVATable(SymbolRVASet tableSymbols, StringRef tableSym,
250 StringRef countSym, bool hasFlag=false);
251 void setSectionPermissions();
252 void setECSymbols();
253 void writeSections();
254 void writeBuildId();
255 void writePEChecksum();
256 void sortSections();
257 template <typename T> void sortExceptionTable(ChunkRange &exceptionTable);
258 void sortExceptionTables();
259 void sortCRTSectionChunks(std::vector<Chunk *> &chunks);
260 void addSyntheticIdata();
261 void sortBySectionOrder(std::vector<Chunk *> &chunks);
262 void fixPartialSectionChars(StringRef name, uint32_t chars);
263 bool fixGnuImportChunks();
264 void fixTlsAlignment();
265 PartialSection *createPartialSection(StringRef name, uint32_t outChars);
266 PartialSection *findPartialSection(StringRef name, uint32_t outChars);
267
268 std::optional<coff_symbol16> createSymbol(Defined *d);
269 size_t addEntryToStringTable(StringRef str);
270
271 OutputSection *findSection(StringRef name);
272 void addBaserels();
273 void addBaserelBlocks(std::vector<Baserel> &v);
274 void createDynamicRelocs();
275
276 uint32_t getSizeOfInitializedData();
277
278 void prepareLoadConfig();
279 template <typename T>
280 void prepareLoadConfig(SymbolTable &symtab, T *loadConfig);
281
282 std::unique_ptr<FileOutputBuffer> &buffer;
283 std::map<PartialSectionKey, PartialSection *> partialSections;
284 std::vector<char> strtab;
285 std::vector<llvm::object::coff_symbol16> outputSymtab;
286 std::vector<ECCodeMapEntry> codeMap;
287 IdataContents idata;
288 Chunk *importTableStart = nullptr;
289 uint64_t importTableSize = 0;
290 Chunk *iatStart = nullptr;
291 uint64_t iatSize = 0;
292 DelayLoadContents delayIdata;
293 bool setNoSEHCharacteristic = false;
294 uint32_t tlsAlignment = 0;
295
296 DebugDirectoryChunk *debugDirectory = nullptr;
297 std::vector<std::pair<COFF::DebugType, Chunk *>> debugRecords;
298 CVDebugRecordChunk *buildId = nullptr;
299 ArrayRef<uint8_t> sectionTable;
300
301 // List of Arm64EC export thunks.
302 std::vector<std::pair<Chunk *, Defined *>> exportThunks;
303
304 uint64_t fileSize;
305 uint32_t pointerToSymbolTable = 0;
306 uint64_t sizeOfImage;
307 uint64_t sizeOfHeaders;
308
309 uint32_t dosStubSize;
310 uint32_t coffHeaderOffset;
311 uint32_t peHeaderOffset;
312 uint32_t dataDirOffset64;
313
314 OutputSection *textSec;
315 OutputSection *hexpthkSec;
316 OutputSection *bssSec;
317 OutputSection *rdataSec;
318 OutputSection *buildidSec;
319 OutputSection *dataSec;
320 OutputSection *pdataSec;
321 OutputSection *idataSec;
322 OutputSection *edataSec;
323 OutputSection *didatSec;
324 OutputSection *a64xrmSec;
325 OutputSection *rsrcSec;
326 OutputSection *relocSec;
327 OutputSection *ctorsSec;
328 OutputSection *dtorsSec;
329 // Either .rdata section or .buildid section.
330 OutputSection *debugInfoSec;
331
332 // The range of .pdata sections in the output file.
333 //
334 // We need to keep track of the location of .pdata in whichever section it
335 // gets merged into so that we can sort its contents and emit a correct data
336 // directory entry for the exception table. This is also the case for some
337 // other sections (such as .edata) but because the contents of those sections
338 // are entirely linker-generated we can keep track of their locations using
339 // the chunks that the linker creates. All .pdata chunks come from input
340 // files, so we need to keep track of them separately.
341 ChunkRange pdata;
342
343 // x86_64 .pdata sections on ARM64EC/ARM64X targets.
344 ChunkRange hybridPdata;
345
346 // CHPE metadata symbol on ARM64C target.
347 DefinedRegular *chpeSym = nullptr;
348
349 COFFLinkerContext &ctx;
350};
351} // anonymous namespace
352
353void lld::coff::writeResult(COFFLinkerContext &ctx) {
354 llvm::TimeTraceScope timeScope("Write output(s)");
355 Writer(ctx).run();
356}
357
358void OutputSection::addChunk(Chunk *c) {
  chunks.push_back(c);
360}
361
362void OutputSection::insertChunkAtStart(Chunk *c) {
  chunks.insert(chunks.begin(), c);
364}
365
366void OutputSection::setPermissions(uint32_t c) {
367 header.Characteristics &= ~permMask;
368 header.Characteristics |= c;
369}
370
371void OutputSection::merge(OutputSection *other) {
  chunks.insert(chunks.end(), other->chunks.begin(), other->chunks.end());
  other->chunks.clear();
  contribSections.insert(contribSections.end(), other->contribSections.begin(),
                         other->contribSections.end());
376 other->contribSections.clear();
377
378 // MS link.exe compatibility: when merging a code section into a data section,
379 // mark the target section as a code section.
380 if (other->header.Characteristics & IMAGE_SCN_CNT_CODE) {
381 header.Characteristics |= IMAGE_SCN_CNT_CODE;
382 header.Characteristics &=
383 ~(IMAGE_SCN_CNT_INITIALIZED_DATA | IMAGE_SCN_CNT_UNINITIALIZED_DATA);
384 }
385}
386
387// Write the section header to a given buffer.
388void OutputSection::writeHeaderTo(uint8_t *buf, bool isDebug) {
389 auto *hdr = reinterpret_cast<coff_section *>(buf);
390 *hdr = header;
391 if (stringTableOff) {
392 // If name is too long, write offset into the string table as a name.
    encodeSectionName(hdr->Name, stringTableOff);
394 } else {
395 assert(!isDebug || name.size() <= COFF::NameSize ||
396 (hdr->Characteristics & IMAGE_SCN_MEM_DISCARDABLE) == 0);
    strncpy(hdr->Name, name.data(),
            std::min(name.size(), (size_t)COFF::NameSize));
399 }
400}
401
402void OutputSection::addContributingPartialSection(PartialSection *sec) {
  contribSections.push_back(sec);
404}
405
406void OutputSection::splitECChunks() {
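  // Stable-sort so that native ARM64 chunks precede all other (EC) chunks.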
  llvm::stable_sort(chunks, [=](const Chunk *a, const Chunk *b) {
408 return (a->getMachine() != ARM64) < (b->getMachine() != ARM64);
409 });
410}
411
412// Check whether the target address S is in range from a relocation
413// of type relType at address P.
414bool Writer::isInRange(uint16_t relType, uint64_t s, uint64_t p, int margin,
415 MachineTypes machine) {
416 if (machine == ARMNT) {
    int64_t diff = AbsoluteDifference(s, p + 4) + margin;
    switch (relType) {
    case IMAGE_REL_ARM_BRANCH20T:
      return isInt<21>(diff);
    case IMAGE_REL_ARM_BRANCH24T:
    case IMAGE_REL_ARM_BLX23T:
      return isInt<25>(diff);
    default:
      return true;
    }
  } else if (isAnyArm64(machine)) {
    int64_t diff = AbsoluteDifference(s, p) + margin;
    switch (relType) {
    case IMAGE_REL_ARM64_BRANCH26:
      return isInt<28>(diff);
    case IMAGE_REL_ARM64_BRANCH19:
      return isInt<21>(diff);
    case IMAGE_REL_ARM64_BRANCH14:
      return isInt<16>(diff);
436 default:
437 return true;
438 }
439 } else {
440 return true;
441 }
442}
443
444// Return the last thunk for the given target if it is in range,
445// or create a new one.
446std::pair<Defined *, bool>
447Writer::getThunk(DenseMap<uint64_t, Defined *> &lastThunks, Defined *target,
448 uint64_t p, uint16_t type, int margin, MachineTypes machine) {
449 Defined *&lastThunk = lastThunks[target->getRVA()];
  if (lastThunk && isInRange(type, lastThunk->getRVA(), p, margin, machine))
451 return {lastThunk, false};
452 Chunk *c;
453 switch (getMachineArchType(machine)) {
454 case Triple::thumb:
    c = make<RangeExtensionThunkARM>(ctx, target);
456 break;
457 case Triple::aarch64:
    c = make<RangeExtensionThunkARM64>(machine, target);
459 break;
460 default:
461 llvm_unreachable("Unexpected architecture");
462 }
  Defined *d = make<DefinedSynthetic>("range_extension_thunk", c);
464 lastThunk = d;
465 return {d, true};
466}
467
468// This checks all relocations, and for any relocation which isn't in range
469// it adds a thunk after the section chunk that contains the relocation.
470// If the latest thunk for the specific target is in range, that is used
471// instead of creating a new thunk. All range checks are done with the
472// specified margin, to make sure that relocations that originally are in
// range, but only barely, also get thunks, in case other added thunks push
// the target out of range.
//
// After adding thunks, we verify that all relocations are in range (with
// no extra margin requirements). If this fails, we restart (throwing away
// the previously created thunks) and retry with a wider margin.
479bool Writer::createThunks(OutputSection *os, int margin) {
480 bool addressesChanged = false;
481 DenseMap<uint64_t, Defined *> lastThunks;
482 DenseMap<std::pair<ObjFile *, Defined *>, uint32_t> thunkSymtabIndices;
483 size_t thunksSize = 0;
484 // Recheck Chunks.size() each iteration, since we can insert more
485 // elements into it.
486 for (size_t i = 0; i != os->chunks.size(); ++i) {
487 SectionChunk *sc = dyn_cast<SectionChunk>(Val: os->chunks[i]);
488 if (!sc) {
489 auto chunk = cast<NonSectionChunk>(Val: os->chunks[i]);
490 if (uint32_t size = chunk->extendRanges()) {
491 thunksSize += size;
492 addressesChanged = true;
493 }
494 continue;
495 }
496 MachineTypes machine = sc->getMachine();
497 size_t thunkInsertionSpot = i + 1;
498
499 // Try to get a good enough estimate of where new thunks will be placed.
500 // Offset this by the size of the new thunks added so far, to make the
501 // estimate slightly better.
502 size_t thunkInsertionRVA = sc->getRVA() + sc->getSize() + thunksSize;
503 ObjFile *file = sc->file;
504 std::vector<std::pair<uint32_t, uint32_t>> relocReplacements;
505 ArrayRef<coff_relocation> originalRelocs =
506 file->getCOFFObj()->getRelocations(Sec: sc->header);
507 for (size_t j = 0, e = originalRelocs.size(); j < e; ++j) {
508 const coff_relocation &rel = originalRelocs[j];
509 Symbol *relocTarget = file->getSymbol(symbolIndex: rel.SymbolTableIndex);
510
511 // The estimate of the source address P should be pretty accurate,
512 // but we don't know whether the target Symbol address should be
513 // offset by thunksSize or not (or by some of thunksSize but not all of
514 // it), giving us some uncertainty once we have added one thunk.
515 uint64_t p = sc->getRVA() + rel.VirtualAddress + thunksSize;
516
517 Defined *sym = dyn_cast_or_null<Defined>(Val: relocTarget);
518 if (!sym)
519 continue;
520
521 uint64_t s = sym->getRVA();
522
523 if (isInRange(relType: rel.Type, s, p, margin, machine))
524 continue;
525
526 // If the target isn't in range, hook it up to an existing or new thunk.
527 auto [thunk, wasNew] =
528 getThunk(lastThunks, target: sym, p, type: rel.Type, margin, machine);
529 if (wasNew) {
530 Chunk *thunkChunk = thunk->getChunk();
531 thunkChunk->setRVA(
532 thunkInsertionRVA); // Estimate of where it will be located.
533 os->chunks.insert(position: os->chunks.begin() + thunkInsertionSpot, x: thunkChunk);
534 thunkInsertionSpot++;
535 thunksSize += thunkChunk->getSize();
536 thunkInsertionRVA += thunkChunk->getSize();
537 addressesChanged = true;
538 }
539
540 // To redirect the relocation, add a symbol to the parent object file's
541 // symbol table, and replace the relocation symbol table index with the
542 // new index.
543 auto insertion = thunkSymtabIndices.insert(KV: {{file, thunk}, ~0U});
544 uint32_t &thunkSymbolIndex = insertion.first->second;
545 if (insertion.second)
546 thunkSymbolIndex = file->addRangeThunkSymbol(thunk);
      relocReplacements.emplace_back(j, thunkSymbolIndex);
548 }
549
550 // Get a writable copy of this section's relocations so they can be
551 // modified. If the relocations point into the object file, allocate new
552 // memory. Otherwise, this must be previously allocated memory that can be
553 // modified in place.
554 ArrayRef<coff_relocation> curRelocs = sc->getRelocs();
555 MutableArrayRef<coff_relocation> newRelocs;
556 if (originalRelocs.data() == curRelocs.data()) {
557 newRelocs = MutableArrayRef(
558 bAlloc().Allocate<coff_relocation>(Num: originalRelocs.size()),
559 originalRelocs.size());
560 } else {
561 newRelocs = MutableArrayRef(
562 const_cast<coff_relocation *>(curRelocs.data()), curRelocs.size());
563 }
564
565 // Copy each relocation, but replace the symbol table indices which need
566 // thunks.
567 auto nextReplacement = relocReplacements.begin();
568 auto endReplacement = relocReplacements.end();
569 for (size_t i = 0, e = originalRelocs.size(); i != e; ++i) {
570 newRelocs[i] = originalRelocs[i];
571 if (nextReplacement != endReplacement && nextReplacement->first == i) {
572 newRelocs[i].SymbolTableIndex = nextReplacement->second;
573 ++nextReplacement;
574 }
575 }
576
577 sc->setRelocs(newRelocs);
578 }
579 return addressesChanged;
580}
581
582// Create a code map for CHPE metadata.
583void Writer::createECCodeMap() {
584 if (!ctx.symtab.isEC())
585 return;
586
  // Clear the map in case we're recomputing it after adding a range
  // extension thunk.
589 codeMap.clear();
590
591 std::optional<chpe_range_type> lastType;
592 Chunk *first, *last;
593
594 auto closeRange = [&]() {
595 if (lastType) {
596 codeMap.push_back(x: {first, last, *lastType});
597 lastType.reset();
598 }
599 };
600
601 for (OutputSection *sec : ctx.outputSections) {
602 for (Chunk *c : sec->chunks) {
603 // Skip empty section chunks. MS link.exe does not seem to do that and
604 // generates empty code ranges in some cases.
605 if (isa<SectionChunk>(Val: c) && !c->getSize())
606 continue;
607
608 std::optional<chpe_range_type> chunkType = c->getArm64ECRangeType();
609 if (chunkType != lastType) {
610 closeRange();
611 first = c;
612 lastType = chunkType;
613 }
614 last = c;
615 }
616 }
617
618 closeRange();
619
620 Symbol *tableCountSym = ctx.symtab.findUnderscore(name: "__hybrid_code_map_count");
621 cast<DefinedAbsolute>(Val: tableCountSym)->setVA(codeMap.size());
622}
623
624// Verify that all relocations are in range, with no extra margin requirements.
625bool Writer::verifyRanges(const std::vector<Chunk *> chunks) {
626 for (Chunk *c : chunks) {
627 SectionChunk *sc = dyn_cast<SectionChunk>(Val: c);
628 if (!sc) {
629 if (!cast<NonSectionChunk>(Val: c)->verifyRanges())
630 return false;
631 continue;
632 }
633 MachineTypes machine = sc->getMachine();
634
635 ArrayRef<coff_relocation> relocs = sc->getRelocs();
636 for (const coff_relocation &rel : relocs) {
637 Symbol *relocTarget = sc->file->getSymbol(symbolIndex: rel.SymbolTableIndex);
638
639 Defined *sym = dyn_cast_or_null<Defined>(Val: relocTarget);
640 if (!sym)
641 continue;
642
643 uint64_t p = sc->getRVA() + rel.VirtualAddress;
644 uint64_t s = sym->getRVA();
645
646 if (!isInRange(relType: rel.Type, s, p, margin: 0, machine))
647 return false;
648 }
649 }
650 return true;
651}
652
653// Assign addresses and add thunks if necessary.
654void Writer::finalizeAddresses() {
655 assignAddresses();
656 if (ctx.config.machine != ARMNT && !isAnyArm64(Machine: ctx.config.machine))
657 return;
658
659 size_t origNumChunks = 0;
660 for (OutputSection *sec : ctx.outputSections) {
661 sec->origChunks = sec->chunks;
662 origNumChunks += sec->chunks.size();
663 }
664
665 int pass = 0;
666 int margin = 1024 * 100;
667 while (true) {
668 llvm::TimeTraceScope timeScope2("Add thunks pass");
669
670 // First check whether we need thunks at all, or if the previous pass of
671 // adding them turned out ok.
672 bool rangesOk = true;
673 size_t numChunks = 0;
674 {
675 llvm::TimeTraceScope timeScope3("Verify ranges");
676 for (OutputSection *sec : ctx.outputSections) {
677 if (!verifyRanges(chunks: sec->chunks)) {
678 rangesOk = false;
679 break;
680 }
681 numChunks += sec->chunks.size();
682 }
683 }
684 if (rangesOk) {
685 if (pass > 0)
686 Log(ctx) << "Added " << (numChunks - origNumChunks) << " thunks with "
687 << "margin " << margin << " in " << pass << " passes";
688 return;
689 }
690
691 if (pass >= 10)
692 Fatal(ctx) << "adding thunks hasn't converged after " << pass
693 << " passes";
694
695 if (pass > 0) {
696 // If the previous pass didn't work out, reset everything back to the
697 // original conditions before retrying with a wider margin. This should
698 // ideally never happen under real circumstances.
699 for (OutputSection *sec : ctx.outputSections)
700 sec->chunks = sec->origChunks;
701 margin *= 2;
702 }
703
704 // Try adding thunks everywhere where it is needed, with a margin
705 // to avoid things going out of range due to the added thunks.
706 bool addressesChanged = false;
707 {
708 llvm::TimeTraceScope timeScope3("Create thunks");
709 for (OutputSection *sec : ctx.outputSections)
710 addressesChanged |= createThunks(os: sec, margin);
711 }
712 // If the verification above thought we needed thunks, we should have
713 // added some.
714 assert(addressesChanged);
715 (void)addressesChanged;
716
717 // Recalculate the layout for the whole image (and verify the ranges at
718 // the start of the next round).
719 assignAddresses();
720
721 pass++;
722 }
723}
724
725void Writer::writePEChecksum() {
726 if (!ctx.config.writeCheckSum) {
727 return;
728 }
729
730 llvm::TimeTraceScope timeScope("PE checksum");
731
732 // https://docs.microsoft.com/en-us/windows/win32/debug/pe-format#checksum
733 uint32_t *buf = (uint32_t *)buffer->getBufferStart();
734 uint32_t size = (uint32_t)(buffer->getBufferSize());
735
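  // The CheckSum field is at the same offset in pe32_header and
  // pe32plus_header, so using pe32_header here works for both PE32 and PE32+.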
736 pe32_header *peHeader = (pe32_header *)((uint8_t *)buf + coffHeaderOffset +
737 sizeof(coff_file_header));
738
739 uint64_t sum = 0;
740 uint32_t count = size;
741 ulittle16_t *addr = (ulittle16_t *)buf;
742
  // The PE checksum algorithm, implemented as suggested in RFC 1071.
744 while (count > 1) {
745 sum += *addr++;
746 count -= 2;
747 }
748
749 // Add left-over byte, if any
750 if (count > 0)
751 sum += *(unsigned char *)addr;
752
753 // Fold 32-bit sum to 16 bits
754 while (sum >> 16) {
755 sum = (sum & 0xffff) + (sum >> 16);
756 }
757
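  // The image's file size is added to the folded sum to form the final
  // checksum value.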
758 sum += size;
759 peHeader->CheckSum = sum;
760}
761
762// The main function of the writer.
763void Writer::run() {
764 {
765 llvm::TimeTraceScope timeScope("Write PE");
766 ScopedTimer t1(ctx.codeLayoutTimer);
767
768 calculateStubDependentSizes();
769 if (ctx.config.machine == ARM64X)
770 ctx.dynamicRelocs = make<DynamicRelocsChunk>();
771 createImportTables();
772 createSections();
773 appendImportThunks();
774 // Import thunks must be added before the Control Flow Guard tables are
775 // added.
776 createMiscChunks();
777 createExportTable();
778 mergeSections();
779 sortECChunks();
780 appendECImportTables();
781 createDynamicRelocs();
782 removeUnusedSections();
783 finalizeAddresses();
784 removeEmptySections();
785 assignOutputSectionIndices();
786 setSectionPermissions();
787 setECSymbols();
788 createSymbolAndStringTable();
789
790 if (fileSize > UINT32_MAX)
791 Fatal(ctx) << "image size (" << fileSize << ") "
792 << "exceeds maximum allowable size (" << UINT32_MAX << ")";
793
794 openFile(outputPath: ctx.config.outputFile);
795 if (ctx.config.is64()) {
796 writeHeader<pe32plus_header>();
797 } else {
798 writeHeader<pe32_header>();
799 }
800 writeSections();
801 prepareLoadConfig();
802 sortExceptionTables();
803
804 // Fix up the alignment in the TLS Directory's characteristic field,
805 // if a specific alignment value is needed
806 if (tlsAlignment)
807 fixTlsAlignment();
808 }
809
810 if (!ctx.config.pdbPath.empty() && ctx.config.debug) {
811 assert(buildId);
812 createPDB(ctx, sectionTable, buildId: buildId->buildId);
813 }
814 writeBuildId();
815
816 writeLLDMapFile(ctx);
817 writeMapFile(ctx);
818
819 writePEChecksum();
820
821 if (errorCount())
822 return;
823
824 llvm::TimeTraceScope timeScope("Commit PE to disk");
825 ScopedTimer t2(ctx.outputCommitTimer);
826 if (auto e = buffer->commit())
827 Fatal(ctx) << "failed to write output '" << buffer->getPath()
828 << "': " << toString(E: std::move(e));
829}
830
831static StringRef getOutputSectionName(StringRef name) {
832 StringRef s = name.split(Separator: '$').first;
833
834 // Treat a later period as a separator for MinGW, for sections like
835 // ".ctors.01234".
836 return s.substr(Start: 0, N: s.find(C: '.', From: 1));
837}
838
839// For /order.
840void Writer::sortBySectionOrder(std::vector<Chunk *> &chunks) {
  auto getPriority = [&ctx = ctx](const Chunk *c) {
    if (auto *sec = dyn_cast<SectionChunk>(c))
      if (sec->sym)
        return ctx.config.order.lookup(sec->sym->getName());
    return 0;
  };

  llvm::stable_sort(chunks, [=](const Chunk *a, const Chunk *b) {
    return getPriority(a) < getPriority(b);
  });
851}
852
853// Change the characteristics of existing PartialSections that belong to the
854// section Name to Chars.
855void Writer::fixPartialSectionChars(StringRef name, uint32_t chars) {
856 for (auto it : partialSections) {
857 PartialSection *pSec = it.second;
858 StringRef curName = pSec->name;
859 if (!curName.consume_front(Prefix: name) ||
860 (!curName.empty() && !curName.starts_with(Prefix: "$")))
861 continue;
862 if (pSec->characteristics == chars)
863 continue;
864 PartialSection *destSec = createPartialSection(name: pSec->name, outChars: chars);
865 destSec->chunks.insert(position: destSec->chunks.end(), first: pSec->chunks.begin(),
866 last: pSec->chunks.end());
867 pSec->chunks.clear();
868 }
869}
870
871// Sort concrete section chunks from GNU import libraries.
872//
873// GNU binutils doesn't use short import files, but instead produces import
874// libraries that consist of object files, with section chunks for the .idata$*
// sections. These are linked just like regular static libraries. Each import
876// library consists of one header object, one object file for every imported
877// symbol, and one trailer object. In order for the .idata tables/lists to
878// be formed correctly, the section chunks within each .idata$* section need
879// to be grouped by library, and sorted alphabetically within each library
880// (which makes sure the header comes first and the trailer last).
881bool Writer::fixGnuImportChunks() {
882 uint32_t rdata = IMAGE_SCN_CNT_INITIALIZED_DATA | IMAGE_SCN_MEM_READ;
883
884 // Make sure all .idata$* section chunks are mapped as RDATA in order to
885 // be sorted into the same sections as our own synthesized .idata chunks.
886 fixPartialSectionChars(name: ".idata", chars: rdata);
887
888 bool hasIdata = false;
889 // Sort all .idata$* chunks, grouping chunks from the same library,
890 // with alphabetical ordering of the object files within a library.
891 for (auto it : partialSections) {
892 PartialSection *pSec = it.second;
893 if (!pSec->name.starts_with(Prefix: ".idata"))
894 continue;
895
896 if (!pSec->chunks.empty())
897 hasIdata = true;
    llvm::stable_sort(pSec->chunks, [&](Chunk *s, Chunk *t) {
      SectionChunk *sc1 = dyn_cast<SectionChunk>(s);
      SectionChunk *sc2 = dyn_cast<SectionChunk>(t);
      if (!sc1 || !sc2) {
        // If only s is a SectionChunk, it sorts first; if only t is, or both
        // are null, s is not less than t.
        return sc1 != nullptr;
      }
906 // Make a string with "libraryname/objectfile" for sorting, achieving
907 // both grouping by library and sorting of objects within a library,
908 // at once.
909 std::string key1 =
910 (sc1->file->parentName + "/" + sc1->file->getName()).str();
911 std::string key2 =
912 (sc2->file->parentName + "/" + sc2->file->getName()).str();
913 return key1 < key2;
914 });
915 }
916 return hasIdata;
917}
918
919// Add generated idata chunks, for imported symbols and DLLs, and a
920// terminator in .idata$2.
921void Writer::addSyntheticIdata() {
922 uint32_t rdata = IMAGE_SCN_CNT_INITIALIZED_DATA | IMAGE_SCN_MEM_READ;
923 idata.create(ctx);
924
925 // Add the .idata content in the right section groups, to allow
  // chunks from other linked-in object files to be grouped together.
927 // See Microsoft PE/COFF spec 5.4 for details.
928 auto add = [&](StringRef n, std::vector<Chunk *> &v) {
929 PartialSection *pSec = createPartialSection(name: n, outChars: rdata);
930 pSec->chunks.insert(position: pSec->chunks.end(), first: v.begin(), last: v.end());
931 };
932
933 // The loader assumes a specific order of data.
934 // Add each type in the correct order.
935 add(".idata$2", idata.dirs);
936 add(".idata$4", idata.lookups);
937 add(".idata$5", idata.addresses);
938 if (!idata.hints.empty())
939 add(".idata$6", idata.hints);
940 add(".idata$7", idata.dllNames);
941 if (!idata.auxIat.empty())
942 add(".idata$9", idata.auxIat);
943 if (!idata.auxIatCopy.empty())
944 add(".idata$a", idata.auxIatCopy);
945}
946
947void Writer::appendECImportTables() {
948 if (!isArm64EC(Machine: ctx.config.machine))
949 return;
950
951 const uint32_t rdata = IMAGE_SCN_CNT_INITIALIZED_DATA | IMAGE_SCN_MEM_READ;
952
  // The IAT is always placed at the beginning of the .rdata section and its
  // size is aligned to 4KB. Insert it here, after all merges are done.
955 if (PartialSection *importAddresses = findPartialSection(name: ".idata$5", outChars: rdata)) {
956 if (!rdataSec->chunks.empty())
957 rdataSec->chunks.front()->setAlignment(
958 std::max(a: 0x1000u, b: rdataSec->chunks.front()->getAlignment()));
959 iatSize = alignTo(Value: iatSize, Align: 0x1000);
960
961 rdataSec->chunks.insert(position: rdataSec->chunks.begin(),
962 first: importAddresses->chunks.begin(),
963 last: importAddresses->chunks.end());
964 rdataSec->contribSections.insert(position: rdataSec->contribSections.begin(),
965 x: importAddresses);
966 }
967
968 // The auxiliary IAT is always placed at the end of the .rdata section
969 // and is aligned to 4KB.
970 if (PartialSection *auxIat = findPartialSection(name: ".idata$9", outChars: rdata)) {
971 auxIat->chunks.front()->setAlignment(0x1000);
972 rdataSec->chunks.insert(position: rdataSec->chunks.end(), first: auxIat->chunks.begin(),
973 last: auxIat->chunks.end());
974 rdataSec->addContributingPartialSection(sec: auxIat);
975 }
976
977 if (!delayIdata.getAuxIat().empty()) {
978 delayIdata.getAuxIat().front()->setAlignment(0x1000);
979 rdataSec->chunks.insert(position: rdataSec->chunks.end(),
980 first: delayIdata.getAuxIat().begin(),
981 last: delayIdata.getAuxIat().end());
982 }
983}
984
985// Locate the first Chunk and size of the import directory list and the
986// IAT.
987void Writer::locateImportTables() {
988 uint32_t rdata = IMAGE_SCN_CNT_INITIALIZED_DATA | IMAGE_SCN_MEM_READ;
989
990 if (PartialSection *importDirs = findPartialSection(name: ".idata$2", outChars: rdata)) {
991 if (!importDirs->chunks.empty())
992 importTableStart = importDirs->chunks.front();
993 for (Chunk *c : importDirs->chunks)
994 importTableSize += c->getSize();
995 }
996
997 if (PartialSection *importAddresses = findPartialSection(name: ".idata$5", outChars: rdata)) {
998 if (!importAddresses->chunks.empty())
999 iatStart = importAddresses->chunks.front();
1000 for (Chunk *c : importAddresses->chunks)
1001 iatSize += c->getSize();
1002 }
1003}
1004
1005// Return whether a SectionChunk's suffix (the dollar and any trailing
1006// suffix) should be removed and sorted into the main suffixless
1007// PartialSection.
1008static bool shouldStripSectionSuffix(SectionChunk *sc, StringRef name,
1009 bool isMinGW) {
1010 // On MinGW, comdat groups are formed by putting the comdat group name
1011 // after the '$' in the section name. For .eh_frame$<symbol>, that must
1012 // still be sorted before the .eh_frame trailer from crtend.o, thus just
1013 // strip the section name trailer. For other sections, such as
1014 // .tls$$<symbol> (where non-comdat .tls symbols are otherwise stored in
1015 // ".tls$"), they must be strictly sorted after .tls. And for the
1016 // hypothetical case of comdat .CRT$XCU, we definitely need to keep the
1017 // suffix for sorting. Thus, to play it safe, only strip the suffix for
1018 // the standard sections.
1019 if (!isMinGW)
1020 return false;
1021 if (!sc || !sc->isCOMDAT())
1022 return false;
1023 return name.starts_with(Prefix: ".text$") || name.starts_with(Prefix: ".data$") ||
1024 name.starts_with(Prefix: ".rdata$") || name.starts_with(Prefix: ".pdata$") ||
1025 name.starts_with(Prefix: ".xdata$") || name.starts_with(Prefix: ".eh_frame$");
1026}
1027
1028void Writer::sortSections() {
1029 if (!ctx.config.callGraphProfile.empty()) {
1030 DenseMap<const SectionChunk *, int> order =
1031 computeCallGraphProfileOrder(ctx);
1032 for (auto it : order) {
1033 if (DefinedRegular *sym = it.first->sym)
1034 ctx.config.order[sym->getName()] = it.second;
1035 }
1036 }
1037 if (!ctx.config.order.empty())
1038 for (auto it : partialSections)
1039 sortBySectionOrder(chunks&: it.second->chunks);
1040}
1041
1042void Writer::calculateStubDependentSizes() {
1043 if (ctx.config.dosStub)
1044 dosStubSize = alignTo(Value: ctx.config.dosStub->getBufferSize(), Align: 8);
1045 else
1046 dosStubSize = sizeof(dos_header) + sizeof(dosProgram);
1047
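  // Image header layout: [DOS stub]["PE\0\0" magic][COFF file header]
  // [optional (PE) header][data directories][section table].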
1048 coffHeaderOffset = dosStubSize + sizeof(PEMagic);
1049 peHeaderOffset = coffHeaderOffset + sizeof(coff_file_header);
1050 dataDirOffset64 = peHeaderOffset + sizeof(pe32plus_header);
1051}
1052
1053// Create output section objects and add them to OutputSections.
1054void Writer::createSections() {
1055 llvm::TimeTraceScope timeScope("Output sections");
1056 // First, create the builtin sections.
1057 const uint32_t data = IMAGE_SCN_CNT_INITIALIZED_DATA;
1058 const uint32_t bss = IMAGE_SCN_CNT_UNINITIALIZED_DATA;
1059 const uint32_t code = IMAGE_SCN_CNT_CODE;
1060 const uint32_t discardable = IMAGE_SCN_MEM_DISCARDABLE;
1061 const uint32_t r = IMAGE_SCN_MEM_READ;
1062 const uint32_t w = IMAGE_SCN_MEM_WRITE;
1063 const uint32_t x = IMAGE_SCN_MEM_EXECUTE;
1064
1065 SmallDenseMap<std::pair<StringRef, uint32_t>, OutputSection *> sections;
1066 auto createSection = [&](StringRef name, uint32_t outChars) {
1067 OutputSection *&sec = sections[{name, outChars}];
1068 if (!sec) {
      sec = make<OutputSection>(name, outChars);
      ctx.outputSections.push_back(sec);
1071 }
1072 return sec;
1073 };
1074
1075 // Try to match the section order used by link.exe.
1076 textSec = createSection(".text", code | r | x);
1077 if (isArm64EC(Machine: ctx.config.machine))
1078 hexpthkSec = createSection(".hexpthk", code | r | x);
1079 bssSec = createSection(".bss", bss | r | w);
1080 rdataSec = createSection(".rdata", data | r);
1081 buildidSec = createSection(".buildid", data | r);
1082 dataSec = createSection(".data", data | r | w);
1083 pdataSec = createSection(".pdata", data | r);
1084 idataSec = createSection(".idata", data | r);
1085 edataSec = createSection(".edata", data | r);
1086 didatSec = createSection(".didat", data | r);
1087 if (isArm64EC(Machine: ctx.config.machine))
1088 a64xrmSec = createSection(".a64xrm", data | r);
1089 rsrcSec = createSection(".rsrc", data | r);
1090 relocSec = createSection(".reloc", data | discardable | r);
1091 ctorsSec = createSection(".ctors", data | r | w);
1092 dtorsSec = createSection(".dtors", data | r | w);
1093
1094 // Then bin chunks by name and output characteristics.
1095 for (Chunk *c : ctx.driver.getChunks()) {
1096 auto *sc = dyn_cast<SectionChunk>(Val: c);
1097 if (sc && !sc->live) {
1098 if (ctx.config.verbose)
1099 sc->printDiscardedMessage();
1100 continue;
1101 }
1102 StringRef name = c->getSectionName();
1103 if (shouldStripSectionSuffix(sc, name, isMinGW: ctx.config.mingw))
1104 name = name.split(Separator: '$').first;
1105
1106 if (name.starts_with(Prefix: ".tls"))
1107 tlsAlignment = std::max(a: tlsAlignment, b: c->getAlignment());
1108
1109 PartialSection *pSec = createPartialSection(name,
1110 outChars: c->getOutputCharacteristics());
1111 pSec->chunks.push_back(x: c);
1112 }
1113
1114 fixPartialSectionChars(name: ".rsrc", chars: data | r);
1115 fixPartialSectionChars(name: ".edata", chars: data | r);
  // Even in non-MinGW cases, we might need to link against GNU import
1117 // libraries.
1118 bool hasIdata = fixGnuImportChunks();
1119 if (!idata.empty())
1120 hasIdata = true;
1121
1122 if (hasIdata)
1123 addSyntheticIdata();
1124
1125 sortSections();
1126
1127 if (hasIdata)
1128 locateImportTables();
1129
1130 // Then create an OutputSection for each section.
1131 // '$' and all following characters in input section names are
1132 // discarded when determining output section. So, .text$foo
1133 // contributes to .text, for example. See PE/COFF spec 3.2.
1134 for (auto it : partialSections) {
1135 PartialSection *pSec = it.second;
1136 StringRef name = getOutputSectionName(name: pSec->name);
1137 uint32_t outChars = pSec->characteristics;
1138
1139 if (name == ".CRT") {
1140 // In link.exe, there is a special case for the I386 target where .CRT
1141 // sections are treated as if they have output characteristics DATA | R if
1142 // their characteristics are DATA | R | W. This implements the same
1143 // special case for all architectures.
1144 outChars = data | r;
1145
1146 Log(ctx) << "Processing section " << pSec->name << " -> " << name;
1147
1148 sortCRTSectionChunks(chunks&: pSec->chunks);
1149 }
1150
1151 // ARM64EC has specific placement and alignment requirements for the IAT.
1152 // Delay adding its chunks until appendECImportTables.
1153 if (isArm64EC(Machine: ctx.config.machine) &&
1154 (pSec->name == ".idata$5" || pSec->name == ".idata$9"))
1155 continue;
1156
1157 OutputSection *sec = createSection(name, outChars);
1158 for (Chunk *c : pSec->chunks)
1159 sec->addChunk(c);
1160
1161 sec->addContributingPartialSection(sec: pSec);
1162 }
1163
1164 if (ctx.hybridSymtab) {
1165 if (OutputSection *sec = findSection(name: ".CRT"))
1166 sec->splitECChunks();
1167 }
1168
1169 // Finally, move some output sections to the end.
1170 auto sectionOrder = [&](const OutputSection *s) {
    // Move DISCARDABLE (or non-memory-mapped) sections to the end of the file
    // because the loader cannot handle holes. Stripping can remove
    // discardable sections other than .reloc, which is the first of them
    // (created early).
    if (s->header.Characteristics & IMAGE_SCN_MEM_DISCARDABLE) {
      // Move discardable sections named .debug_* to the end, after other
      // discardable sections. Stripping only removes the .debug_* sections,
      // so this avoids leaving holes after stripping.
1178 if (s->name.starts_with(Prefix: ".debug_"))
1179 return 3;
1180 return 2;
1181 }
    // .rsrc should come at the end of the non-discardable sections because
    // its size may be changed by the Win32 UpdateResources() function,
    // causing subsequent sections to move (see https://crbug.com/827082).
1185 if (s == rsrcSec)
1186 return 1;
1187 return 0;
1188 };
  llvm::stable_sort(ctx.outputSections,
                    [&](const OutputSection *s, const OutputSection *t) {
                      return sectionOrder(s) < sectionOrder(t);
                    });
1193}
1194
1195void Writer::createMiscChunks() {
1196 llvm::TimeTraceScope timeScope("Misc chunks");
1197 Configuration *config = &ctx.config;
1198
1199 for (MergeChunk *p : ctx.mergeChunkInstances) {
1200 if (p) {
1201 p->finalizeContents();
1202 rdataSec->addChunk(c: p);
1203 }
1204 }
1205
1206 // Create thunks for locally-dllimported symbols.
1207 ctx.forEachSymtab(f: [&](SymbolTable &symtab) {
1208 if (!symtab.localImportChunks.empty()) {
1209 for (Chunk *c : symtab.localImportChunks)
1210 rdataSec->addChunk(c);
1211 }
1212 });
1213
1214 // Create Debug Information Chunks
1215 debugInfoSec = config->mingw ? buildidSec : rdataSec;
1216 if (config->buildIDHash != BuildIDHash::None || config->debug ||
1217 config->repro || config->cetCompat) {
1218 debugDirectory =
1219 make<DebugDirectoryChunk>(args&: ctx, args&: debugRecords, args&: config->repro);
1220 debugDirectory->setAlignment(4);
1221 debugInfoSec->addChunk(c: debugDirectory);
1222 }
1223
1224 if (config->debug || config->buildIDHash != BuildIDHash::None) {
1225 // Make a CVDebugRecordChunk even when /DEBUG:CV is not specified. We
1226 // output a PDB no matter what, and this chunk provides the only means of
1227 // allowing a debugger to match a PDB and an executable. So we need it even
1228 // if we're ultimately not going to write CodeView data to the PDB.
1229 buildId = make<CVDebugRecordChunk>(args&: ctx);
1230 debugRecords.emplace_back(args: COFF::IMAGE_DEBUG_TYPE_CODEVIEW, args&: buildId);
1231 ctx.forEachSymtab(f: [&](SymbolTable &symtab) {
1232 if (Symbol *buildidSym = symtab.findUnderscore(name: "__buildid"))
1233 replaceSymbol<DefinedSynthetic>(s: buildidSym, arg: buildidSym->getName(),
1234 arg&: buildId, arg: 4);
1235 });
1236 }
1237
1238 if (config->cetCompat) {
1239 debugRecords.emplace_back(args: COFF::IMAGE_DEBUG_TYPE_EX_DLLCHARACTERISTICS,
1240 args: make<ExtendedDllCharacteristicsChunk>(
1241 args: IMAGE_DLL_CHARACTERISTICS_EX_CET_COMPAT));
1242 }
1243
1244 // Align and add each chunk referenced by the debug data directory.
1245 for (std::pair<COFF::DebugType, Chunk *> r : debugRecords) {
1246 r.second->setAlignment(4);
1247 debugInfoSec->addChunk(c: r.second);
1248 }
1249
1250 // Create SEH table. x86-only.
1251 if (config->safeSEH)
1252 createSEHTable();
1253
1254 // Create /guard:cf tables if requested.
1255 createGuardCFTables();
1256
1257 createECChunks();
1258
1259 if (config->autoImport)
1260 createRuntimePseudoRelocs();
1261
1262 if (config->mingw) {
1263 insertCtorDtorSymbols();
1264 insertBssDataStartEndSymbols();
1265 }
1266}
1267
1268// Create .idata section for the DLL-imported symbol table.
1269// The format of this section is inherently Windows-specific.
// The IdataContents class abstracts away the details for us,
1271// so we just let it create chunks and add them to the section.
1272void Writer::createImportTables() {
1273 llvm::TimeTraceScope timeScope("Import tables");
1274 // Initialize DLLOrder so that import entries are ordered in
1275 // the same order as in the command line. (That affects DLL
1276 // initialization order, and this ordering is MSVC-compatible.)
1277 for (ImportFile *file : ctx.importFileInstances) {
1278 if (!file->live)
1279 continue;
1280
1281 std::string dll = StringRef(file->dllName).lower();
1282 ctx.config.dllOrder.try_emplace(k: dll, args: ctx.config.dllOrder.size());
1283
1284 if (file->impSym && !isa<DefinedImportData>(Val: file->impSym))
1285 Fatal(ctx) << file->symtab.printSymbol(sym: file->impSym) << " was replaced";
1286 DefinedImportData *impSym = cast_or_null<DefinedImportData>(Val: file->impSym);
1287 if (ctx.config.delayLoads.count(x: StringRef(file->dllName).lower())) {
1288 if (!file->thunkSym)
1289 Fatal(ctx) << "cannot delay-load " << toString(file)
1290 << " due to import of data: "
1291 << file->symtab.printSymbol(sym: impSym);
1292 delayIdata.add(sym: impSym);
1293 } else {
1294 idata.add(sym: impSym);
1295 }
1296 }
1297}
1298
1299void Writer::appendImportThunks() {
1300 if (ctx.importFileInstances.empty())
1301 return;
1302
1303 llvm::TimeTraceScope timeScope("Import thunks");
1304 for (ImportFile *file : ctx.importFileInstances) {
1305 if (!file->live)
1306 continue;
1307
1308 if (file->thunkSym) {
1309 if (!isa<DefinedImportThunk>(Val: file->thunkSym))
1310 Fatal(ctx) << file->symtab.printSymbol(sym: file->thunkSym)
1311 << " was replaced";
1312 auto *chunk = cast<DefinedImportThunk>(Val: file->thunkSym)->getChunk();
1313 if (chunk->live)
1314 textSec->addChunk(c: chunk);
1315 }
1316
1317 if (file->auxThunkSym) {
1318 if (!isa<DefinedImportThunk>(Val: file->auxThunkSym))
1319 Fatal(ctx) << file->symtab.printSymbol(sym: file->auxThunkSym)
1320 << " was replaced";
1321 auto *chunk = cast<DefinedImportThunk>(Val: file->auxThunkSym)->getChunk();
1322 if (chunk->live)
1323 textSec->addChunk(c: chunk);
1324 }
1325
1326 if (file->impchkThunk)
1327 textSec->addChunk(c: file->impchkThunk);
1328 }
1329
1330 if (!delayIdata.empty()) {
1331 delayIdata.create();
1332 for (Chunk *c : delayIdata.getChunks())
1333 didatSec->addChunk(c);
1334 for (Chunk *c : delayIdata.getDataChunks())
1335 dataSec->addChunk(c);
1336 for (Chunk *c : delayIdata.getCodeChunks())
1337 textSec->addChunk(c);
1338 for (Chunk *c : delayIdata.getCodePData())
1339 pdataSec->addChunk(c);
1340 for (Chunk *c : delayIdata.getAuxIatCopy())
1341 rdataSec->addChunk(c);
1342 for (Chunk *c : delayIdata.getCodeUnwindInfo())
1343 rdataSec->addChunk(c);
1344 }
1345}
1346
1347void Writer::createExportTable() {
1348 llvm::TimeTraceScope timeScope("Export table");
1349 if (!edataSec->chunks.empty()) {
1350 // Allow using a custom built export table from input object files, instead
1351 // of having the linker synthesize the tables.
1352 if (!ctx.hybridSymtab) {
1353 ctx.symtab.edataStart = edataSec->chunks.front();
1354 ctx.symtab.edataEnd = edataSec->chunks.back();
1355 } else {
1356 // On hybrid target, split EC and native chunks.
      llvm::stable_sort(edataSec->chunks, [=](const Chunk *a, const Chunk *b) {
1358 return (a->getMachine() != ARM64) < (b->getMachine() != ARM64);
1359 });
1360
1361 for (auto chunk : edataSec->chunks) {
1362 if (chunk->getMachine() != ARM64) {
1363 ctx.symtab.edataStart = chunk;
1364 ctx.symtab.edataEnd = edataSec->chunks.back();
1365 break;
1366 }
1367
1368 if (!ctx.hybridSymtab->edataStart)
1369 ctx.hybridSymtab->edataStart = chunk;
1370 ctx.hybridSymtab->edataEnd = chunk;
1371 }
1372 }
1373 }
1374 ctx.forEachActiveSymtab(f: [&](SymbolTable &symtab) {
1375 if (symtab.edataStart) {
1376 if (symtab.hadExplicitExports)
1377 Warn(ctx) << "literal .edata sections override exports";
1378 } else if (!symtab.exports.empty()) {
1379 std::vector<Chunk *> edataChunks;
1380 createEdataChunks(symtab, chunks&: edataChunks);
1381 for (Chunk *c : edataChunks)
1382 edataSec->addChunk(c);
1383 symtab.edataStart = edataChunks.front();
1384 symtab.edataEnd = edataChunks.back();
1385 }
1386
1387 // Warn on exported deleting destructor.
1388 for (auto e : symtab.exports)
1389 if (e.sym && e.sym->getName().starts_with(Prefix: "??_G"))
1390 Warn(ctx) << "export of deleting dtor: " << toString(ctx, b&: *e.sym);
1391 });
1392}
1393
1394void Writer::removeUnusedSections() {
1395 llvm::TimeTraceScope timeScope("Remove unused sections");
1396 // Remove sections that we can be sure won't get content, to avoid
1397 // allocating space for their section headers.
1398 auto isUnused = [this](OutputSection *s) {
1399 if (s == relocSec)
1400 return false; // This section is populated later.
1401 // MergeChunks have zero size at this point, as their size is finalized
1402 // later. Only remove sections that have no Chunks at all.
1403 return s->chunks.empty();
1404 };
  llvm::erase_if(ctx.outputSections, isUnused);
1406}
1407
1408// The Windows loader doesn't seem to like empty sections,
// so we remove them if there are any.
1410void Writer::removeEmptySections() {
1411 llvm::TimeTraceScope timeScope("Remove empty sections");
1412 auto isEmpty = [](OutputSection *s) { return s->getVirtualSize() == 0; };
  llvm::erase_if(ctx.outputSections, isEmpty);
1414}
1415
1416void Writer::assignOutputSectionIndices() {
1417 llvm::TimeTraceScope timeScope("Output sections indices");
1418 // Assign final output section indices, and assign each chunk to its output
1419 // section.
1420 uint32_t idx = 1;
1421 for (OutputSection *os : ctx.outputSections) {
1422 os->sectionIndex = idx;
1423 for (Chunk *c : os->chunks)
1424 c->setOutputSectionIdx(idx);
1425 ++idx;
1426 }
1427
1428 // Merge chunks are containers of chunks, so assign those an output section
1429 // too.
1430 for (MergeChunk *mc : ctx.mergeChunkInstances)
1431 if (mc)
1432 for (SectionChunk *sc : mc->sections)
1433 if (sc && sc->live)
1434 sc->setOutputSectionIdx(mc->getOutputSectionIdx());
1435}
1436
1437size_t Writer::addEntryToStringTable(StringRef str) {
1438 assert(str.size() > COFF::NameSize);
1439 size_t offsetOfEntry = strtab.size() + 4; // +4 for the size field
  strtab.insert(strtab.end(), str.begin(), str.end());
  strtab.push_back('\0');
1442 return offsetOfEntry;
1443}
1444
1445std::optional<coff_symbol16> Writer::createSymbol(Defined *def) {
1446 coff_symbol16 sym;
1447 switch (def->kind()) {
1448 case Symbol::DefinedAbsoluteKind: {
1449 auto *da = dyn_cast<DefinedAbsolute>(Val: def);
    // Note: a COFF symbol can only store a 32-bit value, so 64-bit absolute
    // values will be truncated.
1452 sym.Value = da->getVA();
1453 sym.SectionNumber = IMAGE_SYM_ABSOLUTE;
1454 break;
1455 }
1456 default: {
    // Don't add symbols to the symbol table if they won't end up in the
    // output image.
1459 // We also try to write DefinedSynthetic as a normal symbol. Some of these
1460 // symbols do point to an actual chunk, like __safe_se_handler_table. Others
1461 // like __ImageBase are outside of sections and thus cannot be represented.
1462 Chunk *c = def->getChunk();
1463 if (!c)
1464 return std::nullopt;
1465 OutputSection *os = ctx.getOutputSection(c);
1466 if (!os)
1467 return std::nullopt;
1468
1469 sym.Value = def->getRVA() - os->getRVA();
1470 sym.SectionNumber = os->sectionIndex;
1471 break;
1472 }
1473 }
1474
  // Symbols that are runtime pseudo relocations don't point to the actual
  // symbol data itself (as they are imported), but to the IAT entry
  // instead. Avoid emitting them to the symbol table, as they can confuse
1478 // debuggers.
1479 if (def->isRuntimePseudoReloc)
1480 return std::nullopt;
1481
1482 StringRef name = def->getName();
1483 if (name.size() > COFF::NameSize) {
1484 sym.Name.Offset.Zeroes = 0;
1485 sym.Name.Offset.Offset = addEntryToStringTable(str: name);
1486 } else {
1487 memset(s: sym.Name.ShortName, c: 0, n: COFF::NameSize);
1488 memcpy(dest: sym.Name.ShortName, src: name.data(), n: name.size());
1489 }
1490
1491 if (auto *d = dyn_cast<DefinedCOFF>(Val: def)) {
1492 COFFSymbolRef ref = d->getCOFFSymbol();
1493 sym.Type = ref.getType();
1494 sym.StorageClass = ref.getStorageClass();
1495 } else if (def->kind() == Symbol::DefinedImportThunkKind) {
1496 sym.Type = (IMAGE_SYM_DTYPE_FUNCTION << SCT_COMPLEX_TYPE_SHIFT) |
1497 IMAGE_SYM_TYPE_NULL;
1498 sym.StorageClass = IMAGE_SYM_CLASS_EXTERNAL;
1499 } else {
1500 sym.Type = IMAGE_SYM_TYPE_NULL;
1501 sym.StorageClass = IMAGE_SYM_CLASS_EXTERNAL;
1502 }
1503 sym.NumberOfAuxSymbols = 0;
1504 return sym;
1505}
1506
1507void Writer::createSymbolAndStringTable() {
1508 llvm::TimeTraceScope timeScope("Symbol and string table");
1509 // PE/COFF images are limited to 8 byte section names. Longer names can be
1510 // supported by writing a non-standard string table, but this string table is
1511 // not mapped at runtime and the long names will therefore be inaccessible.
1512 // link.exe always truncates section names to 8 bytes, whereas binutils always
1513 // preserves long section names via the string table. LLD adopts a hybrid
1514 // solution where discardable sections have long names preserved and
1515 // non-discardable sections have their names truncated, to ensure that any
1516 // section which is mapped at runtime also has its name mapped at runtime.
1517 for (OutputSection *sec : ctx.outputSections) {
1518 if (sec->name.size() <= COFF::NameSize)
1519 continue;
1520 if ((sec->header.Characteristics & IMAGE_SCN_MEM_DISCARDABLE) == 0)
1521 continue;
1522 if (ctx.config.warnLongSectionNames) {
1523 Warn(ctx)
1524 << "section name " << sec->name
1525 << " is longer than 8 characters and will use a non-standard string "
1526 "table";
1527 }
1528 sec->setStringTableOff(addEntryToStringTable(str: sec->name));
1529 }
1530
1531 if (ctx.config.writeSymtab) {
1532 for (ObjFile *file : ctx.objFileInstances) {
1533 for (Symbol *b : file->getSymbols()) {
1534 auto *d = dyn_cast_or_null<Defined>(Val: b);
1535 if (!d || d->writtenToSymtab)
1536 continue;
1537 d->writtenToSymtab = true;
1538 if (auto *dc = dyn_cast_or_null<DefinedCOFF>(Val: d)) {
1539 COFFSymbolRef symRef = dc->getCOFFSymbol();
1540 if (symRef.isSectionDefinition() ||
1541 symRef.getStorageClass() == COFF::IMAGE_SYM_CLASS_LABEL)
1542 continue;
1543 }
1544
1545 if (std::optional<coff_symbol16> sym = createSymbol(def: d))
1546 outputSymtab.push_back(x: *sym);
1547
1548 if (auto *dthunk = dyn_cast<DefinedImportThunk>(Val: d)) {
1549 if (!dthunk->wrappedSym->writtenToSymtab) {
1550 dthunk->wrappedSym->writtenToSymtab = true;
1551 if (std::optional<coff_symbol16> sym =
1552 createSymbol(def: dthunk->wrappedSym))
1553 outputSymtab.push_back(x: *sym);
1554 }
1555 }
1556 }
1557 }
1558 }
1559
1560 if (outputSymtab.empty() && strtab.empty())
1561 return;
1562
1563 // We position the symbol table to be adjacent to the end of the last section.
1564 uint64_t fileOff = fileSize;
1565 pointerToSymbolTable = fileOff;
1566 fileOff += outputSymtab.size() * sizeof(coff_symbol16);
1567 fileOff += 4 + strtab.size();
1568 fileSize = alignTo(Value: fileOff, Align: ctx.config.fileAlign);
1569}
1570
1571void Writer::mergeSection(const std::map<StringRef, StringRef>::value_type &p) {
1572 StringRef toName = p.second;
1573 if (p.first == toName)
1574 return;
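  // Follow the chain of /merge mappings to its final target, diagnosing any
  // cycle among the requested merges.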
1575 StringSet<> names;
1576 while (true) {
1577 if (!names.insert(key: toName).second)
1578 Fatal(ctx) << "/merge: cycle found for section '" << p.first << "'";
1579 auto i = ctx.config.merge.find(x: toName);
1580 if (i == ctx.config.merge.end())
1581 break;
1582 toName = i->second;
1583 }
1584 OutputSection *from = findSection(name: p.first);
1585 OutputSection *to = findSection(name: toName);
1586 if (!from)
1587 return;
1588 if (!to) {
1589 from->name = toName;
1590 return;
1591 }
1592 to->merge(other: from);
1593}
1594
1595void Writer::mergeSections() {
1596 llvm::TimeTraceScope timeScope("Merge sections");
1597 if (!pdataSec->chunks.empty()) {
1598 if (isArm64EC(Machine: ctx.config.machine)) {
1599 // On ARM64EC .pdata may contain both ARM64 and X64 data. Split them by
1600 // sorting and store their regions separately.
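      // stable_sort on (getMachine() == AMD64) keeps the relative order within
      // each group and moves all X64 chunks after the ARM64 ones.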
1601 llvm::stable_sort(Range&: pdataSec->chunks, C: [=](const Chunk *a, const Chunk *b) {
1602 return (a->getMachine() == AMD64) < (b->getMachine() == AMD64);
1603 });
1604
1605 for (auto chunk : pdataSec->chunks) {
1606 if (chunk->getMachine() == AMD64) {
1607 hybridPdata.first = chunk;
1608 hybridPdata.last = pdataSec->chunks.back();
1609 break;
1610 }
1611
1612 if (!pdata.first)
1613 pdata.first = chunk;
1614 pdata.last = chunk;
1615 }
1616 } else {
1617 pdata.first = pdataSec->chunks.front();
1618 pdata.last = pdataSec->chunks.back();
1619 }
1620 }
1621
1622 for (auto &p : ctx.config.merge) {
1623 if (p.first != ".bss")
1624 mergeSection(p);
1625 }
1626
1627 // Because .bss contains all zeros, it should be merged at the end of
1628 // whatever section it is being merged into (usually .data) so that the image
1629 // need not actually contain all of the zeros.
1630 auto it = ctx.config.merge.find(x: ".bss");
1631 if (it != ctx.config.merge.end())
1632 mergeSection(p: *it);
1633}
1634
1635// EC targets may have chunks of various architectures mixed together at this
1636// point. Group code chunks of the same architecture together by sorting chunks
1637// by their EC range type.
1638void Writer::sortECChunks() {
1639 if (!isArm64EC(Machine: ctx.config.machine))
1640 return;
1641
1642 for (OutputSection *sec : ctx.outputSections) {
1643 if (sec->isCodeSection())
1644 llvm::stable_sort(Range&: sec->chunks, C: [=](const Chunk *a, const Chunk *b) {
1645 std::optional<chpe_range_type> aType = a->getArm64ECRangeType(),
1646 bType = b->getArm64ECRangeType();
1647 return bType && (!aType || *aType < *bType);
1648 });
1649 }
1650}
1651
1652// Visits all sections to assign incremental, non-overlapping RVAs and
1653// file offsets.
1654void Writer::assignAddresses() {
1655 llvm::TimeTraceScope timeScope("Assign addresses");
1656 Configuration *config = &ctx.config;
1657
1658  // We need to create the EC code map so that ECCodeMapChunk knows its size.
1659  // We do it here to make sure that we account for range extension chunks.
1660 createECCodeMap();
1661
1662 sizeOfHeaders = dosStubSize + sizeof(PEMagic) + sizeof(coff_file_header) +
1663 sizeof(data_directory) * numberOfDataDirectory +
1664 sizeof(coff_section) * ctx.outputSections.size();
1665 sizeOfHeaders +=
1666 config->is64() ? sizeof(pe32plus_header) : sizeof(pe32_header);
1667 sizeOfHeaders = alignTo(Value: sizeOfHeaders, Align: config->fileAlign);
1668 fileSize = sizeOfHeaders;
1669
1670 // The first page is kept unmapped.
1671 uint64_t rva = alignTo(Value: sizeOfHeaders, Align: config->align);
1672
1673 for (OutputSection *sec : ctx.outputSections) {
1674 llvm::TimeTraceScope timeScope("Section: ", sec->name);
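    // Regenerate the base relocation chunks on each pass; their contents
    // depend on the addresses assigned to the other sections.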
1675 if (sec == relocSec) {
1676 sec->chunks.clear();
1677 addBaserels();
1678 if (ctx.dynamicRelocs) {
1679 ctx.dynamicRelocs->finalize();
1680 relocSec->addChunk(c: ctx.dynamicRelocs);
1681 }
1682 }
1683 uint64_t rawSize = 0, virtualSize = 0;
1684 sec->header.VirtualAddress = rva;
1685
1686 // If /FUNCTIONPADMIN is used, functions are padded in order to create a
1687 // hotpatchable image.
1688 uint32_t padding = sec->isCodeSection() ? config->functionPadMin : 0;
1689 std::optional<chpe_range_type> prevECRange;
1690
1691 for (Chunk *c : sec->chunks) {
1692      // Align EC code range boundaries.
1693 if (isArm64EC(Machine: ctx.config.machine) && sec->isCodeSection()) {
1694 std::optional<chpe_range_type> rangeType = c->getArm64ECRangeType();
1695 if (rangeType != prevECRange) {
1696 virtualSize = alignTo(Value: virtualSize, Align: 4096);
1697 prevECRange = rangeType;
1698 }
1699 }
1700 if (padding && c->isHotPatchable())
1701 virtualSize += padding;
1702      // If the chunk has an EC entry thunk, reserve space for an offset to
1703      // the thunk.
1704 if (c->getEntryThunk())
1705 virtualSize += sizeof(uint32_t);
1706 virtualSize = alignTo(Value: virtualSize, Align: c->getAlignment());
1707 c->setRVA(rva + virtualSize);
1708 virtualSize += c->getSize();
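      // Only chunks with file contents extend the on-disk size; trailing
      // zero-initialized chunks grow the virtual size only.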
1709 if (c->hasData)
1710 rawSize = alignTo(Value: virtualSize, Align: config->fileAlign);
1711 }
1712 if (virtualSize > UINT32_MAX)
1713 Err(ctx) << "section larger than 4 GiB: " << sec->name;
1714 sec->header.VirtualSize = virtualSize;
1715 sec->header.SizeOfRawData = rawSize;
1716 if (rawSize != 0)
1717 sec->header.PointerToRawData = fileSize;
1718 rva += alignTo(Value: virtualSize, Align: config->align);
1719 fileSize += alignTo(Value: rawSize, Align: config->fileAlign);
1720 }
1721 sizeOfImage = alignTo(Value: rva, Align: config->align);
1722
1723 // Assign addresses to sections in MergeChunks.
1724 for (MergeChunk *mc : ctx.mergeChunkInstances)
1725 if (mc)
1726 mc->assignSubsectionRVAs();
1727}
1728
1729template <typename PEHeaderTy> void Writer::writeHeader() {
1730 // Write DOS header. For backwards compatibility, the first part of a PE/COFF
1731 // executable consists of an MS-DOS MZ executable. If the executable is run
1732 // under DOS, that program gets run (usually to just print an error message).
1733 // When run under Windows, the loader looks at AddressOfNewExeHeader and uses
1734 // the PE header instead.
1735 Configuration *config = &ctx.config;
1736
1737 uint8_t *buf = buffer->getBufferStart();
1738 auto *dos = reinterpret_cast<dos_header *>(buf);
1739
1740 // Write DOS program.
1741 if (config->dosStub) {
1742 memcpy(dest: buf, src: config->dosStub->getBufferStart(),
1743 n: config->dosStub->getBufferSize());
1744 // MS link.exe accepts an invalid `e_lfanew` (AddressOfNewExeHeader) and
1745 // updates it automatically. Replicate the same behaviour.
1746 dos->AddressOfNewExeHeader = alignTo(Value: config->dosStub->getBufferSize(), Align: 8);
1747 // Unlike MS link.exe, LLD accepts non-8-byte-aligned stubs.
1748    // In that case, we add the zero padding ourselves.
1749 buf += alignTo(Value: config->dosStub->getBufferSize(), Align: 8);
1750 } else {
1751 buf += sizeof(dos_header);
1752 dos->Magic[0] = 'M';
1753 dos->Magic[1] = 'Z';
1754 dos->UsedBytesInTheLastPage = dosStubSize % 512;
1755 dos->FileSizeInPages = divideCeil(Numerator: dosStubSize, Denominator: 512);
1756 dos->HeaderSizeInParagraphs = sizeof(dos_header) / 16;
1757
1758 dos->AddressOfRelocationTable = sizeof(dos_header);
1759 dos->AddressOfNewExeHeader = dosStubSize;
1760
1761 memcpy(dest: buf, src: dosProgram, n: sizeof(dosProgram));
1762 buf += sizeof(dosProgram);
1763 }
1764
1765 // Make sure DOS stub is aligned to 8 bytes at this point
1766 assert((buf - buffer->getBufferStart()) % 8 == 0);
1767
1768 // Write PE magic
1769 memcpy(dest: buf, src: PEMagic, n: sizeof(PEMagic));
1770 buf += sizeof(PEMagic);
1771
1772 // Write COFF header
1773 assert(coffHeaderOffset == buf - buffer->getBufferStart());
1774 auto *coff = reinterpret_cast<coff_file_header *>(buf);
1775 buf += sizeof(*coff);
1776 SymbolTable &symtab =
1777 ctx.config.machine == ARM64X ? *ctx.hybridSymtab : ctx.symtab;
1778 coff->Machine = symtab.isEC() ? AMD64 : symtab.machine;
1779 coff->NumberOfSections = ctx.outputSections.size();
1780 coff->Characteristics = IMAGE_FILE_EXECUTABLE_IMAGE;
1781 if (config->largeAddressAware)
1782 coff->Characteristics |= IMAGE_FILE_LARGE_ADDRESS_AWARE;
1783 if (!config->is64())
1784 coff->Characteristics |= IMAGE_FILE_32BIT_MACHINE;
1785 if (config->dll)
1786 coff->Characteristics |= IMAGE_FILE_DLL;
1787 if (config->driverUponly)
1788 coff->Characteristics |= IMAGE_FILE_UP_SYSTEM_ONLY;
1789 if (!config->relocatable)
1790 coff->Characteristics |= IMAGE_FILE_RELOCS_STRIPPED;
1791 if (config->swaprunCD)
1792 coff->Characteristics |= IMAGE_FILE_REMOVABLE_RUN_FROM_SWAP;
1793 if (config->swaprunNet)
1794 coff->Characteristics |= IMAGE_FILE_NET_RUN_FROM_SWAP;
1795 coff->SizeOfOptionalHeader =
1796 sizeof(PEHeaderTy) + sizeof(data_directory) * numberOfDataDirectory;
1797
1798 // Write PE header
1799 assert(peHeaderOffset == buf - buffer->getBufferStart());
1800 auto *pe = reinterpret_cast<PEHeaderTy *>(buf);
1801 buf += sizeof(*pe);
1802 pe->Magic = config->is64() ? PE32Header::PE32_PLUS : PE32Header::PE32;
1803
1804 // If {Major,Minor}LinkerVersion is left at 0.0, then for some
1805 // reason signing the resulting PE file with Authenticode produces a
1806 // signature that fails to validate on Windows 7 (but is OK on 10).
1807 // Set it to 14.0, which is what VS2015 outputs, and which avoids
1808 // that problem.
1809 pe->MajorLinkerVersion = 14;
1810 pe->MinorLinkerVersion = 0;
1811
1812 pe->ImageBase = config->imageBase;
1813 pe->SectionAlignment = config->align;
1814 pe->FileAlignment = config->fileAlign;
1815 pe->MajorImageVersion = config->majorImageVersion;
1816 pe->MinorImageVersion = config->minorImageVersion;
1817 pe->MajorOperatingSystemVersion = config->majorOSVersion;
1818 pe->MinorOperatingSystemVersion = config->minorOSVersion;
1819 pe->MajorSubsystemVersion = config->majorSubsystemVersion;
1820 pe->MinorSubsystemVersion = config->minorSubsystemVersion;
1821 pe->Subsystem = config->subsystem;
1822 pe->SizeOfImage = sizeOfImage;
1823 pe->SizeOfHeaders = sizeOfHeaders;
1824 if (!config->noEntry) {
1825 Defined *entry = cast<Defined>(Val: symtab.entry);
1826 pe->AddressOfEntryPoint = entry->getRVA();
1827 // Pointer to thumb code must have the LSB set, so adjust it.
1828 if (config->machine == ARMNT)
1829 pe->AddressOfEntryPoint |= 1;
1830 }
1831 pe->SizeOfStackReserve = config->stackReserve;
1832 pe->SizeOfStackCommit = config->stackCommit;
1833 pe->SizeOfHeapReserve = config->heapReserve;
1834 pe->SizeOfHeapCommit = config->heapCommit;
1835 if (config->appContainer)
1836 pe->DLLCharacteristics |= IMAGE_DLL_CHARACTERISTICS_APPCONTAINER;
1837 if (config->driverWdm)
1838 pe->DLLCharacteristics |= IMAGE_DLL_CHARACTERISTICS_WDM_DRIVER;
1839 if (config->dynamicBase)
1840 pe->DLLCharacteristics |= IMAGE_DLL_CHARACTERISTICS_DYNAMIC_BASE;
1841 if (config->highEntropyVA)
1842 pe->DLLCharacteristics |= IMAGE_DLL_CHARACTERISTICS_HIGH_ENTROPY_VA;
1843 if (!config->allowBind)
1844 pe->DLLCharacteristics |= IMAGE_DLL_CHARACTERISTICS_NO_BIND;
1845 if (config->nxCompat)
1846 pe->DLLCharacteristics |= IMAGE_DLL_CHARACTERISTICS_NX_COMPAT;
1847 if (!config->allowIsolation)
1848 pe->DLLCharacteristics |= IMAGE_DLL_CHARACTERISTICS_NO_ISOLATION;
1849 if (config->guardCF != GuardCFLevel::Off)
1850 pe->DLLCharacteristics |= IMAGE_DLL_CHARACTERISTICS_GUARD_CF;
1851 if (config->integrityCheck)
1852 pe->DLLCharacteristics |= IMAGE_DLL_CHARACTERISTICS_FORCE_INTEGRITY;
1853 if (setNoSEHCharacteristic || config->noSEH)
1854 pe->DLLCharacteristics |= IMAGE_DLL_CHARACTERISTICS_NO_SEH;
1855 if (config->terminalServerAware)
1856 pe->DLLCharacteristics |= IMAGE_DLL_CHARACTERISTICS_TERMINAL_SERVER_AWARE;
1857 pe->NumberOfRvaAndSize = numberOfDataDirectory;
1858 if (textSec->getVirtualSize()) {
1859 pe->BaseOfCode = textSec->getRVA();
1860 pe->SizeOfCode = textSec->getRawSize();
1861 }
1862 pe->SizeOfInitializedData = getSizeOfInitializedData();
1863
1864 // Write data directory
1865 assert(!ctx.config.is64() ||
1866 dataDirOffset64 == buf - buffer->getBufferStart());
1867 auto *dir = reinterpret_cast<data_directory *>(buf);
1868 buf += sizeof(*dir) * numberOfDataDirectory;
1869 if (symtab.edataStart) {
1870 dir[EXPORT_TABLE].RelativeVirtualAddress = symtab.edataStart->getRVA();
1871 dir[EXPORT_TABLE].Size = symtab.edataEnd->getRVA() +
1872 symtab.edataEnd->getSize() -
1873 symtab.edataStart->getRVA();
1874 }
1875 if (importTableStart) {
1876 dir[IMPORT_TABLE].RelativeVirtualAddress = importTableStart->getRVA();
1877 dir[IMPORT_TABLE].Size = importTableSize;
1878 }
1879 if (iatStart) {
1880 dir[IAT].RelativeVirtualAddress = iatStart->getRVA();
1881 dir[IAT].Size = iatSize;
1882 }
1883 if (rsrcSec->getVirtualSize()) {
1884 dir[RESOURCE_TABLE].RelativeVirtualAddress = rsrcSec->getRVA();
1885 dir[RESOURCE_TABLE].Size = rsrcSec->getVirtualSize();
1886 }
1887  // ARM64EC (but not ARM64X) stores the x86_64 exception table in the data directory.
1888 ChunkRange &exceptionTable =
1889 ctx.config.machine == ARM64EC ? hybridPdata : pdata;
1890 if (exceptionTable.first) {
1891 dir[EXCEPTION_TABLE].RelativeVirtualAddress =
1892 exceptionTable.first->getRVA();
1893 dir[EXCEPTION_TABLE].Size = exceptionTable.last->getRVA() +
1894 exceptionTable.last->getSize() -
1895 exceptionTable.first->getRVA();
1896 }
1897 size_t relocSize = relocSec->getVirtualSize();
1898 if (ctx.dynamicRelocs)
1899 relocSize -= ctx.dynamicRelocs->getSize();
1900 if (relocSize) {
1901 dir[BASE_RELOCATION_TABLE].RelativeVirtualAddress = relocSec->getRVA();
1902 dir[BASE_RELOCATION_TABLE].Size = relocSize;
1903 }
1904 if (Symbol *sym = symtab.findUnderscore(name: "_tls_used")) {
1905 if (Defined *b = dyn_cast<Defined>(Val: sym)) {
1906 dir[TLS_TABLE].RelativeVirtualAddress = b->getRVA();
1907 dir[TLS_TABLE].Size = config->is64()
1908 ? sizeof(object::coff_tls_directory64)
1909 : sizeof(object::coff_tls_directory32);
1910 }
1911 }
1912 if (debugDirectory) {
1913 dir[DEBUG_DIRECTORY].RelativeVirtualAddress = debugDirectory->getRVA();
1914 dir[DEBUG_DIRECTORY].Size = debugDirectory->getSize();
1915 }
1916 if (symtab.loadConfigSym) {
1917 dir[LOAD_CONFIG_TABLE].RelativeVirtualAddress =
1918 symtab.loadConfigSym->getRVA();
1919 dir[LOAD_CONFIG_TABLE].Size = symtab.loadConfigSize;
1920 }
1921 if (!delayIdata.empty()) {
1922 dir[DELAY_IMPORT_DESCRIPTOR].RelativeVirtualAddress =
1923 delayIdata.getDirRVA();
1924 dir[DELAY_IMPORT_DESCRIPTOR].Size = delayIdata.getDirSize();
1925 }
1926
1927 // Write section table
1928 for (OutputSection *sec : ctx.outputSections) {
1929 sec->writeHeaderTo(buf, isDebug: config->debug);
1930 buf += sizeof(coff_section);
1931 }
1932 sectionTable = ArrayRef<uint8_t>(
1933 buf - ctx.outputSections.size() * sizeof(coff_section), buf);
1934
1935 if (outputSymtab.empty() && strtab.empty())
1936 return;
1937
1938 coff->PointerToSymbolTable = pointerToSymbolTable;
1939 uint32_t numberOfSymbols = outputSymtab.size();
1940 coff->NumberOfSymbols = numberOfSymbols;
1941 auto *symbolTable = reinterpret_cast<coff_symbol16 *>(
1942 buffer->getBufferStart() + coff->PointerToSymbolTable);
1943 for (size_t i = 0; i != numberOfSymbols; ++i)
1944 symbolTable[i] = outputSymtab[i];
1945 // Create the string table, it follows immediately after the symbol table.
1946 // The first 4 bytes is length including itself.
1947 buf = reinterpret_cast<uint8_t *>(&symbolTable[numberOfSymbols]);
1948 write32le(P: buf, V: strtab.size() + 4);
1949 if (!strtab.empty())
1950 memcpy(dest: buf + 4, src: strtab.data(), n: strtab.size());
1951}
1952
1953void Writer::openFile(StringRef path) {
1954 buffer = CHECK(
1955 FileOutputBuffer::create(path, fileSize, FileOutputBuffer::F_executable),
1956 "failed to open " + path);
1957}
1958
1959void Writer::createSEHTable() {
1960 SymbolRVASet handlers;
1961 for (ObjFile *file : ctx.objFileInstances) {
1962 if (!file->hasSafeSEH())
1963 Err(ctx) << "/safeseh: " << file->getName()
1964 << " is not compatible with SEH";
1965 markSymbolsForRVATable(file, symIdxChunks: file->getSXDataChunks(), tableSymbols&: handlers);
1966 }
1967
1968 // Set the "no SEH" characteristic if there really were no handlers, or if
1969 // there is no load config object to point to the table of handlers.
1970 setNoSEHCharacteristic =
1971 handlers.empty() || !ctx.symtab.findUnderscore(name: "_load_config_used");
1972
1973 maybeAddRVATable(tableSymbols: std::move(handlers), tableSym: "__safe_se_handler_table",
1974 countSym: "__safe_se_handler_count");
1975}
1976
1977// Add a symbol to an RVA set. Two symbols may have the same RVA, but an RVA set
1978// cannot contain duplicates. Therefore, the set is uniqued by Chunk and the
1979// symbol's offset into that Chunk.
1980static void addSymbolToRVASet(SymbolRVASet &rvaSet, Defined *s) {
1981 Chunk *c = s->getChunk();
1982 if (!c)
1983 return;
1984 if (auto *sc = dyn_cast<SectionChunk>(Val: c))
1985 c = sc->repl; // Look through ICF replacement.
1986 uint32_t off = s->getRVA() - (c ? c->getRVA() : 0);
1987 rvaSet.insert(V: {.inputChunk: c, .offset: off});
1988}
1989
1990// Given a symbol, add it to the GFIDs table if it is a live, defined, function
1991// symbol in an executable section.
1992static void maybeAddAddressTakenFunction(SymbolRVASet &addressTakenSyms,
1993 Symbol *s) {
1994 if (!s)
1995 return;
1996
1997 switch (s->kind()) {
1998 case Symbol::DefinedLocalImportKind:
1999 case Symbol::DefinedImportDataKind:
2000 // Defines an __imp_ pointer, so it is data, so it is ignored.
2001 break;
2002 case Symbol::DefinedCommonKind:
2003 // Common is always data, so it is ignored.
2004 break;
2005 case Symbol::DefinedAbsoluteKind:
2006 // Absolute is never code, synthetic generally isn't and usually isn't
2007 // determinable.
2008 break;
2009 case Symbol::DefinedSyntheticKind:
2010 // For EC export thunks, mark both the thunk itself and its target.
2011 if (auto expChunk = dyn_cast_or_null<ECExportThunkChunk>(
2012 Val: cast<Defined>(Val: s)->getChunk())) {
2013 addSymbolToRVASet(rvaSet&: addressTakenSyms, s: cast<Defined>(Val: s));
2014 addSymbolToRVASet(rvaSet&: addressTakenSyms, s: expChunk->target);
2015 }
2016 break;
2017 case Symbol::LazyArchiveKind:
2018 case Symbol::LazyObjectKind:
2019 case Symbol::LazyDLLSymbolKind:
2020 case Symbol::UndefinedKind:
2021 // Undefined symbols resolve to zero, so they don't have an RVA. Lazy
2022 // symbols shouldn't have relocations.
2023 break;
2024
2025 case Symbol::DefinedImportThunkKind:
2026 // Thunks are always code, include them.
2027 addSymbolToRVASet(rvaSet&: addressTakenSyms, s: cast<Defined>(Val: s));
2028 break;
2029
2030 case Symbol::DefinedRegularKind: {
2031 // This is a regular, defined, symbol from a COFF file. Mark the symbol as
2032 // address taken if the symbol type is function and it's in an executable
2033 // section.
2034 auto *d = cast<DefinedRegular>(Val: s);
2035 if (d->getCOFFSymbol().getComplexType() == COFF::IMAGE_SYM_DTYPE_FUNCTION) {
2036 SectionChunk *sc = dyn_cast<SectionChunk>(Val: d->getChunk());
2037 if (sc && sc->live &&
2038 sc->getOutputCharacteristics() & IMAGE_SCN_MEM_EXECUTE)
2039 addSymbolToRVASet(rvaSet&: addressTakenSyms, s: d);
2040 }
2041 break;
2042 }
2043 }
2044}
2045
2046// Visit all relocations from all section contributions of this object file and
2047// mark the relocation target as address-taken.
2048void Writer::markSymbolsWithRelocations(ObjFile *file,
2049 SymbolRVASet &usedSymbols) {
2050 for (Chunk *c : file->getChunks()) {
2051 // We only care about live section chunks. Common chunks and other chunks
2052 // don't generally contain relocations.
2053 SectionChunk *sc = dyn_cast<SectionChunk>(Val: c);
2054 if (!sc || !sc->live)
2055 continue;
2056
2057 for (const coff_relocation &reloc : sc->getRelocs()) {
2058 if (ctx.config.machine == I386 &&
2059 reloc.Type == COFF::IMAGE_REL_I386_REL32)
2060 // Ignore relative relocations on x86. On x86_64 they can't be ignored
2061 // since they're also used to compute absolute addresses.
2062 continue;
2063
2064 Symbol *ref = sc->file->getSymbol(symbolIndex: reloc.SymbolTableIndex);
2065 maybeAddAddressTakenFunction(addressTakenSyms&: usedSymbols, s: ref);
2066 }
2067 }
2068}
2069
2070// Create the guard function id table. This is a table of RVAs of all
2071// address-taken functions. It is sorted and uniqued, just like the safe SEH
2072// table.
2073void Writer::createGuardCFTables() {
2074 Configuration *config = &ctx.config;
2075
2076 if (config->guardCF == GuardCFLevel::Off) {
2077 // MSVC marks the entire image as instrumented if any input object was built
2078 // with /guard:cf.
2079 for (ObjFile *file : ctx.objFileInstances) {
2080 if (file->hasGuardCF()) {
2081 ctx.forEachSymtab(f: [&](SymbolTable &symtab) {
2082 Symbol *flagSym = symtab.findUnderscore(name: "__guard_flags");
2083 cast<DefinedAbsolute>(Val: flagSym)->setVA(
2084 uint32_t(GuardFlags::CF_INSTRUMENTED));
2085 });
2086 break;
2087 }
2088 }
2089 return;
2090 }
2091
2092 SymbolRVASet addressTakenSyms;
2093 SymbolRVASet giatsRVASet;
2094 std::vector<Symbol *> giatsSymbols;
2095 SymbolRVASet longJmpTargets;
2096 SymbolRVASet ehContTargets;
2097 for (ObjFile *file : ctx.objFileInstances) {
2098 // If the object was compiled with /guard:cf, the address taken symbols
2099 // are in .gfids$y sections, and the longjmp targets are in .gljmp$y
2100 // sections. If the object was not compiled with /guard:cf, we assume there
2101 // were no setjmp targets, and that all code symbols with relocations are
2102 // possibly address-taken.
2103 if (file->hasGuardCF()) {
2104 markSymbolsForRVATable(file, symIdxChunks: file->getGuardFidChunks(), tableSymbols&: addressTakenSyms);
2105 markSymbolsForRVATable(file, symIdxChunks: file->getGuardIATChunks(), tableSymbols&: giatsRVASet);
2106 getSymbolsFromSections(file, symIdxChunks: file->getGuardIATChunks(), symbols&: giatsSymbols);
2107 markSymbolsForRVATable(file, symIdxChunks: file->getGuardLJmpChunks(), tableSymbols&: longJmpTargets);
2108 } else {
2109 markSymbolsWithRelocations(file, usedSymbols&: addressTakenSyms);
2110 }
2111 // If the object was compiled with /guard:ehcont, the ehcont targets are in
2112 // .gehcont$y sections.
2113 if (file->hasGuardEHCont())
2114 markSymbolsForRVATable(file, symIdxChunks: file->getGuardEHContChunks(), tableSymbols&: ehContTargets);
2115 }
2116
2117 // Mark the image entry as address-taken.
2118 ctx.forEachSymtab(f: [&](SymbolTable &symtab) {
2119 if (symtab.entry)
2120 maybeAddAddressTakenFunction(addressTakenSyms, s: symtab.entry);
2121
2122 // Mark exported symbols in executable sections as address-taken.
2123 for (Export &e : symtab.exports)
2124 maybeAddAddressTakenFunction(addressTakenSyms, s: e.sym);
2125 });
2126
2127 // For each entry in the .giats table, check if it has a corresponding load
2128 // thunk (e.g. because the DLL that defines it will be delay-loaded) and, if
2129 // so, add the load thunk to the address taken (.gfids) table.
2130 for (Symbol *s : giatsSymbols) {
2131 if (auto *di = dyn_cast<DefinedImportData>(Val: s)) {
2132 if (di->loadThunkSym)
2133 addSymbolToRVASet(rvaSet&: addressTakenSyms, s: di->loadThunkSym);
2134 }
2135 }
2136
2137 // Ensure sections referenced in the gfid table are 16-byte aligned.
2138 for (const ChunkAndOffset &c : addressTakenSyms)
2139 if (c.inputChunk->getAlignment() < 16)
2140 c.inputChunk->setAlignment(16);
2141
2142 maybeAddRVATable(tableSymbols: std::move(addressTakenSyms), tableSym: "__guard_fids_table",
2143 countSym: "__guard_fids_count");
2144
2145 // Add the Guard Address Taken IAT Entry Table (.giats).
2146 maybeAddRVATable(tableSymbols: std::move(giatsRVASet), tableSym: "__guard_iat_table",
2147 countSym: "__guard_iat_count");
2148
2149 // Add the longjmp target table unless the user told us not to.
2150 if (config->guardCF & GuardCFLevel::LongJmp)
2151 maybeAddRVATable(tableSymbols: std::move(longJmpTargets), tableSym: "__guard_longjmp_table",
2152 countSym: "__guard_longjmp_count");
2153
2154 // Add the ehcont target table unless the user told us not to.
2155 if (config->guardCF & GuardCFLevel::EHCont)
2156 maybeAddRVATable(tableSymbols: std::move(ehContTargets), tableSym: "__guard_eh_cont_table",
2157 countSym: "__guard_eh_cont_count");
2158
2159 // Set __guard_flags, which will be used in the load config to indicate that
2160 // /guard:cf was enabled.
2161 uint32_t guardFlags = uint32_t(GuardFlags::CF_INSTRUMENTED) |
2162 uint32_t(GuardFlags::CF_FUNCTION_TABLE_PRESENT);
2163 if (config->guardCF & GuardCFLevel::LongJmp)
2164 guardFlags |= uint32_t(GuardFlags::CF_LONGJUMP_TABLE_PRESENT);
2165 if (config->guardCF & GuardCFLevel::EHCont)
2166 guardFlags |= uint32_t(GuardFlags::EH_CONTINUATION_TABLE_PRESENT);
2167 ctx.forEachSymtab(f: [guardFlags](SymbolTable &symtab) {
2168 Symbol *flagSym = symtab.findUnderscore(name: "__guard_flags");
2169 cast<DefinedAbsolute>(Val: flagSym)->setVA(guardFlags);
2170 });
2171}
2172
2173// Take a list of input sections containing symbol table indices and add those
2174// symbols to a vector. The challenge is that symbol RVAs are not known and
2175// depend on the table size, so we can't directly build a set of integers.
2176void Writer::getSymbolsFromSections(ObjFile *file,
2177 ArrayRef<SectionChunk *> symIdxChunks,
2178 std::vector<Symbol *> &symbols) {
2179 for (SectionChunk *c : symIdxChunks) {
2180 // Skip sections discarded by linker GC. This comes up when a .gfids section
2181 // is associated with something like a vtable and the vtable is discarded.
2182 // In this case, the associated gfids section is discarded, and we don't
2183 // mark the virtual member functions as address-taken by the vtable.
2184 if (!c->live)
2185 continue;
2186
2187 // Validate that the contents look like symbol table indices.
2188 ArrayRef<uint8_t> data = c->getContents();
2189 if (data.size() % 4 != 0) {
2190 Warn(ctx) << "ignoring " << c->getSectionName()
2191 << " symbol table index section in object " << file;
2192 continue;
2193 }
2194
2195 // Read each symbol table index and check if that symbol was included in the
2196 // final link. If so, add it to the vector of symbols.
2197 ArrayRef<ulittle32_t> symIndices(
2198 reinterpret_cast<const ulittle32_t *>(data.data()), data.size() / 4);
2199 ArrayRef<Symbol *> objSymbols = file->getSymbols();
2200 for (uint32_t symIndex : symIndices) {
2201 if (symIndex >= objSymbols.size()) {
2202 Warn(ctx) << "ignoring invalid symbol table index in section "
2203 << c->getSectionName() << " in object " << file;
2204 continue;
2205 }
2206 if (Symbol *s = objSymbols[symIndex]) {
2207 if (s->isLive())
2208 symbols.push_back(x: cast<Symbol>(Val: s));
2209 }
2210 }
2211 }
2212}
2213
2214// Take a list of input sections containing symbol table indices and add those
2215// symbols to an RVA table.
2216void Writer::markSymbolsForRVATable(ObjFile *file,
2217 ArrayRef<SectionChunk *> symIdxChunks,
2218 SymbolRVASet &tableSymbols) {
2219 std::vector<Symbol *> syms;
2220 getSymbolsFromSections(file, symIdxChunks, symbols&: syms);
2221
2222 for (Symbol *s : syms)
2223 addSymbolToRVASet(rvaSet&: tableSymbols, s: cast<Defined>(Val: s));
2224}
2225
2226// Replace the absolute table symbol with a synthetic symbol pointing to
2227// tableChunk so that we can emit base relocations for it and resolve section
2228// relative relocations.
2229void Writer::maybeAddRVATable(SymbolRVASet tableSymbols, StringRef tableSym,
2230 StringRef countSym, bool hasFlag) {
2231 if (tableSymbols.empty())
2232 return;
2233
2234 NonSectionChunk *tableChunk;
2235 if (hasFlag)
2236 tableChunk = make<RVAFlagTableChunk>(args: std::move(tableSymbols));
2237 else
2238 tableChunk = make<RVATableChunk>(args: std::move(tableSymbols));
2239 rdataSec->addChunk(c: tableChunk);
2240
2241 ctx.forEachSymtab(f: [&](SymbolTable &symtab) {
2242 Symbol *t = symtab.findUnderscore(name: tableSym);
2243 Symbol *c = symtab.findUnderscore(name: countSym);
2244 replaceSymbol<DefinedSynthetic>(s: t, arg: t->getName(), arg&: tableChunk);
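    // Flag tables store 5 bytes per entry (a 4-byte RVA plus a 1-byte flag);
    // plain tables store 4-byte RVAs, hence the divisor below.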
2245 cast<DefinedAbsolute>(Val: c)->setVA(tableChunk->getSize() / (hasFlag ? 5 : 4));
2246 });
2247}
2248
2249// Create CHPE metadata chunks.
2250void Writer::createECChunks() {
2251 if (!ctx.symtab.isEC())
2252 return;
2253
2254 for (Symbol *s : ctx.symtab.expSymbols) {
2255 auto sym = dyn_cast<Defined>(Val: s);
2256 if (!sym || !sym->getChunk())
2257 continue;
2258 if (auto thunk = dyn_cast<ECExportThunkChunk>(Val: sym->getChunk())) {
2259 hexpthkSec->addChunk(c: thunk);
2260 exportThunks.push_back(x: {thunk, thunk->target});
2261 } else if (auto def = dyn_cast<DefinedRegular>(Val: sym)) {
2262 // Allow section chunk to be treated as an export thunk if it looks like
2263 // one.
2264 SectionChunk *chunk = def->getChunk();
2265 if (!chunk->live || chunk->getMachine() != AMD64)
2266 continue;
2267 assert(sym->getName().starts_with("EXP+"));
2268 StringRef targetName = sym->getName().substr(Start: strlen(s: "EXP+"));
2269 // If EXP+#foo is an export thunk of a hybrid patchable function,
2270 // we should use the #foo$hp_target symbol as the redirection target.
2271 // First, try to look up the $hp_target symbol. If it can't be found,
2272 // assume it's a regular function and look for #foo instead.
2273 Symbol *targetSym = ctx.symtab.find(name: (targetName + "$hp_target").str());
2274 if (!targetSym)
2275 targetSym = ctx.symtab.find(name: targetName);
2276 Defined *t = dyn_cast_or_null<Defined>(Val: targetSym);
2277 if (t && isArm64EC(Machine: t->getChunk()->getMachine()))
2278 exportThunks.push_back(x: {chunk, t});
2279 }
2280 }
2281
2282 auto codeMapChunk = make<ECCodeMapChunk>(args&: codeMap);
2283 rdataSec->addChunk(c: codeMapChunk);
2284 Symbol *codeMapSym = ctx.symtab.findUnderscore(name: "__hybrid_code_map");
2285 replaceSymbol<DefinedSynthetic>(s: codeMapSym, arg: codeMapSym->getName(),
2286 arg&: codeMapChunk);
2287
2288 CHPECodeRangesChunk *ranges = make<CHPECodeRangesChunk>(args&: exportThunks);
2289 rdataSec->addChunk(c: ranges);
2290 Symbol *rangesSym =
2291 ctx.symtab.findUnderscore(name: "__x64_code_ranges_to_entry_points");
2292 replaceSymbol<DefinedSynthetic>(s: rangesSym, arg: rangesSym->getName(), arg&: ranges);
2293
2294 CHPERedirectionChunk *entryPoints = make<CHPERedirectionChunk>(args&: exportThunks);
2295 a64xrmSec->addChunk(c: entryPoints);
2296 Symbol *entryPointsSym =
2297 ctx.symtab.findUnderscore(name: "__arm64x_redirection_metadata");
2298 replaceSymbol<DefinedSynthetic>(s: entryPointsSym, arg: entryPointsSym->getName(),
2299 arg&: entryPoints);
2300}
2301
2302// MinGW specific. Gather all relocations against symbols that turn out to be
2303// imported from a DLL even though the code didn't expect it, produce the
2304// table that the runtime uses for fixing them up, and provide the synthetic
2305// symbols that the runtime uses for finding the table.
2306void Writer::createRuntimePseudoRelocs() {
2307 ctx.forEachSymtab(f: [&](SymbolTable &symtab) {
2308 std::vector<RuntimePseudoReloc> rels;
2309
2310 for (Chunk *c : ctx.driver.getChunks()) {
2311 auto *sc = dyn_cast<SectionChunk>(Val: c);
2312 if (!sc || !sc->live || &sc->file->symtab != &symtab)
2313 continue;
2314 // Don't create pseudo relocations for sections that won't be
2315 // mapped at runtime.
2316 if (sc->header->Characteristics & IMAGE_SCN_MEM_DISCARDABLE)
2317 continue;
2318 sc->getRuntimePseudoRelocs(res&: rels);
2319 }
2320
2321 if (!ctx.config.pseudoRelocs) {
2322 // Not writing any pseudo relocs; if some were needed, error out and
2323 // indicate what required them.
2324 for (const RuntimePseudoReloc &rpr : rels)
2325 Err(ctx) << "automatic dllimport of " << rpr.sym->getName() << " in "
2326 << toString(file: rpr.target->file)
2327 << " requires pseudo relocations";
2328 return;
2329 }
2330
2331 if (!rels.empty()) {
2332 Log(ctx) << "Writing " << Twine(rels.size())
2333 << " runtime pseudo relocations";
2334 const char *symbolName = "_pei386_runtime_relocator";
2335 Symbol *relocator = symtab.findUnderscore(name: symbolName);
2336 if (!relocator)
2337 Err(ctx)
2338 << "output image has runtime pseudo relocations, but the function "
2339 << symbolName
2340 << " is missing; it is needed for fixing the relocations at "
2341 "runtime";
2342 }
2343
2344 PseudoRelocTableChunk *table = make<PseudoRelocTableChunk>(args&: rels);
2345 rdataSec->addChunk(c: table);
2346 EmptyChunk *endOfList = make<EmptyChunk>();
2347 rdataSec->addChunk(c: endOfList);
2348
2349 Symbol *headSym = symtab.findUnderscore(name: "__RUNTIME_PSEUDO_RELOC_LIST__");
2350 Symbol *endSym = symtab.findUnderscore(name: "__RUNTIME_PSEUDO_RELOC_LIST_END__");
2351 replaceSymbol<DefinedSynthetic>(s: headSym, arg: headSym->getName(), arg&: table);
2352 replaceSymbol<DefinedSynthetic>(s: endSym, arg: endSym->getName(), arg&: endOfList);
2353 });
2354}
2355
2356// MinGW specific.
2357// The MinGW .ctors and .dtors lists have sentinels at each end;
2358// a (uintptr_t)-1 at the start and a (uintptr_t)0 at the end.
2359// There's a symbol pointing to the start sentinel pointer, __CTOR_LIST__
2360// and __DTOR_LIST__ respectively.
2361void Writer::insertCtorDtorSymbols() {
2362 ctx.forEachSymtab(f: [&](SymbolTable &symtab) {
2363 AbsolutePointerChunk *ctorListHead = make<AbsolutePointerChunk>(args&: symtab, args: -1);
2364 AbsolutePointerChunk *ctorListEnd = make<AbsolutePointerChunk>(args&: symtab, args: 0);
2365 AbsolutePointerChunk *dtorListHead = make<AbsolutePointerChunk>(args&: symtab, args: -1);
2366 AbsolutePointerChunk *dtorListEnd = make<AbsolutePointerChunk>(args&: symtab, args: 0);
2367 ctorsSec->insertChunkAtStart(c: ctorListHead);
2368 ctorsSec->addChunk(c: ctorListEnd);
2369 dtorsSec->insertChunkAtStart(c: dtorListHead);
2370 dtorsSec->addChunk(c: dtorListEnd);
2371
2372 Symbol *ctorListSym = symtab.findUnderscore(name: "__CTOR_LIST__");
2373 Symbol *dtorListSym = symtab.findUnderscore(name: "__DTOR_LIST__");
2374 replaceSymbol<DefinedSynthetic>(s: ctorListSym, arg: ctorListSym->getName(),
2375 arg&: ctorListHead);
2376 replaceSymbol<DefinedSynthetic>(s: dtorListSym, arg: dtorListSym->getName(),
2377 arg&: dtorListHead);
2378 });
2379
2380 if (ctx.hybridSymtab) {
2381 ctorsSec->splitECChunks();
2382 dtorsSec->splitECChunks();
2383 }
2384}
2385
2386// MinGW (really, Cygwin) specific.
2387// The Cygwin startup code uses __data_start__ __data_end__ __bss_start__
2388// and __bss_end__ to know what to copy during fork emulation.
2389void Writer::insertBssDataStartEndSymbols() {
2390 if (!dataSec->chunks.empty()) {
2391 Symbol *dataStartSym = ctx.symtab.find(name: "__data_start__");
2392 Symbol *dataEndSym = ctx.symtab.find(name: "__data_end__");
2393 Chunk *endChunk = dataSec->chunks.back();
2394 replaceSymbol<DefinedSynthetic>(s: dataStartSym, arg: dataStartSym->getName(),
2395 arg&: dataSec->chunks.front());
2396 replaceSymbol<DefinedSynthetic>(s: dataEndSym, arg: dataEndSym->getName(), arg&: endChunk,
2397 arg: endChunk->getSize());
2398 }
2399
2400 if (!bssSec->chunks.empty()) {
2401 Symbol *bssStartSym = ctx.symtab.find(name: "__bss_start__");
2402 Symbol *bssEndSym = ctx.symtab.find(name: "__bss_end__");
2403 Chunk *endChunk = bssSec->chunks.back();
2404 replaceSymbol<DefinedSynthetic>(s: bssStartSym, arg: bssStartSym->getName(),
2405 arg&: bssSec->chunks.front());
2406 replaceSymbol<DefinedSynthetic>(s: bssEndSym, arg: bssEndSym->getName(), arg&: endChunk,
2407 arg: endChunk->getSize());
2408 }
2409}
2410
2411// Handles /section options to allow users to overwrite
2412// section attributes.
2413void Writer::setSectionPermissions() {
2414 llvm::TimeTraceScope timeScope("Sections permissions");
2415 for (auto &p : ctx.config.section) {
2416 StringRef name = p.first;
2417 uint32_t perm = p.second;
2418 for (OutputSection *sec : ctx.outputSections)
2419 if (sec->name == name)
2420 sec->setPermissions(perm);
2421 }
2422}
2423
2424// Set symbols used by ARM64EC metadata.
2425void Writer::setECSymbols() {
2426 if (!ctx.symtab.isEC())
2427 return;
2428
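  // Sort export thunks by RVA so that the CHPE chunks referencing this vector
  // emit their entries in address order.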
2429 llvm::stable_sort(Range&: exportThunks, C: [](const std::pair<Chunk *, Defined *> &a,
2430 const std::pair<Chunk *, Defined *> &b) {
2431 return a.first->getRVA() < b.first->getRVA();
2432 });
2433
2434 ChunkRange &chpePdata = ctx.config.machine == ARM64X ? hybridPdata : pdata;
2435 Symbol *rfeTableSym = ctx.symtab.findUnderscore(name: "__arm64x_extra_rfe_table");
2436 replaceSymbol<DefinedSynthetic>(s: rfeTableSym, arg: "__arm64x_extra_rfe_table",
2437 arg&: chpePdata.first);
2438
2439 if (chpePdata.first) {
2440 Symbol *rfeSizeSym =
2441 ctx.symtab.findUnderscore(name: "__arm64x_extra_rfe_table_size");
2442 cast<DefinedAbsolute>(Val: rfeSizeSym)
2443 ->setVA(chpePdata.last->getRVA() + chpePdata.last->getSize() -
2444 chpePdata.first->getRVA());
2445 }
2446
2447 Symbol *rangesCountSym =
2448 ctx.symtab.findUnderscore(name: "__x64_code_ranges_to_entry_points_count");
2449 cast<DefinedAbsolute>(Val: rangesCountSym)->setVA(exportThunks.size());
2450
2451 Symbol *entryPointCountSym =
2452 ctx.symtab.findUnderscore(name: "__arm64x_redirection_metadata_count");
2453 cast<DefinedAbsolute>(Val: entryPointCountSym)->setVA(exportThunks.size());
2454
2455 Symbol *iatSym = ctx.symtab.findUnderscore(name: "__hybrid_auxiliary_iat");
2456 replaceSymbol<DefinedSynthetic>(s: iatSym, arg: "__hybrid_auxiliary_iat",
2457 arg: idata.auxIat.empty() ? nullptr
2458 : idata.auxIat.front());
2459
2460 Symbol *iatCopySym = ctx.symtab.findUnderscore(name: "__hybrid_auxiliary_iat_copy");
2461 replaceSymbol<DefinedSynthetic>(
2462 s: iatCopySym, arg: "__hybrid_auxiliary_iat_copy",
2463 arg: idata.auxIatCopy.empty() ? nullptr : idata.auxIatCopy.front());
2464
2465 Symbol *delayIatSym =
2466 ctx.symtab.findUnderscore(name: "__hybrid_auxiliary_delayload_iat");
2467 replaceSymbol<DefinedSynthetic>(
2468 s: delayIatSym, arg: "__hybrid_auxiliary_delayload_iat",
2469 arg: delayIdata.getAuxIat().empty() ? nullptr
2470 : delayIdata.getAuxIat().front());
2471
2472 Symbol *delayIatCopySym =
2473 ctx.symtab.findUnderscore(name: "__hybrid_auxiliary_delayload_iat_copy");
2474 replaceSymbol<DefinedSynthetic>(
2475 s: delayIatCopySym, arg: "__hybrid_auxiliary_delayload_iat_copy",
2476 arg: delayIdata.getAuxIatCopy().empty() ? nullptr
2477 : delayIdata.getAuxIatCopy().front());
2478
2479 if (ctx.config.machine == ARM64X) {
2480 // For the hybrid image, set the alternate entry point to the EC entry
2481 // point. In the hybrid view, it is swapped to the native entry point
2482 // using ARM64X relocations.
2483 if (auto altEntrySym = cast_or_null<Defined>(Val: ctx.symtab.entry)) {
2484 // If the entry is an EC export thunk, use its target instead.
2485 if (auto thunkChunk =
2486 dyn_cast<ECExportThunkChunk>(Val: altEntrySym->getChunk()))
2487 altEntrySym = thunkChunk->target;
2488 ctx.symtab.findUnderscore(name: "__arm64x_native_entrypoint")
2489 ->replaceKeepingName(other: altEntrySym, size: sizeof(SymbolUnion));
2490 }
2491
2492 if (ctx.symtab.edataStart)
2493 ctx.dynamicRelocs->set(
2494 rva: dataDirOffset64 + EXPORT_TABLE * sizeof(data_directory) +
2495 offsetof(data_directory, Size),
2496 value: ctx.symtab.edataEnd->getRVA() - ctx.symtab.edataStart->getRVA() +
2497 ctx.symtab.edataEnd->getSize());
2498 if (hybridPdata.first) {
2499 ctx.dynamicRelocs->set(
2500 rva: dataDirOffset64 + EXCEPTION_TABLE * sizeof(data_directory) +
2501 offsetof(data_directory, Size),
2502 value: hybridPdata.last->getRVA() - hybridPdata.first->getRVA() +
2503 hybridPdata.last->getSize());
2504 if (chpeSym) {
2505 size_t size = 0;
2506 if (pdata.first)
2507 size = pdata.last->getRVA() + pdata.last->getSize() -
2508 pdata.first->getRVA();
2509 ctx.dynamicRelocs->set(rva: chpeSym->getRVA() +
2510 offsetof(chpe_metadata, ExtraRFETableSize),
2511 value: size);
2512 }
2513 }
2514 }
2515}
2516
2517// Write section contents to a mmap'ed file.
2518void Writer::writeSections() {
2519 llvm::TimeTraceScope timeScope("Write sections");
2520 uint8_t *buf = buffer->getBufferStart();
2521 for (OutputSection *sec : ctx.outputSections) {
2522 uint8_t *secBuf = buf + sec->getFileOff();
2523 // Fill gaps between functions in .text with INT3 instructions
2524 // instead of leaving as NUL bytes (which can be interpreted as
2525 // ADD instructions). Only fill the gaps between chunks. Most
2526 // chunks overwrite it anyway, but uninitialized data chunks
2527 // merged into a code section don't.
2528 if ((sec->header.Characteristics & IMAGE_SCN_CNT_CODE) &&
2529 (ctx.config.machine == AMD64 || ctx.config.machine == I386)) {
2530 uint32_t prevEnd = 0;
2531 for (Chunk *c : sec->chunks) {
2532 uint32_t off = c->getRVA() - sec->getRVA();
2533 memset(s: secBuf + prevEnd, c: 0xCC, n: off - prevEnd);
2534 prevEnd = off + c->getSize();
2535 }
2536 memset(s: secBuf + prevEnd, c: 0xCC, n: sec->getRawSize() - prevEnd);
2537 }
2538
2539 parallelForEach(R&: sec->chunks, Fn: [&](Chunk *c) {
2540 c->writeTo(buf: secBuf + c->getRVA() - sec->getRVA());
2541 });
2542 }
2543}
2544
2545void Writer::writeBuildId() {
2546 llvm::TimeTraceScope timeScope("Write build ID");
2547
2548 // There are two important parts to the build ID.
2549 // 1) If building with debug info, the COFF debug directory contains a
2550 // timestamp as well as a Guid and Age of the PDB.
2551 // 2) In all cases, the PE COFF file header also contains a timestamp.
2552 // For reproducibility, instead of a timestamp we want to use a hash of the
2553 // PE contents.
2554 Configuration *config = &ctx.config;
2555 bool generateSyntheticBuildId = config->buildIDHash == BuildIDHash::Binary;
2556 if (generateSyntheticBuildId) {
2557 assert(buildId && "BuildId is not set!");
2558 // BuildId->BuildId was filled in when the PDB was written.
2559 }
2560
2561 // At this point the only fields in the COFF file which remain unset are the
2562 // "timestamp" in the COFF file header, and the ones in the coff debug
2563 // directory. Now we can hash the file and write that hash to the various
2564 // timestamp fields in the file.
2565 StringRef outputFileData(
2566 reinterpret_cast<const char *>(buffer->getBufferStart()),
2567 buffer->getBufferSize());
2568
2569 uint32_t timestamp = config->timestamp;
2570 uint64_t hash = 0;
2571
2572 if (config->repro || generateSyntheticBuildId)
2573 hash = xxh3_64bits(data: outputFileData);
2574
2575 if (config->repro)
2576 timestamp = static_cast<uint32_t>(hash);
2577
2578 if (generateSyntheticBuildId) {
2579 buildId->buildId->PDB70.CVSignature = OMF::Signature::PDB70;
2580 buildId->buildId->PDB70.Age = 1;
2581 memcpy(dest: buildId->buildId->PDB70.Signature, src: &hash, n: 8);
2582 // xxhash only gives us 8 bytes, so put some fixed data in the other half.
2583 memcpy(dest: &buildId->buildId->PDB70.Signature[8], src: "LLD PDB.", n: 8);
2584 }
2585
2586 if (debugDirectory)
2587 debugDirectory->setTimeDateStamp(timestamp);
2588
2589 uint8_t *buf = buffer->getBufferStart();
2590 buf += dosStubSize + sizeof(PEMagic);
2591 object::coff_file_header *coffHeader =
2592 reinterpret_cast<coff_file_header *>(buf);
2593 coffHeader->TimeDateStamp = timestamp;
2594}
2595
2596// Sort .pdata section contents according to PE/COFF spec 5.5.
2597template <typename T>
2598void Writer::sortExceptionTable(ChunkRange &exceptionTable) {
2599 if (!exceptionTable.first)
2600 return;
2601
2602 // We assume .pdata contains function table entries only.
2603 auto bufAddr = [&](Chunk *c) {
2604 OutputSection *os = ctx.getOutputSection(c);
2605 return buffer->getBufferStart() + os->getFileOff() + c->getRVA() -
2606 os->getRVA();
2607 };
2608 uint8_t *begin = bufAddr(exceptionTable.first);
2609 uint8_t *end = bufAddr(exceptionTable.last) + exceptionTable.last->getSize();
2610 if ((end - begin) % sizeof(T) != 0) {
2611 Fatal(ctx) << "unexpected .pdata size: " << (end - begin)
2612 << " is not a multiple of " << sizeof(T);
2613 }
2614
2615 parallelSort(MutableArrayRef<T>(reinterpret_cast<T *>(begin),
2616 reinterpret_cast<T *>(end)),
2617 [](const T &a, const T &b) { return a.begin < b.begin; });
2618}
2619
2620// Sort .pdata section contents according to PE/COFF spec 5.5.
2621void Writer::sortExceptionTables() {
2622 llvm::TimeTraceScope timeScope("Sort exception table");
2623
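  // Layouts of the .pdata entries being sorted; only the leading 'begin' RVA
  // is used as the sort key.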
2624 struct EntryX64 {
2625 ulittle32_t begin, end, unwind;
2626 };
2627 struct EntryArm {
2628 ulittle32_t begin, unwind;
2629 };
2630
2631 switch (ctx.config.machine) {
2632 case AMD64:
2633 sortExceptionTable<EntryX64>(exceptionTable&: pdata);
2634 break;
2635 case ARM64EC:
2636 case ARM64X:
2637 sortExceptionTable<EntryX64>(exceptionTable&: hybridPdata);
2638 [[fallthrough]];
2639 case ARMNT:
2640 case ARM64:
2641 sortExceptionTable<EntryArm>(exceptionTable&: pdata);
2642 break;
2643 default:
2644 if (pdata.first)
2645 ctx.e.errs() << "warning: don't know how to handle .pdata\n";
2646 break;
2647 }
2648}
2649
2650// The CRT section contains, among other things, the array of function
2651// pointers that initialize every global variable that is not trivially
2652// constructed. The CRT calls them one after the other prior to invoking
2653// main().
2654//
2655// As per C++ spec, 3.6.2/2.3,
2656// "Variables with ordered initialization defined within a single
2657// translation unit shall be initialized in the order of their definitions
2658// in the translation unit"
2659//
2660// It is therefore critical to sort the chunks containing the function
2661// pointers in the order that they are listed in the object file (top to
2662// bottom), otherwise global objects might not be initialized in the
2663// correct order.
2664void Writer::sortCRTSectionChunks(std::vector<Chunk *> &chunks) {
2665 auto sectionChunkOrder = [](const Chunk *a, const Chunk *b) {
2666 auto sa = dyn_cast<SectionChunk>(Val: a);
2667 auto sb = dyn_cast<SectionChunk>(Val: b);
2668 assert(sa && sb && "Non-section chunks in CRT section!");
2669
2670 StringRef sAObj = sa->file->mb.getBufferIdentifier();
2671 StringRef sBObj = sb->file->mb.getBufferIdentifier();
2672
2673 return sAObj == sBObj && sa->getSectionNumber() < sb->getSectionNumber();
2674 };
2675 llvm::stable_sort(Range&: chunks, C: sectionChunkOrder);
2676
2677 if (ctx.config.verbose) {
2678 for (auto &c : chunks) {
2679 auto sc = dyn_cast<SectionChunk>(Val: c);
2680 Log(ctx) << " " << sc->file->mb.getBufferIdentifier().str()
2681 << ", SectionID: " << sc->getSectionNumber();
2682 }
2683 }
2684}
2685
2686OutputSection *Writer::findSection(StringRef name) {
2687 for (OutputSection *sec : ctx.outputSections)
2688 if (sec->name == name)
2689 return sec;
2690 return nullptr;
2691}
2692
2693uint32_t Writer::getSizeOfInitializedData() {
2694 uint32_t res = 0;
2695 for (OutputSection *s : ctx.outputSections)
2696 if (s->header.Characteristics & IMAGE_SCN_CNT_INITIALIZED_DATA)
2697 res += s->getRawSize();
2698 return res;
2699}
2700
2701// Add base relocations to .reloc section.
2702void Writer::addBaserels() {
2703 if (!ctx.config.relocatable)
2704 return;
2705 std::vector<Baserel> v;
2706 for (OutputSection *sec : ctx.outputSections) {
2707 if (sec->header.Characteristics & IMAGE_SCN_MEM_DISCARDABLE)
2708 continue;
2709 llvm::TimeTraceScope timeScope("Base relocations: ", sec->name);
2710 // Collect all locations for base relocations.
2711 for (Chunk *c : sec->chunks)
2712 c->getBaserels(res: &v);
2713 // Add the addresses to .reloc section.
2714 if (!v.empty())
2715 addBaserelBlocks(v);
2716 v.clear();
2717 }
2718}
2719
2720// Add addresses to .reloc section. Note that addresses are grouped by page.
2721void Writer::addBaserelBlocks(std::vector<Baserel> &v) {
2722 const uint32_t mask = ~uint32_t(pageSize - 1);
2723 uint32_t page = v[0].rva & mask;
2724 size_t i = 0, j = 1;
2725 llvm::sort(C&: v,
2726 Comp: [](const Baserel &x, const Baserel &y) { return x.rva < y.rva; });
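  // After sorting by RVA, relocations that fall on the same page are
  // contiguous, so each page transition starts a new BaserelChunk.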
2727 for (size_t e = v.size(); j < e; ++j) {
2728 uint32_t p = v[j].rva & mask;
2729 if (p == page)
2730 continue;
2731 relocSec->addChunk(c: make<BaserelChunk>(args&: page, args: &v[i], args: &v[0] + j));
2732 i = j;
2733 page = p;
2734 }
2735 if (i == j)
2736 return;
2737 relocSec->addChunk(c: make<BaserelChunk>(args&: page, args: &v[i], args: &v[0] + j));
2738}
2739
2740void Writer::createDynamicRelocs() {
2741 if (!ctx.dynamicRelocs)
2742 return;
2743
2744 // Adjust the Machine field in the COFF header to AMD64.
2745 ctx.dynamicRelocs->add(type: IMAGE_DVRT_ARM64X_FIXUP_TYPE_VALUE, size: sizeof(uint16_t),
2746 offset: coffHeaderOffset + offsetof(coff_file_header, Machine),
2747 value: AMD64);
2748
2749 if (ctx.symtab.entry != ctx.hybridSymtab->entry ||
2750 pdata.first != hybridPdata.first) {
2751 chpeSym = cast_or_null<DefinedRegular>(
2752 Val: ctx.symtab.findUnderscore(name: "__chpe_metadata"));
2753 if (!chpeSym)
2754 Warn(ctx) << "'__chpe_metadata' is missing for ARM64X target";
2755 }
2756
2757 if (ctx.symtab.entry != ctx.hybridSymtab->entry) {
2758 ctx.dynamicRelocs->add(type: IMAGE_DVRT_ARM64X_FIXUP_TYPE_VALUE, size: sizeof(uint32_t),
2759 offset: peHeaderOffset +
2760 offsetof(pe32plus_header, AddressOfEntryPoint),
2761 value: cast_or_null<Defined>(Val: ctx.symtab.entry));
2762
2763 // Swap the alternate entry point in the CHPE metadata.
2764 if (chpeSym)
2765 ctx.dynamicRelocs->add(
2766 type: IMAGE_DVRT_ARM64X_FIXUP_TYPE_VALUE, size: sizeof(uint32_t),
2767 offset: Arm64XRelocVal(chpeSym, offsetof(chpe_metadata, AlternateEntryPoint)),
2768 value: cast_or_null<Defined>(Val: ctx.hybridSymtab->entry));
2769 }
2770
2771 if (ctx.symtab.edataStart != ctx.hybridSymtab->edataStart) {
2772 ctx.dynamicRelocs->add(type: IMAGE_DVRT_ARM64X_FIXUP_TYPE_VALUE, size: sizeof(uint32_t),
2773 offset: dataDirOffset64 +
2774 EXPORT_TABLE * sizeof(data_directory) +
2775 offsetof(data_directory, RelativeVirtualAddress),
2776 value: ctx.symtab.edataStart);
2777 // The Size value is assigned after addresses are finalized.
2778 ctx.dynamicRelocs->add(type: IMAGE_DVRT_ARM64X_FIXUP_TYPE_VALUE, size: sizeof(uint32_t),
2779 offset: dataDirOffset64 +
2780 EXPORT_TABLE * sizeof(data_directory) +
2781 offsetof(data_directory, Size));
2782 }
2783
2784 if (pdata.first != hybridPdata.first) {
2785 ctx.dynamicRelocs->add(type: IMAGE_DVRT_ARM64X_FIXUP_TYPE_VALUE, size: sizeof(uint32_t),
2786 offset: dataDirOffset64 +
2787 EXCEPTION_TABLE * sizeof(data_directory) +
2788 offsetof(data_directory, RelativeVirtualAddress),
2789 value: hybridPdata.first);
2790 // The Size value is assigned after addresses are finalized.
2791 ctx.dynamicRelocs->add(type: IMAGE_DVRT_ARM64X_FIXUP_TYPE_VALUE, size: sizeof(uint32_t),
2792 offset: dataDirOffset64 +
2793 EXCEPTION_TABLE * sizeof(data_directory) +
2794 offsetof(data_directory, Size));
2795
2796 // Swap ExtraRFETable in the CHPE metadata.
2797 if (chpeSym) {
2798 ctx.dynamicRelocs->add(
2799 type: IMAGE_DVRT_ARM64X_FIXUP_TYPE_VALUE, size: sizeof(uint32_t),
2800 offset: Arm64XRelocVal(chpeSym, offsetof(chpe_metadata, ExtraRFETable)),
2801 value: pdata.first);
2802 // The Size value is assigned after addresses are finalized.
2803 ctx.dynamicRelocs->add(
2804 type: IMAGE_DVRT_ARM64X_FIXUP_TYPE_VALUE, size: sizeof(uint32_t),
2805 offset: Arm64XRelocVal(chpeSym, offsetof(chpe_metadata, ExtraRFETableSize)));
2806 }
2807 }
2808
2809 // Set the hybrid load config to the EC load config.
2810 ctx.dynamicRelocs->add(type: IMAGE_DVRT_ARM64X_FIXUP_TYPE_VALUE, size: sizeof(uint32_t),
2811 offset: dataDirOffset64 +
2812 LOAD_CONFIG_TABLE * sizeof(data_directory) +
2813 offsetof(data_directory, RelativeVirtualAddress),
2814 value: ctx.symtab.loadConfigSym);
2815 ctx.dynamicRelocs->add(type: IMAGE_DVRT_ARM64X_FIXUP_TYPE_VALUE, size: sizeof(uint32_t),
2816 offset: dataDirOffset64 +
2817 LOAD_CONFIG_TABLE * sizeof(data_directory) +
2818 offsetof(data_directory, Size),
2819 value: ctx.symtab.loadConfigSize);
2820}
2821
2822PartialSection *Writer::createPartialSection(StringRef name,
2823 uint32_t outChars) {
2824 PartialSection *&pSec = partialSections[{.name: name, .characteristics: outChars}];
2825 if (pSec)
2826 return pSec;
2827 pSec = make<PartialSection>(args&: name, args&: outChars);
2828 return pSec;
2829}
2830
2831PartialSection *Writer::findPartialSection(StringRef name, uint32_t outChars) {
2832 auto it = partialSections.find(x: {.name: name, .characteristics: outChars});
2833 if (it != partialSections.end())
2834 return it->second;
2835 return nullptr;
2836}
2837
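// Patch the alignment bits of the TLS directory (_tls_used) directly in the
// output buffer.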
2838void Writer::fixTlsAlignment() {
2839 Defined *tlsSym =
2840 dyn_cast_or_null<Defined>(Val: ctx.symtab.findUnderscore(name: "_tls_used"));
2841 if (!tlsSym)
2842 return;
2843
2844 OutputSection *sec = ctx.getOutputSection(c: tlsSym->getChunk());
2845 assert(sec && tlsSym->getRVA() >= sec->getRVA() &&
2846 "no output section for _tls_used");
2847
2848 uint8_t *secBuf = buffer->getBufferStart() + sec->getFileOff();
2849 uint64_t tlsOffset = tlsSym->getRVA() - sec->getRVA();
2850 uint64_t directorySize = ctx.config.is64()
2851 ? sizeof(object::coff_tls_directory64)
2852 : sizeof(object::coff_tls_directory32);
2853
2854 if (tlsOffset + directorySize > sec->getRawSize())
2855 Fatal(ctx) << "_tls_used sym is malformed";
2856
2857 if (ctx.config.is64()) {
2858 object::coff_tls_directory64 *tlsDir =
2859 reinterpret_cast<object::coff_tls_directory64 *>(&secBuf[tlsOffset]);
2860 tlsDir->setAlignment(tlsAlignment);
2861 } else {
2862 object::coff_tls_directory32 *tlsDir =
2863 reinterpret_cast<object::coff_tls_directory32 *>(&secBuf[tlsOffset]);
2864 tlsDir->setAlignment(tlsAlignment);
2865 }
2866}
2867
2868void Writer::prepareLoadConfig() {
2869 ctx.forEachActiveSymtab(f: [&](SymbolTable &symtab) {
2870 if (!symtab.loadConfigSym)
2871 return;
2872
2873 OutputSection *sec = ctx.getOutputSection(c: symtab.loadConfigSym->getChunk());
2874 uint8_t *secBuf = buffer->getBufferStart() + sec->getFileOff();
2875 uint8_t *symBuf = secBuf + (symtab.loadConfigSym->getRVA() - sec->getRVA());
2876
2877 if (ctx.config.is64())
2878 prepareLoadConfig(symtab,
2879 loadConfig: reinterpret_cast<coff_load_configuration64 *>(symBuf));
2880 else
2881 prepareLoadConfig(symtab,
2882 loadConfig: reinterpret_cast<coff_load_configuration32 *>(symBuf));
2883 });
2884}
2885
2886template <typename T>
2887void Writer::prepareLoadConfig(SymbolTable &symtab, T *loadConfig) {
2888 size_t loadConfigSize = loadConfig->Size;
2889
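// Helper macros: make sure the input-provided _load_config_used structure is
// large enough to contain a field before touching it, and verify that its
// table/count fields agree with the linker-generated symbols.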
2890#define RETURN_IF_NOT_CONTAINS(field) \
2891 if (loadConfigSize < offsetof(T, field) + sizeof(T::field)) { \
2892 Warn(ctx) << "'_load_config_used' structure too small to include " #field; \
2893 return; \
2894 }
2895
2896#define IF_CONTAINS(field) \
2897 if (loadConfigSize >= offsetof(T, field) + sizeof(T::field))
2898
2899#define CHECK_VA(field, sym) \
2900 if (auto *s = dyn_cast<DefinedSynthetic>(symtab.findUnderscore(sym))) \
2901 if (loadConfig->field != ctx.config.imageBase + s->getRVA()) \
2902 Warn(ctx) << #field " not set correctly in '_load_config_used'";
2903
2904#define CHECK_ABSOLUTE(field, sym) \
2905 if (auto *s = dyn_cast<DefinedAbsolute>(symtab.findUnderscore(sym))) \
2906 if (loadConfig->field != s->getVA()) \
2907 Warn(ctx) << #field " not set correctly in '_load_config_used'";
2908
2909 if (ctx.config.dependentLoadFlags) {
2910 RETURN_IF_NOT_CONTAINS(DependentLoadFlags)
2911 loadConfig->DependentLoadFlags = ctx.config.dependentLoadFlags;
2912 }
2913
2914 if (ctx.dynamicRelocs) {
2915 IF_CONTAINS(DynamicValueRelocTableSection) {
2916 loadConfig->DynamicValueRelocTableSection = relocSec->sectionIndex;
2917 loadConfig->DynamicValueRelocTableOffset =
2918 ctx.dynamicRelocs->getRVA() - relocSec->getRVA();
2919 }
2920 else {
2921 Warn(ctx) << "'_load_config_used' structure too small to include dynamic "
2922 "relocations";
2923 }
2924 }
2925
2926 IF_CONTAINS(CHPEMetadataPointer) {
2927 // On ARM64X, only the EC version of the load config contains
2928 // CHPEMetadataPointer. Copy its value to the native load config.
2929 if (ctx.config.machine == ARM64X && !symtab.isEC() &&
2930 ctx.symtab.loadConfigSize >=
2931 offsetof(T, CHPEMetadataPointer) + sizeof(T::CHPEMetadataPointer)) {
2932 OutputSection *sec =
2933 ctx.getOutputSection(c: ctx.symtab.loadConfigSym->getChunk());
2934 uint8_t *secBuf = buffer->getBufferStart() + sec->getFileOff();
2935 auto hybridLoadConfig =
2936 reinterpret_cast<const coff_load_configuration64 *>(
2937 secBuf + (ctx.symtab.loadConfigSym->getRVA() - sec->getRVA()));
2938 loadConfig->CHPEMetadataPointer = hybridLoadConfig->CHPEMetadataPointer;
2939 }
2940 }
2941
2942 if (ctx.config.guardCF == GuardCFLevel::Off)
2943 return;
2944 RETURN_IF_NOT_CONTAINS(GuardFlags)
2945 CHECK_VA(GuardCFFunctionTable, "__guard_fids_table")
2946 CHECK_ABSOLUTE(GuardCFFunctionCount, "__guard_fids_count")
2947 CHECK_ABSOLUTE(GuardFlags, "__guard_flags")
2948 IF_CONTAINS(GuardAddressTakenIatEntryCount) {
2949 CHECK_VA(GuardAddressTakenIatEntryTable, "__guard_iat_table")
2950 CHECK_ABSOLUTE(GuardAddressTakenIatEntryCount, "__guard_iat_count")
2951 }
2952
2953 if (!(ctx.config.guardCF & GuardCFLevel::LongJmp))
2954 return;
2955 RETURN_IF_NOT_CONTAINS(GuardLongJumpTargetCount)
2956 CHECK_VA(GuardLongJumpTargetTable, "__guard_longjmp_table")
2957 CHECK_ABSOLUTE(GuardLongJumpTargetCount, "__guard_longjmp_count")
2958
2959 if (!(ctx.config.guardCF & GuardCFLevel::EHCont))
2960 return;
2961 RETURN_IF_NOT_CONTAINS(GuardEHContinuationCount)
2962 CHECK_VA(GuardEHContinuationTable, "__guard_eh_cont_table")
2963 CHECK_ABSOLUTE(GuardEHContinuationCount, "__guard_eh_cont_count")
2964
2965#undef RETURN_IF_NOT_CONTAINS
2966#undef IF_CONTAINS
2967#undef CHECK_VA
2968#undef CHECK_ABSOLUTE
2969}
2970
