//===- OutputSections.cpp -------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "OutputSections.h"
#include "Config.h"
#include "InputFiles.h"
#include "LinkerScript.h"
#include "Symbols.h"
#include "SyntheticSections.h"
#include "Target.h"
#include "lld/Common/Arrays.h"
#include "lld/Common/Memory.h"
#include "llvm/BinaryFormat/Dwarf.h"
#include "llvm/Config/llvm-config.h" // LLVM_ENABLE_ZLIB, LLVM_ENABLE_ZSTD
#include "llvm/Support/Compression.h"
#include "llvm/Support/LEB128.h"
#include "llvm/Support/Parallel.h"
#include "llvm/Support/Path.h"
#include "llvm/Support/TimeProfiler.h"
#undef in
#if LLVM_ENABLE_ZLIB
// Avoid introducing max as a macro from Windows headers.
#define NOMINMAX
#include <zlib.h>
#endif
#if LLVM_ENABLE_ZSTD
#include <zstd.h>
#endif

using namespace llvm;
using namespace llvm::dwarf;
using namespace llvm::object;
using namespace llvm::support::endian;
using namespace llvm::ELF;
using namespace lld;
using namespace lld::elf;

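// Compute program header flags (p_flags) from the section flags. For example,
// a typical .text section (SHF_ALLOC | SHF_EXECINSTR) yields PF_R | PF_X,
// while an execute-only section (SHF_ARM_PURECODE or SHF_AARCH64_PURECODE)
// drops PF_R and yields PF_X alone.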
uint32_t OutputSection::getPhdrFlags() const {
  uint32_t ret = 0;
  bool purecode =
      (ctx.arg.emachine == EM_ARM && (flags & SHF_ARM_PURECODE)) ||
      (ctx.arg.emachine == EM_AARCH64 && (flags & SHF_AARCH64_PURECODE));
  if (!purecode)
    ret |= PF_R;
  if (flags & SHF_WRITE)
    ret |= PF_W;
  if (flags & SHF_EXECINSTR)
    ret |= PF_X;
  return ret;
}

template <class ELFT>
void OutputSection::writeHeaderTo(typename ELFT::Shdr *shdr) {
  shdr->sh_entsize = entsize;
  shdr->sh_addralign = addralign;
  shdr->sh_type = type;
  shdr->sh_offset = offset;
  shdr->sh_flags = flags;
  shdr->sh_info = info;
  shdr->sh_link = link;
  shdr->sh_addr = addr;
  shdr->sh_size = size;
  shdr->sh_name = shName;
}

OutputSection::OutputSection(Ctx &ctx, StringRef name, uint32_t type,
                             uint64_t flags)
    : SectionBase(Output, ctx.internalFile, name, type, flags, /*link=*/0,
                  /*info=*/0, /*addralign=*/1, /*entsize=*/0),
      ctx(ctx) {}

uint64_t OutputSection::getLMA() const {
  return ptLoad ? addr + ptLoad->lmaOffset : addr;
}

// We allow sections of the types listed below to be merged into a single
// progbits section. This is typically done by linker scripts. Merging nobits
// and progbits will force disk space to be allocated for nobits sections.
// The other types don't require any special treatment on top of progbits, so
// there doesn't seem to be any harm in merging them.
//
// NOTE: clang since rL252300 emits SHT_X86_64_UNWIND .eh_frame sections. Allow
// them to be merged into SHT_PROGBITS .eh_frame (GNU as .cfi_*).
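//
// For example, a linker script rule like ".data : { *(.data) *(.bss) }"
// merges SHT_NOBITS .bss input into a SHT_PROGBITS .data output section, at
// the cost of allocating file space for the former nobits content.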
static bool canMergeToProgbits(Ctx &ctx, unsigned type) {
  return type == SHT_NOBITS || type == SHT_PROGBITS || type == SHT_INIT_ARRAY ||
         type == SHT_PREINIT_ARRAY || type == SHT_FINI_ARRAY ||
         type == SHT_NOTE ||
         (type == SHT_X86_64_UNWIND && ctx.arg.emachine == EM_X86_64);
}

// Record that isec will be placed in the OutputSection. isec does not become
// permanent until finalizeInputSections() is called. The function should not
// be used after finalizeInputSections() is called. If you need to add an
// InputSection post finalizeInputSections(), then you must do the following:
//
// 1. Find or create an InputSectionDescription to hold InputSection.
// 2. Add the InputSection to the InputSectionDescription::sections.
// 3. Call commitSection(isec).
void OutputSection::recordSection(InputSectionBase *isec) {
  partition = isec->partition;
  isec->parent = this;
  if (commands.empty() || !isa<InputSectionDescription>(commands.back()))
    commands.push_back(make<InputSectionDescription>(""));
  auto *isd = cast<InputSectionDescription>(commands.back());
  isd->sectionBases.push_back(isec);
}

// Update fields (type, flags, alignment, etc) according to the InputSection
// isec. Also check whether the InputSection flags and type are consistent with
// those of other InputSections.
void OutputSection::commitSection(InputSection *isec) {
  if (LLVM_UNLIKELY(type != isec->type)) {
    if (!hasInputSections && !typeIsSet) {
      type = isec->type;
    } else if (isStaticRelSecType(type) && isStaticRelSecType(isec->type) &&
               (type == SHT_CREL) != (isec->type == SHT_CREL)) {
      // Combine mixed SHT_REL[A] and SHT_CREL into SHT_CREL, renaming the
      // output section before the old type is overwritten.
      if (type == SHT_REL) {
        if (name.consume_front(".rel"))
          name = ctx.saver.save(".crel" + name);
      } else if (name.consume_front(".rela")) {
        name = ctx.saver.save(".crel" + name);
      }
      type = SHT_CREL;
    } else {
      if (typeIsSet || !canMergeToProgbits(ctx, type) ||
          !canMergeToProgbits(ctx, isec->type)) {
        // (NOLOAD) changes the section type to SHT_NOBITS; the intention is
        // that the contents at that address are provided by some other means.
        // Some projects (e.g.
        // https://github.com/ClangBuiltLinux/linux/issues/1597) rely on this
        // behavior. Other types get an error.
        if (type != SHT_NOBITS) {
          Err(ctx) << "section type mismatch for " << isec->name << "\n>>> "
                   << isec << ": "
                   << getELFSectionTypeName(ctx.arg.emachine, isec->type)
                   << "\n>>> output section " << name << ": "
                   << getELFSectionTypeName(ctx.arg.emachine, type);
        }
      }
      if (!typeIsSet)
        type = SHT_PROGBITS;
    }
  }
  if (!hasInputSections) {
    // If isec is the first section to be added to this output section,
    // initialize type, entsize and flags from isec.
    hasInputSections = true;
    entsize = isec->entsize;
    flags = isec->flags;
  } else {
    // Otherwise, check if new type or flags are compatible with existing ones.
    if ((flags ^ isec->flags) & SHF_TLS)
      ErrAlways(ctx) << "incompatible section flags for " << name << "\n>>> "
                     << isec << ": 0x" << utohexstr(isec->flags, true)
                     << "\n>>> output section " << name << ": 0x"
                     << utohexstr(flags, true);
  }

  isec->parent = this;
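  // Most flags are ORed together, but the execute-only flags
  // (SHF_ARM_PURECODE/SHF_AARCH64_PURECODE) are ANDed: the output can only be
  // execute-only if every merged input section is.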
  uint64_t andMask = 0;
  if (ctx.arg.emachine == EM_ARM)
    andMask |= (uint64_t)SHF_ARM_PURECODE;
  if (ctx.arg.emachine == EM_AARCH64)
    andMask |= (uint64_t)SHF_AARCH64_PURECODE;
  uint64_t orMask = ~andMask;
  uint64_t andFlags = (flags & isec->flags) & andMask;
  uint64_t orFlags = (flags | isec->flags) & orMask;
  flags = andFlags | orFlags;
  if (nonAlloc)
    flags &= ~(uint64_t)SHF_ALLOC;

  addralign = std::max(addralign, isec->addralign);

  // If this section contains a table of fixed-size entries, sh_entsize
  // holds the element size. If it contains elements of different size, we
  // set sh_entsize to 0.
  if (entsize != isec->entsize)
    entsize = 0;
}

static MergeSyntheticSection *createMergeSynthetic(Ctx &ctx, StringRef name,
                                                   uint32_t type,
                                                   uint64_t flags,
                                                   uint32_t addralign) {
  if ((flags & SHF_STRINGS) && ctx.arg.optimize >= 2)
    return make<MergeTailSection>(ctx, name, type, flags, addralign);
  return make<MergeNoTailSection>(ctx, name, type, flags, addralign);
}

// This function scans over the InputSectionBase list sectionBases to create
// InputSectionDescription::sections.
//
// It removes MergeInputSections from the input section array and adds
// new synthetic sections at the location of the first input section
// that it replaces. It then finalizes each synthetic section in order
// to compute an output offset for each piece of each input section.
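//
// For example, several .rodata.str1.1 MergeInputSections with identical flags
// and entsize end up in a single MergeSyntheticSection that deduplicates
// their contents.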
void OutputSection::finalizeInputSections() {
  auto *script = ctx.script;
  std::vector<MergeSyntheticSection *> mergeSections;
  for (SectionCommand *cmd : commands) {
    auto *isd = dyn_cast<InputSectionDescription>(cmd);
    if (!isd)
      continue;
    isd->sections.reserve(isd->sectionBases.size());
    for (InputSectionBase *s : isd->sectionBases) {
      MergeInputSection *ms = dyn_cast<MergeInputSection>(s);
      if (!ms) {
        isd->sections.push_back(cast<InputSection>(s));
        continue;
      }

      // We do not want to handle sections that are not alive, so just remove
      // them instead of trying to merge.
      if (!ms->isLive())
        continue;

      auto i = llvm::find_if(mergeSections, [=](MergeSyntheticSection *sec) {
        // While we could create a single synthetic section for two different
        // values of Entsize, it is better to take Entsize into consideration.
        //
        // With a single synthetic section no two pieces with different Entsize
        // could be equal, so we may as well have two sections.
        //
        // Using Entsize in here also allows us to propagate it to the
        // synthetic section.
        //
        // SHF_STRINGS sections with different alignments should not be merged.
        return sec->flags == ms->flags && sec->entsize == ms->entsize &&
               (sec->addralign == ms->addralign || !(sec->flags & SHF_STRINGS));
      });
      if (i == mergeSections.end()) {
        MergeSyntheticSection *syn = createMergeSynthetic(
            ctx, s->name, ms->type, ms->flags, ms->addralign);
        mergeSections.push_back(syn);
        i = std::prev(mergeSections.end());
        syn->entsize = ms->entsize;
        isd->sections.push_back(syn);
        // The merge synthetic section inherits the potential spill locations
        // of its first contained section.
        auto it = script->potentialSpillLists.find(ms);
        if (it != script->potentialSpillLists.end())
          script->potentialSpillLists.try_emplace(syn, it->second);
      }
      (*i)->addSection(ms);
    }

    // sectionBases should not be used from this point onwards. Clear it to
    // catch misuses.
    isd->sectionBases.clear();

    // Some input sections may be removed from the list after ICF.
    for (InputSection *s : isd->sections)
      commitSection(s);
  }
  for (auto *ms : mergeSections) {
    // Merging may have increased the alignment of a spillable section. Update
    // the alignment of potential spill sections and their containing output
    // sections.
    if (auto it = script->potentialSpillLists.find(ms);
        it != script->potentialSpillLists.end()) {
      for (PotentialSpillSection *s = it->second.head; s; s = s->next) {
        s->addralign = std::max(s->addralign, ms->addralign);
        s->parent->addralign = std::max(s->parent->addralign, s->addralign);
      }
    }

    ms->finalizeContents();
  }
}

static void sortByOrder(MutableArrayRef<InputSection *> in,
                        llvm::function_ref<int(InputSectionBase *s)> order) {
  std::vector<std::pair<int, InputSection *>> v;
  for (InputSection *s : in)
    v.emplace_back(order(s), s);
  llvm::stable_sort(v, less_first());

  for (size_t i = 0; i < v.size(); ++i)
    in[i] = v[i].second;
}

uint64_t elf::getHeaderSize(Ctx &ctx) {
  if (ctx.arg.oFormatBinary)
    return 0;
  return ctx.out.elfHeader->size + ctx.out.programHeaders->size;
}

void OutputSection::sort(llvm::function_ref<int(InputSectionBase *s)> order) {
  assert(isLive());
  for (SectionCommand *b : commands)
    if (auto *isd = dyn_cast<InputSectionDescription>(b))
      sortByOrder(isd->sections, order);
}

static void nopInstrFill(Ctx &ctx, uint8_t *buf, size_t size) {
  if (size == 0)
    return;
  unsigned i = 0;
  std::vector<std::vector<uint8_t>> nopFiller = *ctx.target->nopInstrs;
  unsigned num = size / nopFiller.back().size();
  for (unsigned c = 0; c < num; ++c) {
    memcpy(buf + i, nopFiller.back().data(), nopFiller.back().size());
    i += nopFiller.back().size();
  }
  unsigned remaining = size - i;
  if (!remaining)
    return;
  assert(nopFiller[remaining - 1].size() == remaining);
  memcpy(buf + i, nopFiller[remaining - 1].data(), remaining);
}

// Fill [Buf, Buf + Size) with Filler.
// This is used for the linker script "=fillexp" command.
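// For example, ".mysec : { *(.mysec) } =0x90909090" in a linker script fills
// gaps in the output section with the repeated 4-byte pattern 0x90909090.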
static void fill(uint8_t *buf, size_t size,
                 const std::array<uint8_t, 4> &filler) {
  size_t i = 0;
  for (; i + 4 < size; i += 4)
    memcpy(buf + i, filler.data(), 4);
  memcpy(buf + i, filler.data(), size - i);
}

#if LLVM_ENABLE_ZLIB
static SmallVector<uint8_t, 0> deflateShard(Ctx &ctx, ArrayRef<uint8_t> in,
                                            int level, int flush) {
  // 15 and 8 are default. windowBits=-15 is negative to generate raw deflate
  // data with no zlib header or trailer.
  z_stream s = {};
  auto res = deflateInit2(&s, level, Z_DEFLATED, -15, 8, Z_DEFAULT_STRATEGY);
  if (res != 0) {
    Err(ctx) << "--compress-sections: deflateInit2 returned " << res;
    return {};
  }
  s.next_in = const_cast<uint8_t *>(in.data());
  s.avail_in = in.size();

  // Allocate a buffer of half of the input size, and grow it by 1.5x if
  // insufficient.
  SmallVector<uint8_t, 0> out;
  size_t pos = 0;
  out.resize_for_overwrite(std::max<size_t>(in.size() / 2, 64));
  do {
    if (pos == out.size())
      out.resize_for_overwrite(out.size() * 3 / 2);
    s.next_out = out.data() + pos;
    s.avail_out = out.size() - pos;
    (void)deflate(&s, flush);
    pos = s.next_out - out.data();
  } while (s.avail_out == 0);
  assert(s.avail_in == 0);

  out.truncate(pos);
  deflateEnd(&s);
  return out;
}
#endif

// Compress certain non-SHF_ALLOC sections:
//
// * (if --compress-debug-sections is specified) non-empty .debug_* sections
// * (if --compress-sections is specified) matched sections
template <class ELFT> void OutputSection::maybeCompress(Ctx &ctx) {
  using Elf_Chdr = typename ELFT::Chdr;
  (void)sizeof(Elf_Chdr);

  DebugCompressionType ctype = DebugCompressionType::None;
  size_t compressedSize = sizeof(Elf_Chdr);
  unsigned level = 0; // default compression level
  if (!(flags & SHF_ALLOC) && ctx.arg.compressDebugSections &&
      name.starts_with(".debug_"))
    ctype = *ctx.arg.compressDebugSections;
  for (auto &[glob, t, l] : ctx.arg.compressSections)
    if (glob.match(name))
      std::tie(ctype, level) = {t, l};
  if (ctype == DebugCompressionType::None)
    return;
  if (flags & SHF_ALLOC) {
    Err(ctx) << "--compress-sections: section '" << name
             << "' with the SHF_ALLOC flag cannot be compressed";
    return;
  }

  llvm::TimeTraceScope timeScope("Compress sections");
  auto buf = std::make_unique<uint8_t[]>(size);
  // Write uncompressed data to a temporary zero-initialized buffer.
  {
    parallel::TaskGroup tg;
    writeTo<ELFT>(ctx, buf.get(), tg);
  }
  // The generic ABI specifies "The sh_size and sh_addralign fields of the
  // section header for a compressed section reflect the requirements of the
  // compressed section." However, 1-byte alignment has been widely accepted
  // and utilized for a long time. Removing alignment padding is particularly
  // useful when there are many compressed output sections.
  addralign = 1;

  // Split input into 1-MiB shards.
  [[maybe_unused]] constexpr size_t shardSize = 1 << 20;
  auto shardsIn = split(ArrayRef<uint8_t>(buf.get(), size), shardSize);
  const size_t numShards = shardsIn.size();
  auto shardsOut = std::make_unique<SmallVector<uint8_t, 0>[]>(numShards);

#if LLVM_ENABLE_ZSTD
  // Use ZSTD's streaming compression API. See
  // http://facebook.github.io/zstd/zstd_manual.html "Streaming compression -
  // HowTo".
  if (ctype == DebugCompressionType::Zstd) {
    parallelFor(0, numShards, [&](size_t i) {
      SmallVector<uint8_t, 0> out;
      ZSTD_CCtx *cctx = ZSTD_createCCtx();
      ZSTD_CCtx_setParameter(cctx, ZSTD_c_compressionLevel, level);
      ZSTD_inBuffer zib = {shardsIn[i].data(), shardsIn[i].size(), 0};
      ZSTD_outBuffer zob = {nullptr, 0, 0};
      size_t size;
      do {
        // Allocate a buffer of half of the input size, and grow it by 1.5x if
        // insufficient.
        if (zob.pos == zob.size) {
          out.resize_for_overwrite(
              zob.size ? zob.size * 3 / 2 : std::max<size_t>(zib.size / 4, 64));
          zob = {out.data(), out.size(), zob.pos};
        }
        size = ZSTD_compressStream2(cctx, &zob, &zib, ZSTD_e_end);
        assert(!ZSTD_isError(size));
      } while (size != 0);
      out.truncate(zob.pos);
      ZSTD_freeCCtx(cctx);
      shardsOut[i] = std::move(out);
    });
    compressed.type = ELFCOMPRESS_ZSTD;
    for (size_t i = 0; i != numShards; ++i)
      compressedSize += shardsOut[i].size();
  }
#endif

#if LLVM_ENABLE_ZLIB
  // We chose 1 (Z_BEST_SPEED) as the default compression level because it is
  // fast and provides decent compression ratios.
  if (ctype == DebugCompressionType::Zlib) {
    if (!level)
      level = Z_BEST_SPEED;

    // Compress shards and compute Adler-32 checksums. Use Z_SYNC_FLUSH for all
    // shards but the last to flush the output to a byte boundary to be
    // concatenated with the next shard.
    auto shardsAdler = std::make_unique<uint32_t[]>(numShards);
    parallelFor(0, numShards, [&](size_t i) {
      shardsOut[i] = deflateShard(ctx, shardsIn[i], level,
                                  i != numShards - 1 ? Z_SYNC_FLUSH : Z_FINISH);
      shardsAdler[i] = adler32(1, shardsIn[i].data(), shardsIn[i].size());
    });

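    // The concatenated shards form one zlib stream:
    //   [2-byte header][deflate shard 0]...[deflate shard N-1][Adler-32]
    // so only the fixed header and the trailing checksum are added below.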
    // Update section size and combine Adler-32 checksums.
    uint32_t checksum = 1; // Initial Adler-32 value
    compressedSize += 2;   // zlib header
    for (size_t i = 0; i != numShards; ++i) {
      compressedSize += shardsOut[i].size();
      checksum = adler32_combine(checksum, shardsAdler[i], shardsIn[i].size());
    }
    compressedSize += 4; // checksum
    compressed.type = ELFCOMPRESS_ZLIB;
    compressed.checksum = checksum;
  }
#endif

  if (compressedSize >= size)
    return;
  compressed.uncompressedSize = size;
  compressed.shards = std::move(shardsOut);
  compressed.numShards = numShards;
  size = compressedSize;
  flags |= SHF_COMPRESSED;
}

static void writeInt(Ctx &ctx, uint8_t *buf, uint64_t data, uint64_t size) {
  if (size == 1)
    *buf = data;
  else if (size == 2)
    write16(ctx, buf, data);
  else if (size == 4)
    write32(ctx, buf, data);
  else if (size == 8)
    write64(ctx, buf, data);
  else
    llvm_unreachable("unsupported Size argument");
}

template <class ELFT>
void OutputSection::writeTo(Ctx &ctx, uint8_t *buf, parallel::TaskGroup &tg) {
  llvm::TimeTraceScope timeScope("Write sections", name);
  if (type == SHT_NOBITS)
    return;
  if (type == SHT_CREL && !(flags & SHF_ALLOC)) {
    buf += encodeULEB128(crelHeader, buf);
    memcpy(buf, crelBody.data(), crelBody.size());
    return;
  }

  // If the section is compressed due to
  // --compress-debug-sections/--compress-sections, the content is already
  // known.
  if (compressed.shards) {
    auto *chdr = reinterpret_cast<typename ELFT::Chdr *>(buf);
    chdr->ch_type = compressed.type;
    chdr->ch_size = compressed.uncompressedSize;
    chdr->ch_addralign = addralign;
    buf += sizeof(*chdr);

    auto offsets = std::make_unique<size_t[]>(compressed.numShards);
    if (compressed.type == ELFCOMPRESS_ZLIB) {
      buf[0] = 0x78;  // CMF
      buf[1] = 0x01;  // FLG: best speed
      offsets[0] = 2; // zlib header
      write32be(buf + (size - sizeof(*chdr) - 4), compressed.checksum);
    }

    // Compute shard offsets.
    for (size_t i = 1; i != compressed.numShards; ++i)
      offsets[i] = offsets[i - 1] + compressed.shards[i - 1].size();
    parallelFor(0, compressed.numShards, [&](size_t i) {
      memcpy(buf + offsets[i], compressed.shards[i].data(),
             compressed.shards[i].size());
    });
    return;
  }

  // Write leading padding.
  ArrayRef<InputSection *> sections = getInputSections(*this, storage);
  std::array<uint8_t, 4> filler = getFiller(ctx);
  bool nonZeroFiller = read32(ctx, filler.data()) != 0;
  if (nonZeroFiller)
    fill(buf, sections.empty() ? size : sections[0]->outSecOff, filler);

  auto fn = [=, &ctx](size_t begin, size_t end) {
    size_t numSections = sections.size();
    for (size_t i = begin; i != end; ++i) {
      InputSection *isec = sections[i];
      if (auto *s = dyn_cast<SyntheticSection>(isec))
        s->writeTo(buf + isec->outSecOff);
      else
        isec->writeTo<ELFT>(ctx, buf + isec->outSecOff);

      // When in Arm BE8 mode, the linker has to convert the big-endian
      // instructions to little-endian, leaving the data big-endian.
      if (ctx.arg.emachine == EM_ARM && !ctx.arg.isLE && ctx.arg.armBe8 &&
          (flags & SHF_EXECINSTR))
        convertArmInstructionstoBE8(ctx, isec, buf + isec->outSecOff);

      // Fill gaps between sections.
      if (nonZeroFiller) {
        uint8_t *start = buf + isec->outSecOff + isec->getSize();
        uint8_t *end;
        if (i + 1 == numSections)
          end = buf + size;
        else
          end = buf + sections[i + 1]->outSecOff;
        if (isec->nopFiller) {
          assert(ctx.target->nopInstrs);
          nopInstrFill(ctx, start, end - start);
        } else
          fill(start, end - start, filler);
      }
    }
  };

  // If there is any BYTE()-family command (rare), write the section content
  // first, then process BYTE to overwrite the filler content. The write is
  // serial due to the limitation of llvm/Support/Parallel.h.
  bool written = false;
  size_t numSections = sections.size();
  for (SectionCommand *cmd : commands)
    if (auto *data = dyn_cast<ByteCommand>(cmd)) {
      if (!std::exchange(written, true))
        fn(0, numSections);
      writeInt(ctx, buf + data->offset, data->expression().getValue(),
               data->size);
    }
  if (written || !numSections)
    return;

  // There is no data command. Write content asynchronously to overlap the
  // write time with other output sections. Note, if a linker script specifies
  // overlapping output sections (which needs --noinhibit-exec or
  // --no-check-sections to suppress the error), the output may be
  // non-deterministic.
  const size_t taskSizeLimit = 4 << 20;
  for (size_t begin = 0, i = 0, taskSize = 0;;) {
    taskSize += sections[i]->getSize();
    bool done = ++i == numSections;
    if (done || taskSize >= taskSizeLimit) {
      tg.spawn([=] { fn(begin, i); });
      if (done)
        break;
      begin = i;
      taskSize = 0;
    }
  }
}

static void finalizeShtGroup(Ctx &ctx, OutputSection *os,
                             InputSection *section) {
  // The sh_link field for SHT_GROUP sections should contain the section index
  // of the symbol table.
  os->link = ctx.in.symTab->getParent()->sectionIndex;

  if (!section)
    return;

  // sh_info then contains the index of an entry in the symbol table section
  // which provides the signature of the section group.
  ArrayRef<Symbol *> symbols = section->file->getSymbols();
  os->info = ctx.in.symTab->getSymbolIndex(*symbols[section->info]);

  // Some group members may be combined or discarded, so we need to compute the
  // new size. The content will be rewritten in InputSection::copyShtGroup.
  DenseSet<uint32_t> seen;
  ArrayRef<InputSectionBase *> sections = section->file->getSections();
  for (const uint32_t &idx : section->getDataAs<uint32_t>().slice(1))
    if (OutputSection *osec = sections[read32(ctx, &idx)]->getOutputSection())
      seen.insert(osec->sectionIndex);
  os->size = (1 + seen.size()) * sizeof(uint32_t);
}

template <class uint>
LLVM_ATTRIBUTE_ALWAYS_INLINE static void
encodeOneCrel(Ctx &ctx, raw_svector_ostream &os,
              Elf_Crel<sizeof(uint) == 8> &out, uint offset, const Symbol &sym,
              uint32_t type, uint addend) {
  const auto deltaOffset = static_cast<uint64_t>(offset - out.r_offset);
  out.r_offset = offset;
  int64_t symidx = ctx.in.symTab->getSymbolIndex(sym);
  if (sym.type == STT_SECTION) {
    auto *d = dyn_cast<Defined>(&sym);
    if (d) {
      SectionBase *section = d->section;
      assert(section->isLive());
      addend = sym.getVA(ctx, addend) - section->getOutputSection()->addr;
    } else {
      // Encode R_*_NONE(symidx=0).
      symidx = type = addend = 0;
    }
  }

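  // Each record is delta-encoded against the previous one. The leading byte
  // packs the low four bits of the offset delta (bit 7 set means more delta
  // bits follow as a ULEB128 of deltaOffset >> 4) with three "changed" flags:
  // bit 0 for the symbol index, bit 1 for the type, bit 2 for the addend.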
  // Similar to llvm::ELF::encodeCrel.
  uint8_t b = deltaOffset * 8 + (out.r_symidx != symidx) +
              (out.r_type != type ? 2 : 0) +
              (uint(out.r_addend) != addend ? 4 : 0);
  if (deltaOffset < 0x10) {
    os << char(b);
  } else {
    os << char(b | 0x80);
    encodeULEB128(deltaOffset >> 4, os);
  }
  if (b & 1) {
    encodeSLEB128(static_cast<int32_t>(symidx - out.r_symidx), os);
    out.r_symidx = symidx;
  }
  if (b & 2) {
    encodeSLEB128(static_cast<int32_t>(type - out.r_type), os);
    out.r_type = type;
  }
  if (b & 4) {
    encodeSLEB128(std::make_signed_t<uint>(addend - out.r_addend), os);
    out.r_addend = addend;
  }
}

template <class ELFT>
static size_t relToCrel(Ctx &ctx, raw_svector_ostream &os,
                        Elf_Crel<ELFT::Is64Bits> &out, InputSection *relSec,
                        InputSectionBase *sec) {
  const auto &file = *cast<ELFFileBase>(relSec->file);
  if (relSec->type == SHT_REL) {
    // REL conversion is complex and not supported yet.
    Err(ctx) << relSec << ": REL cannot be converted to CREL";
    return 0;
  }
  auto rels = relSec->getDataAs<typename ELFT::Rela>();
  for (auto rel : rels) {
    encodeOneCrel<typename ELFT::uint>(
        ctx, os, out, sec->getVA(rel.r_offset), file.getRelocTargetSym(rel),
        rel.getType(ctx.arg.isMips64EL), getAddend<ELFT>(rel));
  }
  return rels.size();
}

// Compute the content of a non-alloc CREL section due to -r or --emit-relocs.
// Input CREL sections are decoded, while REL[A] sections need to be converted.
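// The ULEB128 CREL header computed below packs the record count with flag
// bits (crelHeader = totalCount * 8 + 4); the constant 4 here marks records
// that carry explicit addends.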
template <bool is64> void OutputSection::finalizeNonAllocCrel(Ctx &ctx) {
  using uint = typename Elf_Crel_Impl<is64>::uint;
  raw_svector_ostream os(crelBody);
  uint64_t totalCount = 0;
  Elf_Crel<is64> out{};
  assert(commands.size() == 1);
  auto *isd = cast<InputSectionDescription>(commands[0]);
  for (InputSection *relSec : isd->sections) {
    const auto &file = *cast<ELFFileBase>(relSec->file);
    InputSectionBase *sec = relSec->getRelocatedSection();
    if (relSec->type == SHT_CREL) {
      RelocsCrel<is64> entries(relSec->content_);
      totalCount += entries.size();
      for (Elf_Crel_Impl<is64> r : entries) {
        encodeOneCrel<uint>(ctx, os, out, uint(sec->getVA(r.r_offset)),
                            file.getSymbol(r.r_symidx), r.r_type, r.r_addend);
      }
      continue;
    }

    // Convert REL[A] to CREL.
    if constexpr (is64) {
      totalCount += ctx.arg.isLE ? relToCrel<ELF64LE>(ctx, os, out, relSec, sec)
                                 : relToCrel<ELF64BE>(ctx, os, out, relSec, sec);
    } else {
      totalCount += ctx.arg.isLE ? relToCrel<ELF32LE>(ctx, os, out, relSec, sec)
                                 : relToCrel<ELF32BE>(ctx, os, out, relSec, sec);
    }
  }

  crelHeader = totalCount * 8 + 4;
  size = getULEB128Size(crelHeader) + crelBody.size();
}

void OutputSection::finalize(Ctx &ctx) {
  InputSection *first = getFirstInputSection(this);

  if (flags & SHF_LINK_ORDER) {
    // We must preserve the link order dependency of sections with the
    // SHF_LINK_ORDER flag. The dependency is indicated by the sh_link field.
    // We need to translate the InputSection sh_link to the OutputSection
    // sh_link; all InputSections in the OutputSection have the same
    // dependency.
    if (auto *ex = dyn_cast<ARMExidxSyntheticSection>(first))
      link = ex->getLinkOrderDep()->getParent()->sectionIndex;
    else if (first->flags & SHF_LINK_ORDER)
      if (auto *d = first->getLinkOrderDep())
        link = d->getParent()->sectionIndex;
  }

  if (type == SHT_GROUP) {
    finalizeShtGroup(ctx, this, first);
    return;
  }

  if (!ctx.arg.copyRelocs || !isStaticRelSecType(type))
    return;

  // Skip if 'first' is synthetic, i.e. not a section created by --emit-relocs.
  // Normally 'type' was changed by 'first', so 'first' should be non-null.
  // However, if the output section is .rela.dyn, 'type' can be set by the
  // empty synthetic .rela.plt and 'first' can be null.
  if (!first || isa<SyntheticSection>(first))
    return;

  link = ctx.in.symTab->getParent()->sectionIndex;
  // sh_info for SHT_REL[A] sections should contain the section header index of
  // the section to which the relocation applies.
  InputSectionBase *s = first->getRelocatedSection();
  info = s->getOutputSection()->sectionIndex;
  flags |= SHF_INFO_LINK;
  // Finalize the content of non-alloc CREL.
  if (type == SHT_CREL) {
    if (ctx.arg.is64)
      finalizeNonAllocCrel<true>(ctx);
    else
      finalizeNonAllocCrel<false>(ctx);
  }
}

// Returns true if S is in one of the many forms the compiler driver may pass
// crtbegin files.
//
// Gcc uses any of crtbegin[<empty>|S|T].o.
// Clang uses Gcc's plus clang_rt.crtbegin[-<arch>|<empty>].o.
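// For example, "crtbeginS.o", "crtbeginT.o", and "clang_rt.crtbegin-x86_64.o"
// all match isCrt(s, "crtbegin").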
static bool isCrt(StringRef s, StringRef beginEnd) {
  s = sys::path::filename(s);
  if (!s.consume_back(".o"))
    return false;
  if (s.consume_front("clang_rt."))
    return s.consume_front(beginEnd);
  return s.consume_front(beginEnd) && s.size() <= 1;
}

// .ctors and .dtors are sorted by this order:
//
// 1. .ctors/.dtors in crtbegin (which contains a sentinel value -1).
// 2. The section is named ".ctors" or ".dtors" (priority: 65536).
// 3. The section has an optional priority value in the form of ".ctors.N" or
//    ".dtors.N" where N is a number in the form of %05u (priority: 65535-N).
// 4. .ctors/.dtors in crtend (which contains a sentinel value 0).
//
// For 2 and 3, the sections are sorted by priority from high to low, e.g.
// .ctors (65536), .ctors.00100 (65435), .ctors.00200 (65335). In GNU ld's
// internal linker scripts, the sorting is by string comparison which can
// achieve the same goal given the optional priority values are of the same
// length.
//
// In an ideal world, we wouldn't need this function because .init_array and
// .ctors are duplicate features (and .init_array is newer). However, there
// are too many real-world use cases of .ctors, so we had no choice but to
// support them with these rather ad-hoc semantics.
static bool compCtors(const InputSection *a, const InputSection *b) {
  bool beginA = isCrt(a->file->getName(), "crtbegin");
  bool beginB = isCrt(b->file->getName(), "crtbegin");
  if (beginA != beginB)
    return beginA;
  bool endA = isCrt(a->file->getName(), "crtend");
  bool endB = isCrt(b->file->getName(), "crtend");
  if (endA != endB)
    return endB;
  return getPriority(a->name) > getPriority(b->name);
}

// Sorts input sections by the special rules for .ctors and .dtors.
// Unfortunately, the rules are different from the ones for .{init,fini}_array.
// Read the comment above.
void OutputSection::sortCtorsDtors() {
  assert(commands.size() == 1);
  auto *isd = cast<InputSectionDescription>(commands[0]);
  llvm::stable_sort(isd->sections, compCtors);
}

// If an input string is in the form of "foo.N" where N is a number, return N
// (65535-N if .ctors.N or .dtors.N). Otherwise, return 65536, which is one
// greater than the lowest priority.
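// For example, getPriority(".init_array.00010") returns 10,
// getPriority(".ctors.00010") returns 65525, and getPriority(".text")
// returns 65536.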
int elf::getPriority(StringRef s) {
  size_t pos = s.rfind('.');
  if (pos == StringRef::npos)
    return 65536;
  int v = 65536;
  if (to_integer(s.substr(pos + 1), v, 10) &&
      (pos == 6 && (s.starts_with(".ctors") || s.starts_with(".dtors"))))
    v = 65535 - v;
  return v;
}

InputSection *elf::getFirstInputSection(const OutputSection *os) {
  for (SectionCommand *cmd : os->commands)
    if (auto *isd = dyn_cast<InputSectionDescription>(cmd))
      if (!isd->sections.empty())
        return isd->sections[0];
  return nullptr;
}

ArrayRef<InputSection *>
elf::getInputSections(const OutputSection &os,
                      SmallVector<InputSection *, 0> &storage) {
  ArrayRef<InputSection *> ret;
  storage.clear();
  for (SectionCommand *cmd : os.commands) {
    auto *isd = dyn_cast<InputSectionDescription>(cmd);
    if (!isd)
      continue;
    if (ret.empty()) {
      ret = isd->sections;
    } else {
      if (storage.empty())
        storage.assign(ret.begin(), ret.end());
      storage.insert(storage.end(), isd->sections.begin(),
                     isd->sections.end());
    }
  }
  return storage.empty() ? ret : ArrayRef(storage);
}

// Sorts input sections by section name suffixes, so that .foo.N comes
// before .foo.M if N < M. Used to sort .{init,fini}_array.N sections.
// We want to keep the original order if the priorities are the same
// because the compiler keeps the original initialization order in a
// translation unit and we need to respect that.
// For more detail, read the section of GCC's manual about init_priority.
void OutputSection::sortInitFini() {
  // Sort sections by priority.
  sort([](InputSectionBase *s) { return getPriority(s->name); });
}

std::array<uint8_t, 4> OutputSection::getFiller(Ctx &ctx) {
  if (filler)
    return *filler;
  if (flags & SHF_EXECINSTR)
    return ctx.target->trapInstr;
  return {0, 0, 0, 0};
}

void OutputSection::checkDynRelAddends(Ctx &ctx) {
  assert(ctx.arg.writeAddends && ctx.arg.checkDynamicRelocs);
  assert(isStaticRelSecType(type));
  SmallVector<InputSection *, 0> storage;
  ArrayRef<InputSection *> sections = getInputSections(*this, storage);
  parallelFor(0, sections.size(), [&](size_t i) {
    // When linking with -r or --emit-relocs we might also call this function
    // for input .rel[a].<sec> sections which we simply pass through to the
    // output. We skip over those and only look at the synthetic relocation
    // sections created during linking.
    if (!SyntheticSection::classof(sections[i]) ||
        !is_contained({ELF::SHT_REL, ELF::SHT_RELA, ELF::SHT_RELR},
                      sections[i]->type))
      return;
    const auto *sec = cast<RelocationBaseSection>(sections[i]);
    for (const DynamicReloc &rel : sec->relocs) {
      int64_t addend = rel.addend;
      const OutputSection *relOsec = rel.inputSec->getOutputSection();
      assert(relOsec != nullptr && "missing output section for relocation");
      // Some targets have NOBITS synthetic sections with dynamic relocations
      // with non-zero addends. Skip such sections.
      if (is_contained({EM_PPC, EM_PPC64}, ctx.arg.emachine) &&
          (rel.inputSec == ctx.in.ppc64LongBranchTarget.get() ||
           rel.inputSec == ctx.in.igotPlt.get()))
        continue;
      const uint8_t *relocTarget = ctx.bufferStart + relOsec->offset +
                                   rel.inputSec->getOffset(rel.offsetInSec);
      // For SHT_NOBITS the written addend is always zero.
      int64_t writtenAddend =
          relOsec->type == SHT_NOBITS
              ? 0
              : ctx.target->getImplicitAddend(relocTarget, rel.type);
      if (addend != writtenAddend)
        InternalErr(ctx, relocTarget)
            << "wrote incorrect addend value 0x" << utohexstr(writtenAddend)
            << " instead of 0x" << utohexstr(addend)
            << " for dynamic relocation " << rel.type << " at offset 0x"
            << utohexstr(rel.getOffset())
            << (rel.sym ? " against symbol " + rel.sym->getName() : "");
    }
  });
}

template void OutputSection::writeHeaderTo<ELF32LE>(ELF32LE::Shdr *Shdr);
template void OutputSection::writeHeaderTo<ELF32BE>(ELF32BE::Shdr *Shdr);
template void OutputSection::writeHeaderTo<ELF64LE>(ELF64LE::Shdr *Shdr);
template void OutputSection::writeHeaderTo<ELF64BE>(ELF64BE::Shdr *Shdr);

template void OutputSection::writeTo<ELF32LE>(Ctx &, uint8_t *,
                                              llvm::parallel::TaskGroup &);
template void OutputSection::writeTo<ELF32BE>(Ctx &, uint8_t *,
                                              llvm::parallel::TaskGroup &);
template void OutputSection::writeTo<ELF64LE>(Ctx &, uint8_t *,
                                              llvm::parallel::TaskGroup &);
template void OutputSection::writeTo<ELF64BE>(Ctx &, uint8_t *,
                                              llvm::parallel::TaskGroup &);

template void OutputSection::maybeCompress<ELF32LE>(Ctx &);
template void OutputSection::maybeCompress<ELF32BE>(Ctx &);
template void OutputSection::maybeCompress<ELF64LE>(Ctx &);
template void OutputSection::maybeCompress<ELF64BE>(Ctx &);