//===- InputChunks.cpp ----------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "InputChunks.h"
#include "Config.h"
#include "OutputSegment.h"
#include "WriterUtils.h"
#include "lld/Common/ErrorHandler.h"
#include "lld/Common/LLVM.h"
#include "llvm/Support/LEB128.h"
#include "llvm/Support/xxhash.h"
#include <algorithm>

#define DEBUG_TYPE "lld"

using namespace llvm;
using namespace llvm::wasm;
using namespace llvm::support::endian;

namespace lld {
StringRef relocTypeToString(uint8_t relocType) {
  switch (relocType) {
#define WASM_RELOC(NAME, REL)                                                  \
  case REL:                                                                    \
    return #NAME;
#include "llvm/BinaryFormat/WasmRelocs.def"
#undef WASM_RELOC
  }
  llvm_unreachable("unknown reloc type");
}
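// For illustration: each WASM_RELOC(NAME, REL) entry in WasmRelocs.def
// expands to one case in the switch above; e.g.
// WASM_RELOC(R_WASM_FUNCTION_INDEX_LEB, 0) becomes
//   case 0: return "R_WASM_FUNCTION_INDEX_LEB";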

bool relocIs64(uint8_t relocType) {
  switch (relocType) {
  case R_WASM_MEMORY_ADDR_LEB64:
  case R_WASM_MEMORY_ADDR_SLEB64:
  case R_WASM_MEMORY_ADDR_REL_SLEB64:
  case R_WASM_MEMORY_ADDR_I64:
  case R_WASM_TABLE_INDEX_SLEB64:
  case R_WASM_TABLE_INDEX_I64:
  case R_WASM_FUNCTION_OFFSET_I64:
  case R_WASM_TABLE_INDEX_REL_SLEB64:
  case R_WASM_MEMORY_ADDR_TLS_SLEB64:
    return true;
  default:
    return false;
  }
}

std::string toString(const wasm::InputChunk *c) {
  return (toString(c->file) + ":(" + c->name + ")").str();
}

namespace wasm {
StringRef InputChunk::getComdatName() const {
  uint32_t index = getComdat();
  if (index == UINT32_MAX)
    return StringRef();
  return file->getWasmObj()->linkingData().Comdats[index];
}

uint32_t InputChunk::getSize() const {
  if (const auto *ms = dyn_cast<SyntheticMergedChunk>(this))
    return ms->builder.getSize();

  if (const auto *f = dyn_cast<InputFunction>(this)) {
    if (ctx.arg.compressRelocations && f->file) {
      return f->getCompressedSize();
    }
  }

  return data().size();
}

uint32_t InputChunk::getInputSize() const {
  if (const auto *f = dyn_cast<InputFunction>(this))
    return f->function->Size;
  return getSize();
}

// Copy this input chunk to an mmap'ed output file and apply relocations.
void InputChunk::writeTo(uint8_t *buf) const {
  if (const auto *f = dyn_cast<InputFunction>(this)) {
    if (file && ctx.arg.compressRelocations)
      return f->writeCompressed(buf);
  } else if (const auto *ms = dyn_cast<SyntheticMergedChunk>(this)) {
    ms->builder.write(buf + outSecOff);
    // Apply relocations
    ms->relocate(buf + outSecOff);
    return;
  }

  // Copy contents
  memcpy(buf + outSecOff, data().data(), data().size());

  // Apply relocations
  relocate(buf + outSecOff);
}

void InputChunk::relocate(uint8_t *buf) const {
  if (relocations.empty())
    return;

  LLVM_DEBUG(dbgs() << "applying relocations: " << toString(this)
                    << " count=" << relocations.size() << "\n");
  int32_t inputSectionOffset = getInputSectionOffset();
  uint64_t tombstone = getTombstone();

  for (const WasmRelocation &rel : relocations) {
    uint8_t *loc = buf + rel.Offset - inputSectionOffset;
    LLVM_DEBUG(dbgs() << "apply reloc: type=" << relocTypeToString(rel.Type));
    if (rel.Type != R_WASM_TYPE_INDEX_LEB)
      LLVM_DEBUG(dbgs() << " sym=" << file->getSymbols()[rel.Index]->getName());
    LLVM_DEBUG(dbgs() << " addend=" << rel.Addend << " index=" << rel.Index
                      << " offset=" << rel.Offset << "\n");
    // TODO(sbc): Check that the value is within the range of the
    // relocation type below. Most likely we must error out here
    // if it's not within range.
    uint64_t value = file->calcNewValue(rel, tombstone, this);
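
    // Illustrative note: LEB/SLEB relocation sites in the code section are
    // padded to a fixed width (5 or 10 bytes) so they can be patched in
    // place; e.g. the value 3 written as a ULEB128 padded to 5 bytes is
    // 0x83 0x80 0x80 0x80 0x00.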

    switch (rel.Type) {
    case R_WASM_TYPE_INDEX_LEB:
    case R_WASM_FUNCTION_INDEX_LEB:
    case R_WASM_GLOBAL_INDEX_LEB:
    case R_WASM_TAG_INDEX_LEB:
    case R_WASM_MEMORY_ADDR_LEB:
    case R_WASM_TABLE_NUMBER_LEB:
      encodeULEB128(static_cast<uint32_t>(value), loc, 5);
      break;
    case R_WASM_MEMORY_ADDR_LEB64:
      encodeULEB128(value, loc, 10);
      break;
    case R_WASM_TABLE_INDEX_SLEB:
    case R_WASM_TABLE_INDEX_REL_SLEB:
    case R_WASM_MEMORY_ADDR_SLEB:
    case R_WASM_MEMORY_ADDR_REL_SLEB:
    case R_WASM_MEMORY_ADDR_TLS_SLEB:
      encodeSLEB128(static_cast<int32_t>(value), loc, 5);
      break;
    case R_WASM_TABLE_INDEX_SLEB64:
    case R_WASM_TABLE_INDEX_REL_SLEB64:
    case R_WASM_MEMORY_ADDR_SLEB64:
    case R_WASM_MEMORY_ADDR_REL_SLEB64:
    case R_WASM_MEMORY_ADDR_TLS_SLEB64:
      encodeSLEB128(static_cast<int64_t>(value), loc, 10);
      break;
    case R_WASM_TABLE_INDEX_I32:
    case R_WASM_MEMORY_ADDR_I32:
    case R_WASM_FUNCTION_OFFSET_I32:
    case R_WASM_FUNCTION_INDEX_I32:
    case R_WASM_SECTION_OFFSET_I32:
    case R_WASM_GLOBAL_INDEX_I32:
    case R_WASM_MEMORY_ADDR_LOCREL_I32:
      write32le(loc, value);
      break;
    case R_WASM_TABLE_INDEX_I64:
    case R_WASM_MEMORY_ADDR_I64:
    case R_WASM_FUNCTION_OFFSET_I64:
      write64le(loc, value);
      break;
    default:
      llvm_unreachable("unknown relocation type");
    }
  }
}
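
// A relocation is live either if it doesn't reference a symbol at all
// (type-index relocations refer to the type section, so rel.Index is a type
// index rather than a symbol index), or if the symbol it references is live.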
static bool relocIsLive(const WasmRelocation &rel, ObjFile *file) {
  return rel.Type == R_WASM_TYPE_INDEX_LEB ||
         file->getSymbol(rel.Index)->isLive();
}

size_t InputChunk::getNumLiveRelocations() const {
  return llvm::count_if(relocations, [this](const WasmRelocation &rel) {
    return relocIsLive(rel, file);
  });
}

// Copy relocation entries to a given output stream.
// This function is used only when a user passes "-r". For a regular link,
// we consume relocations instead of copying them to an output file.
void InputChunk::writeRelocations(raw_ostream &os) const {
  if (relocations.empty())
    return;

  int32_t off = outSecOff - getInputSectionOffset();
  LLVM_DEBUG(dbgs() << "writeRelocations: " << file->getName()
                    << " offset=" << Twine(off) << "\n");

  for (const WasmRelocation &rel : relocations) {
    if (!relocIsLive(rel, file))
      continue;
    writeUleb128(os, rel.Type, "reloc type");
    writeUleb128(os, rel.Offset + off, "reloc offset");
    writeUleb128(os, file->calcNewIndex(rel), "reloc index");

    if (relocTypeHasAddend(rel.Type))
      writeSleb128(os, file->calcNewAddend(rel), "reloc addend");
  }
}

uint64_t InputChunk::getTombstone() const {
  if (const auto *s = dyn_cast<InputSection>(this)) {
    return s->tombstoneValue;
  }

  return 0;
}

void InputFunction::setFunctionIndex(uint32_t index) {
  LLVM_DEBUG(dbgs() << "InputFunction::setFunctionIndex: " << name << " -> "
                    << index << "\n");
  assert(!hasFunctionIndex());
  functionIndex = index;
}

void InputFunction::setTableIndex(uint32_t index) {
  LLVM_DEBUG(dbgs() << "InputFunction::setTableIndex: " << name << " -> "
                    << index << "\n");
  assert(!hasTableIndex());
  tableIndex = index;
}

// Write a relocation value without padding and return the number of bytes
// written.
static unsigned writeCompressedReloc(uint8_t *buf, const WasmRelocation &rel,
                                     uint64_t value) {
  switch (rel.Type) {
  case R_WASM_TYPE_INDEX_LEB:
  case R_WASM_FUNCTION_INDEX_LEB:
  case R_WASM_GLOBAL_INDEX_LEB:
  case R_WASM_TAG_INDEX_LEB:
  case R_WASM_MEMORY_ADDR_LEB:
  case R_WASM_MEMORY_ADDR_LEB64:
  case R_WASM_TABLE_NUMBER_LEB:
    return encodeULEB128(value, buf);
  case R_WASM_TABLE_INDEX_SLEB:
  case R_WASM_TABLE_INDEX_SLEB64:
  case R_WASM_MEMORY_ADDR_SLEB:
  case R_WASM_MEMORY_ADDR_SLEB64:
    return encodeSLEB128(static_cast<int64_t>(value), buf);
  default:
    llvm_unreachable("unexpected relocation type");
  }
}
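// For illustration: for a ULEB128-typed relocation, writeCompressedReloc
// with value 3 writes the single byte 0x03 and returns 1, versus the 5
// bytes the padded form occupies in the input.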

static unsigned getRelocWidthPadded(const WasmRelocation &rel) {
  switch (rel.Type) {
  case R_WASM_TYPE_INDEX_LEB:
  case R_WASM_FUNCTION_INDEX_LEB:
  case R_WASM_GLOBAL_INDEX_LEB:
  case R_WASM_TAG_INDEX_LEB:
  case R_WASM_MEMORY_ADDR_LEB:
  case R_WASM_TABLE_NUMBER_LEB:
  case R_WASM_TABLE_INDEX_SLEB:
  case R_WASM_MEMORY_ADDR_SLEB:
    return 5;
  case R_WASM_TABLE_INDEX_SLEB64:
  case R_WASM_MEMORY_ADDR_LEB64:
  case R_WASM_MEMORY_ADDR_SLEB64:
    return 10;
  default:
    llvm_unreachable("unexpected relocation type");
  }
}

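// Measure the unpadded width of a relocation value by encoding it into a
// scratch buffer and counting the bytes produced.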
static unsigned getRelocWidth(const WasmRelocation &rel, uint64_t value) {
  uint8_t buf[10];
  return writeCompressedReloc(buf, rel, value);
}

// Relocations of type LEB and SLEB in the code section are padded to 5 bytes
// so that a fast linker can blindly overwrite them without needing to worry
// about the number of bytes needed to encode the values.
// However, when outputting a non-relocatable file, the code section can be
// compressed to remove this padding for optimal output size.
// In this case we need to perform a size calculation based on the value at
// each relocation. At best we end up saving 4 bytes for each relocation
// entry.
//
// This function only computes the final output size. It must be called
// before getSize() is used to calculate the layout of the code section.
void InputFunction::calculateSize() {
  if (!file || !ctx.arg.compressRelocations)
    return;

  LLVM_DEBUG(dbgs() << "calculateSize: " << name << "\n");

  const uint8_t *secStart = file->codeSection->Content.data();
  const uint8_t *funcStart = secStart + getInputSectionOffset();
  uint32_t functionSizeLength;
  decodeULEB128(funcStart, &functionSizeLength);

  uint32_t start = getInputSectionOffset();
  uint32_t end = start + function->Size;

  uint64_t tombstone = getTombstone();

  uint32_t lastRelocEnd = start + functionSizeLength;
  for (const WasmRelocation &rel : relocations) {
    LLVM_DEBUG(dbgs() << "  region: " << (rel.Offset - lastRelocEnd) << "\n");
    compressedFuncSize += rel.Offset - lastRelocEnd;
    compressedFuncSize +=
        getRelocWidth(rel, file->calcNewValue(rel, tombstone, this));
    lastRelocEnd = rel.Offset + getRelocWidthPadded(rel);
  }
  LLVM_DEBUG(dbgs() << "  final region: " << (end - lastRelocEnd) << "\n");
  compressedFuncSize += end - lastRelocEnd;

  // Now that we know how long the resulting function is, we can add the
  // encoding of its length.
  uint8_t buf[5];
  compressedSize = compressedFuncSize + encodeULEB128(compressedFuncSize, buf);

  LLVM_DEBUG(dbgs() << "  calculateSize orig: " << function->Size << "\n");
  LLVM_DEBUG(dbgs() << "  calculateSize new: " << compressedSize << "\n");
}
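// Worked example (illustrative): if the body after the size prefix is 20
// bytes and contains two 5-byte padded LEB relocation sites whose new values
// each encode in 1 byte, compressedFuncSize = 20 - 2 * (5 - 1) = 12, and
// compressedSize = 12 + 1 (one byte to encode the new length) = 13.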

// Override the default writeTo method so that we can (optionally) write the
// compressed version of the function.
void InputFunction::writeCompressed(uint8_t *buf) const {
  buf += outSecOff;
  uint8_t *orig = buf;
  (void)orig;

  const uint8_t *secStart = file->codeSection->Content.data();
  const uint8_t *funcStart = secStart + getInputSectionOffset();
  const uint8_t *end = funcStart + function->Size;
  uint64_t tombstone = getTombstone();
  uint32_t count;
  decodeULEB128(funcStart, &count);
  funcStart += count;

  LLVM_DEBUG(dbgs() << "write func: " << name << "\n");
  buf += encodeULEB128(compressedFuncSize, buf);
  const uint8_t *lastRelocEnd = funcStart;
  for (const WasmRelocation &rel : relocations) {
    unsigned chunkSize = (secStart + rel.Offset) - lastRelocEnd;
    LLVM_DEBUG(dbgs() << "  write chunk: " << chunkSize << "\n");
    memcpy(buf, lastRelocEnd, chunkSize);
    buf += chunkSize;
    buf += writeCompressedReloc(buf, rel,
                                file->calcNewValue(rel, tombstone, this));
    lastRelocEnd = secStart + rel.Offset + getRelocWidthPadded(rel);
  }

  unsigned chunkSize = end - lastRelocEnd;
  LLVM_DEBUG(dbgs() << "  write final chunk: " << chunkSize << "\n");
  memcpy(buf, lastRelocEnd, chunkSize);
  LLVM_DEBUG(dbgs() << "  total: " << (buf + chunkSize - orig) << "\n");
}

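// The following helpers translate an offset within this chunk into, in turn,
// an offset within the output segment (getChunkOffset), an offset within the
// output section (getOffset), and a virtual address (getVA).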
uint64_t InputChunk::getChunkOffset(uint64_t offset) const {
  if (const auto *ms = dyn_cast<MergeInputChunk>(this)) {
    LLVM_DEBUG(dbgs() << "getChunkOffset(merged): " << name << "\n");
    LLVM_DEBUG(dbgs() << "offset: " << offset << "\n");
    LLVM_DEBUG(dbgs() << "parentOffset: " << ms->getParentOffset(offset)
                      << "\n");
    assert(ms->parent);
    return ms->parent->getChunkOffset(ms->getParentOffset(offset));
  }
  return outputSegmentOffset + offset;
}

uint64_t InputChunk::getOffset(uint64_t offset) const {
  return outSecOff + getChunkOffset(offset);
}

uint64_t InputChunk::getVA(uint64_t offset) const {
  return (outputSeg ? outputSeg->startVA : 0) + getChunkOffset(offset);
}

// Generate code to apply relocations to the data section at runtime.
// This is only called when generating shared libraries (PIC) where addresses
// are not known at static link time.
bool InputChunk::generateRelocationCode(raw_ostream &os) const {
  LLVM_DEBUG(dbgs() << "generating runtime relocations: " << name
                    << " count=" << relocations.size() << "\n");

  bool is64 = ctx.arg.is64.value_or(false);
  bool generated = false;
  unsigned opcode_ptr_const = is64 ? WASM_OPCODE_I64_CONST
                                   : WASM_OPCODE_I32_CONST;
  unsigned opcode_ptr_add = is64 ? WASM_OPCODE_I64_ADD
                                 : WASM_OPCODE_I32_ADD;

  uint64_t tombstone = getTombstone();
  // TODO(sbc): Encode the relocations in the data section and write a loop
  // here to apply them.
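
  // Each runtime relocation below expands to a short instruction sequence
  // that computes the target address, computes the value, and stores it.
  // For illustration, a 32-bit GOT-based relocation in non-TLS PIC code
  // emits roughly:
  //   i32.const <offset>          ;; address to patch
  //   global.get __memory_base
  //   i32.add
  //   global.get <got entry>      ;; value to store
  //   i32.store align=2 offset=0  ;; align exponent 2 = 4-byte alignment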
  for (const WasmRelocation &rel : relocations) {
    uint64_t offset = getVA(rel.Offset) - getInputSectionOffset();

    Symbol *sym = file->getSymbol(rel);
    // Runtime relocations are needed when we don't know the address of
    // a symbol statically.
    bool requiresRuntimeReloc = ctx.isPic || sym->hasGOTIndex();
    if (!requiresRuntimeReloc)
      continue;

    LLVM_DEBUG(dbgs() << "gen reloc: type=" << relocTypeToString(rel.Type)
                      << " addend=" << rel.Addend << " index=" << rel.Index
                      << " output offset=" << offset << "\n");

    // Calculate the address at which to apply the relocation
    writeU8(os, opcode_ptr_const, "CONST");
    writeSleb128(os, offset, "offset");

    // In PIC mode we need to add the __memory_base
    if (ctx.isPic) {
      writeU8(os, WASM_OPCODE_GLOBAL_GET, "GLOBAL_GET");
      if (isTLS())
        writeUleb128(os, ctx.sym.tlsBase->getGlobalIndex(), "tls_base");
      else
        writeUleb128(os, ctx.sym.memoryBase->getGlobalIndex(), "memory_base");
      writeU8(os, opcode_ptr_add, "ADD");
    }

    // Now figure out what we want to store at this location
    bool is64 = relocIs64(rel.Type);
    unsigned opcode_reloc_const =
        is64 ? WASM_OPCODE_I64_CONST : WASM_OPCODE_I32_CONST;
    unsigned opcode_reloc_add =
        is64 ? WASM_OPCODE_I64_ADD : WASM_OPCODE_I32_ADD;
    unsigned opcode_reloc_store =
        is64 ? WASM_OPCODE_I64_STORE : WASM_OPCODE_I32_STORE;

    if (sym->hasGOTIndex()) {
      writeU8(os, WASM_OPCODE_GLOBAL_GET, "GLOBAL_GET");
      writeUleb128(os, sym->getGOTIndex(), "global index");
      if (rel.Addend) {
        writeU8(os, opcode_reloc_const, "CONST");
        writeSleb128(os, rel.Addend, "addend");
        writeU8(os, opcode_reloc_add, "ADD");
      }
    } else {
      assert(ctx.isPic);
      const GlobalSymbol *baseSymbol = ctx.sym.memoryBase;
      if (rel.Type == R_WASM_TABLE_INDEX_I32 ||
          rel.Type == R_WASM_TABLE_INDEX_I64)
        baseSymbol = ctx.sym.tableBase;
      else if (sym->isTLS())
        baseSymbol = ctx.sym.tlsBase;
      writeU8(os, WASM_OPCODE_GLOBAL_GET, "GLOBAL_GET");
      writeUleb128(os, baseSymbol->getGlobalIndex(), "base");
      writeU8(os, opcode_reloc_const, "CONST");
      writeSleb128(os, file->calcNewValue(rel, tombstone, this), "offset");
      writeU8(os, opcode_reloc_add, "ADD");
    }

    // Store that value at the virtual address
    writeU8(os, opcode_reloc_store, "I32_STORE");
    writeUleb128(os, 2, "align");
    writeUleb128(os, 0, "offset");
    generated = true;
  }
  return generated;
}

// Split WASM_SEG_FLAG_STRINGS section. Such a section is a sequence of
// null-terminated strings.
void MergeInputChunk::splitStrings(ArrayRef<uint8_t> data) {
  LLVM_DEBUG(llvm::dbgs() << "splitStrings\n");
  size_t off = 0;
  StringRef s = toStringRef(data);

  while (!s.empty()) {
    size_t end = s.find(0);
    if (end == StringRef::npos)
      fatal(toString(this) + ": string is not null terminated");
    size_t size = end + 1;

    pieces.emplace_back(off, xxh3_64bits(s.substr(0, size)), true);
    s = s.substr(size);
    off += size;
  }
}
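// For illustration: the contents "foo\0ab\0" split into two pieces, one at
// offset 0 with size 4 and one at offset 4 with size 3, each hashed so that
// identical strings can later be deduplicated.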

// This function is called after we obtain a complete list of input sections
// that need to be linked. It is responsible for splitting section contents
// into small chunks for further processing.
//
// Note that this function is called from parallelForEach. This must be
// thread-safe (i.e. no memory allocation from the pools).
void MergeInputChunk::splitIntoPieces() {
  assert(pieces.empty());
  // As of now we only support WASM_SEG_FLAG_STRINGS but in the future we
  // could add other types of splitting (see ELF's splitIntoPieces).
  assert(flags & WASM_SEG_FLAG_STRINGS);
  splitStrings(data());
}

SectionPiece *MergeInputChunk::getSectionPiece(uint64_t offset) {
  if (this->data().size() <= offset)
    fatal(toString(this) + ": offset is outside the section");

  // If Offset is not at beginning of a section piece, it is not in the map.
  // In that case we need to do a binary search of the original section piece
  // vector.
  auto it = partition_point(
      pieces, [=](SectionPiece p) { return p.inputOff <= offset; });
  return &it[-1];
}

// Returns the offset in an output section for a given input offset.
// Because the contents of a mergeable section are not contiguous in the
// output, this is not just an addition to a base output offset.
uint64_t MergeInputChunk::getParentOffset(uint64_t offset) const {
  // If Offset is not at beginning of a section piece, it is not in the map.
  // In that case we need to search from the original section piece vector.
  const SectionPiece *piece = getSectionPiece(offset);
  uint64_t addend = offset - piece->inputOff;
  return piece->outputOff + addend;
}

void SyntheticMergedChunk::finalizeContents() {
  // Add all string pieces to the string table builder to create section
  // contents.
  for (MergeInputChunk *sec : chunks)
    for (size_t i = 0, e = sec->pieces.size(); i != e; ++i)
      if (sec->pieces[i].live)
        builder.add(sec->getData(i));

  // Fix the string table content. After this, the contents will never change.
  builder.finalize();

  // finalize() fixed tail-optimized strings, so we can now get
  // offsets of strings. Get an offset for each string and save it
  // to a corresponding SectionPiece for easy access.
  for (MergeInputChunk *sec : chunks)
    for (size_t i = 0, e = sec->pieces.size(); i != e; ++i)
      if (sec->pieces[i].live)
        sec->pieces[i].outputOff = builder.getOffset(sec->getData(i));
}
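// For illustration: with tail merging, the pieces "foobar\0" and "bar\0" can
// share storage in the output; getOffset("bar\0") then points three bytes
// into the merged "foobar\0".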

uint64_t InputSection::getTombstoneForSection(StringRef name) {
  // When a function is not live we need to update relocations referring to it.
  // If they occur in DWARF debug symbols, we want to change the pc of the
  // function to -1 to avoid overlapping with a valid range. However, for the
  // debug_ranges and debug_loc sections that would conflict with the existing
  // meaning of -1, so we use -2.
  if (name == ".debug_ranges" || name == ".debug_loc")
    return UINT64_C(-2);
  if (name.starts_with(".debug_"))
    return UINT64_C(-1);
  // If the function occurs in a function attribute section, change it to -1
  // since 0 is a valid function index.
  if (name.starts_with("llvm.func_attr."))
    return UINT64_C(-1);
  // Returning 0 means there is no tombstone value for this section, and the
  // relocation will just use the addend.
  return 0;
}

} // namespace wasm
} // namespace lld