/* SPDX-License-Identifier: GPL-2.0 */
/*
 * ld script for the x86 kernel
 *
 * Historic 32-bit version written by Martin Mares <mj@atrey.karlin.mff.cuni.cz>
 *
 * Modernisation, unification and other changes and fixes:
 *   Copyright (C) 2007-2009  Sam Ravnborg <sam@ravnborg.org>
 *
 *
 * Don't define absolute symbols unless you know that the symbol value
 * should remain constant even if the kernel image is relocated at run
 * time. Absolute symbols are not relocated. If a symbol's value must
 * change when the kernel is relocated, make the symbol section-relative
 * and put it inside the section definition.
 */
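
/*
 * For illustration only (not part of this script) - a section-relative
 * symbol versus an absolute one, with made-up names:
 *
 *	.example : {
 *		__example_start = .;	<- section-relative, follows the
 *					   image if it is relocated
 *		*(.example)
 *	}
 *	__example_abs = ABSOLUTE(.);	<- absolute, keeps its link-time
 *					   value even after relocation
 */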

#define LOAD_OFFSET __START_KERNEL_map

#define RUNTIME_DISCARD_EXIT
#define EMITS_PT_NOTE
#define RO_EXCEPTION_TABLE_ALIGN	16

#include <asm-generic/vmlinux.lds.h>
#include <asm/asm-offsets.h>
#include <asm/thread_info.h>
#include <asm/page_types.h>
#include <asm/orc_lookup.h>
#include <asm/cache.h>
#include <asm/boot.h>
#include <asm/kexec.h>

#undef i386	/* in case the preprocessor is a 32-bit one */

OUTPUT_FORMAT(CONFIG_OUTPUT_FORMAT)

#ifdef CONFIG_X86_32
OUTPUT_ARCH(i386)
ENTRY(phys_startup_32)
#else
OUTPUT_ARCH(i386:x86-64)
ENTRY(phys_startup_64)
#endif

jiffies = jiffies_64;
const_current_task = current_task;
const_cpu_current_top_of_stack = cpu_current_top_of_stack;
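
/*
 * Aliasing jiffies to jiffies_64 works because x86 is little-endian: a
 * 32-bit load of "jiffies" reads the low word of the 64-bit counter, so
 * both names can share the same storage.
 */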

#if defined(CONFIG_X86_64)
/*
 * On 64-bit, align RODATA to 2MB so we retain large page mappings for
 * boundaries spanning kernel text, rodata and data sections.
 *
 * However, the kernel identity mappings will have different RWX
 * permissions for the pages mapping text than for the (freed) padding
 * pages after the text section, so the identity mappings there must be
 * broken up into smaller pages. Since kernel text and kernel identity
 * mappings are distinct on 64-bit, we can enable protection checks and
 * still retain 2MB large page mappings for kernel text.
 */
#define X86_ALIGN_RODATA_BEGIN	. = ALIGN(HPAGE_SIZE);

#define X86_ALIGN_RODATA_END				\
		. = ALIGN(HPAGE_SIZE);			\
		__end_rodata_hpage_align = .;		\
		__end_rodata_aligned = .;

#define ALIGN_ENTRY_TEXT_BEGIN	. = ALIGN(PMD_SIZE);
#define ALIGN_ENTRY_TEXT_END	. = ALIGN(PMD_SIZE);

/*
 * This section contains data which will be mapped as decrypted. Memory
 * encryption operates on a page basis. Make this section PMD-aligned
 * to avoid splitting the pages while mapping the section early.
 *
 * Note: We use a separate section so that only this section gets
 * decrypted to avoid exposing more than we wish.
 */
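
/*
 * A minimal sketch of how C code targets this section, assuming the
 * __bss_decrypted attribute from <asm/mem_encrypt.h> (which expands to
 * __section(".bss..decrypted")); the variable name is made up:
 *
 *	static unsigned long example_page[PAGE_SIZE / sizeof(unsigned long)]
 *		__bss_decrypted;
 */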
#define BSS_DECRYPTED					\
	. = ALIGN(PMD_SIZE);				\
	__start_bss_decrypted = .;			\
	__pi___start_bss_decrypted = .;			\
	*(.bss..decrypted);				\
	. = ALIGN(PAGE_SIZE);				\
	__start_bss_decrypted_unused = .;		\
	. = ALIGN(PMD_SIZE);				\
	__end_bss_decrypted = .;			\
	__pi___end_bss_decrypted = .;

#else

#define X86_ALIGN_RODATA_BEGIN
#define X86_ALIGN_RODATA_END				\
		. = ALIGN(PAGE_SIZE);			\
		__end_rodata_aligned = .;

#define ALIGN_ENTRY_TEXT_BEGIN
#define ALIGN_ENTRY_TEXT_END
#define BSS_DECRYPTED

#endif

#if defined(CONFIG_X86_64) && defined(CONFIG_KEXEC_CORE)
#define KEXEC_RELOCATE_KERNEL				\
	. = ALIGN(0x100);				\
	__relocate_kernel_start = .;			\
	*(.text..relocate_kernel);			\
	*(.data..relocate_kernel);			\
	__relocate_kernel_end = .;

ASSERT(__relocate_kernel_end - __relocate_kernel_start <= KEXEC_CONTROL_CODE_MAX_SIZE,
       "relocate_kernel code too large!")
#else
#define KEXEC_RELOCATE_KERNEL
#endif

PHDRS {
	text PT_LOAD FLAGS(5);		/* R_E */
	data PT_LOAD FLAGS(6);		/* RW_ */
	note PT_NOTE FLAGS(0);		/* ___ */
}
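
/*
 * The FLAGS() values are ELF p_flags bits: PF_X = 1, PF_W = 2, PF_R = 4;
 * hence 5 == PF_R | PF_X for the executable text segment and
 * 6 == PF_R | PF_W for the writable data segment.
 */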

SECTIONS
{
	. = __START_KERNEL;
#ifdef CONFIG_X86_32
	phys_startup_32 = ABSOLUTE(startup_32 - LOAD_OFFSET);
#else
	phys_startup_64 = ABSOLUTE(startup_64 - LOAD_OFFSET);
#endif

	/* Text and read-only data */
	.text : AT(ADDR(.text) - LOAD_OFFSET) {
		_text = .;
		__pi__text = .;
		_stext = .;
		ALIGN_ENTRY_TEXT_BEGIN
		*(.text..__x86.rethunk_untrain)
		ENTRY_TEXT

#ifdef CONFIG_MITIGATION_SRSO
		/*
		 * See the comment above srso_alias_untrain_ret()'s
		 * definition.
		 */
		. = srso_alias_untrain_ret | (1 << 2) | (1 << 8) | (1 << 14) | (1 << 20);
		*(.text..__x86.rethunk_safe)
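
		/*
		 * The location-counter assignment above ORs bits 2, 8, 14
		 * and 20 into srso_alias_untrain_ret's address, so the
		 * safe-return thunk placed next lands at an address that
		 * differs from it in exactly those bits (provided they are
		 * clear in the original address, which the SRSO ASSERT at
		 * the end of this file verifies with the
		 * (A | B) - (A & B) XOR trick).
		 */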
#endif
		ALIGN_ENTRY_TEXT_END

		TEXT_TEXT
		SCHED_TEXT
		LOCK_TEXT
		KPROBES_TEXT
		SOFTIRQENTRY_TEXT
#ifdef CONFIG_MITIGATION_RETPOLINE
		*(.text..__x86.indirect_thunk)
		*(.text..__x86.return_thunk)
#endif
		STATIC_CALL_TEXT
		*(.gnu.warning)

	} :text = 0xcccccccc
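
	/*
	 * The "= 0xcccccccc" output-section fill above pads any gaps in
	 * .text with the x86 INT3 opcode (0xcc), so a stray jump into the
	 * padding traps instead of executing whatever bytes happen to be
	 * there.
	 */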

	/* End of text section, which should occupy a whole number of pages */
	_etext = .;
	. = ALIGN(PAGE_SIZE);

	X86_ALIGN_RODATA_BEGIN
	RO_DATA(PAGE_SIZE)
	X86_ALIGN_RODATA_END

	/* Data */
	.data : AT(ADDR(.data) - LOAD_OFFSET) {
		/* Start of data section */
		_sdata = .;

		/* init_task */
		INIT_TASK_DATA(THREAD_SIZE)

		/* equivalent to task_pt_regs(&init_task) */
		__top_init_kernel_stack = __end_init_stack - TOP_OF_KERNEL_STACK_PADDING - PTREGS_SIZE;

#ifdef CONFIG_X86_32
		/* 32-bit has the nosave data before _edata */
		NOSAVE_DATA
#endif

		PAGE_ALIGNED_DATA(PAGE_SIZE)

		CACHE_HOT_DATA(L1_CACHE_BYTES)

		CACHELINE_ALIGNED_DATA(L1_CACHE_BYTES)

		DATA_DATA
		CONSTRUCTORS
		KEXEC_RELOCATE_KERNEL

		/* rarely changed data like cpu maps */
		READ_MOSTLY_DATA(INTERNODE_CACHE_BYTES)

		/* End of data section */
		_edata = .;
	} :data

	BUG_TABLE

	ORC_UNWIND_TABLE

	/* Init code and data - will be freed after init */
	. = ALIGN(PAGE_SIZE);
	.init.begin : AT(ADDR(.init.begin) - LOAD_OFFSET) {
		__init_begin = .; /* paired with __init_end */
	}

	INIT_TEXT_SECTION(PAGE_SIZE)

	/*
	 * Section for code used exclusively before alternatives are run. All
	 * references to such code must be patched out by alternatives, normally
	 * by using the X86_FEATURE_ALWAYS CPU feature bit.
	 *
	 * See static_cpu_has() for an example.
	 */
	.altinstr_aux : AT(ADDR(.altinstr_aux) - LOAD_OFFSET) {
		*(.altinstr_aux)
		. = ALIGN(PAGE_SIZE);
		__inittext_end = .;
	}
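
	/*
	 * A rough sketch of the lifecycle (label names here are purely
	 * illustrative): before alternatives run, static_cpu_has() branches
	 * to a stub in .altinstr_aux that tests the feature bit dynamically,
	 * e.g.:
	 *
	 *	stub:	testb $bit, boot_cpu_data+offset
	 *		jnz   t_yes
	 *		jmp   t_no
	 *
	 * Patching later replaces the original branch with a direct jump
	 * (or nothing), after which no references to the stub remain and
	 * the section is freed together with the init text ending at
	 * __inittext_end above.
	 */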

	INIT_DATA_SECTION(16)

	.x86_cpu_dev.init : AT(ADDR(.x86_cpu_dev.init) - LOAD_OFFSET) {
		__x86_cpu_dev_start = .;
		*(.x86_cpu_dev.init)
		__x86_cpu_dev_end = .;
	}

#ifdef CONFIG_X86_INTEL_MID
	.x86_intel_mid_dev.init : AT(ADDR(.x86_intel_mid_dev.init) - \
							LOAD_OFFSET) {
		__x86_intel_mid_dev_start = .;
		*(.x86_intel_mid_dev.init)
		__x86_intel_mid_dev_end = .;
	}
#endif

#ifdef CONFIG_MITIGATION_RETPOLINE
	/*
	 * List of instructions that call/jmp/jcc to retpoline thunks
	 * __x86_indirect_thunk_*(). These instructions can be patched along
	 * with alternatives, after which the section can be freed.
	 */
	. = ALIGN(8);
	.retpoline_sites : AT(ADDR(.retpoline_sites) - LOAD_OFFSET) {
		__retpoline_sites = .;
		*(.retpoline_sites)
		__retpoline_sites_end = .;
	}

	. = ALIGN(8);
	.return_sites : AT(ADDR(.return_sites) - LOAD_OFFSET) {
		__return_sites = .;
		*(.return_sites)
		__return_sites_end = .;
	}

	. = ALIGN(8);
	.call_sites : AT(ADDR(.call_sites) - LOAD_OFFSET) {
		__call_sites = .;
		*(.call_sites)
		__call_sites_end = .;
	}
#endif

#ifdef CONFIG_X86_KERNEL_IBT
	. = ALIGN(8);
	.ibt_endbr_seal : AT(ADDR(.ibt_endbr_seal) - LOAD_OFFSET) {
		__ibt_endbr_seal = .;
		*(.ibt_endbr_seal)
		__ibt_endbr_seal_end = .;
	}
#endif

#ifdef CONFIG_FINEIBT
	. = ALIGN(8);
	.cfi_sites : AT(ADDR(.cfi_sites) - LOAD_OFFSET) {
		__cfi_sites = .;
		*(.cfi_sites)
		__cfi_sites_end = .;
	}
#endif

	/*
	 * struct alt_instr entries. From the header (alternative.h):
	 * "Alternative instructions for different CPU types or capabilities"
	 * Think locking instructions on spinlocks.
	 */
	. = ALIGN(8);
	.altinstructions : AT(ADDR(.altinstructions) - LOAD_OFFSET) {
		__alt_instructions = .;
		*(.altinstructions)
		__alt_instructions_end = .;
	}
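
	/*
	 * Roughly, each entry looks like this in recent kernels (paraphrased
	 * from <asm/alternative.h>; field names may differ by version):
	 *
	 *	struct alt_instr {
	 *		s32 instr_offset;	original instruction (relative)
	 *		s32 repl_offset;	replacement (relative)
	 *		...			CPU feature flags
	 *		u8  instrlen;		length of original
	 *		u8  replacementlen;	length of replacement
	 *	};
	 */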

	/*
	 * And here are the replacement instructions. The linker sticks them
	 * in as binary blobs; the .altinstructions entries above carry the
	 * address and length of each so the kernel can be patched safely.
	 */
	.altinstr_replacement : AT(ADDR(.altinstr_replacement) - LOAD_OFFSET) {
		*(.altinstr_replacement)
	}

	. = ALIGN(8);
	.apicdrivers : AT(ADDR(.apicdrivers) - LOAD_OFFSET) {
		__apicdrivers = .;
		*(.apicdrivers);
		__apicdrivers_end = .;
	}

	. = ALIGN(8);
	/*
	 * .exit.text is discarded at runtime, not link time (see
	 * RUNTIME_DISCARD_EXIT above), to deal with references from
	 * .altinstructions.
	 */
	.exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) {
		EXIT_TEXT
	}

	.exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) {
		EXIT_DATA
	}

	PERCPU_SECTION(L1_CACHE_BYTES)
	ASSERT(__per_cpu_hot_end - __per_cpu_hot_start <= 64, "percpu cache hot data too large")

	RUNTIME_CONST_VARIABLES
	RUNTIME_CONST(ptr, USER_PTR_MAX)
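
	/*
	 * The RUNTIME_CONST*() tables above list patch sites: instructions
	 * carrying a placeholder immediate that the runtime-const machinery
	 * (<asm/runtime-const.h>) rewrites once during boot with the final
	 * value. Because patching happens exactly once, the tables can live
	 * in init memory and be freed afterwards.
	 */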

	. = ALIGN(PAGE_SIZE);

	/* freed after init ends here */
	.init.end : AT(ADDR(.init.end) - LOAD_OFFSET) {
		__init_end = .;
	}

	/*
	 * smp_locks might be freed after init
	 * start/end must be page aligned
	 */
	. = ALIGN(PAGE_SIZE);
	.smp_locks : AT(ADDR(.smp_locks) - LOAD_OFFSET) {
		__smp_locks = .;
		*(.smp_locks)
		. = ALIGN(PAGE_SIZE);
		__smp_locks_end = .;
	}
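
	/*
	 * Each .smp_locks entry is a relative reference to a LOCK prefix
	 * byte. When running on a single CPU, the alternatives code walks
	 * this table and patches the prefixes to NOPs (and restores them if
	 * another CPU comes up); see alternatives_enable_smp() in
	 * arch/x86/kernel/alternative.c.
	 */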

#ifdef CONFIG_X86_64
	.data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) {
		NOSAVE_DATA
	}
#endif

	/* BSS */
	. = ALIGN(PAGE_SIZE);
	.bss : AT(ADDR(.bss) - LOAD_OFFSET) {
		__bss_start = .;
		*(.bss..page_aligned)
		. = ALIGN(PAGE_SIZE);
		*(BSS_MAIN)
		BSS_DECRYPTED
		. = ALIGN(PAGE_SIZE);
		__bss_stop = .;
	}

	/*
	 * The memory occupied from _text to here, __end_of_kernel_reserve, is
	 * automatically reserved in setup_arch(). Anything after here must be
	 * explicitly reserved using memblock_reserve() or it will be discarded
	 * and treated as available memory.
	 */
	__end_of_kernel_reserve = .;

	. = ALIGN(PAGE_SIZE);
	.brk : AT(ADDR(.brk) - LOAD_OFFSET) {
		__brk_base = .;
		. += 64 * 1024;		/* 64k alignment slop space */
		*(.bss..brk)		/* areas brk users have reserved */
		__brk_limit = .;
	}
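
	/*
	 * Boot code carves allocations out of this window with extend_brk();
	 * compile-time reservations are declared with RESERVE_BRK(), for
	 * example (an existing use in arch/x86/kernel/setup.c):
	 *
	 *	RESERVE_BRK(dmi_alloc, 65536);
	 */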

	. = ALIGN(PAGE_SIZE);		/* keep VO_INIT_SIZE page aligned */
	_end = .;
	__pi__end = .;

#ifdef CONFIG_AMD_MEM_ENCRYPT
	/*
	 * Early scratch/workarea section: Lives outside of the kernel proper
	 * (_text - _end).
	 *
	 * Resides after _end because even though the .brk section is after
	 * __end_of_kernel_reserve, the .brk section is later reserved as a
	 * part of the kernel. Since it is located after __end_of_kernel_reserve
	 * it will be discarded and become part of the available memory. As
	 * such, it can only be used by very early boot code and must not be
	 * needed afterwards.
	 *
	 * Currently used by SME for performing in-place encryption of the
	 * kernel during boot. Resides on a 2MB boundary to simplify the
	 * pagetable setup used for SME in-place encryption.
	 */
	. = ALIGN(HPAGE_SIZE);
	.init.scratch : AT(ADDR(.init.scratch) - LOAD_OFFSET) {
		__init_scratch_begin = .;
		*(.init.scratch)
		. = ALIGN(HPAGE_SIZE);
		__init_scratch_end = .;
	}
#endif

	STABS_DEBUG
	DWARF_DEBUG
#ifdef CONFIG_PROPELLER_CLANG
	.llvm_bb_addr_map : { *(.llvm_bb_addr_map) }
#endif

	ELF_DETAILS

	DISCARDS

	/*
	 * Make sure that the .got.plt is either completely empty or contains
	 * only the three reserved lazy-dispatch entries (3 * 8 == 0x18 bytes
	 * on 64-bit, 3 * 4 == 0xc bytes on 32-bit).
	 */
	.got.plt (INFO) : { *(.got.plt) }
	ASSERT(SIZEOF(.got.plt) == 0 ||
#ifdef CONFIG_X86_64
	       SIZEOF(.got.plt) == 0x18,
#else
	       SIZEOF(.got.plt) == 0xc,
#endif
	       "Unexpected GOT/PLT entries detected!")

	/*
	 * Sections that should stay zero-sized; it is safer to check this
	 * explicitly than to discard them blindly.
	 */
	.got : {
		*(.got) *(.igot.*)
	}
	ASSERT(SIZEOF(.got) == 0, "Unexpected GOT entries detected!")

	.plt : {
		*(.plt) *(.plt.*) *(.iplt)
	}
	ASSERT(SIZEOF(.plt) == 0, "Unexpected run-time procedure linkages detected!")

	.rel.dyn : {
		*(.rel.*) *(.rel_*)
	}
	ASSERT(SIZEOF(.rel.dyn) == 0, "Unexpected run-time relocations (.rel) detected!")

	.rela.dyn : {
		*(.rela.*) *(.rela_*)
	}
	ASSERT(SIZEOF(.rela.dyn) == 0, "Unexpected run-time relocations (.rela) detected!")
}

/*
 * COMPILE_TEST kernels can be large - CONFIG_KASAN, for example, can cause
 * this. Let's assume that nobody will be running a COMPILE_TEST kernel and
 * assert that fuller build coverage is more valuable than being able to
 * run a COMPILE_TEST kernel.
 */
#ifndef CONFIG_COMPILE_TEST
/*
 * The ASSERT() sync to . is intentional, for binutils 2.14 compatibility:
 */
. = ASSERT((_end - LOAD_OFFSET <= KERNEL_IMAGE_SIZE),
	   "kernel image bigger than KERNEL_IMAGE_SIZE");
#endif

/* needed for Clang - see arch/x86/entry/entry.S */
PROVIDE(__ref_stack_chk_guard = __stack_chk_guard);

#ifdef CONFIG_X86_64

#ifdef CONFIG_MITIGATION_UNRET_ENTRY
. = ASSERT((retbleed_return_thunk & 0x3f) == 0, "retbleed_return_thunk not cacheline-aligned");
#endif

#ifdef CONFIG_MITIGATION_SRSO
. = ASSERT((srso_safe_ret & 0x3f) == 0, "srso_safe_ret not cacheline-aligned");
/*
 * GNU ld cannot do XOR until 2.41.
 * https://sourceware.org/git/?p=binutils-gdb.git;a=commit;h=f6f78318fca803c4907fb8d7f6ded8295f1947b1
 *
 * LLVM lld cannot do XOR until lld-17.
 * https://github.com/llvm/llvm-project/commit/fae96104d4378166cbe5c875ef8ed808a356f3fb
 *
 * Instead do: (A | B) - (A & B) in order to compute the XOR of the two
 * function addresses: OR sets every bit that is set in either address,
 * AND collects the bits set in both, so the difference leaves exactly
 * the bits that differ, i.e. A ^ B.
 */
. = ASSERT(((ABSOLUTE(srso_alias_untrain_ret) | srso_alias_safe_ret) -
	    (ABSOLUTE(srso_alias_untrain_ret) & srso_alias_safe_ret)) == ((1 << 2) | (1 << 8) | (1 << 14) | (1 << 20)),
	   "SRSO function pair won't alias");
#endif

#if defined(CONFIG_MITIGATION_ITS) && !defined(CONFIG_DEBUG_FORCE_FUNCTION_ALIGN_64B)
. = ASSERT(__x86_indirect_its_thunk_rax & 0x20, "__x86_indirect_its_thunk_rax not in second half of cacheline");
. = ASSERT(((__x86_indirect_its_thunk_rcx - __x86_indirect_its_thunk_rax) % 64) == 0, "Indirect thunks are not cacheline apart");
. = ASSERT(__x86_indirect_its_thunk_array == __x86_indirect_its_thunk_rax, "Gap in ITS thunk array");
. = ASSERT(its_return_thunk & 0x20, "its_return_thunk not in second half of cacheline");
#endif

#endif /* CONFIG_X86_64 */

/*
 * The symbols below are referenced using relative relocations in the
 * respective ELF notes. This produces build time constants that the
 * linker will never mark as relocatable. (Using just ABSOLUTE() is not
 * sufficient for that).
 */
#ifdef CONFIG_XEN_PV
xen_elfnote_entry_value =
	ABSOLUTE(xen_elfnote_entry) + ABSOLUTE(startup_xen);
#endif
#ifdef CONFIG_PVH
xen_elfnote_phys32_entry_value =
	ABSOLUTE(xen_elfnote_phys32_entry) + ABSOLUTE(pvh_start_xen - LOAD_OFFSET);
#endif

#include "../boot/startup/exports.h"
