/* SPDX-License-Identifier: GPL-2.0 */
/*
 * ld script to make ARM Linux kernel
 * taken from the i386 version by Russell King
 * Written by Martin Mares <mj@atrey.karlin.mff.cuni.cz>
 */

#include <asm/hyp_image.h>
#ifdef CONFIG_KVM
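/*
 * Gather the exception table entries emitted by the KVM hyp code into a
 * single __kvm_ex_table range, used to fix up faults taken while running
 * the hyp code at EL2.
 */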
#define HYPERVISOR_EXTABLE \
	. = ALIGN(SZ_8); \
	__start___kvm_ex_table = .; \
	*(__kvm_ex_table) \
	__stop___kvm_ex_table = .;

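/*
 * Group the hypervisor's read-only data (its .data..ro_after_init and
 * .rodata input sections) into one page-aligned region delimited by
 * __hyp_rodata_start/__hyp_rodata_end.
 */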
#define HYPERVISOR_DATA_SECTIONS \
	HYP_SECTION_NAME(.rodata) : { \
		. = ALIGN(PAGE_SIZE); \
		__hyp_rodata_start = .; \
		*(HYP_SECTION_NAME(.data..ro_after_init)) \
		*(HYP_SECTION_NAME(.rodata)) \
		. = ALIGN(PAGE_SIZE); \
		__hyp_rodata_end = .; \
	}

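/* Page-aligned output section holding the hypervisor's per-CPU data. */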
#define HYPERVISOR_PERCPU_SECTION \
	. = ALIGN(PAGE_SIZE); \
	HYP_SECTION_NAME(.data..percpu) : { \
		*(HYP_SECTION_NAME(.data..percpu)) \
	}

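/*
 * Table of locations within the hyp image that need to be relocated at
 * boot, delimited by __hyp_reloc_begin/__hyp_reloc_end.
 */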
#define HYPERVISOR_RELOC_SECTION \
	.hyp.reloc : ALIGN(4) { \
		__hyp_reloc_begin = .; \
		*(.hyp.reloc) \
		__hyp_reloc_end = .; \
	}

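/*
 * Place the hypervisor's BSS at the very start of the kernel .bss so that
 * __hyp_bss_start coincides with __bss_start (asserted at the end of this
 * file).
 */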
#define BSS_FIRST_SECTIONS \
	__hyp_bss_start = .; \
	*(HYP_SECTION_NAME(.bss)) \
	. = ALIGN(PAGE_SIZE); \
	__hyp_bss_end = .;

/*
 * We require that __hyp_bss_start and __bss_start are aligned, and enforce it
 * with an assertion. But the BSS_SECTION macro places an empty .sbss section
 * between them, which can in some cases cause the linker to misalign them. To
 * work around the issue, force a page alignment for __bss_start.
 */
#define SBSS_ALIGN			PAGE_SIZE
#else /* CONFIG_KVM */
#define HYPERVISOR_EXTABLE
#define HYPERVISOR_DATA_SECTIONS
#define HYPERVISOR_PERCPU_SECTION
#define HYPERVISOR_RELOC_SECTION
#define SBSS_ALIGN			0
#endif

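/*
 * Keep the exception table in RO_DATA with 4-byte alignment, and defer
 * discarding the .exit.* sections to runtime rather than link time.
 */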
#define RO_EXCEPTION_TABLE_ALIGN	4
#define RUNTIME_DISCARD_EXIT

#include <asm-generic/vmlinux.lds.h>
#include <asm/cache.h>
#include <asm/kernel-pgtable.h>
#include <asm/kexec.h>
#include <asm/memory.h>
#include <asm/page.h>

#include "image.h"

OUTPUT_ARCH(aarch64)
ENTRY(_text)

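/* On 64-bit, jiffies is simply an alias of the full jiffies_64 counter. */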
jiffies = jiffies_64;

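/*
 * Hypervisor text: the page-aligned hyp idmap text followed by the rest of
 * the hyp code and its exception table entries.
 */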
#define HYPERVISOR_TEXT \
	. = ALIGN(PAGE_SIZE); \
	__hyp_idmap_text_start = .; \
	*(.hyp.idmap.text) \
	__hyp_idmap_text_end = .; \
	__hyp_text_start = .; \
	*(.hyp.text) \
	HYPERVISOR_EXTABLE \
	. = ALIGN(PAGE_SIZE); \
	__hyp_text_end = .;

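/*
 * Code executed via the identity mapping; it must fit within a single 4K
 * page (asserted at the end of this file).
 */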
#define IDMAP_TEXT \
	. = ALIGN(SZ_4K); \
	__idmap_text_start = .; \
	*(.idmap.text) \
	__idmap_text_end = .;

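/*
 * Code for the final step of resuming from hibernation
 * (swsusp_arch_suspend_exit); it must fit in 4 KiB, as asserted below.
 */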
#ifdef CONFIG_HIBERNATION
#define HIBERNATE_TEXT \
	ALIGN_FUNCTION(); \
	__hibernate_exit_text_start = .; \
	*(.hibernate_exit.text) \
	__hibernate_exit_text_end = .;
#else
#define HIBERNATE_TEXT
#endif

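/*
 * The kexec relocation code (arm64_relocate_new_kernel), copied to its own
 * control page at kexec time; it must fit in 4 KiB, as asserted below.
 */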
#ifdef CONFIG_KEXEC_CORE
#define KEXEC_TEXT \
	ALIGN_FUNCTION(); \
	__relocate_new_kernel_start = .; \
	*(.kexec_relocate.text) \
	__relocate_new_kernel_end = .;
#else
#define KEXEC_TEXT
#endif

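/*
 * Text and rodata for the kernel entry trampoline used when the kernel is
 * unmapped from userspace page tables (kpti).
 */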
#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
#define TRAMP_TEXT \
	. = ALIGN(PAGE_SIZE); \
	__entry_tramp_text_start = .; \
	*(.entry.tramp.text) \
	. = ALIGN(PAGE_SIZE); \
	__entry_tramp_text_end = .; \
	*(.entry.tramp.rodata)
#else
#define TRAMP_TEXT
#endif

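/* Keep the .eh_frame unwind tables when CONFIG_UNWIND_TABLES is enabled. */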
#ifdef CONFIG_UNWIND_TABLES
#define UNWIND_DATA_SECTIONS \
	.eh_frame : { \
		__pi___eh_frame_start = .; \
		*(.eh_frame) \
		__pi___eh_frame_end = .; \
	}
#else
#define UNWIND_DATA_SECTIONS
#endif

/*
 * The size of the PE/COFF section that covers the kernel image, which
 * runs from _stext to _edata, must be a round multiple of the PE/COFF
 * FileAlignment, which we set to its minimum value of 0x200. '_stext'
 * itself is 4 KB aligned, so padding out _edata to a 0x200 aligned
 * boundary should be sufficient.
 */
PECOFF_FILE_ALIGNMENT = 0x200;

#ifdef CONFIG_EFI
#define PECOFF_EDATA_PADDING \
	.pecoff_edata_padding : { BYTE(0); . = ALIGN(PECOFF_FILE_ALIGNMENT); }
#else
#define PECOFF_EDATA_PADDING
#endif

SECTIONS
{
	/*
	 * XXX: The linker does not define how output sections are
	 * assigned to input sections when there are multiple statements
	 * matching the same input section name. There is no documented
	 * order of matching.
	 */
	DISCARDS
	/DISCARD/ : {
		*(.interp .dynamic)
		*(.dynsym .dynstr .hash .gnu.hash)
	}

	. = KIMAGE_VADDR;

	.head.text : {
		_text = .;
		HEAD_TEXT
	}
	.text : ALIGN(SEGMENT_ALIGN) {	/* Real text segment */
		_stext = .;		/* Text and read-only data */
		IRQENTRY_TEXT
		SOFTIRQENTRY_TEXT
		ENTRY_TEXT
		TEXT_TEXT
		SCHED_TEXT
		LOCK_TEXT
		KPROBES_TEXT
		HYPERVISOR_TEXT
		*(.gnu.warning)
	}

	. = ALIGN(SEGMENT_ALIGN);
	_etext = .;			/* End of text section */

	/* everything from this point to __init_begin will be marked RO NX */
	RO_DATA(PAGE_SIZE)

	HYPERVISOR_DATA_SECTIONS

	.got : { *(.got) }
	/*
	 * Make sure that the .got.plt is either completely empty or it
	 * contains only the lazy dispatch entries.
	 */
	.got.plt : { *(.got.plt) }
	ASSERT(SIZEOF(.got.plt) == 0 || SIZEOF(.got.plt) == 0x18,
	       "Unexpected GOT/PLT entries detected!")

	/* code sections that are never executed via the kernel mapping */
	.rodata.text : {
		TRAMP_TEXT
		HIBERNATE_TEXT
		KEXEC_TEXT
		IDMAP_TEXT
		. = ALIGN(PAGE_SIZE);
	}

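	/*
	 * Statically allocated page-table directories: the identity map, the
	 * kpti trampoline, the empty reserved tables and the kernel's swapper
	 * tables, one page each.
	 */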
	idmap_pg_dir = .;
	. += PAGE_SIZE;

#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
	tramp_pg_dir = .;
	. += PAGE_SIZE;
#endif

	reserved_pg_dir = .;
	. += PAGE_SIZE;

	swapper_pg_dir = .;
	. += PAGE_SIZE;

	. = ALIGN(SEGMENT_ALIGN);
	__init_begin = .;
	__inittext_begin = .;

	INIT_TEXT_SECTION(8)

	__exittext_begin = .;
	.exit.text : {
		EXIT_TEXT
	}
	__exittext_end = .;

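	/*
	 * Table of alternative instruction sequences, patched in at boot
	 * based on detected CPU features and errata.
	 */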
	. = ALIGN(4);
	.altinstructions : {
		__alt_instructions = .;
		*(.altinstructions)
		__alt_instructions_end = .;
	}

	UNWIND_DATA_SECTIONS

	. = ALIGN(SEGMENT_ALIGN);
	__inittext_end = .;
	__initdata_begin = .;

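	/* Page tables for the initial identity map used during early boot. */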
	init_idmap_pg_dir = .;
	. += INIT_IDMAP_DIR_SIZE;
	init_idmap_pg_end = .;

	.init.data : {
		INIT_DATA
		INIT_SETUP(16)
		INIT_CALLS
		CON_INITCALL
		INIT_RAM_FS
		*(.init.altinstructions .init.bss)	/* from the EFI stub */
	}
	.exit.data : {
		EXIT_DATA
	}

	PERCPU_SECTION(L1_CACHE_BYTES)
	HYPERVISOR_PERCPU_SECTION

	HYPERVISOR_RELOC_SECTION

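	/*
	 * Dynamic relocations (RELA, and the compressed RELR format) applied
	 * to the kernel image during early boot.
	 */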
	.rela.dyn : ALIGN(8) {
		__pi_rela_start = .;
		*(.rela .rela*)
		__pi_rela_end = .;
	}

	.relr.dyn : ALIGN(8) {
		__pi_relr_start = .;
		*(.relr.dyn)
		__pi_relr_end = .;
	}

	. = ALIGN(SEGMENT_ALIGN);
	__initdata_end = .;
	__init_end = .;

	_data = .;
	_sdata = .;
	RW_DATA(L1_CACHE_BYTES, PAGE_SIZE, THREAD_ALIGN)

	/*
	 * Data written with the MMU off but read with the MMU on requires
	 * cache lines to be invalidated, discarding up to a Cache Writeback
	 * Granule (CWG) of data from the cache. Keep the section that
	 * requires this type of maintenance to be in its own Cache Writeback
	 * Granule (CWG) area so the cache maintenance operations don't
	 * interfere with adjacent data.
	 */
	.mmuoff.data.write : ALIGN(SZ_2K) {
		__mmuoff_data_start = .;
		*(.mmuoff.data.write)
	}
	. = ALIGN(SZ_2K);
	.mmuoff.data.read : {
		*(.mmuoff.data.read)
		__mmuoff_data_end = .;
	}

	PECOFF_EDATA_PADDING
	__pecoff_data_rawsize = ABSOLUTE(. - __initdata_begin);
	_edata = .;

	/* start of zero-init region */
	BSS_SECTION(SBSS_ALIGN, 0, 0)

	. = ALIGN(PAGE_SIZE);
	init_pg_dir = .;
	. += INIT_DIR_SIZE;
	init_pg_end = .;
	/* end of zero-init region */

	. += SZ_4K;		/* stack for the early C runtime */
	early_init_stack = .;

	. = ALIGN(SEGMENT_ALIGN);
	__pecoff_data_size = ABSOLUTE(. - __initdata_begin);
	_end = .;

	STABS_DEBUG
	DWARF_DEBUG
	ELF_DETAILS

	HEAD_SYMBOLS

	/*
	 * Sections that should stay zero sized, which is safer to
	 * explicitly check instead of blindly discarding.
	 */
	.plt : {
		*(.plt) *(.plt.*) *(.iplt) *(.igot .igot.plt)
	}
	ASSERT(SIZEOF(.plt) == 0, "Unexpected run-time procedure linkages detected!")

	.data.rel.ro : { *(.data.rel.ro) }
	ASSERT(SIZEOF(.data.rel.ro) == 0, "Unexpected RELRO detected!")
}

#include "image-vars.h"

/*
 * The HYP init code and ID map text can't be longer than a page each. The
 * former is page-aligned, but the latter may not be with 16K or 64K pages, so
 * it should also not cross a page boundary.
 */
ASSERT(__hyp_idmap_text_end - __hyp_idmap_text_start <= PAGE_SIZE,
	"HYP init code too big")
ASSERT(__idmap_text_end - (__idmap_text_start & ~(SZ_4K - 1)) <= SZ_4K,
	"ID map text too big or misaligned")
#ifdef CONFIG_HIBERNATION
ASSERT(__hibernate_exit_text_end - __hibernate_exit_text_start <= SZ_4K,
	"Hibernate exit text is bigger than 4 KiB")
ASSERT(__hibernate_exit_text_start == swsusp_arch_suspend_exit,
	"Hibernate exit text does not start with swsusp_arch_suspend_exit")
#endif
#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
ASSERT((__entry_tramp_text_end - __entry_tramp_text_start) <= 3*PAGE_SIZE,
	"Entry trampoline text too big")
#endif
#ifdef CONFIG_KVM
ASSERT(__hyp_bss_start == __bss_start, "HYP and Host BSS are misaligned")
#endif
/*
 * If padding is applied before .head.text, virt<->phys conversions will fail.
 */
ASSERT(_text == KIMAGE_VADDR, "HEAD is misaligned")

ASSERT(swapper_pg_dir - reserved_pg_dir == RESERVED_SWAPPER_OFFSET,
	"RESERVED_SWAPPER_OFFSET is wrong!")

#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
ASSERT(swapper_pg_dir - tramp_pg_dir == TRAMP_SWAPPER_OFFSET,
	"TRAMP_SWAPPER_OFFSET is wrong!")
#endif

#ifdef CONFIG_KEXEC_CORE
/* kexec relocation code should fit into one KEXEC_CONTROL_PAGE_SIZE */
ASSERT(__relocate_new_kernel_end - __relocate_new_kernel_start <= SZ_4K,
	"kexec relocation code is bigger than 4 KiB")
ASSERT(KEXEC_CONTROL_PAGE_SIZE >= SZ_4K, "KEXEC_CONTROL_PAGE_SIZE is broken")
ASSERT(__relocate_new_kernel_start == arm64_relocate_new_kernel,
	"kexec control page does not start with arm64_relocate_new_kernel")
#endif