1 | /* |
2 | * Helper macros to support writing architecture specific |
3 | * linker scripts. |
4 | * |
 * A minimal linker script has the following content:
6 | * [This is a sample, architectures may have special requirements] |
7 | * |
8 | * OUTPUT_FORMAT(...) |
9 | * OUTPUT_ARCH(...) |
10 | * ENTRY(...) |
11 | * SECTIONS |
12 | * { |
13 | * . = START; |
14 | * __init_begin = .; |
15 | * HEAD_TEXT_SECTION |
16 | * INIT_TEXT_SECTION(PAGE_SIZE) |
17 | * INIT_DATA_SECTION(...) |
18 | * PERCPU_SECTION(CACHELINE_SIZE) |
19 | * __init_end = .; |
20 | * |
21 | * _stext = .; |
22 | * TEXT_SECTION = 0 |
23 | * _etext = .; |
24 | * |
25 | * _sdata = .; |
26 | * RO_DATA(PAGE_SIZE) |
27 | * RW_DATA(...) |
28 | * _edata = .; |
29 | * |
30 | * EXCEPTION_TABLE(...) |
31 | * |
32 | * BSS_SECTION(0, 0, 0) |
33 | * _end = .; |
34 | * |
35 | * STABS_DEBUG |
36 | * DWARF_DEBUG |
37 | * ELF_DETAILS |
38 | * |
39 | * DISCARDS // must be the last |
40 | * } |
41 | * |
42 | * [__init_begin, __init_end] is the init section that may be freed after init |
43 | * // __init_begin and __init_end should be page aligned, so that we can |
44 | * // free the whole .init memory |
45 | * [_stext, _etext] is the text section |
46 | * [_sdata, _edata] is the data section |
47 | * |
 * Some of the included output sections have their own set of constants.
49 | * Examples are: [__initramfs_start, __initramfs_end] for initramfs and |
50 | * [__nosave_begin, __nosave_end] for the nosave data |
51 | */ |
52 | |
53 | #include <asm-generic/codetag.lds.h> |
54 | |
55 | #ifndef LOAD_OFFSET |
56 | #define LOAD_OFFSET 0 |
57 | #endif |
58 | |
59 | /* |
60 | * Only some architectures want to have the .notes segment visible in |
61 | * a separate PT_NOTE ELF Program Header. When this happens, it needs |
62 | * to be visible in both the kernel text's PT_LOAD and the PT_NOTE |
63 | * Program Headers. In this case, though, the PT_LOAD needs to be made |
64 | * the default again so that all the following sections don't also end |
65 | * up in the PT_NOTE Program Header. |
66 | */ |
67 | #ifdef EMITS_PT_NOTE |
#define NOTES_HEADERS		:text :note
#define NOTES_HEADERS_RESTORE	__restore_ph : { *(.__restore_ph) } :text
70 | #else |
71 | #define NOTES_HEADERS |
72 | #define NOTES_HEADERS_RESTORE |
73 | #endif |
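
/*
 * Illustrative usage (a sketch; the exact program headers are up to the
 * architecture): an arch that wants the separate PT_NOTE header defines
 * EMITS_PT_NOTE before including this file and declares matching PHDRS
 * in its vmlinux.lds.S, e.g.:
 *
 *	#define EMITS_PT_NOTE
 *	#include <asm-generic/vmlinux.lds.h>
 *
 *	PHDRS {
 *		text PT_LOAD FLAGS(5);	// R_E
 *		note PT_NOTE FLAGS(0);	// ___
 *	}
 */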
74 | |
75 | /* |
76 | * Some architectures have non-executable read-only exception tables. |
77 | * They can be added to the RO_DATA segment by specifying their desired |
78 | * alignment. |
79 | */ |
80 | #ifdef RO_EXCEPTION_TABLE_ALIGN |
81 | #define RO_EXCEPTION_TABLE EXCEPTION_TABLE(RO_EXCEPTION_TABLE_ALIGN) |
82 | #else |
83 | #define RO_EXCEPTION_TABLE |
84 | #endif |
85 | |
/* Align . to the function alignment. */
87 | #define ALIGN_FUNCTION() . = ALIGN(CONFIG_FUNCTION_ALIGNMENT) |
88 | |
89 | /* |
90 | * LD_DEAD_CODE_DATA_ELIMINATION option enables -fdata-sections, which |
91 | * generates .data.identifier sections, which need to be pulled in with |
92 | * .data. We don't want to pull in .data..other sections, which Linux |
93 | * has defined. Same for text and bss. |
94 | * |
95 | * With LTO_CLANG, the linker also splits sections by default, so we need |
96 | * these macros to combine the sections during the final link. |
97 | * |
98 | * With AUTOFDO_CLANG and PROPELLER_CLANG, by default, the linker splits |
99 | * text sections and regroups functions into subsections. |
100 | * |
101 | * RODATA_MAIN is not used because existing code already defines .rodata.x |
102 | * sections to be brought in with rodata. |
103 | */ |
104 | #if defined(CONFIG_LD_DEAD_CODE_DATA_ELIMINATION) || defined(CONFIG_LTO_CLANG) || \ |
105 | defined(CONFIG_AUTOFDO_CLANG) || defined(CONFIG_PROPELLER_CLANG) |
106 | #define TEXT_MAIN .text .text.[0-9a-zA-Z_]* |
107 | #else |
108 | #define TEXT_MAIN .text |
109 | #endif |
110 | #if defined(CONFIG_LD_DEAD_CODE_DATA_ELIMINATION) || defined(CONFIG_LTO_CLANG) |
111 | #define DATA_MAIN .data .data.[0-9a-zA-Z_]* .data.rel.* .data..L* .data..compoundliteral* .data.$__unnamed_* .data.$L* |
112 | #define SDATA_MAIN .sdata .sdata.[0-9a-zA-Z_]* |
113 | #define RODATA_MAIN .rodata .rodata.[0-9a-zA-Z_]* .rodata..L* |
114 | #define BSS_MAIN .bss .bss.[0-9a-zA-Z_]* .bss..L* .bss..compoundliteral* |
115 | #define SBSS_MAIN .sbss .sbss.[0-9a-zA-Z_]* |
116 | #else |
117 | #define DATA_MAIN .data .data.rel .data.rel.local |
118 | #define SDATA_MAIN .sdata |
119 | #define RODATA_MAIN .rodata |
120 | #define BSS_MAIN .bss |
121 | #define SBSS_MAIN .sbss |
122 | #endif |
123 | |
124 | /* |
 * GCC 4.5 and later use a 32-byte section alignment for structures,
 * except GCC 4.9, which feels the need to align on 64 bytes.
127 | */ |
128 | #define STRUCT_ALIGNMENT 32 |
129 | #define STRUCT_ALIGN() . = ALIGN(STRUCT_ALIGNMENT) |
130 | |
131 | /* |
 * The order of the sched class addresses is important, as it is
 * used to determine the priority of each sched class in
 * relation to the others.
135 | */ |
136 | #define SCHED_DATA \ |
137 | STRUCT_ALIGN(); \ |
138 | __sched_class_highest = .; \ |
139 | *(__stop_sched_class) \ |
140 | *(__dl_sched_class) \ |
141 | *(__rt_sched_class) \ |
142 | *(__fair_sched_class) \ |
143 | *(__ext_sched_class) \ |
144 | *(__idle_sched_class) \ |
145 | __sched_class_lowest = .; |
146 | |
/* The actual configuration determines whether the init/exit sections
 * are handled as text/data or can be discarded (which
 * often happens at runtime)
150 | */ |
151 | |
152 | #ifndef CONFIG_HAVE_DYNAMIC_FTRACE_NO_PATCHABLE |
153 | #define KEEP_PATCHABLE KEEP(*(__patchable_function_entries)) |
154 | #define PATCHABLE_DISCARDS |
155 | #else |
156 | #define KEEP_PATCHABLE |
157 | #define PATCHABLE_DISCARDS *(__patchable_function_entries) |
158 | #endif |
159 | |
160 | #ifndef CONFIG_ARCH_SUPPORTS_CFI_CLANG |
161 | /* |
162 | * Simply points to ftrace_stub, but with the proper protocol. |
163 | * Defined by the linker script in linux/vmlinux.lds.h |
164 | */ |
165 | #define FTRACE_STUB_HACK ftrace_stub_graph = ftrace_stub; |
166 | #else |
167 | #define FTRACE_STUB_HACK |
168 | #endif |
169 | |
170 | #ifdef CONFIG_FTRACE_MCOUNT_RECORD |
171 | /* |
172 | * The ftrace call sites are logged to a section whose name depends on the |
173 | * compiler option used. A given kernel image will only use one, AKA |
174 | * FTRACE_CALLSITE_SECTION. We capture all of them here to avoid header |
175 | * dependencies for FTRACE_CALLSITE_SECTION's definition. |
176 | * |
177 | * ftrace_ops_list_func will be defined as arch_ftrace_ops_list_func |
178 | * as some archs will have a different prototype for that function |
179 | * but ftrace_ops_list_func() will have a single prototype. |
180 | */ |
181 | #define MCOUNT_REC() . = ALIGN(8); \ |
182 | __start_mcount_loc = .; \ |
183 | KEEP(*(__mcount_loc)) \ |
184 | KEEP_PATCHABLE \ |
185 | __stop_mcount_loc = .; \ |
186 | FTRACE_STUB_HACK \ |
187 | ftrace_ops_list_func = arch_ftrace_ops_list_func; |
188 | #else |
189 | # ifdef CONFIG_FUNCTION_TRACER |
190 | # define MCOUNT_REC() FTRACE_STUB_HACK \ |
191 | ftrace_ops_list_func = arch_ftrace_ops_list_func; |
192 | # else |
193 | # define MCOUNT_REC() |
194 | # endif |
195 | #endif |
196 | |
197 | #define BOUNDED_SECTION_PRE_LABEL(_sec_, _label_, _BEGIN_, _END_) \ |
198 | _BEGIN_##_label_ = .; \ |
199 | KEEP(*(_sec_)) \ |
200 | _END_##_label_ = .; |
201 | |
202 | #define BOUNDED_SECTION_POST_LABEL(_sec_, _label_, _BEGIN_, _END_) \ |
203 | _label_##_BEGIN_ = .; \ |
204 | KEEP(*(_sec_)) \ |
205 | _label_##_END_ = .; |
206 | |
207 | #define BOUNDED_SECTION_BY(_sec_, _label_) \ |
208 | BOUNDED_SECTION_PRE_LABEL(_sec_, _label_, __start, __stop) |
209 | |
210 | #define BOUNDED_SECTION(_sec) BOUNDED_SECTION_BY(_sec, _sec) |
211 | |
#define HEADERED_SECTION_PRE_LABEL(_sec_, _label_, _BEGIN_, _END_, _HDR_) \
213 | _HDR_##_label_ = .; \ |
214 | KEEP(*(.gnu.linkonce.##_sec_)) \ |
215 | BOUNDED_SECTION_PRE_LABEL(_sec_, _label_, _BEGIN_, _END_) |
216 | |
#define HEADERED_SECTION_POST_LABEL(_sec_, _label_, _BEGIN_, _END_, _HDR_) \
218 | _label_##_HDR_ = .; \ |
219 | KEEP(*(.gnu.linkonce.##_sec_)) \ |
220 | BOUNDED_SECTION_POST_LABEL(_sec_, _label_, _BEGIN_, _END_) |
221 | |
#define HEADERED_SECTION_BY(_sec_, _label_) \
223 | HEADERED_SECTION_PRE_LABEL(_sec_, _label_, __start, __stop) |
224 | |
#define HEADERED_SECTION(_sec)	HEADERED_SECTION_BY(_sec, _sec)
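
/*
 * For example (illustrative expansion only), BOUNDED_SECTION_BY(__ex_table,
 * ___ex_table) pastes the label onto __start/__stop and expands to:
 *
 *	__start___ex_table = .;
 *	KEEP(*(__ex_table))
 *	__stop___ex_table = .;
 */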
226 | |
227 | #ifdef CONFIG_TRACE_BRANCH_PROFILING |
228 | #define LIKELY_PROFILE() \ |
229 | BOUNDED_SECTION_BY(_ftrace_annotated_branch, _annotated_branch_profile) |
230 | #else |
231 | #define LIKELY_PROFILE() |
232 | #endif |
233 | |
234 | #ifdef CONFIG_PROFILE_ALL_BRANCHES |
235 | #define BRANCH_PROFILE() \ |
236 | BOUNDED_SECTION_BY(_ftrace_branch, _branch_profile) |
237 | #else |
238 | #define BRANCH_PROFILE() |
239 | #endif |
240 | |
241 | #ifdef CONFIG_KPROBES |
242 | #define KPROBE_BLACKLIST() \ |
243 | . = ALIGN(8); \ |
244 | BOUNDED_SECTION(_kprobe_blacklist) |
245 | #else |
246 | #define KPROBE_BLACKLIST() |
247 | #endif |
248 | |
249 | #ifdef CONFIG_FUNCTION_ERROR_INJECTION |
250 | #define ERROR_INJECT_WHITELIST() \ |
251 | STRUCT_ALIGN(); \ |
252 | BOUNDED_SECTION(_error_injection_whitelist) |
253 | #else |
254 | #define ERROR_INJECT_WHITELIST() |
255 | #endif |
256 | |
257 | #ifdef CONFIG_EVENT_TRACING |
258 | #define FTRACE_EVENTS() \ |
259 | . = ALIGN(8); \ |
260 | BOUNDED_SECTION(_ftrace_events) \ |
261 | BOUNDED_SECTION_BY(_ftrace_eval_map, _ftrace_eval_maps) |
262 | #else |
263 | #define FTRACE_EVENTS() |
264 | #endif |
265 | |
266 | #ifdef CONFIG_TRACING |
267 | #define TRACE_PRINTKS() BOUNDED_SECTION_BY(__trace_printk_fmt, ___trace_bprintk_fmt) |
268 | #define TRACEPOINT_STR() BOUNDED_SECTION_BY(__tracepoint_str, ___tracepoint_str) |
269 | #else |
270 | #define TRACE_PRINTKS() |
271 | #define TRACEPOINT_STR() |
272 | #endif |
273 | |
274 | #ifdef CONFIG_FTRACE_SYSCALLS |
275 | #define TRACE_SYSCALLS() \ |
276 | . = ALIGN(8); \ |
277 | BOUNDED_SECTION_BY(__syscalls_metadata, _syscalls_metadata) |
278 | #else |
279 | #define TRACE_SYSCALLS() |
280 | #endif |
281 | |
282 | #ifdef CONFIG_BPF_EVENTS |
283 | #define BPF_RAW_TP() STRUCT_ALIGN(); \ |
284 | BOUNDED_SECTION_BY(__bpf_raw_tp_map, __bpf_raw_tp) |
285 | #else |
286 | #define BPF_RAW_TP() |
287 | #endif |
288 | |
289 | #ifdef CONFIG_SERIAL_EARLYCON |
290 | #define EARLYCON_TABLE() \ |
291 | . = ALIGN(8); \ |
292 | BOUNDED_SECTION_POST_LABEL(__earlycon_table, __earlycon_table, , _end) |
293 | #else |
294 | #define EARLYCON_TABLE() |
295 | #endif |
296 | |
297 | #ifdef CONFIG_SECURITY |
298 | #define LSM_TABLE() \ |
299 | . = ALIGN(8); \ |
300 | BOUNDED_SECTION_PRE_LABEL(.lsm_info.init, _lsm_info, __start, __end) |
301 | |
302 | #define EARLY_LSM_TABLE() \ |
303 | . = ALIGN(8); \ |
304 | BOUNDED_SECTION_PRE_LABEL(.early_lsm_info.init, _early_lsm_info, __start, __end) |
305 | #else |
306 | #define LSM_TABLE() |
307 | #define EARLY_LSM_TABLE() |
308 | #endif |
309 | |
310 | #define ___OF_TABLE(cfg, name) _OF_TABLE_##cfg(name) |
311 | #define __OF_TABLE(cfg, name) ___OF_TABLE(cfg, name) |
312 | #define OF_TABLE(cfg, name) __OF_TABLE(IS_ENABLED(cfg), name) |
313 | #define _OF_TABLE_0(name) |
314 | #define _OF_TABLE_1(name) \ |
315 | . = ALIGN(8); \ |
316 | __##name##_of_table = .; \ |
317 | KEEP(*(__##name##_of_table)) \ |
318 | KEEP(*(__##name##_of_table_end)) |
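
/*
 * For example (illustrative expansion only), with CONFIG_TIMER_OF enabled,
 * OF_TABLE(CONFIG_TIMER_OF, timer) expands to:
 *
 *	. = ALIGN(8);
 *	__timer_of_table = .;
 *	KEEP(*(__timer_of_table))
 *	KEEP(*(__timer_of_table_end))
 */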
319 | |
320 | #define TIMER_OF_TABLES() OF_TABLE(CONFIG_TIMER_OF, timer) |
321 | #define IRQCHIP_OF_MATCH_TABLE() OF_TABLE(CONFIG_IRQCHIP, irqchip) |
322 | #define CLK_OF_TABLES() OF_TABLE(CONFIG_COMMON_CLK, clk) |
323 | #define RESERVEDMEM_OF_TABLES() OF_TABLE(CONFIG_OF_RESERVED_MEM, reservedmem) |
324 | #define CPU_METHOD_OF_TABLES() OF_TABLE(CONFIG_SMP, cpu_method) |
325 | #define CPUIDLE_METHOD_OF_TABLES() OF_TABLE(CONFIG_CPU_IDLE, cpuidle_method) |
326 | |
327 | #ifdef CONFIG_ACPI |
328 | #define ACPI_PROBE_TABLE(name) \ |
329 | . = ALIGN(8); \ |
330 | BOUNDED_SECTION_POST_LABEL(__##name##_acpi_probe_table, \ |
331 | __##name##_acpi_probe_table,, _end) |
332 | #else |
333 | #define ACPI_PROBE_TABLE(name) |
334 | #endif |
335 | |
336 | #ifdef CONFIG_THERMAL |
337 | #define THERMAL_TABLE(name) \ |
338 | . = ALIGN(8); \ |
339 | BOUNDED_SECTION_POST_LABEL(__##name##_thermal_table, \ |
340 | __##name##_thermal_table,, _end) |
341 | #else |
342 | #define THERMAL_TABLE(name) |
343 | #endif |
344 | |
345 | #define KERNEL_DTB() \ |
346 | STRUCT_ALIGN(); \ |
347 | __dtb_start = .; \ |
348 | KEEP(*(.dtb.init.rodata)) \ |
349 | __dtb_end = .; |
350 | |
351 | /* |
352 | * .data section |
353 | */ |
354 | #define DATA_DATA \ |
355 | *(.xiptext) \ |
356 | *(DATA_MAIN) \ |
357 | *(.data..decrypted) \ |
358 | *(.ref.data) \ |
359 | *(.data..shared_aligned) /* percpu related */ \ |
360 | *(.data..unlikely) \ |
361 | __start_once = .; \ |
362 | *(.data..once) \ |
363 | __end_once = .; \ |
364 | STRUCT_ALIGN(); \ |
365 | *(__tracepoints) \ |
366 | /* implement dynamic printk debug */ \ |
367 | . = ALIGN(8); \ |
368 | BOUNDED_SECTION_BY(__dyndbg_classes, ___dyndbg_classes) \ |
369 | BOUNDED_SECTION_BY(__dyndbg, ___dyndbg) \ |
370 | CODETAG_SECTIONS() \ |
371 | LIKELY_PROFILE() \ |
372 | BRANCH_PROFILE() \ |
373 | TRACE_PRINTKS() \ |
374 | BPF_RAW_TP() \ |
375 | TRACEPOINT_STR() \ |
376 | KUNIT_TABLE() |
377 | |
378 | /* |
379 | * Data section helpers |
380 | */ |
381 | #define NOSAVE_DATA \ |
382 | . = ALIGN(PAGE_SIZE); \ |
383 | __nosave_begin = .; \ |
384 | *(.data..nosave) \ |
385 | . = ALIGN(PAGE_SIZE); \ |
386 | __nosave_end = .; |
387 | |
388 | #define CACHE_HOT_DATA(align) \ |
389 | . = ALIGN(align); \ |
390 | *(SORT_BY_ALIGNMENT(.data..hot.*)) \ |
391 | . = ALIGN(align); |
392 | |
393 | #define PAGE_ALIGNED_DATA(page_align) \ |
394 | . = ALIGN(page_align); \ |
395 | *(.data..page_aligned) \ |
396 | . = ALIGN(page_align); |
397 | |
398 | #define READ_MOSTLY_DATA(align) \ |
399 | . = ALIGN(align); \ |
400 | *(.data..read_mostly) \ |
401 | . = ALIGN(align); |
402 | |
403 | #define CACHELINE_ALIGNED_DATA(align) \ |
404 | . = ALIGN(align); \ |
405 | *(.data..cacheline_aligned) |
406 | |
407 | #define INIT_TASK_DATA(align) \ |
408 | . = ALIGN(align); \ |
409 | __start_init_stack = .; \ |
410 | init_thread_union = .; \ |
411 | init_stack = .; \ |
412 | KEEP(*(.data..init_thread_info)) \ |
413 | . = __start_init_stack + THREAD_SIZE; \ |
414 | __end_init_stack = .; |
415 | |
416 | #define JUMP_TABLE_DATA \ |
417 | . = ALIGN(8); \ |
418 | BOUNDED_SECTION_BY(__jump_table, ___jump_table) |
419 | |
420 | #ifdef CONFIG_HAVE_STATIC_CALL_INLINE |
421 | #define STATIC_CALL_DATA \ |
422 | . = ALIGN(8); \ |
423 | BOUNDED_SECTION_BY(.static_call_sites, _static_call_sites) \ |
424 | BOUNDED_SECTION_BY(.static_call_tramp_key, _static_call_tramp_key) |
425 | #else |
426 | #define STATIC_CALL_DATA |
427 | #endif |
428 | |
429 | /* |
430 | * Allow architectures to handle ro_after_init data on their |
431 | * own by defining an empty RO_AFTER_INIT_DATA. |
432 | */ |
433 | #ifndef RO_AFTER_INIT_DATA |
434 | #define RO_AFTER_INIT_DATA \ |
435 | . = ALIGN(8); \ |
436 | __start_ro_after_init = .; \ |
437 | *(.data..ro_after_init) \ |
438 | JUMP_TABLE_DATA \ |
439 | STATIC_CALL_DATA \ |
440 | __end_ro_after_init = .; |
441 | #endif |
442 | |
443 | /* |
 * .kcfi_traps contains a list of KCFI trap locations.
445 | */ |
446 | #ifndef KCFI_TRAPS |
447 | #ifdef CONFIG_ARCH_USES_CFI_TRAPS |
448 | #define KCFI_TRAPS \ |
449 | __kcfi_traps : AT(ADDR(__kcfi_traps) - LOAD_OFFSET) { \ |
450 | BOUNDED_SECTION_BY(.kcfi_traps, ___kcfi_traps) \ |
451 | } |
452 | #else |
453 | #define KCFI_TRAPS |
454 | #endif |
455 | #endif |
456 | |
457 | /* |
458 | * Read only Data |
459 | */ |
460 | #define RO_DATA(align) \ |
461 | . = ALIGN((align)); \ |
462 | .rodata : AT(ADDR(.rodata) - LOAD_OFFSET) { \ |
463 | __start_rodata = .; \ |
464 | *(.rodata) *(.rodata.*) *(.data.rel.ro*) \ |
465 | SCHED_DATA \ |
466 | RO_AFTER_INIT_DATA /* Read only after init */ \ |
467 | . = ALIGN(8); \ |
468 | BOUNDED_SECTION_BY(__tracepoints_ptrs, ___tracepoints_ptrs) \ |
469 | *(__tracepoints_strings)/* Tracepoints: strings */ \ |
470 | } \ |
471 | \ |
472 | .rodata1 : AT(ADDR(.rodata1) - LOAD_OFFSET) { \ |
473 | *(.rodata1) \ |
474 | } \ |
475 | \ |
476 | /* PCI quirks */ \ |
477 | .pci_fixup : AT(ADDR(.pci_fixup) - LOAD_OFFSET) { \ |
478 | BOUNDED_SECTION_PRE_LABEL(.pci_fixup_early, _pci_fixups_early, __start, __end) \ |
479 | BOUNDED_SECTION_PRE_LABEL(.pci_fixup_header, _pci_fixups_header, __start, __end) \ |
480 | BOUNDED_SECTION_PRE_LABEL(.pci_fixup_final, _pci_fixups_final, __start, __end) \ |
481 | BOUNDED_SECTION_PRE_LABEL(.pci_fixup_enable, _pci_fixups_enable, __start, __end) \ |
482 | BOUNDED_SECTION_PRE_LABEL(.pci_fixup_resume, _pci_fixups_resume, __start, __end) \ |
483 | BOUNDED_SECTION_PRE_LABEL(.pci_fixup_suspend, _pci_fixups_suspend, __start, __end) \ |
484 | BOUNDED_SECTION_PRE_LABEL(.pci_fixup_resume_early, _pci_fixups_resume_early, __start, __end) \ |
485 | BOUNDED_SECTION_PRE_LABEL(.pci_fixup_suspend_late, _pci_fixups_suspend_late, __start, __end) \ |
486 | } \ |
487 | \ |
488 | FW_LOADER_BUILT_IN_DATA \ |
489 | TRACEDATA \ |
490 | \ |
491 | PRINTK_INDEX \ |
492 | \ |
493 | /* Kernel symbol table: Normal symbols */ \ |
494 | __ksymtab : AT(ADDR(__ksymtab) - LOAD_OFFSET) { \ |
495 | __start___ksymtab = .; \ |
496 | KEEP(*(SORT(___ksymtab+*))) \ |
497 | __stop___ksymtab = .; \ |
498 | } \ |
499 | \ |
500 | /* Kernel symbol table: GPL-only symbols */ \ |
501 | __ksymtab_gpl : AT(ADDR(__ksymtab_gpl) - LOAD_OFFSET) { \ |
502 | __start___ksymtab_gpl = .; \ |
503 | KEEP(*(SORT(___ksymtab_gpl+*))) \ |
504 | __stop___ksymtab_gpl = .; \ |
505 | } \ |
506 | \ |
507 | /* Kernel symbol table: Normal symbols */ \ |
508 | __kcrctab : AT(ADDR(__kcrctab) - LOAD_OFFSET) { \ |
509 | __start___kcrctab = .; \ |
510 | KEEP(*(SORT(___kcrctab+*))) \ |
511 | __stop___kcrctab = .; \ |
512 | } \ |
513 | \ |
514 | /* Kernel symbol table: GPL-only symbols */ \ |
515 | __kcrctab_gpl : AT(ADDR(__kcrctab_gpl) - LOAD_OFFSET) { \ |
516 | __start___kcrctab_gpl = .; \ |
517 | KEEP(*(SORT(___kcrctab_gpl+*))) \ |
518 | __stop___kcrctab_gpl = .; \ |
519 | } \ |
520 | \ |
521 | /* Kernel symbol table: strings */ \ |
522 | __ksymtab_strings : AT(ADDR(__ksymtab_strings) - LOAD_OFFSET) { \ |
523 | *(__ksymtab_strings) \ |
524 | } \ |
525 | \ |
526 | /* __*init sections */ \ |
527 | __init_rodata : AT(ADDR(__init_rodata) - LOAD_OFFSET) { \ |
528 | *(.ref.rodata) \ |
529 | } \ |
530 | \ |
531 | /* Built-in module parameters. */ \ |
532 | __param : AT(ADDR(__param) - LOAD_OFFSET) { \ |
533 | BOUNDED_SECTION_BY(__param, ___param) \ |
534 | } \ |
535 | \ |
536 | /* Built-in module versions. */ \ |
537 | __modver : AT(ADDR(__modver) - LOAD_OFFSET) { \ |
538 | BOUNDED_SECTION_BY(__modver, ___modver) \ |
539 | } \ |
540 | \ |
541 | KCFI_TRAPS \ |
542 | \ |
543 | RO_EXCEPTION_TABLE \ |
544 | NOTES \ |
545 | BTF \ |
546 | \ |
547 | . = ALIGN((align)); \ |
548 | __end_rodata = .; |
549 | |
550 | |
551 | /* |
552 | * Non-instrumentable text section |
553 | */ |
554 | #define NOINSTR_TEXT \ |
555 | ALIGN_FUNCTION(); \ |
556 | __noinstr_text_start = .; \ |
557 | *(.noinstr.text) \ |
558 | __cpuidle_text_start = .; \ |
559 | *(.cpuidle.text) \ |
560 | __cpuidle_text_end = .; \ |
561 | __noinstr_text_end = .; |
562 | |
563 | #define TEXT_SPLIT \ |
564 | __split_text_start = .; \ |
565 | *(.text.split .text.split.[0-9a-zA-Z_]*) \ |
566 | __split_text_end = .; |
567 | |
568 | #define TEXT_UNLIKELY \ |
569 | __unlikely_text_start = .; \ |
570 | *(.text.unlikely .text.unlikely.*) \ |
571 | __unlikely_text_end = .; |
572 | |
573 | #define TEXT_HOT \ |
574 | __hot_text_start = .; \ |
575 | *(.text.hot .text.hot.*) \ |
576 | __hot_text_end = .; |
577 | |
578 | /* |
 * .text section. Map to function alignment to avoid address changes
 * during the second ld pass when generating System.map.
581 | * |
582 | * TEXT_MAIN here will match symbols with a fixed pattern (for example, |
583 | * .text.hot or .text.unlikely) if dead code elimination or |
584 | * function-section is enabled. Match these symbols first before |
585 | * TEXT_MAIN to ensure they are grouped together. |
586 | * |
 * Also, placing the .text.hot section at the beginning of a page
 * helps TLB performance.
589 | */ |
590 | #define TEXT_TEXT \ |
591 | ALIGN_FUNCTION(); \ |
592 | *(.text.asan.* .text.tsan.*) \ |
593 | *(.text.unknown .text.unknown.*) \ |
594 | TEXT_SPLIT \ |
595 | TEXT_UNLIKELY \ |
596 | . = ALIGN(PAGE_SIZE); \ |
597 | TEXT_HOT \ |
598 | *(TEXT_MAIN .text.fixup) \ |
599 | NOINSTR_TEXT \ |
600 | *(.ref.text) |
601 | |
/* sched.text is aligned to function alignment to ensure we have the same
 * address even at the second ld pass when generating System.map */
604 | #define SCHED_TEXT \ |
605 | ALIGN_FUNCTION(); \ |
606 | __sched_text_start = .; \ |
607 | *(.sched.text) \ |
608 | __sched_text_end = .; |
609 | |
/* spinlock.text is aligned to function alignment to ensure we have the same
 * address even at the second ld pass when generating System.map */
612 | #define LOCK_TEXT \ |
613 | ALIGN_FUNCTION(); \ |
614 | __lock_text_start = .; \ |
615 | *(.spinlock.text) \ |
616 | __lock_text_end = .; |
617 | |
618 | #define KPROBES_TEXT \ |
619 | ALIGN_FUNCTION(); \ |
620 | __kprobes_text_start = .; \ |
621 | *(.kprobes.text) \ |
622 | __kprobes_text_end = .; |
623 | |
624 | #define ENTRY_TEXT \ |
625 | ALIGN_FUNCTION(); \ |
626 | __entry_text_start = .; \ |
627 | *(.entry.text) \ |
628 | __entry_text_end = .; |
629 | |
630 | #define IRQENTRY_TEXT \ |
631 | ALIGN_FUNCTION(); \ |
632 | __irqentry_text_start = .; \ |
633 | *(.irqentry.text) \ |
634 | __irqentry_text_end = .; |
635 | |
636 | #define SOFTIRQENTRY_TEXT \ |
637 | ALIGN_FUNCTION(); \ |
638 | __softirqentry_text_start = .; \ |
639 | *(.softirqentry.text) \ |
640 | __softirqentry_text_end = .; |
641 | |
642 | #define STATIC_CALL_TEXT \ |
643 | ALIGN_FUNCTION(); \ |
644 | __static_call_text_start = .; \ |
645 | *(.static_call.text) \ |
646 | __static_call_text_end = .; |
647 | |
648 | /* Section used for early init (in .S files) */ |
649 | #define HEAD_TEXT KEEP(*(.head.text)) |
650 | |
651 | #define HEAD_TEXT_SECTION \ |
652 | .head.text : AT(ADDR(.head.text) - LOAD_OFFSET) { \ |
653 | HEAD_TEXT \ |
654 | } |
655 | |
656 | /* |
657 | * Exception table |
658 | */ |
659 | #define EXCEPTION_TABLE(align) \ |
660 | . = ALIGN(align); \ |
661 | __ex_table : AT(ADDR(__ex_table) - LOAD_OFFSET) { \ |
662 | BOUNDED_SECTION_BY(__ex_table, ___ex_table) \ |
663 | } |
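
/*
 * Illustrative use (the alignment value is arch-specific): an architecture
 * that does not fold the table into RO_DATA via RO_EXCEPTION_TABLE_ALIGN can
 * place it explicitly with e.g. EXCEPTION_TABLE(16), which aligns the table
 * to 16 bytes and brackets it with __start___ex_table/__stop___ex_table.
 */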
664 | |
665 | /* |
666 | * .BTF |
667 | */ |
668 | #ifdef CONFIG_DEBUG_INFO_BTF |
669 | #define BTF \ |
670 | . = ALIGN(PAGE_SIZE); \ |
671 | .BTF : AT(ADDR(.BTF) - LOAD_OFFSET) { \ |
672 | BOUNDED_SECTION_BY(.BTF, _BTF) \ |
673 | } \ |
674 | . = ALIGN(PAGE_SIZE); \ |
675 | .BTF_ids : AT(ADDR(.BTF_ids) - LOAD_OFFSET) { \ |
676 | *(.BTF_ids) \ |
677 | } |
678 | #else |
679 | #define BTF |
680 | #endif |
681 | |
682 | /* |
683 | * Init task |
684 | */ |
685 | #define INIT_TASK_DATA_SECTION(align) \ |
686 | . = ALIGN(align); \ |
687 | .data..init_task : AT(ADDR(.data..init_task) - LOAD_OFFSET) { \ |
688 | INIT_TASK_DATA(align) \ |
689 | } |
690 | |
691 | #ifdef CONFIG_CONSTRUCTORS |
692 | #define KERNEL_CTORS() . = ALIGN(8); \ |
693 | __ctors_start = .; \ |
694 | KEEP(*(SORT(.ctors.*))) \ |
695 | KEEP(*(.ctors)) \ |
696 | KEEP(*(SORT(.init_array.*))) \ |
697 | KEEP(*(.init_array)) \ |
698 | __ctors_end = .; |
699 | #else |
700 | #define KERNEL_CTORS() |
701 | #endif |
702 | |
703 | /* init and exit section handling */ |
704 | #define INIT_DATA \ |
705 | KEEP(*(SORT(___kentry+*))) \ |
706 | *(.init.data .init.data.*) \ |
707 | KERNEL_CTORS() \ |
708 | MCOUNT_REC() \ |
709 | *(.init.rodata .init.rodata.*) \ |
710 | FTRACE_EVENTS() \ |
711 | TRACE_SYSCALLS() \ |
712 | KPROBE_BLACKLIST() \ |
713 | ERROR_INJECT_WHITELIST() \ |
714 | CLK_OF_TABLES() \ |
715 | RESERVEDMEM_OF_TABLES() \ |
716 | TIMER_OF_TABLES() \ |
717 | CPU_METHOD_OF_TABLES() \ |
718 | CPUIDLE_METHOD_OF_TABLES() \ |
719 | KERNEL_DTB() \ |
720 | IRQCHIP_OF_MATCH_TABLE() \ |
721 | ACPI_PROBE_TABLE(irqchip) \ |
722 | ACPI_PROBE_TABLE(timer) \ |
723 | THERMAL_TABLE(governor) \ |
724 | EARLYCON_TABLE() \ |
725 | LSM_TABLE() \ |
726 | EARLY_LSM_TABLE() \ |
727 | KUNIT_INIT_TABLE() |
728 | |
729 | #define INIT_TEXT \ |
730 | *(.init.text .init.text.*) \ |
731 | *(.text.startup) |
732 | |
733 | #define EXIT_DATA \ |
734 | *(.exit.data .exit.data.*) \ |
735 | *(.fini_array .fini_array.*) \ |
736 | *(.dtors .dtors.*) \ |
737 | |
738 | #define EXIT_TEXT \ |
739 | *(.exit.text) \ |
740 | *(.text.exit) \ |
741 | |
742 | #define EXIT_CALL \ |
743 | *(.exitcall.exit) |
744 | |
745 | /* |
746 | * bss (Block Started by Symbol) - uninitialized data |
747 | * zeroed during startup |
748 | */ |
749 | #define SBSS(sbss_align) \ |
750 | . = ALIGN(sbss_align); \ |
751 | .sbss : AT(ADDR(.sbss) - LOAD_OFFSET) { \ |
752 | *(.dynsbss) \ |
753 | *(SBSS_MAIN) \ |
754 | *(.scommon) \ |
755 | } |
756 | |
757 | /* |
 * Allow architectures to redefine BSS_FIRST_SECTIONS to add extra
759 | * sections to the front of bss. |
760 | */ |
761 | #ifndef BSS_FIRST_SECTIONS |
762 | #define BSS_FIRST_SECTIONS |
763 | #endif |
764 | |
765 | #define BSS(bss_align) \ |
766 | . = ALIGN(bss_align); \ |
767 | .bss : AT(ADDR(.bss) - LOAD_OFFSET) { \ |
768 | BSS_FIRST_SECTIONS \ |
769 | . = ALIGN(PAGE_SIZE); \ |
770 | *(.bss..page_aligned) \ |
771 | . = ALIGN(PAGE_SIZE); \ |
772 | *(.dynbss) \ |
773 | *(BSS_MAIN) \ |
774 | *(COMMON) \ |
775 | } |
776 | |
777 | /* |
778 | * DWARF debug sections. |
779 | * Symbols in the DWARF debugging sections are relative to |
780 | * the beginning of the section so we begin them at 0. |
781 | */ |
782 | #define DWARF_DEBUG \ |
783 | /* DWARF 1 */ \ |
784 | .debug 0 : { *(.debug) } \ |
785 | .line 0 : { *(.line) } \ |
786 | /* GNU DWARF 1 extensions */ \ |
787 | .debug_srcinfo 0 : { *(.debug_srcinfo) } \ |
788 | .debug_sfnames 0 : { *(.debug_sfnames) } \ |
789 | /* DWARF 1.1 and DWARF 2 */ \ |
790 | .debug_aranges 0 : { *(.debug_aranges) } \ |
791 | .debug_pubnames 0 : { *(.debug_pubnames) } \ |
792 | /* DWARF 2 */ \ |
793 | .debug_info 0 : { *(.debug_info \ |
794 | .gnu.linkonce.wi.*) } \ |
795 | .debug_abbrev 0 : { *(.debug_abbrev) } \ |
796 | .debug_line 0 : { *(.debug_line) } \ |
797 | .debug_frame 0 : { *(.debug_frame) } \ |
798 | .debug_str 0 : { *(.debug_str) } \ |
799 | .debug_loc 0 : { *(.debug_loc) } \ |
800 | .debug_macinfo 0 : { *(.debug_macinfo) } \ |
801 | .debug_pubtypes 0 : { *(.debug_pubtypes) } \ |
802 | /* DWARF 3 */ \ |
803 | .debug_ranges 0 : { *(.debug_ranges) } \ |
804 | /* SGI/MIPS DWARF 2 extensions */ \ |
805 | .debug_weaknames 0 : { *(.debug_weaknames) } \ |
806 | .debug_funcnames 0 : { *(.debug_funcnames) } \ |
807 | .debug_typenames 0 : { *(.debug_typenames) } \ |
808 | .debug_varnames 0 : { *(.debug_varnames) } \ |
809 | /* GNU DWARF 2 extensions */ \ |
810 | .debug_gnu_pubnames 0 : { *(.debug_gnu_pubnames) } \ |
811 | .debug_gnu_pubtypes 0 : { *(.debug_gnu_pubtypes) } \ |
812 | /* DWARF 4 */ \ |
813 | .debug_types 0 : { *(.debug_types) } \ |
814 | /* DWARF 5 */ \ |
815 | .debug_addr 0 : { *(.debug_addr) } \ |
816 | .debug_line_str 0 : { *(.debug_line_str) } \ |
817 | .debug_loclists 0 : { *(.debug_loclists) } \ |
818 | .debug_macro 0 : { *(.debug_macro) } \ |
819 | .debug_names 0 : { *(.debug_names) } \ |
820 | .debug_rnglists 0 : { *(.debug_rnglists) } \ |
821 | .debug_str_offsets 0 : { *(.debug_str_offsets) } |
822 | |
823 | /* Stabs debugging sections. */ |
824 | #define STABS_DEBUG \ |
825 | .stab 0 : { *(.stab) } \ |
826 | .stabstr 0 : { *(.stabstr) } \ |
827 | .stab.excl 0 : { *(.stab.excl) } \ |
828 | .stab.exclstr 0 : { *(.stab.exclstr) } \ |
829 | .stab.index 0 : { *(.stab.index) } \ |
830 | .stab.indexstr 0 : { *(.stab.indexstr) } |
831 | |
832 | /* Required sections not related to debugging. */ |
833 | #define ELF_DETAILS \ |
834 | .comment 0 : { *(.comment) } \ |
835 | .symtab 0 : { *(.symtab) } \ |
836 | .strtab 0 : { *(.strtab) } \ |
837 | .shstrtab 0 : { *(.shstrtab) } |
838 | |
839 | #ifdef CONFIG_GENERIC_BUG |
840 | #define BUG_TABLE \ |
841 | . = ALIGN(8); \ |
842 | __bug_table : AT(ADDR(__bug_table) - LOAD_OFFSET) { \ |
843 | BOUNDED_SECTION_BY(__bug_table, ___bug_table) \ |
844 | } |
845 | #else |
846 | #define BUG_TABLE |
847 | #endif |
848 | |
849 | #ifdef CONFIG_UNWINDER_ORC |
850 | #define ORC_UNWIND_TABLE \ |
851 | .orc_header : AT(ADDR(.orc_header) - LOAD_OFFSET) { \ |
852 | BOUNDED_SECTION_BY(.orc_header, _orc_header) \ |
853 | } \ |
854 | . = ALIGN(4); \ |
855 | .orc_unwind_ip : AT(ADDR(.orc_unwind_ip) - LOAD_OFFSET) { \ |
856 | BOUNDED_SECTION_BY(.orc_unwind_ip, _orc_unwind_ip) \ |
857 | } \ |
858 | . = ALIGN(2); \ |
859 | .orc_unwind : AT(ADDR(.orc_unwind) - LOAD_OFFSET) { \ |
860 | BOUNDED_SECTION_BY(.orc_unwind, _orc_unwind) \ |
861 | } \ |
862 | text_size = _etext - _stext; \ |
863 | . = ALIGN(4); \ |
864 | .orc_lookup : AT(ADDR(.orc_lookup) - LOAD_OFFSET) { \ |
865 | orc_lookup = .; \ |
866 | . += (((text_size + LOOKUP_BLOCK_SIZE - 1) / \ |
867 | LOOKUP_BLOCK_SIZE) + 1) * 4; \ |
868 | orc_lookup_end = .; \ |
869 | } |
870 | #else |
871 | #define ORC_UNWIND_TABLE |
872 | #endif |
873 | |
874 | /* Built-in firmware blobs */ |
875 | #ifdef CONFIG_FW_LOADER |
876 | #define FW_LOADER_BUILT_IN_DATA \ |
877 | .builtin_fw : AT(ADDR(.builtin_fw) - LOAD_OFFSET) ALIGN(8) { \ |
878 | BOUNDED_SECTION_PRE_LABEL(.builtin_fw, _builtin_fw, __start, __end) \ |
879 | } |
880 | #else |
881 | #define FW_LOADER_BUILT_IN_DATA |
882 | #endif |
883 | |
884 | #ifdef CONFIG_PM_TRACE |
885 | #define TRACEDATA \ |
886 | . = ALIGN(4); \ |
887 | .tracedata : AT(ADDR(.tracedata) - LOAD_OFFSET) { \ |
888 | BOUNDED_SECTION_POST_LABEL(.tracedata, __tracedata, _start, _end) \ |
889 | } |
890 | #else |
891 | #define TRACEDATA |
892 | #endif |
893 | |
894 | #ifdef CONFIG_PRINTK_INDEX |
895 | #define PRINTK_INDEX \ |
896 | .printk_index : AT(ADDR(.printk_index) - LOAD_OFFSET) { \ |
897 | BOUNDED_SECTION_BY(.printk_index, _printk_index) \ |
898 | } |
899 | #else |
900 | #define PRINTK_INDEX |
901 | #endif |
902 | |
903 | /* |
904 | * Discard .note.GNU-stack, which is emitted as PROGBITS by the compiler. |
 * Otherwise, the type of the .notes section would become PROGBITS instead of NOTES.
906 | * |
907 | * Also, discard .note.gnu.property, otherwise it forces the notes section to |
908 | * be 8-byte aligned which causes alignment mismatches with the kernel's custom |
909 | * 4-byte aligned notes. |
910 | */ |
911 | #define NOTES \ |
912 | /DISCARD/ : { \ |
913 | *(.note.GNU-stack) \ |
914 | *(.note.gnu.property) \ |
915 | } \ |
916 | .notes : AT(ADDR(.notes) - LOAD_OFFSET) { \ |
917 | BOUNDED_SECTION_BY(.note.*, _notes) \ |
918 | } NOTES_HEADERS \ |
919 | NOTES_HEADERS_RESTORE |
920 | |
921 | #define INIT_SETUP(initsetup_align) \ |
922 | . = ALIGN(initsetup_align); \ |
923 | BOUNDED_SECTION_POST_LABEL(.init.setup, __setup, _start, _end) |
924 | |
925 | #define INIT_CALLS_LEVEL(level) \ |
926 | __initcall##level##_start = .; \ |
927 | KEEP(*(.initcall##level##.init)) \ |
928 | KEEP(*(.initcall##level##s.init)) \ |
929 | |
930 | #define INIT_CALLS \ |
931 | __initcall_start = .; \ |
932 | KEEP(*(.initcallearly.init)) \ |
933 | INIT_CALLS_LEVEL(0) \ |
934 | INIT_CALLS_LEVEL(1) \ |
935 | INIT_CALLS_LEVEL(2) \ |
936 | INIT_CALLS_LEVEL(3) \ |
937 | INIT_CALLS_LEVEL(4) \ |
938 | INIT_CALLS_LEVEL(5) \ |
939 | INIT_CALLS_LEVEL(rootfs) \ |
940 | INIT_CALLS_LEVEL(6) \ |
941 | INIT_CALLS_LEVEL(7) \ |
942 | __initcall_end = .; |
943 | |
944 | #define CON_INITCALL \ |
945 | BOUNDED_SECTION_POST_LABEL(.con_initcall.init, __con_initcall, _start, _end) |
946 | |
947 | #define NAMED_SECTION(name) \ |
948 | . = ALIGN(8); \ |
949 | name : AT(ADDR(name) - LOAD_OFFSET) \ |
950 | { BOUNDED_SECTION_PRE_LABEL(name, name, __start_, __stop_) } |
951 | |
952 | #define RUNTIME_CONST(t,x) NAMED_SECTION(runtime_##t##_##x) |
953 | |
954 | #define RUNTIME_CONST_VARIABLES \ |
955 | RUNTIME_CONST(shift, d_hash_shift) \ |
956 | RUNTIME_CONST(ptr, dentry_hashtable) |
957 | |
958 | /* Alignment must be consistent with (kunit_suite *) in include/kunit/test.h */ |
959 | #define KUNIT_TABLE() \ |
960 | . = ALIGN(8); \ |
961 | BOUNDED_SECTION_POST_LABEL(.kunit_test_suites, __kunit_suites, _start, _end) |
962 | |
963 | /* Alignment must be consistent with (kunit_suite *) in include/kunit/test.h */ |
964 | #define KUNIT_INIT_TABLE() \ |
965 | . = ALIGN(8); \ |
966 | BOUNDED_SECTION_POST_LABEL(.kunit_init_test_suites, \ |
967 | __kunit_init_suites, _start, _end) |
968 | |
969 | #ifdef CONFIG_BLK_DEV_INITRD |
970 | #define INIT_RAM_FS \ |
971 | . = ALIGN(4); \ |
972 | __initramfs_start = .; \ |
973 | KEEP(*(.init.ramfs)) \ |
974 | . = ALIGN(8); \ |
975 | KEEP(*(.init.ramfs.info)) |
976 | #else |
977 | #define INIT_RAM_FS |
978 | #endif |
979 | |
980 | /* |
981 | * Memory encryption operates on a page basis. Since we need to clear |
982 | * the memory encryption mask for this section, it needs to be aligned |
983 | * on a page boundary and be a page-size multiple in length. |
984 | * |
985 | * Note: We use a separate section so that only this section gets |
986 | * decrypted to avoid exposing more than we wish. |
987 | */ |
988 | #ifdef CONFIG_AMD_MEM_ENCRYPT |
989 | #define PERCPU_DECRYPTED_SECTION \ |
990 | . = ALIGN(PAGE_SIZE); \ |
991 | *(.data..percpu..decrypted) \ |
992 | . = ALIGN(PAGE_SIZE); |
993 | #else |
994 | #define PERCPU_DECRYPTED_SECTION |
995 | #endif |
996 | |
997 | |
998 | /* |
999 | * Default discarded sections. |
1000 | * |
1001 | * Some archs want to discard exit text/data at runtime rather than |
1002 | * link time due to cross-section references such as alt instructions, |
1003 | * bug table, eh_frame, etc. DISCARDS must be the last of output |
1004 | * section definitions so that such archs put those in earlier section |
1005 | * definitions. |
1006 | */ |
1007 | #ifdef RUNTIME_DISCARD_EXIT |
1008 | #define EXIT_DISCARDS |
1009 | #else |
1010 | #define EXIT_DISCARDS \ |
1011 | EXIT_TEXT \ |
1012 | EXIT_DATA |
1013 | #endif |
1014 | |
1015 | /* |
1016 | * Clang's -fprofile-arcs, -fsanitize=kernel-address, and |
1017 | * -fsanitize=thread produce unwanted sections (.eh_frame |
1018 | * and .init_array.*), but CONFIG_CONSTRUCTORS wants to |
1019 | * keep any .init_array.* sections. |
1020 | * https://llvm.org/pr46478 |
1021 | */ |
1022 | #ifdef CONFIG_UNWIND_TABLES |
1023 | #define DISCARD_EH_FRAME |
1024 | #else |
1025 | #define DISCARD_EH_FRAME *(.eh_frame) |
1026 | #endif |
1027 | #if defined(CONFIG_GCOV_KERNEL) || defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KCSAN) |
1028 | # ifdef CONFIG_CONSTRUCTORS |
1029 | # define SANITIZER_DISCARDS \ |
1030 | DISCARD_EH_FRAME |
1031 | # else |
1032 | # define SANITIZER_DISCARDS \ |
1033 | *(.init_array) *(.init_array.*) \ |
1034 | DISCARD_EH_FRAME |
1035 | # endif |
1036 | #else |
1037 | # define SANITIZER_DISCARDS |
1038 | #endif |
1039 | |
1040 | #define COMMON_DISCARDS \ |
1041 | SANITIZER_DISCARDS \ |
1042 | PATCHABLE_DISCARDS \ |
1043 | *(.discard) \ |
1044 | *(.discard.*) \ |
1045 | *(.export_symbol) \ |
1046 | *(.no_trim_symbol) \ |
1047 | *(.modinfo) \ |
1048 | /* ld.bfd warns about .gnu.version* even when not emitted */ \ |
1049 | *(.gnu.version*) \ |
1050 | |
1051 | #define DISCARDS \ |
1052 | /DISCARD/ : { \ |
1053 | EXIT_DISCARDS \ |
1054 | EXIT_CALL \ |
1055 | COMMON_DISCARDS \ |
1056 | } |
1057 | |
1058 | /** |
1059 | * PERCPU_INPUT - the percpu input sections |
1060 | * @cacheline: cacheline size |
1061 | * |
1062 | * The core percpu section names and core symbols which do not rely |
1063 | * directly upon load addresses. |
1064 | * |
1065 | * @cacheline is used to align subsections to avoid false cacheline |
1066 | * sharing between subsections for different purposes. |
1067 | */ |
1068 | #define PERCPU_INPUT(cacheline) \ |
1069 | __per_cpu_start = .; \ |
1070 | . = ALIGN(PAGE_SIZE); \ |
1071 | *(.data..percpu..page_aligned) \ |
1072 | . = ALIGN(cacheline); \ |
1073 | __per_cpu_hot_start = .; \ |
1074 | *(SORT_BY_ALIGNMENT(.data..percpu..hot.*)) \ |
1075 | __per_cpu_hot_end = .; \ |
1076 | . = ALIGN(cacheline); \ |
1077 | *(.data..percpu..read_mostly) \ |
1078 | . = ALIGN(cacheline); \ |
1079 | *(.data..percpu) \ |
1080 | *(.data..percpu..shared_aligned) \ |
1081 | PERCPU_DECRYPTED_SECTION \ |
1082 | __per_cpu_end = .; |
1083 | |
1084 | /** |
1085 | * PERCPU_SECTION - define output section for percpu area |
1086 | * @cacheline: cacheline size |
1087 | * |
1088 | * Macro which expands to output section for percpu area. |
1089 | * |
1090 | * @cacheline is used to align subsections to avoid false cacheline |
1091 | * sharing between subsections for different purposes. |
1092 | */ |
1093 | #define PERCPU_SECTION(cacheline) \ |
1094 | . = ALIGN(PAGE_SIZE); \ |
1095 | .data..percpu : AT(ADDR(.data..percpu) - LOAD_OFFSET) { \ |
1096 | PERCPU_INPUT(cacheline) \ |
1097 | } |
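
/*
 * Illustrative use (the cacheline argument is arch-specific), e.g.
 * PERCPU_SECTION(L1_CACHE_BYTES): this emits a page-aligned .data..percpu
 * output section whose contents are delimited by __per_cpu_start and
 * __per_cpu_end.
 */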
1098 | |
1099 | |
1100 | /* |
1101 | * Definition of the high level *_SECTION macros |
1102 | * They will fit only a subset of the architectures |
1103 | */ |
1104 | |
1105 | |
1106 | /* |
1107 | * Writeable data. |
1108 | * All sections are combined in a single .data section. |
1109 | * The sections following CONSTRUCTORS are arranged so their |
1110 | * typical alignment matches. |
 * A cacheline is typically (if not always) smaller than a PAGE_SIZE, so
 * the sections that have this restriction (or a similar one)
 * are located before the ones requiring PAGE_SIZE alignment.
1114 | * NOSAVE_DATA starts and ends with a PAGE_SIZE alignment which |
1115 | * matches the requirement of PAGE_ALIGNED_DATA. |
1116 | * |
 * Use 0 as page_align if page-aligned data is not used. */
1118 | #define RW_DATA(cacheline, pagealigned, inittask) \ |
1119 | . = ALIGN(PAGE_SIZE); \ |
1120 | .data : AT(ADDR(.data) - LOAD_OFFSET) { \ |
1121 | INIT_TASK_DATA(inittask) \ |
1122 | NOSAVE_DATA \ |
1123 | PAGE_ALIGNED_DATA(pagealigned) \ |
1124 | CACHE_HOT_DATA(cacheline) \ |
1125 | CACHELINE_ALIGNED_DATA(cacheline) \ |
1126 | READ_MOSTLY_DATA(cacheline) \ |
1127 | DATA_DATA \ |
1128 | CONSTRUCTORS \ |
1129 | } \ |
1130 | BUG_TABLE \ |
1131 | |
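/*
 * Illustrative use (the exact arguments are arch-specific): a typical
 * invocation is
 *
 *	RW_DATA(L1_CACHE_BYTES, PAGE_SIZE, THREAD_SIZE)
 *
 * i.e. cacheline size, page-aligned-data alignment, and init-stack
 * alignment, in that order.
 */
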
1132 | #define INIT_TEXT_SECTION(inittext_align) \ |
1133 | . = ALIGN(inittext_align); \ |
1134 | .init.text : AT(ADDR(.init.text) - LOAD_OFFSET) { \ |
1135 | _sinittext = .; \ |
1136 | INIT_TEXT \ |
1137 | _einittext = .; \ |
1138 | } |
1139 | |
1140 | #define INIT_DATA_SECTION(initsetup_align) \ |
1141 | .init.data : AT(ADDR(.init.data) - LOAD_OFFSET) { \ |
1142 | INIT_DATA \ |
1143 | INIT_SETUP(initsetup_align) \ |
1144 | INIT_CALLS \ |
1145 | CON_INITCALL \ |
1146 | INIT_RAM_FS \ |
1147 | } |
1148 | |
1149 | #define BSS_SECTION(sbss_align, bss_align, stop_align) \ |
1150 | . = ALIGN(sbss_align); \ |
1151 | __bss_start = .; \ |
1152 | SBSS(sbss_align) \ |
1153 | BSS(bss_align) \ |
1154 | . = ALIGN(stop_align); \ |
1155 | __bss_stop = .; |
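
/*
 * Illustrative use (alignments are arch-specific): BSS_SECTION(0, 0, 0), as
 * in the sample script at the top of this file, takes the .sbss alignment,
 * the .bss alignment, and the alignment applied before __bss_stop, in that
 * order.
 */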
1156 | |