// SPDX-License-Identifier: GPL-2.0
/*
 * Support for Kernel relocation at boot time
 *
 * Copyright (C) 2023 Loongson Technology Corporation Limited
 */

#include <linux/elf.h>
#include <linux/kernel.h>
#include <linux/printk.h>
#include <linux/panic_notifier.h>
#include <linux/start_kernel.h>
#include <asm/bootinfo.h>
#include <asm/early_ioremap.h>
#include <asm/inst.h>
#include <asm/sections.h>
#include <asm/setup.h>

#define RELOCATED(x) ((void *)((long)x + reloc_offset))
#define RELOCATED_KASLR(x) ((void *)((long)x + random_offset))

static unsigned long reloc_offset;

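/*
 * Apply the R_LARCH_RELATIVE relocations recorded in .rela.dyn (between
 * __rela_dyn_begin and __rela_dyn_end): every entry whose addend points
 * into the kernel image is shifted by reloc_offset, and the fixup is
 * written to the relocated address of the entry's target slot.
 */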
static inline void __init relocate_relative(void)
{
	Elf64_Rela *rela, *rela_end;
	rela = (Elf64_Rela *)&__rela_dyn_begin;
	rela_end = (Elf64_Rela *)&__rela_dyn_end;

	for ( ; rela < rela_end; rela++) {
		Elf64_Addr addr = rela->r_offset;
		Elf64_Addr relocated_addr = rela->r_addend;

		if (rela->r_info != R_LARCH_RELATIVE)
			continue;

		if (relocated_addr >= VMLINUX_LOAD_ADDRESS)
			relocated_addr = (Elf64_Addr)RELOCATED(relocated_addr);

		*(Elf64_Addr *)RELOCATED(addr) = relocated_addr;
	}
}

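/*
 * Patch every 4-instruction absolute-address sequence recorded in the
 * la_abs table (between __la_abs_begin and __la_abs_end): rewrite the
 * immediate fields of lu12i.w/ori/lu32i.d/lu52i.d so the sequence loads
 * the symbol value stored in the table entry.
 */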
static inline void __init relocate_absolute(long random_offset)
{
	void *begin, *end;
	struct rela_la_abs *p;

	begin = RELOCATED_KASLR(&__la_abs_begin);
	end = RELOCATED_KASLR(&__la_abs_end);

	for (p = begin; (void *)p < end; p++) {
		long v = p->symvalue;
		uint32_t lu12iw, ori, lu32id, lu52id;
		union loongarch_instruction *insn = (void *)p->pc;

		lu12iw = (v >> 12) & 0xfffff;
		ori = v & 0xfff;
		lu32id = (v >> 32) & 0xfffff;
		lu52id = v >> 52;

		insn[0].reg1i20_format.immediate = lu12iw;
		insn[1].reg2i12_format.immediate = ori;
		insn[2].reg1i20_format.immediate = lu32id;
		insn[3].reg2i12_format.immediate = lu52id;
	}
}

#ifdef CONFIG_RANDOMIZE_BASE
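/*
 * Mix a memory area into @hash one hash-sized word at a time, by
 * rotating and XORing. Used below to stir boot-time entropy into the
 * KASLR seed.
 */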
static inline __init unsigned long rotate_xor(unsigned long hash,
					      const void *area, size_t size)
{
	size_t i, diff;
	const typeof(hash) *ptr = PTR_ALIGN(area, sizeof(hash));

	diff = (void *)ptr - area;
	if (size < diff + sizeof(hash))
		return hash;

	size = ALIGN_DOWN(size - diff, sizeof(hash));

	for (i = 0; i < size / sizeof(hash); i++) {
		/* Rotate by odd number of bits and XOR. */
		hash = (hash << ((sizeof(hash) * 8) - 7)) | (hash >> 7);
		hash ^= ptr[i];
	}

	return hash;
}

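/*
 * Derive a boot-time pseudo-random seed. This is deliberately simple:
 * hash the kernel banner string plus whatever runtime entropy
 * random_get_entropy() provides.
 */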
static inline __init unsigned long get_random_boot(void)
{
	unsigned long hash = 0;
	unsigned long entropy = random_get_entropy();

	/* Attempt to create a simple but unpredictable starting entropy. */
	hash = rotate_xor(hash, linux_banner, strlen(linux_banner));

	/* Add in any runtime entropy we can get */
	hash = rotate_xor(hash, &entropy, sizeof(entropy));

	return hash;
}

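/*
 * Relocation happens before early parameters are parsed, so the real
 * "nokaslr" decision is made by kaslr_disabled() below, which scans the
 * command line directly. This handler only prints a notice and keeps the
 * option from triggering the unknown-parameter warning.
 */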
static int __init nokaslr(char *p)
{
	pr_info("KASLR is disabled.\n");

	return 0; /* Print a notice and silence the boot warning */
}
early_param("nokaslr", nokaslr);

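/*
 * Check both the built-in CONFIG_CMDLINE and the command line passed by
 * the firmware for a whole-word "nokaslr" (at the start of the string or
 * preceded by a space).
 */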
static inline __init bool kaslr_disabled(void)
{
	char *str;
	const char *builtin_cmdline = CONFIG_CMDLINE;

	str = strstr(builtin_cmdline, "nokaslr");
	if (str == builtin_cmdline || (str > builtin_cmdline && *(str - 1) == ' '))
		return true;

	str = strstr(boot_command_line, "nokaslr");
	if (str == boot_command_line || (str > boot_command_line && *(str - 1) == ' '))
		return true;

	return false;
}

/* Choose a new address for the kernel */
static inline void __init *determine_relocation_address(void)
{
	unsigned long kernel_length;
	unsigned long random_offset;
	void *destination = _text;

	if (kaslr_disabled())
		return destination;

	kernel_length = (long)_end - (long)_text;

	random_offset = get_random_boot() << 16;
	random_offset &= (CONFIG_RANDOMIZE_BASE_MAX_OFFSET - 1);
	if (random_offset < kernel_length)
		random_offset += ALIGN(kernel_length, 0xffff);

	return RELOCATED_KASLR(destination);
}

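/*
 * A candidate location is acceptable only if it is 64KiB aligned and
 * lies at or above the end (_end) of the currently running image.
 */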
static inline int __init relocation_addr_valid(void *location_new)
{
	if ((unsigned long)location_new & 0x00000ffff)
		return 0; /* Inappropriately aligned new location */

	if ((unsigned long)location_new < (unsigned long)_end)
		return 0; /* New location overlaps original kernel */

	return 1;
}
#endif

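/*
 * reloc_offset itself lives inside the image that has just been copied,
 * so write the final value into the relocated copy of the variable.
 */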
static inline void __init update_reloc_offset(unsigned long *addr, long random_offset)
{
	unsigned long *new_addr = (unsigned long *)RELOCATED_KASLR(addr);

	*new_addr = (unsigned long)reloc_offset;
}

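/*
 * Main entry point, called from the early assembly entry code. Reads the
 * boot command line from fw_arg1, optionally picks a randomized
 * destination and copies the image there, applies the dynamic and
 * absolute relocations, and returns the random offset to the caller.
 */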
unsigned long __init relocate_kernel(void)
{
	unsigned long kernel_length;
	unsigned long random_offset = 0;
	void *location_new = _text; /* Default to original kernel start */
	char *cmdline = early_ioremap(fw_arg1, COMMAND_LINE_SIZE); /* Boot command line is passed in fw_arg1 */

	strscpy(boot_command_line, cmdline, COMMAND_LINE_SIZE);

#ifdef CONFIG_RANDOMIZE_BASE
	location_new = determine_relocation_address();

	/* Sanity check relocation address */
	if (relocation_addr_valid(location_new))
		random_offset = (unsigned long)location_new - (unsigned long)(_text);
#endif
	reloc_offset = (unsigned long)_text - VMLINUX_LOAD_ADDRESS;

	if (random_offset) {
		kernel_length = (long)(_end) - (long)(_text);

		/* Copy the kernel to its new location */
		memcpy(location_new, _text, kernel_length);

		/* Sync the caches ready for execution of new kernel */
		__asm__ __volatile__ (
			"ibar 0 \t\n"
			"dbar 0 \t\n"
			::: "memory");

		reloc_offset += random_offset;

		/* The current thread is now within the relocated kernel */
		__current_thread_info = RELOCATED_KASLR(__current_thread_info);

		update_reloc_offset(&reloc_offset, random_offset);
	}

	if (reloc_offset)
		relocate_relative();

	relocate_absolute(random_offset);

	return random_offset;
}

/*
 * Show relocation information on panic.
 */
static void show_kernel_relocation(const char *level)
{
	if (reloc_offset > 0) {
		printk(level);
		pr_cont("Kernel relocated by 0x%lx\n", reloc_offset);
		pr_cont(" .text @ 0x%px\n", _text);
		pr_cont(" .data @ 0x%px\n", _sdata);
		pr_cont(" .bss @ 0x%px\n", __bss_start);
	}
}

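/*
 * Dump the relocation info on panic so that addresses in the report can
 * be correlated with the original link addresses.
 */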
static int kernel_location_notifier_fn(struct notifier_block *self,
				       unsigned long v, void *p)
{
	show_kernel_relocation(KERN_EMERG);
	return NOTIFY_DONE;
}

static struct notifier_block kernel_location_notifier = {
	.notifier_call = kernel_location_notifier_fn
};

static int __init register_kernel_offset_dumper(void)
{
	atomic_notifier_chain_register(&panic_notifier_list,
				       &kernel_location_notifier);
	return 0;
}

arch_initcall(register_kernel_offset_dumper);