// SPDX-License-Identifier: GPL-2.0-only
/*
 * kexec.c - kexec_load system call
 * Copyright (C) 2002-2004 Eric Biederman <ebiederm@xmission.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/capability.h>
#include <linux/mm.h>
#include <linux/file.h>
#include <linux/security.h>
#include <linux/kexec.h>
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/syscalls.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>

#include "kexec_internal.h"

static int kimage_alloc_init(struct kimage **rimage, unsigned long entry,
                             unsigned long nr_segments,
                             struct kexec_segment *segments,
                             unsigned long flags)
{
        int ret;
        struct kimage *image;
        bool kexec_on_panic = flags & KEXEC_ON_CRASH;

        if (kexec_on_panic) {
                /* Verify we have a valid entry point */
                if ((entry < phys_to_boot_phys(crashk_res.start)) ||
                    (entry > phys_to_boot_phys(crashk_res.end)))
                        return -EADDRNOTAVAIL;
        }

        /* Allocate and initialize a controlling structure */
        image = do_kimage_alloc_init();
        if (!image)
                return -ENOMEM;

        image->start = entry;
        image->nr_segments = nr_segments;
        memcpy(image->segment, segments, nr_segments * sizeof(*segments));

        if (kexec_on_panic) {
                /* Enable special crash kernel control page alloc policy. */
                image->control_page = crashk_res.start;
                image->type = KEXEC_TYPE_CRASH;
        }

        ret = sanity_check_segment_list(image);
        if (ret)
                goto out_free_image;

        /*
         * Find a location for the control code buffer, and add it to
         * the vector of segments so that its pages will also be
         * counted as destination pages.
         */
        ret = -ENOMEM;
        image->control_code_page = kimage_alloc_control_pages(image,
                                           get_order(KEXEC_CONTROL_PAGE_SIZE));
        if (!image->control_code_page) {
                pr_err("Could not allocate control_code_buffer\n");
                goto out_free_image;
        }

        if (!kexec_on_panic) {
                image->swap_page = kimage_alloc_control_pages(image, 0);
                if (!image->swap_page) {
                        pr_err("Could not allocate swap buffer\n");
                        goto out_free_control_pages;
                }
        }

        *rimage = image;
        return 0;
out_free_control_pages:
        kimage_free_page_list(&image->control_pages);
out_free_image:
        kfree(image);
        return ret;
}

static int do_kexec_load(unsigned long entry, unsigned long nr_segments,
                         struct kexec_segment *segments, unsigned long flags)
{
        struct kimage **dest_image, *image;
        unsigned long i;
        int ret;

        /*
         * Because we write directly to the reserved memory region when loading
         * crash kernels we need serialization here to prevent multiple crash
         * kernels from attempting to load simultaneously.
         */
        if (!kexec_trylock())
                return -EBUSY;

        if (flags & KEXEC_ON_CRASH) {
                dest_image = &kexec_crash_image;
                if (kexec_crash_image)
                        arch_kexec_unprotect_crashkres();
        } else {
                dest_image = &kexec_image;
        }

        if (nr_segments == 0) {
                /* Uninstall image */
                kimage_free(xchg(dest_image, NULL));
                ret = 0;
                goto out_unlock;
        }
        if (flags & KEXEC_ON_CRASH) {
                /*
                 * Loading another kernel to switch to if this one
                 * crashes. Free any current crash dump kernel before
                 * we corrupt it.
                 */
                kimage_free(xchg(&kexec_crash_image, NULL));
        }

        ret = kimage_alloc_init(&image, entry, nr_segments, segments, flags);
        if (ret)
                goto out_unlock;

        if (flags & KEXEC_PRESERVE_CONTEXT)
                image->preserve_context = 1;

#ifdef CONFIG_CRASH_HOTPLUG
        if (flags & KEXEC_UPDATE_ELFCOREHDR)
                image->update_elfcorehdr = 1;
#endif

        ret = machine_kexec_prepare(image);
        if (ret)
                goto out;

        /*
         * Some architectures (like s390) may touch the crash memory before
         * machine_kexec_prepare(), so we must copy the vmcoreinfo data after it.
         */
        ret = kimage_crash_copy_vmcoreinfo(image);
        if (ret)
                goto out;

        for (i = 0; i < nr_segments; i++) {
                ret = kimage_load_segment(image, &image->segment[i]);
                if (ret)
                        goto out;
        }

        kimage_terminate(image);

        ret = machine_kexec_post_load(image);
        if (ret)
                goto out;

        /* Install the new kernel and uninstall the old */
        image = xchg(dest_image, image);
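        /*
         * After the swap, "image" refers to the previously installed
         * image (or NULL), so the kimage_free() below releases the old
         * image rather than the one that was just loaded.
         */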

out:
        if ((flags & KEXEC_ON_CRASH) && kexec_crash_image)
                arch_kexec_protect_crashkres();

        kimage_free(image);
out_unlock:
        kexec_unlock();
        return ret;
}

/*
 * Exec Kernel system call: for obvious reasons only root may call it.
 *
 * This call breaks up into three pieces.
 * - A generic part which loads the new kernel from the current
 *   address space, and very carefully places the data in the
 *   allocated pages.
 *
 * - A generic part that interacts with the kernel and tells all of
 *   the devices to shut down, preventing ongoing DMAs and placing
 *   the devices in a consistent state so a later kernel can
 *   reinitialize them.
 *
 * - A machine specific part that includes the syscall number
 *   and then copies the image to its final destination and
 *   jumps into the image at entry.
 *
 * kexec does not sync or unmount filesystems, so if you need
 * that to happen you need to do it yourself.
 */
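
/*
 * Illustrative sketch only, not part of this file's logic: roughly how
 * userspace reaches this entry point through the raw syscall (real callers
 * normally go through the kexec-tools package). The names image_buf,
 * image_len, image_memsz, entry and the destination address below are
 * hypothetical placeholders; mem and memsz must describe a page-aligned
 * range large enough to hold the buffer.
 *
 *	struct kexec_segment seg = {
 *		.buf   = image_buf,
 *		.bufsz = image_len,
 *		.mem   = (void *)0x1000000,
 *		.memsz = image_memsz,
 *	};
 *	syscall(SYS_kexec_load, entry, 1, &seg, KEXEC_ARCH_DEFAULT);
 */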

static inline int kexec_load_check(unsigned long nr_segments,
                                   unsigned long flags)
{
        int image_type = (flags & KEXEC_ON_CRASH) ?
                         KEXEC_TYPE_CRASH : KEXEC_TYPE_DEFAULT;
        int result;

        /* We only trust the superuser with rebooting the system. */
        if (!kexec_load_permitted(image_type))
                return -EPERM;

        /* Permit LSMs and IMA to fail the kexec */
        result = security_kernel_load_data(LOADING_KEXEC_IMAGE, false);
        if (result < 0)
                return result;

        /*
         * kexec can be used to circumvent module loading restrictions, so
         * prevent loading in that case.
         */
        result = security_locked_down(LOCKDOWN_KEXEC);
        if (result)
                return result;

        /*
         * Verify we have a legal set of flags.
         * This leaves us room for future extensions: once the architecture
         * field (KEXEC_ARCH_MASK) is masked off, only bits defined in
         * KEXEC_FLAGS may be set.
         */
        if ((flags & KEXEC_FLAGS) != (flags & ~KEXEC_ARCH_MASK))
                return -EINVAL;

        /*
         * Put an artificial cap on the number
         * of segments passed to kexec_load.
         */
        if (nr_segments > KEXEC_SEGMENT_MAX)
                return -EINVAL;

        return 0;
}

SYSCALL_DEFINE4(kexec_load, unsigned long, entry, unsigned long, nr_segments,
                struct kexec_segment __user *, segments, unsigned long, flags)
{
        struct kexec_segment *ksegments;
        unsigned long result;

        result = kexec_load_check(nr_segments, flags);
        if (result)
                return result;

        /* Verify we are on the appropriate architecture */
        if (((flags & KEXEC_ARCH_MASK) != KEXEC_ARCH) &&
            ((flags & KEXEC_ARCH_MASK) != KEXEC_ARCH_DEFAULT))
                return -EINVAL;

        ksegments = memdup_array_user(segments, nr_segments, sizeof(ksegments[0]));
        if (IS_ERR(ksegments))
                return PTR_ERR(ksegments);

        result = do_kexec_load(entry, nr_segments, ksegments, flags);
        kfree(ksegments);

        return result;
}

#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE4(kexec_load, compat_ulong_t, entry,
                       compat_ulong_t, nr_segments,
                       struct compat_kexec_segment __user *, segments,
                       compat_ulong_t, flags)
{
        struct compat_kexec_segment in;
        struct kexec_segment *ksegments;
        unsigned long i, result;

        result = kexec_load_check(nr_segments, flags);
        if (result)
                return result;

        /*
         * Don't allow clients that don't understand the native
         * architecture to do anything.
         */
        if ((flags & KEXEC_ARCH_MASK) == KEXEC_ARCH_DEFAULT)
                return -EINVAL;

        ksegments = kmalloc_array(nr_segments, sizeof(ksegments[0]),
                                  GFP_KERNEL);
        if (!ksegments)
                return -ENOMEM;

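        /* Convert each 32-bit compat segment descriptor to the native layout. */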
        for (i = 0; i < nr_segments; i++) {
                result = copy_from_user(&in, &segments[i], sizeof(in));
                if (result)
                        goto fail;

                ksegments[i].buf = compat_ptr(in.buf);
                ksegments[i].bufsz = in.bufsz;
                ksegments[i].mem = in.mem;
                ksegments[i].memsz = in.memsz;
        }

        result = do_kexec_load(entry, nr_segments, ksegments, flags);

fail:
        kfree(ksegments);
        return result;
}
#endif