1 | // SPDX-License-Identifier: GPL-2.0+ |
2 | /* |
3 | * Kernel module help for s390. |
4 | * |
5 | * S390 version |
6 | * Copyright IBM Corp. 2002, 2003 |
7 | * Author(s): Arnd Bergmann (arndb@de.ibm.com) |
8 | * Martin Schwidefsky (schwidefsky@de.ibm.com) |
9 | * |
10 | * based on i386 version |
11 | * Copyright (C) 2001 Rusty Russell. |
12 | */ |
13 | #include <linux/module.h> |
14 | #include <linux/elf.h> |
15 | #include <linux/vmalloc.h> |
16 | #include <linux/fs.h> |
17 | #include <linux/ftrace.h> |
18 | #include <linux/string.h> |
19 | #include <linux/kernel.h> |
20 | #include <linux/kasan.h> |
21 | #include <linux/moduleloader.h> |
22 | #include <linux/bug.h> |
23 | #include <linux/memory.h> |
24 | #include <asm/alternative.h> |
25 | #include <asm/nospec-branch.h> |
26 | #include <asm/facility.h> |
27 | #include <asm/ftrace.lds.h> |
28 | #include <asm/set_memory.h> |
29 | #include <asm/setup.h> |
30 | |
/*
 * Local debug tracing: flip the "#if 0" to "#if 1" to route DEBUGP
 * through printk; by default it compiles away to nothing.
 */
#if 0
#define DEBUGP printk
#else
#define DEBUGP(fmt , ...)
#endif

/* Size in bytes of one generated PLT entry (see apply_rela()). */
#define PLT_ENTRY_SIZE 22
38 | |
39 | static unsigned long get_module_load_offset(void) |
40 | { |
41 | static DEFINE_MUTEX(module_kaslr_mutex); |
42 | static unsigned long module_load_offset; |
43 | |
44 | if (!kaslr_enabled()) |
45 | return 0; |
46 | /* |
47 | * Calculate the module_load_offset the first time this code |
48 | * is called. Once calculated it stays the same until reboot. |
49 | */ |
50 | mutex_lock(&module_kaslr_mutex); |
51 | if (!module_load_offset) |
52 | module_load_offset = get_random_u32_inclusive(floor: 1, ceil: 1024) * PAGE_SIZE; |
53 | mutex_unlock(lock: &module_kaslr_mutex); |
54 | return module_load_offset; |
55 | } |
56 | |
57 | void *module_alloc(unsigned long size) |
58 | { |
59 | gfp_t gfp_mask = GFP_KERNEL; |
60 | void *p; |
61 | |
62 | if (PAGE_ALIGN(size) > MODULES_LEN) |
63 | return NULL; |
64 | p = __vmalloc_node_range(size, MODULE_ALIGN, |
65 | MODULES_VADDR + get_module_load_offset(), |
66 | MODULES_END, gfp_mask, PAGE_KERNEL, |
67 | VM_FLUSH_RESET_PERMS | VM_DEFER_KMEMLEAK, |
68 | NUMA_NO_NODE, caller: __builtin_return_address(0)); |
69 | if (p && (kasan_alloc_module_shadow(addr: p, size, gfp_mask) < 0)) { |
70 | vfree(addr: p); |
71 | return NULL; |
72 | } |
73 | return p; |
74 | } |
75 | |
76 | #ifdef CONFIG_FUNCTION_TRACER |
77 | void module_arch_cleanup(struct module *mod) |
78 | { |
79 | module_memfree(module_region: mod->arch.trampolines_start); |
80 | } |
81 | #endif |
82 | |
83 | void module_arch_freeing_init(struct module *mod) |
84 | { |
85 | if (is_livepatch_module(mod) && |
86 | mod->state == MODULE_STATE_LIVE) |
87 | return; |
88 | |
89 | vfree(addr: mod->arch.syminfo); |
90 | mod->arch.syminfo = NULL; |
91 | } |
92 | |
/*
 * Pre-scan one RELA relocation and reserve GOT/PLT space for the
 * referenced symbol when the relocation type requires it.  Each symbol
 * is given at most one GOT slot and at most one PLT entry; the running
 * totals are accumulated in me->arch.got_size / me->arch.plt_size.
 * Called from module_frob_arch_sections() before the module is laid out.
 */
static void check_rela(Elf_Rela *rela, struct module *me)
{
	struct mod_arch_syminfo *info;

	info = me->arch.syminfo + ELF_R_SYM (rela->r_info);
	switch (ELF_R_TYPE (rela->r_info)) {
	case R_390_GOT12:	/* 12 bit GOT offset.  */
	case R_390_GOT16:	/* 16 bit GOT offset.  */
	case R_390_GOT20:	/* 20 bit GOT offset.  */
	case R_390_GOT32:	/* 32 bit GOT offset.  */
	case R_390_GOT64:	/* 64 bit GOT offset.  */
	case R_390_GOTENT:	/* 32 bit PC rel. to GOT entry shifted by 1. */
	case R_390_GOTPLT12:	/* 12 bit offset to jump slot.	*/
	case R_390_GOTPLT16:	/* 16 bit offset to jump slot.  */
	case R_390_GOTPLT20:	/* 20 bit offset to jump slot.  */
	case R_390_GOTPLT32:	/* 32 bit offset to jump slot.  */
	case R_390_GOTPLT64:	/* 64 bit offset to jump slot.	*/
	case R_390_GOTPLTENT:	/* 32 bit rel. offset to jump slot >> 1. */
		/* First GOT-type reference to this symbol: reserve one
		 * pointer-sized GOT slot (-1UL marks "not reserved yet"). */
		if (info->got_offset == -1UL) {
			info->got_offset = me->arch.got_size;
			me->arch.got_size += sizeof(void*);
		}
		break;
	case R_390_PLT16DBL:	/* 16 bit PC rel. PLT shifted by 1.  */
	case R_390_PLT32DBL:	/* 32 bit PC rel. PLT shifted by 1.  */
	case R_390_PLT32:	/* 32 bit PC relative PLT address.  */
	case R_390_PLT64:	/* 64 bit PC relative PLT address.  */
	case R_390_PLTOFF16:	/* 16 bit offset from GOT to PLT. */
	case R_390_PLTOFF32:	/* 32 bit offset from GOT to PLT. */
	case R_390_PLTOFF64:	/* 16 bit offset from GOT to PLT. */
		/* First PLT-type reference to this symbol: reserve one
		 * PLT_ENTRY_SIZE stub in the module's PLT area. */
		if (info->plt_offset == -1UL) {
			info->plt_offset = me->arch.plt_size;
			me->arch.plt_size += PLT_ENTRY_SIZE;
		}
		break;
	case R_390_COPY:
	case R_390_GLOB_DAT:
	case R_390_JMP_SLOT:
	case R_390_RELATIVE:
		/* Only needed if we want to support loading of
		   modules linked with -shared. */
		break;
	}
}
137 | |
138 | /* |
139 | * Account for GOT and PLT relocations. We can't add sections for |
140 | * got and plt but we can increase the core module size. |
141 | */ |
142 | int module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs, |
143 | char *secstrings, struct module *me) |
144 | { |
145 | Elf_Shdr *symtab; |
146 | Elf_Sym *symbols; |
147 | Elf_Rela *rela; |
148 | char *strings; |
149 | int nrela, i, j; |
150 | struct module_memory *mod_mem; |
151 | |
152 | /* Find symbol table and string table. */ |
153 | symtab = NULL; |
154 | for (i = 0; i < hdr->e_shnum; i++) |
155 | switch (sechdrs[i].sh_type) { |
156 | case SHT_SYMTAB: |
157 | symtab = sechdrs + i; |
158 | break; |
159 | } |
160 | if (!symtab) { |
161 | printk(KERN_ERR "module %s: no symbol table\n" , me->name); |
162 | return -ENOEXEC; |
163 | } |
164 | |
165 | /* Allocate one syminfo structure per symbol. */ |
166 | me->arch.nsyms = symtab->sh_size / sizeof(Elf_Sym); |
167 | me->arch.syminfo = vmalloc(array_size(sizeof(struct mod_arch_syminfo), |
168 | me->arch.nsyms)); |
169 | if (!me->arch.syminfo) |
170 | return -ENOMEM; |
171 | symbols = (void *) hdr + symtab->sh_offset; |
172 | strings = (void *) hdr + sechdrs[symtab->sh_link].sh_offset; |
173 | for (i = 0; i < me->arch.nsyms; i++) { |
174 | if (symbols[i].st_shndx == SHN_UNDEF && |
175 | strcmp(strings + symbols[i].st_name, |
176 | "_GLOBAL_OFFSET_TABLE_" ) == 0) |
177 | /* "Define" it as absolute. */ |
178 | symbols[i].st_shndx = SHN_ABS; |
179 | me->arch.syminfo[i].got_offset = -1UL; |
180 | me->arch.syminfo[i].plt_offset = -1UL; |
181 | me->arch.syminfo[i].got_initialized = 0; |
182 | me->arch.syminfo[i].plt_initialized = 0; |
183 | } |
184 | |
185 | /* Search for got/plt relocations. */ |
186 | me->arch.got_size = me->arch.plt_size = 0; |
187 | for (i = 0; i < hdr->e_shnum; i++) { |
188 | if (sechdrs[i].sh_type != SHT_RELA) |
189 | continue; |
190 | nrela = sechdrs[i].sh_size / sizeof(Elf_Rela); |
191 | rela = (void *) hdr + sechdrs[i].sh_offset; |
192 | for (j = 0; j < nrela; j++) |
193 | check_rela(rela: rela + j, me); |
194 | } |
195 | |
196 | /* Increase core size by size of got & plt and set start |
197 | offsets for got and plt. */ |
198 | mod_mem = &me->mem[MOD_TEXT]; |
199 | mod_mem->size = ALIGN(mod_mem->size, 4); |
200 | me->arch.got_offset = mod_mem->size; |
201 | mod_mem->size += me->arch.got_size; |
202 | me->arch.plt_offset = mod_mem->size; |
203 | if (me->arch.plt_size) { |
204 | if (IS_ENABLED(CONFIG_EXPOLINE) && !nospec_disable) |
205 | me->arch.plt_size += PLT_ENTRY_SIZE; |
206 | mod_mem->size += me->arch.plt_size; |
207 | } |
208 | return 0; |
209 | } |
210 | |
211 | static int apply_rela_bits(Elf_Addr loc, Elf_Addr val, |
212 | int sign, int bits, int shift, |
213 | void *(*write)(void *dest, const void *src, size_t len)) |
214 | { |
215 | unsigned long umax; |
216 | long min, max; |
217 | void *dest = (void *)loc; |
218 | |
219 | if (val & ((1UL << shift) - 1)) |
220 | return -ENOEXEC; |
221 | if (sign) { |
222 | val = (Elf_Addr)(((long) val) >> shift); |
223 | min = -(1L << (bits - 1)); |
224 | max = (1L << (bits - 1)) - 1; |
225 | if ((long) val < min || (long) val > max) |
226 | return -ENOEXEC; |
227 | } else { |
228 | val >>= shift; |
229 | umax = ((1UL << (bits - 1)) << 1) - 1; |
230 | if ((unsigned long) val > umax) |
231 | return -ENOEXEC; |
232 | } |
233 | |
234 | if (bits == 8) { |
235 | unsigned char tmp = val; |
236 | write(dest, &tmp, 1); |
237 | } else if (bits == 12) { |
238 | unsigned short tmp = (val & 0xfff) | |
239 | (*(unsigned short *) loc & 0xf000); |
240 | write(dest, &tmp, 2); |
241 | } else if (bits == 16) { |
242 | unsigned short tmp = val; |
243 | write(dest, &tmp, 2); |
244 | } else if (bits == 20) { |
245 | unsigned int tmp = (val & 0xfff) << 16 | |
246 | (val & 0xff000) >> 4 | (*(unsigned int *) loc & 0xf00000ff); |
247 | write(dest, &tmp, 4); |
248 | } else if (bits == 32) { |
249 | unsigned int tmp = val; |
250 | write(dest, &tmp, 4); |
251 | } else if (bits == 64) { |
252 | unsigned long tmp = val; |
253 | write(dest, &tmp, 8); |
254 | } |
255 | return 0; |
256 | } |
257 | |
258 | static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab, |
259 | const char *strtab, struct module *me, |
260 | void *(*write)(void *dest, const void *src, size_t len)) |
261 | { |
262 | struct mod_arch_syminfo *info; |
263 | Elf_Addr loc, val; |
264 | int r_type, r_sym; |
265 | int rc = -ENOEXEC; |
266 | |
267 | /* This is where to make the change */ |
268 | loc = base + rela->r_offset; |
269 | /* This is the symbol it is referring to. Note that all |
270 | undefined symbols have been resolved. */ |
271 | r_sym = ELF_R_SYM(rela->r_info); |
272 | r_type = ELF_R_TYPE(rela->r_info); |
273 | info = me->arch.syminfo + r_sym; |
274 | val = symtab[r_sym].st_value; |
275 | |
276 | switch (r_type) { |
277 | case R_390_NONE: /* No relocation. */ |
278 | rc = 0; |
279 | break; |
280 | case R_390_8: /* Direct 8 bit. */ |
281 | case R_390_12: /* Direct 12 bit. */ |
282 | case R_390_16: /* Direct 16 bit. */ |
283 | case R_390_20: /* Direct 20 bit. */ |
284 | case R_390_32: /* Direct 32 bit. */ |
285 | case R_390_64: /* Direct 64 bit. */ |
286 | val += rela->r_addend; |
287 | if (r_type == R_390_8) |
288 | rc = apply_rela_bits(loc, val, sign: 0, bits: 8, shift: 0, write); |
289 | else if (r_type == R_390_12) |
290 | rc = apply_rela_bits(loc, val, sign: 0, bits: 12, shift: 0, write); |
291 | else if (r_type == R_390_16) |
292 | rc = apply_rela_bits(loc, val, sign: 0, bits: 16, shift: 0, write); |
293 | else if (r_type == R_390_20) |
294 | rc = apply_rela_bits(loc, val, sign: 1, bits: 20, shift: 0, write); |
295 | else if (r_type == R_390_32) |
296 | rc = apply_rela_bits(loc, val, sign: 0, bits: 32, shift: 0, write); |
297 | else if (r_type == R_390_64) |
298 | rc = apply_rela_bits(loc, val, sign: 0, bits: 64, shift: 0, write); |
299 | break; |
300 | case R_390_PC16: /* PC relative 16 bit. */ |
301 | case R_390_PC16DBL: /* PC relative 16 bit shifted by 1. */ |
302 | case R_390_PC32DBL: /* PC relative 32 bit shifted by 1. */ |
303 | case R_390_PC32: /* PC relative 32 bit. */ |
304 | case R_390_PC64: /* PC relative 64 bit. */ |
305 | val += rela->r_addend - loc; |
306 | if (r_type == R_390_PC16) |
307 | rc = apply_rela_bits(loc, val, sign: 1, bits: 16, shift: 0, write); |
308 | else if (r_type == R_390_PC16DBL) |
309 | rc = apply_rela_bits(loc, val, sign: 1, bits: 16, shift: 1, write); |
310 | else if (r_type == R_390_PC32DBL) |
311 | rc = apply_rela_bits(loc, val, sign: 1, bits: 32, shift: 1, write); |
312 | else if (r_type == R_390_PC32) |
313 | rc = apply_rela_bits(loc, val, sign: 1, bits: 32, shift: 0, write); |
314 | else if (r_type == R_390_PC64) |
315 | rc = apply_rela_bits(loc, val, sign: 1, bits: 64, shift: 0, write); |
316 | break; |
317 | case R_390_GOT12: /* 12 bit GOT offset. */ |
318 | case R_390_GOT16: /* 16 bit GOT offset. */ |
319 | case R_390_GOT20: /* 20 bit GOT offset. */ |
320 | case R_390_GOT32: /* 32 bit GOT offset. */ |
321 | case R_390_GOT64: /* 64 bit GOT offset. */ |
322 | case R_390_GOTENT: /* 32 bit PC rel. to GOT entry shifted by 1. */ |
323 | case R_390_GOTPLT12: /* 12 bit offset to jump slot. */ |
324 | case R_390_GOTPLT20: /* 20 bit offset to jump slot. */ |
325 | case R_390_GOTPLT16: /* 16 bit offset to jump slot. */ |
326 | case R_390_GOTPLT32: /* 32 bit offset to jump slot. */ |
327 | case R_390_GOTPLT64: /* 64 bit offset to jump slot. */ |
328 | case R_390_GOTPLTENT: /* 32 bit rel. offset to jump slot >> 1. */ |
329 | if (info->got_initialized == 0) { |
330 | Elf_Addr *gotent = me->mem[MOD_TEXT].base + |
331 | me->arch.got_offset + |
332 | info->got_offset; |
333 | |
334 | write(gotent, &val, sizeof(*gotent)); |
335 | info->got_initialized = 1; |
336 | } |
337 | val = info->got_offset + rela->r_addend; |
338 | if (r_type == R_390_GOT12 || |
339 | r_type == R_390_GOTPLT12) |
340 | rc = apply_rela_bits(loc, val, sign: 0, bits: 12, shift: 0, write); |
341 | else if (r_type == R_390_GOT16 || |
342 | r_type == R_390_GOTPLT16) |
343 | rc = apply_rela_bits(loc, val, sign: 0, bits: 16, shift: 0, write); |
344 | else if (r_type == R_390_GOT20 || |
345 | r_type == R_390_GOTPLT20) |
346 | rc = apply_rela_bits(loc, val, sign: 1, bits: 20, shift: 0, write); |
347 | else if (r_type == R_390_GOT32 || |
348 | r_type == R_390_GOTPLT32) |
349 | rc = apply_rela_bits(loc, val, sign: 0, bits: 32, shift: 0, write); |
350 | else if (r_type == R_390_GOT64 || |
351 | r_type == R_390_GOTPLT64) |
352 | rc = apply_rela_bits(loc, val, sign: 0, bits: 64, shift: 0, write); |
353 | else if (r_type == R_390_GOTENT || |
354 | r_type == R_390_GOTPLTENT) { |
355 | val += (Elf_Addr)me->mem[MOD_TEXT].base + |
356 | me->arch.got_offset - loc; |
357 | rc = apply_rela_bits(loc, val, sign: 1, bits: 32, shift: 1, write); |
358 | } |
359 | break; |
360 | case R_390_PLT16DBL: /* 16 bit PC rel. PLT shifted by 1. */ |
361 | case R_390_PLT32DBL: /* 32 bit PC rel. PLT shifted by 1. */ |
362 | case R_390_PLT32: /* 32 bit PC relative PLT address. */ |
363 | case R_390_PLT64: /* 64 bit PC relative PLT address. */ |
364 | case R_390_PLTOFF16: /* 16 bit offset from GOT to PLT. */ |
365 | case R_390_PLTOFF32: /* 32 bit offset from GOT to PLT. */ |
366 | case R_390_PLTOFF64: /* 16 bit offset from GOT to PLT. */ |
367 | if (info->plt_initialized == 0) { |
368 | unsigned char insn[PLT_ENTRY_SIZE]; |
369 | char *plt_base; |
370 | char *ip; |
371 | |
372 | plt_base = me->mem[MOD_TEXT].base + me->arch.plt_offset; |
373 | ip = plt_base + info->plt_offset; |
374 | *(int *)insn = 0x0d10e310; /* basr 1,0 */ |
375 | *(int *)&insn[4] = 0x100c0004; /* lg 1,12(1) */ |
376 | if (IS_ENABLED(CONFIG_EXPOLINE) && !nospec_disable) { |
377 | char *jump_r1; |
378 | |
379 | jump_r1 = plt_base + me->arch.plt_size - |
380 | PLT_ENTRY_SIZE; |
381 | /* brcl 0xf,__jump_r1 */ |
382 | *(short *)&insn[8] = 0xc0f4; |
383 | *(int *)&insn[10] = (jump_r1 - (ip + 8)) / 2; |
384 | } else { |
385 | *(int *)&insn[8] = 0x07f10000; /* br %r1 */ |
386 | } |
387 | *(long *)&insn[14] = val; |
388 | |
389 | write(ip, insn, sizeof(insn)); |
390 | info->plt_initialized = 1; |
391 | } |
392 | if (r_type == R_390_PLTOFF16 || |
393 | r_type == R_390_PLTOFF32 || |
394 | r_type == R_390_PLTOFF64) |
395 | val = me->arch.plt_offset - me->arch.got_offset + |
396 | info->plt_offset + rela->r_addend; |
397 | else { |
398 | if (!((r_type == R_390_PLT16DBL && |
399 | val - loc + 0xffffUL < 0x1ffffeUL) || |
400 | (r_type == R_390_PLT32DBL && |
401 | val - loc + 0xffffffffULL < 0x1fffffffeULL))) |
402 | val = (Elf_Addr) me->mem[MOD_TEXT].base + |
403 | me->arch.plt_offset + |
404 | info->plt_offset; |
405 | val += rela->r_addend - loc; |
406 | } |
407 | if (r_type == R_390_PLT16DBL) |
408 | rc = apply_rela_bits(loc, val, sign: 1, bits: 16, shift: 1, write); |
409 | else if (r_type == R_390_PLTOFF16) |
410 | rc = apply_rela_bits(loc, val, sign: 0, bits: 16, shift: 0, write); |
411 | else if (r_type == R_390_PLT32DBL) |
412 | rc = apply_rela_bits(loc, val, sign: 1, bits: 32, shift: 1, write); |
413 | else if (r_type == R_390_PLT32 || |
414 | r_type == R_390_PLTOFF32) |
415 | rc = apply_rela_bits(loc, val, sign: 0, bits: 32, shift: 0, write); |
416 | else if (r_type == R_390_PLT64 || |
417 | r_type == R_390_PLTOFF64) |
418 | rc = apply_rela_bits(loc, val, sign: 0, bits: 64, shift: 0, write); |
419 | break; |
420 | case R_390_GOTOFF16: /* 16 bit offset to GOT. */ |
421 | case R_390_GOTOFF32: /* 32 bit offset to GOT. */ |
422 | case R_390_GOTOFF64: /* 64 bit offset to GOT. */ |
423 | val = val + rela->r_addend - |
424 | ((Elf_Addr) me->mem[MOD_TEXT].base + me->arch.got_offset); |
425 | if (r_type == R_390_GOTOFF16) |
426 | rc = apply_rela_bits(loc, val, sign: 0, bits: 16, shift: 0, write); |
427 | else if (r_type == R_390_GOTOFF32) |
428 | rc = apply_rela_bits(loc, val, sign: 0, bits: 32, shift: 0, write); |
429 | else if (r_type == R_390_GOTOFF64) |
430 | rc = apply_rela_bits(loc, val, sign: 0, bits: 64, shift: 0, write); |
431 | break; |
432 | case R_390_GOTPC: /* 32 bit PC relative offset to GOT. */ |
433 | case R_390_GOTPCDBL: /* 32 bit PC rel. off. to GOT shifted by 1. */ |
434 | val = (Elf_Addr) me->mem[MOD_TEXT].base + me->arch.got_offset + |
435 | rela->r_addend - loc; |
436 | if (r_type == R_390_GOTPC) |
437 | rc = apply_rela_bits(loc, val, sign: 1, bits: 32, shift: 0, write); |
438 | else if (r_type == R_390_GOTPCDBL) |
439 | rc = apply_rela_bits(loc, val, sign: 1, bits: 32, shift: 1, write); |
440 | break; |
441 | case R_390_COPY: |
442 | case R_390_GLOB_DAT: /* Create GOT entry. */ |
443 | case R_390_JMP_SLOT: /* Create PLT entry. */ |
444 | case R_390_RELATIVE: /* Adjust by program base. */ |
445 | /* Only needed if we want to support loading of |
446 | modules linked with -shared. */ |
447 | return -ENOEXEC; |
448 | default: |
449 | printk(KERN_ERR "module %s: unknown relocation: %u\n" , |
450 | me->name, r_type); |
451 | return -ENOEXEC; |
452 | } |
453 | if (rc) { |
454 | printk(KERN_ERR "module %s: relocation error for symbol %s " |
455 | "(r_type %i, value 0x%lx)\n" , |
456 | me->name, strtab + symtab[r_sym].st_name, |
457 | r_type, (unsigned long) val); |
458 | return rc; |
459 | } |
460 | return 0; |
461 | } |
462 | |
463 | static int __apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab, |
464 | unsigned int symindex, unsigned int relsec, |
465 | struct module *me, |
466 | void *(*write)(void *dest, const void *src, size_t len)) |
467 | { |
468 | Elf_Addr base; |
469 | Elf_Sym *symtab; |
470 | Elf_Rela *rela; |
471 | unsigned long i, n; |
472 | int rc; |
473 | |
474 | DEBUGP("Applying relocate section %u to %u\n" , |
475 | relsec, sechdrs[relsec].sh_info); |
476 | base = sechdrs[sechdrs[relsec].sh_info].sh_addr; |
477 | symtab = (Elf_Sym *) sechdrs[symindex].sh_addr; |
478 | rela = (Elf_Rela *) sechdrs[relsec].sh_addr; |
479 | n = sechdrs[relsec].sh_size / sizeof(Elf_Rela); |
480 | |
481 | for (i = 0; i < n; i++, rela++) { |
482 | rc = apply_rela(rela, base, symtab, strtab, me, write); |
483 | if (rc) |
484 | return rc; |
485 | } |
486 | return 0; |
487 | } |
488 | |
489 | int apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab, |
490 | unsigned int symindex, unsigned int relsec, |
491 | struct module *me) |
492 | { |
493 | bool early = me->state == MODULE_STATE_UNFORMED; |
494 | void *(*write)(void *, const void *, size_t) = memcpy; |
495 | |
496 | if (!early) |
497 | write = s390_kernel_write; |
498 | |
499 | return __apply_relocate_add(sechdrs, strtab, symindex, relsec, me, |
500 | write); |
501 | } |
502 | |
503 | #ifdef CONFIG_FUNCTION_TRACER |
504 | static int module_alloc_ftrace_hotpatch_trampolines(struct module *me, |
505 | const Elf_Shdr *s) |
506 | { |
507 | char *start, *end; |
508 | int numpages; |
509 | size_t size; |
510 | |
511 | size = FTRACE_HOTPATCH_TRAMPOLINES_SIZE(s->sh_size); |
512 | numpages = DIV_ROUND_UP(size, PAGE_SIZE); |
513 | start = module_alloc(size: numpages * PAGE_SIZE); |
514 | if (!start) |
515 | return -ENOMEM; |
516 | set_memory_rox(addr: (unsigned long)start, numpages); |
517 | end = start + size; |
518 | |
519 | me->arch.trampolines_start = (struct ftrace_hotpatch_trampoline *)start; |
520 | me->arch.trampolines_end = (struct ftrace_hotpatch_trampoline *)end; |
521 | me->arch.next_trampoline = me->arch.trampolines_start; |
522 | |
523 | return 0; |
524 | } |
525 | #endif /* CONFIG_FUNCTION_TRACER */ |
526 | |
527 | int module_finalize(const Elf_Ehdr *hdr, |
528 | const Elf_Shdr *sechdrs, |
529 | struct module *me) |
530 | { |
531 | const Elf_Shdr *s; |
532 | char *secstrings, *secname; |
533 | void *aseg; |
534 | #ifdef CONFIG_FUNCTION_TRACER |
535 | int ret; |
536 | #endif |
537 | |
538 | if (IS_ENABLED(CONFIG_EXPOLINE) && |
539 | !nospec_disable && me->arch.plt_size) { |
540 | unsigned int *ij; |
541 | |
542 | ij = me->mem[MOD_TEXT].base + me->arch.plt_offset + |
543 | me->arch.plt_size - PLT_ENTRY_SIZE; |
544 | ij[0] = 0xc6000000; /* exrl %r0,.+10 */ |
545 | ij[1] = 0x0005a7f4; /* j . */ |
546 | ij[2] = 0x000007f1; /* br %r1 */ |
547 | } |
548 | |
549 | secstrings = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset; |
550 | for (s = sechdrs; s < sechdrs + hdr->e_shnum; s++) { |
551 | aseg = (void *) s->sh_addr; |
552 | secname = secstrings + s->sh_name; |
553 | |
554 | if (!strcmp(".altinstructions" , secname)) |
555 | /* patch .altinstructions */ |
556 | apply_alternatives(start: aseg, end: aseg + s->sh_size); |
557 | |
558 | if (IS_ENABLED(CONFIG_EXPOLINE) && |
559 | (str_has_prefix(str: secname, prefix: ".s390_indirect" ))) |
560 | nospec_revert(aseg, aseg + s->sh_size); |
561 | |
562 | if (IS_ENABLED(CONFIG_EXPOLINE) && |
563 | (str_has_prefix(str: secname, prefix: ".s390_return" ))) |
564 | nospec_revert(aseg, aseg + s->sh_size); |
565 | |
566 | #ifdef CONFIG_FUNCTION_TRACER |
567 | if (!strcmp(FTRACE_CALLSITE_SECTION, secname)) { |
568 | ret = module_alloc_ftrace_hotpatch_trampolines(me, s); |
569 | if (ret < 0) |
570 | return ret; |
571 | } |
572 | #endif /* CONFIG_FUNCTION_TRACER */ |
573 | } |
574 | |
575 | return 0; |
576 | } |
577 | |