// SPDX-License-Identifier: GPL-2.0-or-later
/* Rewritten by Rusty Russell, on the backs of many others...
   Copyright (C) 2001 Rusty Russell, 2002 Rusty Russell IBM.
*/
#include <linux/elf.h>
#include <linux/ftrace.h>
#include <linux/memory.h>
#include <linux/extable.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/init.h>
#include <linux/kprobes.h>
#include <linux/filter.h>

#include <asm/sections.h>
#include <linux/uaccess.h>

/*
 * Mutex protecting text section modification (dynamic code patching).
 * Some users need to sleep (e.g. to allocate memory) while they hold
 * this lock.
 *
 * Note: Also protects SMP-alternatives modification on x86.
 *
 * NOT exported to modules - patching kernel text is a really delicate matter.
 */
DEFINE_MUTEX(text_mutex);
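/*
 * Illustrative sketch (not part of this file): a code-patching path is
 * expected to take text_mutex around the modification, and may sleep
 * while holding it. patch_one_insn() below is a hypothetical helper,
 * not a real kernel function:
 *
 *	mutex_lock(&text_mutex);
 *	patch_one_insn(addr, new_insn);
 *	mutex_unlock(&text_mutex);
 */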

extern struct exception_table_entry __start___ex_table[];
extern struct exception_table_entry __stop___ex_table[];

/* Cleared by build time tools if the table is already sorted. */
u32 __initdata __visible main_extable_sort_needed = 1;

/* Sort the kernel's built-in exception table */
void __init sort_main_extable(void)
{
	if (main_extable_sort_needed &&
	    &__stop___ex_table > &__start___ex_table) {
		pr_notice("Sorting __ex_table...\n");
		sort_extable(__start___ex_table, __stop___ex_table);
	}
}
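
/*
 * Why sorting matters: search_extable() performs a binary search, so
 * entries must be ordered by the address of the faulting instruction.
 * The invariant it relies on, roughly (schematic, not compiled code;
 * ex_to_insn() is the decoding helper local to lib/extable.c):
 *
 *	for each consecutive pair of entries e1, e2:
 *		ex_to_insn(e1) <= ex_to_insn(e2)
 *
 * When the build-time sorttable tool has already ordered the table,
 * it clears main_extable_sort_needed so the runtime sort is skipped.
 */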

/* Given an address, look for it in the kernel exception table */
const
struct exception_table_entry *search_kernel_exception_table(unsigned long addr)
{
	return search_extable(__start___ex_table,
			      __stop___ex_table - __start___ex_table, addr);
}

/* Given an address, look for it in the exception tables. */
const struct exception_table_entry *search_exception_tables(unsigned long addr)
{
	const struct exception_table_entry *e;

	e = search_kernel_exception_table(addr);
	if (!e)
		e = search_module_extables(addr);
	if (!e)
		e = search_bpf_extables(addr);
	return e;
}
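
/*
 * Typical caller pattern (simplified, illustrative): an architecture's
 * fault handler asks whether the faulting instruction has a fixup and,
 * if so, branches there instead of oopsing. The register field and the
 * fixup_target() decode helper are schematic stand-ins, not any real
 * architecture's names:
 *
 *	const struct exception_table_entry *fixup;
 *
 *	fixup = search_exception_tables(instruction_pointer(regs));
 *	if (fixup) {
 *		regs->pc = fixup_target(fixup);	// jump to fixup code
 *		return;				// fault handled
 *	}
 *	// no fixup: this is a genuine kernel fault, oops
 */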

int notrace core_kernel_text(unsigned long addr)
{
	if (is_kernel_text(addr))
		return 1;

	if (system_state < SYSTEM_FREEING_INITMEM &&
	    is_kernel_inittext(addr))
		return 1;
	return 0;
}

int __kernel_text_address(unsigned long addr)
{
	if (kernel_text_address(addr))
		return 1;
	/*
	 * There might be init symbols in saved stacktraces.
	 * Give those symbols a chance to be printed in
	 * backtraces (such as lockdep traces).
	 *
	 * Since we are after the module-symbols check, there's
	 * no danger of address overlap:
	 */
	if (is_kernel_inittext(addr))
		return 1;
	return 0;
}

int kernel_text_address(unsigned long addr)
{
	bool no_rcu;
	int ret = 1;

	if (core_kernel_text(addr))
		return 1;

	/*
	 * If a stack dump happens while RCU is not watching, then
	 * RCU needs to be notified so that it starts watching again.
	 * This can happen either via tracing that triggers a stack
	 * trace, or via a WARN() while coming back from idle, or while
	 * a CPU is being brought online or offline.
	 *
	 * is_module_text_address() as well as the kprobe slots,
	 * is_bpf_text_address() and is_bpf_image_address() require
	 * RCU to be watching.
	 */
	no_rcu = !rcu_is_watching();

	/* Treat this like an NMI as it can happen anywhere */
	if (no_rcu)
		ct_nmi_enter();

	if (is_module_text_address(addr))
		goto out;
	if (is_ftrace_trampoline(addr))
		goto out;
	if (is_kprobe_optinsn_slot(addr) || is_kprobe_insn_slot(addr))
		goto out;
	if (is_bpf_text_address(addr))
		goto out;
	ret = 0;
out:
	if (no_rcu)
		ct_nmi_exit();

	return ret;
}
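
/*
 * Illustrative use (simplified, hypothetical): conservative stack
 * scanners treat a word found on the stack as a possible return
 * address only if it points into known text. print_ip_sym() is a real
 * kallsyms helper; the loop itself is only a sketch, not the actual
 * unwinder:
 *
 *	unsigned long *sp = start_of_stack;
 *	while (sp < end_of_stack) {
 *		unsigned long word = *sp++;
 *		if (kernel_text_address(word))
 *			print_ip_sym(KERN_DEBUG, word);
 *	}
 */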

/*
 * On some architectures (PPC64, IA64, PARISC) function pointers
 * are actually only tokens to some data that then holds the
 * real function address. As a result, to find if a function
 * pointer is part of the kernel text, we need to do some
 * special dereferencing first.
 */
#ifdef CONFIG_HAVE_FUNCTION_DESCRIPTORS
void *dereference_function_descriptor(void *ptr)
{
	func_desc_t *desc = ptr;
	void *p;

	if (!get_kernel_nofault(p, (void *)&desc->addr))
		ptr = p;
	return ptr;
}
EXPORT_SYMBOL_GPL(dereference_function_descriptor);

void *dereference_kernel_function_descriptor(void *ptr)
{
	if (ptr < (void *)__start_opd || ptr >= (void *)__end_opd)
		return ptr;

	return dereference_function_descriptor(ptr);
}
#endif
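
/*
 * Example of why the dereference matters (illustrative): on a
 * descriptor-based ABI, taking the address of a function yields a
 * pointer into the .opd data section, not into .text, so a naive text
 * check fails. some_function stands in for any kernel function:
 *
 *	void *ptr = &some_function;		// points at the descriptor
 *	core_kernel_text((unsigned long)ptr);	// false: ptr is in .opd
 *
 *	ptr = dereference_function_descriptor(ptr);
 *	core_kernel_text((unsigned long)ptr);	// true: the real entry point
 */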

int func_ptr_is_kernel_text(void *ptr)
{
	unsigned long addr;

	addr = (unsigned long) dereference_function_descriptor(ptr);
	if (core_kernel_text(addr))
		return 1;
	return is_module_text_address(addr);
}
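
/*
 * Illustrative caller (hypothetical): sanity-check a callback pointer
 * before invoking it, e.g. when registering externally supplied hooks.
 * cb and arg are schematic names, not a real API:
 *
 *	if (WARN_ON_ONCE(!func_ptr_is_kernel_text(cb)))
 *		return -EINVAL;
 *	cb(arg);
 */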