// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2017 Arm Ltd.
#define pr_fmt(fmt) "sdei: " fmt

#include <linux/arm-smccc.h>
#include <linux/arm_sdei.h>
#include <linux/hardirq.h>
#include <linux/irqflags.h>
#include <linux/sched/task_stack.h>
#include <linux/scs.h>
#include <linux/uaccess.h>

#include <asm/alternative.h>
#include <asm/exception.h>
#include <asm/kprobes.h>
#include <asm/mmu.h>
#include <asm/ptrace.h>
#include <asm/sections.h>
#include <asm/stacktrace.h>
#include <asm/sysreg.h>
#include <asm/vmap_stack.h>

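/* Conduit (SDEI_EXIT_HVC or SDEI_EXIT_SMC) the asm handler uses to return to firmware. */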
unsigned long sdei_exit_mode;

/*
 * VMAP'd stacks check for stack overflow on exception entry using sp as a
 * scratch register, meaning SDEI has to switch to its own stack. We need two
 * stacks as a critical event may interrupt a normal event that has just taken
 * a synchronous exception, and is using sp as scratch register. For a critical
 * event interrupting a normal event, we can't reliably tell if we were on the
 * sdei stack.
 * For now, we allocate stacks when the driver is probed.
 */
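/*
 * Declared unconditionally so the IS_ENABLED()-guarded code below always
 * compiles; the per-CPU storage is only defined when the feature is built in.
 */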
DECLARE_PER_CPU(unsigned long *, sdei_stack_normal_ptr);
DECLARE_PER_CPU(unsigned long *, sdei_stack_critical_ptr);

#ifdef CONFIG_VMAP_STACK
DEFINE_PER_CPU(unsigned long *, sdei_stack_normal_ptr);
DEFINE_PER_CPU(unsigned long *, sdei_stack_critical_ptr);
#endif

DECLARE_PER_CPU(unsigned long *, sdei_shadow_call_stack_normal_ptr);
DECLARE_PER_CPU(unsigned long *, sdei_shadow_call_stack_critical_ptr);

#ifdef CONFIG_SHADOW_CALL_STACK
DEFINE_PER_CPU(unsigned long *, sdei_shadow_call_stack_normal_ptr);
DEFINE_PER_CPU(unsigned long *, sdei_shadow_call_stack_critical_ptr);
#endif

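/* The registered event currently being handled at each priority, if any. */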
DEFINE_PER_CPU(struct sdei_registered_event *, sdei_active_normal_event);
DEFINE_PER_CPU(struct sdei_registered_event *, sdei_active_critical_event);

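/* Free one CPU's VMAP'd SDEI stack, clearing the per-CPU pointer first. */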
static void _free_sdei_stack(unsigned long * __percpu *ptr, int cpu)
{
	unsigned long *p;

	p = per_cpu(*ptr, cpu);
	if (p) {
		per_cpu(*ptr, cpu) = NULL;
		vfree(p);
	}
}

static void free_sdei_stacks(void)
{
	int cpu;

	if (!IS_ENABLED(CONFIG_VMAP_STACK))
		return;

	for_each_possible_cpu(cpu) {
		_free_sdei_stack(&sdei_stack_normal_ptr, cpu);
		_free_sdei_stack(&sdei_stack_critical_ptr, cpu);
	}
}

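/* Allocate one CPU's SDEI stack from vmalloc space, on that CPU's node. */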
static int _init_sdei_stack(unsigned long * __percpu *ptr, int cpu)
{
	unsigned long *p;

	p = arch_alloc_vmap_stack(SDEI_STACK_SIZE, cpu_to_node(cpu));
	if (!p)
		return -ENOMEM;
	per_cpu(*ptr, cpu) = p;

	return 0;
}

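/*
 * Allocate a normal- and a critical-priority stack for each possible CPU.
 * On any failure, unwind all earlier allocations.
 */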
static int init_sdei_stacks(void)
{
	int cpu;
	int err = 0;

	if (!IS_ENABLED(CONFIG_VMAP_STACK))
		return 0;

	for_each_possible_cpu(cpu) {
		err = _init_sdei_stack(&sdei_stack_normal_ptr, cpu);
		if (err)
			break;
		err = _init_sdei_stack(&sdei_stack_critical_ptr, cpu);
		if (err)
			break;
	}

	if (err)
		free_sdei_stacks();

	return err;
}

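/* Free one CPU's SDEI shadow call stack, clearing the per-CPU pointer first. */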
static void _free_sdei_scs(unsigned long * __percpu *ptr, int cpu)
{
	void *s;

	s = per_cpu(*ptr, cpu);
	if (s) {
		per_cpu(*ptr, cpu) = NULL;
		scs_free(s);
	}
}

static void free_sdei_scs(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		_free_sdei_scs(&sdei_shadow_call_stack_normal_ptr, cpu);
		_free_sdei_scs(&sdei_shadow_call_stack_critical_ptr, cpu);
	}
}

static int _init_sdei_scs(unsigned long * __percpu *ptr, int cpu)
{
	void *s;

	s = scs_alloc(cpu_to_node(cpu));
	if (!s)
		return -ENOMEM;
	per_cpu(*ptr, cpu) = s;

	return 0;
}

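/*
 * As with the regular stacks, each priority level needs its own shadow call
 * stack so a critical event can safely interrupt a normal one.
 */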
static int init_sdei_scs(void)
{
	int cpu;
	int err = 0;

	if (!scs_is_enabled())
		return 0;

	for_each_possible_cpu(cpu) {
		err = _init_sdei_scs(&sdei_shadow_call_stack_normal_ptr, cpu);
		if (err)
			break;
		err = _init_sdei_scs(&sdei_shadow_call_stack_critical_ptr, cpu);
		if (err)
			break;
	}

	if (err)
		free_sdei_scs();

	return err;
}

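/*
 * Return the address firmware should branch to when an event fires, or 0 if
 * SDEI can't be supported in this configuration.
 */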
unsigned long sdei_arch_get_entry_point(int conduit)
{
	/*
	 * SDEI works between adjacent exception levels. If we booted at EL1 we
	 * assume a hypervisor is marshalling events. If we booted at EL2 and
	 * dropped to EL1 because we don't support VHE, then we can't support
	 * SDEI.
	 */
	if (is_hyp_nvhe()) {
		pr_err("Not supported on this hardware/boot configuration\n");
		goto out_err;
	}

	if (init_sdei_stacks())
		goto out_err;

	if (init_sdei_scs())
		goto out_err_free_stacks;

	sdei_exit_mode = (conduit == SMCCC_CONDUIT_HVC) ? SDEI_EXIT_HVC : SDEI_EXIT_SMC;

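	/*
	 * With KPTI, the regular entry point may be unmapped when the event
	 * is taken from user space, so hand firmware the trampoline alias of
	 * the handler, which stays mapped while user space runs.
	 */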
#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
	if (arm64_kernel_unmapped_at_el0()) {
		unsigned long offset;

		offset = (unsigned long)__sdei_asm_entry_trampoline -
			 (unsigned long)__entry_tramp_text_start;
		return TRAMP_VALIAS + offset;
	} else
#endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */
		return (unsigned long)__sdei_asm_handler;

out_err_free_stacks:
	free_sdei_stacks();
out_err:
	return 0;
}

/*
 * do_sdei_event() returns one of:
 *  SDEI_EV_HANDLED -  success, return to the interrupted context.
 *  SDEI_EV_FAILED -  failure, return this error code to firmware.
 *  virtual-address -  success, return to this address.
 */
unsigned long __kprobes do_sdei_event(struct pt_regs *regs,
				      struct sdei_registered_event *arg)
{
	u32 mode;
	int i, err = 0;
	int clobbered_registers = 4;
	u64 elr = read_sysreg(elr_el1);
	u32 kernel_mode = read_sysreg(CurrentEL) | 1;	/* +SPSel */
	unsigned long vbar = read_sysreg(vbar_el1);

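	/*
	 * Firmware used x0..x3 to pass the event parameters, and the KPTI
	 * entry trampoline clobbers x4 as well, so those values in pt_regs
	 * are stale and must be read back from firmware.
	 */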
	if (arm64_kernel_unmapped_at_el0())
		clobbered_registers++;

	/* Retrieve the missing register values */
	for (i = 0; i < clobbered_registers; i++) {
		/* from within the handler, this call always succeeds */
		sdei_api_event_context(i, &regs->regs[i]);
	}

	err = sdei_event_handler(regs, arg);
	if (err)
		return SDEI_EV_FAILED;

	if (elr != read_sysreg(elr_el1)) {
		/*
		 * We took a synchronous exception from the SDEI handler.
		 * This could deadlock, and if you interrupt KVM it will
		 * hyp-panic instead.
		 */
		pr_warn("unsafe: exception during handler\n");
	}

	mode = regs->pstate & (PSR_MODE32_BIT | PSR_MODE_MASK);

	/*
	 * If we interrupted the kernel with interrupts masked, we always go
	 * back to wherever we came from.
	 */
	if (mode == kernel_mode && !interrupts_enabled(regs))
		return SDEI_EV_HANDLED;

	/*
	 * Otherwise, we pretend this was an IRQ. This lets user space tasks
	 * receive signals before we return to them, and lets KVM invoke its
	 * world switch to do the same.
	 *
	 * See DDI0487B.a Table D1-7 'Vector offsets from vector table base
	 * address'.
	 */
	if (mode == kernel_mode)
		return vbar + 0x280;	/* current EL with SP_ELx: IRQ */
	else if (mode & PSR_MODE32_BIT)
		return vbar + 0x680;	/* lower EL using AArch32: IRQ */

	return vbar + 0x480;		/* lower EL using AArch64: IRQ */
}