// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2017 Arm Ltd.
#define pr_fmt(fmt) "sdei: " fmt

#include <acpi/ghes.h>
#include <linux/acpi.h>
#include <linux/arm_sdei.h>
#include <linux/arm-smccc.h>
#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/compiler.h>
#include <linux/cpuhotplug.h>
#include <linux/cpu.h>
#include <linux/cpu_pm.h>
#include <linux/errno.h>
#include <linux/hardirq.h>
#include <linux/kernel.h>
#include <linux/kprobes.h>
#include <linux/kvm_host.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/notifier.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/percpu.h>
#include <linux/platform_device.h>
#include <linux/pm.h>
#include <linux/ptrace.h>
#include <linux/preempt.h>
#include <linux/reboot.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/spinlock.h>

/*
 * The call to use to reach the firmware.
 */
static asmlinkage void (*sdei_firmware_call)(unsigned long function_id,
		unsigned long arg0, unsigned long arg1,
		unsigned long arg2, unsigned long arg3,
		unsigned long arg4, struct arm_smccc_res *res);

/* entry point from firmware to arch asm code */
static unsigned long sdei_entry_point;

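/*
 * Dynamic CPU hotplug state returned by cpuhp_setup_state(), needed to
 * remove the hotplug callbacks again on reboot or hibernate.
 */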
static int sdei_hp_state;

struct sdei_event {
	/* These three are protected by the sdei_list_lock */
	struct list_head list;
	bool reregister;
	bool reenable;

	u32 event_num;
	u8 type;
	u8 priority;

	/* This pointer is handed to firmware as the event argument. */
	union {
		/* Shared events */
		struct sdei_registered_event *registered;

		/* CPU private events */
		struct sdei_registered_event __percpu *private_registered;
	};
};

/* Take the mutex for any API call or modification. Take the mutex first. */
static DEFINE_MUTEX(sdei_events_lock);

/* and then hold this when modifying the list */
static DEFINE_SPINLOCK(sdei_list_lock);
static LIST_HEAD(sdei_list);

/* Private events are registered/enabled via IPI passing one of these */
struct sdei_crosscall_args {
	struct sdei_event *event;
	atomic_t errors;
	int first_error;
};

#define CROSSCALL_INIT(arg, event)		\
	do {					\
		arg.event = event;		\
		arg.first_error = 0;		\
		atomic_set(&arg.errors, 0);	\
	} while (0)

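/*
 * Helpers for registering/enabling private events. The callback runs either
 * locally (hotplug path) or on every CPU via on_each_cpu(), and reports its
 * result through sdei_cross_call_return(); only the first error seen is kept
 * in first_error.
 */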
static inline int sdei_do_local_call(smp_call_func_t fn,
				     struct sdei_event *event)
{
	struct sdei_crosscall_args arg;

	CROSSCALL_INIT(arg, event);
	fn(&arg);

	return arg.first_error;
}

static inline int sdei_do_cross_call(smp_call_func_t fn,
				     struct sdei_event *event)
{
	struct sdei_crosscall_args arg;

	CROSSCALL_INIT(arg, event);
	on_each_cpu(fn, &arg, true);

	return arg.first_error;
}

static inline void
sdei_cross_call_return(struct sdei_crosscall_args *arg, int err)
{
	if (err && (atomic_inc_return(&arg->errors) == 1))
		arg->first_error = err;
}

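/* Translate SDEI firmware error codes; SDEI_SUCCESS and unknown values map to 0. */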
static int sdei_to_linux_errno(unsigned long sdei_err)
{
	switch (sdei_err) {
	case SDEI_NOT_SUPPORTED:
		return -EOPNOTSUPP;
	case SDEI_INVALID_PARAMETERS:
		return -EINVAL;
	case SDEI_DENIED:
		return -EPERM;
	case SDEI_PENDING:
		return -EINPROGRESS;
	case SDEI_OUT_OF_RESOURCE:
		return -ENOMEM;
	}

	return 0;
}

static int invoke_sdei_fn(unsigned long function_id, unsigned long arg0,
			  unsigned long arg1, unsigned long arg2,
			  unsigned long arg3, unsigned long arg4,
			  u64 *result)
{
	int err;
	struct arm_smccc_res res;

	if (sdei_firmware_call) {
		sdei_firmware_call(function_id, arg0, arg1, arg2, arg3, arg4,
				   &res);
		err = sdei_to_linux_errno(res.a0);
	} else {
		/*
		 * !sdei_firmware_call means we failed to probe or called
		 * sdei_mark_interface_broken(). -EIO is not an error returned
		 * by sdei_to_linux_errno() and is used to suppress messages
		 * from this driver.
		 */
		err = -EIO;
		res.a0 = SDEI_NOT_SUPPORTED;
	}

	if (result)
		*result = res.a0;

	return err;
}
NOKPROBE_SYMBOL(invoke_sdei_fn);

static struct sdei_event *sdei_event_find(u32 event_num)
{
	struct sdei_event *e, *found = NULL;

	lockdep_assert_held(&sdei_events_lock);

	spin_lock(&sdei_list_lock);
	list_for_each_entry(e, &sdei_list, list) {
		if (e->event_num == event_num) {
			found = e;
			break;
		}
	}
	spin_unlock(&sdei_list_lock);

	return found;
}

int sdei_api_event_context(u32 query, u64 *result)
{
	return invoke_sdei_fn(SDEI_1_0_FN_SDEI_EVENT_CONTEXT, query, 0, 0, 0, 0,
			      result);
}
NOKPROBE_SYMBOL(sdei_api_event_context);

static int sdei_api_event_get_info(u32 event, u32 info, u64 *result)
{
	return invoke_sdei_fn(SDEI_1_0_FN_SDEI_EVENT_GET_INFO, event, info, 0,
			      0, 0, result);
}

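/*
 * Create the kernel's view of an event. Firmware is asked for the event's
 * type and priority; shared events get a single struct sdei_registered_event
 * as the argument handed to firmware, private events get one per CPU.
 */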
static struct sdei_event *sdei_event_create(u32 event_num,
					    sdei_event_callback *cb,
					    void *cb_arg)
{
	int err;
	u64 result;
	struct sdei_event *event;
	struct sdei_registered_event *reg;

	lockdep_assert_held(&sdei_events_lock);

	event = kzalloc(sizeof(*event), GFP_KERNEL);
	if (!event) {
		err = -ENOMEM;
		goto fail;
	}

	INIT_LIST_HEAD(&event->list);
	event->event_num = event_num;

	err = sdei_api_event_get_info(event_num, SDEI_EVENT_INFO_EV_PRIORITY,
				      &result);
	if (err)
		goto fail;
	event->priority = result;

	err = sdei_api_event_get_info(event_num, SDEI_EVENT_INFO_EV_TYPE,
				      &result);
	if (err)
		goto fail;
	event->type = result;

	if (event->type == SDEI_EVENT_TYPE_SHARED) {
		reg = kzalloc(sizeof(*reg), GFP_KERNEL);
		if (!reg) {
			err = -ENOMEM;
			goto fail;
		}

		reg->event_num = event->event_num;
		reg->priority = event->priority;

		reg->callback = cb;
		reg->callback_arg = cb_arg;
		event->registered = reg;
	} else {
		int cpu;
		struct sdei_registered_event __percpu *regs;

		regs = alloc_percpu(struct sdei_registered_event);
		if (!regs) {
			err = -ENOMEM;
			goto fail;
		}

		for_each_possible_cpu(cpu) {
			reg = per_cpu_ptr(regs, cpu);

			reg->event_num = event->event_num;
			reg->priority = event->priority;
			reg->callback = cb;
			reg->callback_arg = cb_arg;
		}

		event->private_registered = regs;
	}

	spin_lock(&sdei_list_lock);
	list_add(&event->list, &sdei_list);
	spin_unlock(&sdei_list_lock);

	return event;

fail:
	kfree(event);
	return ERR_PTR(err);
}

static void sdei_event_destroy_llocked(struct sdei_event *event)
{
	lockdep_assert_held(&sdei_events_lock);
	lockdep_assert_held(&sdei_list_lock);

	list_del(&event->list);

	if (event->type == SDEI_EVENT_TYPE_SHARED)
		kfree(event->registered);
	else
		free_percpu(event->private_registered);

	kfree(event);
}

static void sdei_event_destroy(struct sdei_event *event)
{
	spin_lock(&sdei_list_lock);
	sdei_event_destroy_llocked(event);
	spin_unlock(&sdei_list_lock);
}

static int sdei_api_get_version(u64 *version)
{
	return invoke_sdei_fn(SDEI_1_0_FN_SDEI_VERSION, 0, 0, 0, 0, 0, version);
}

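/*
 * PE_MASK/PE_UNMASK control whether firmware delivers any events to the
 * calling CPU. -EIO means the interface has been marked broken and is
 * deliberately ignored here.
 */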
int sdei_mask_local_cpu(void)
{
	int err;

	err = invoke_sdei_fn(SDEI_1_0_FN_SDEI_PE_MASK, 0, 0, 0, 0, 0, NULL);
	if (err && err != -EIO) {
		pr_warn_once("failed to mask CPU[%u]: %d\n",
			     smp_processor_id(), err);
		return err;
	}

	return 0;
}

static void _ipi_mask_cpu(void *ignored)
{
	WARN_ON_ONCE(preemptible());
	sdei_mask_local_cpu();
}

int sdei_unmask_local_cpu(void)
{
	int err;

	err = invoke_sdei_fn(SDEI_1_0_FN_SDEI_PE_UNMASK, 0, 0, 0, 0, 0, NULL);
	if (err && err != -EIO) {
		pr_warn_once("failed to unmask CPU[%u]: %d\n",
			     smp_processor_id(), err);
		return err;
	}

	return 0;
}

static void _ipi_unmask_cpu(void *ignored)
{
	WARN_ON_ONCE(preemptible());
	sdei_unmask_local_cpu();
}

static void _ipi_private_reset(void *ignored)
{
	int err;

	WARN_ON_ONCE(preemptible());

	err = invoke_sdei_fn(SDEI_1_0_FN_SDEI_PRIVATE_RESET, 0, 0, 0, 0, 0,
			     NULL);
	if (err && err != -EIO)
		pr_warn_once("failed to reset CPU[%u]: %d\n",
			     smp_processor_id(), err);
}

static int sdei_api_shared_reset(void)
{
	return invoke_sdei_fn(SDEI_1_0_FN_SDEI_SHARED_RESET, 0, 0, 0, 0, 0,
			      NULL);
}

static void sdei_mark_interface_broken(void)
{
	pr_err("disabling SDEI firmware interface\n");
	on_each_cpu(&_ipi_mask_cpu, NULL, true);
	sdei_firmware_call = NULL;
}

static int sdei_platform_reset(void)
{
	int err;

	on_each_cpu(&_ipi_private_reset, NULL, true);
	err = sdei_api_shared_reset();
	if (err) {
		pr_err("Failed to reset platform: %d\n", err);
		sdei_mark_interface_broken();
	}

	return err;
}

static int sdei_api_event_enable(u32 event_num)
{
	return invoke_sdei_fn(SDEI_1_0_FN_SDEI_EVENT_ENABLE, event_num, 0, 0, 0,
			      0, NULL);
}

/* Called directly by the hotplug callbacks */
static void _local_event_enable(void *data)
{
	int err;
	struct sdei_crosscall_args *arg = data;

	err = sdei_api_event_enable(arg->event->event_num);

	sdei_cross_call_return(arg, err);
}

int sdei_event_enable(u32 event_num)
{
	int err = -EINVAL;
	struct sdei_event *event;

	mutex_lock(&sdei_events_lock);
	event = sdei_event_find(event_num);
	if (!event) {
		mutex_unlock(&sdei_events_lock);
		return -ENOENT;
	}

	cpus_read_lock();
	if (event->type == SDEI_EVENT_TYPE_SHARED)
		err = sdei_api_event_enable(event->event_num);
	else
		err = sdei_do_cross_call(_local_event_enable, event);

	if (!err) {
		spin_lock(&sdei_list_lock);
		event->reenable = true;
		spin_unlock(&sdei_list_lock);
	}
	cpus_read_unlock();
	mutex_unlock(&sdei_events_lock);

	return err;
}

static int sdei_api_event_disable(u32 event_num)
{
	return invoke_sdei_fn(SDEI_1_0_FN_SDEI_EVENT_DISABLE, event_num, 0, 0,
			      0, 0, NULL);
}

static void _ipi_event_disable(void *data)
{
	int err;
	struct sdei_crosscall_args *arg = data;

	err = sdei_api_event_disable(arg->event->event_num);

	sdei_cross_call_return(arg, err);
}

int sdei_event_disable(u32 event_num)
{
	int err = -EINVAL;
	struct sdei_event *event;

	mutex_lock(&sdei_events_lock);
	event = sdei_event_find(event_num);
	if (!event) {
		mutex_unlock(&sdei_events_lock);
		return -ENOENT;
	}

	spin_lock(&sdei_list_lock);
	event->reenable = false;
	spin_unlock(&sdei_list_lock);

	if (event->type == SDEI_EVENT_TYPE_SHARED)
		err = sdei_api_event_disable(event->event_num);
	else
		err = sdei_do_cross_call(_ipi_event_disable, event);
	mutex_unlock(&sdei_events_lock);

	return err;
}

static int sdei_api_event_unregister(u32 event_num)
{
	return invoke_sdei_fn(SDEI_1_0_FN_SDEI_EVENT_UNREGISTER, event_num, 0,
			      0, 0, 0, NULL);
}

/* Called directly by the hotplug callbacks */
static void _local_event_unregister(void *data)
{
	int err;
	struct sdei_crosscall_args *arg = data;

	err = sdei_api_event_unregister(arg->event->event_num);

	sdei_cross_call_return(arg, err);
}

int sdei_event_unregister(u32 event_num)
{
	int err;
	struct sdei_event *event;

	WARN_ON(in_nmi());

	mutex_lock(&sdei_events_lock);
	event = sdei_event_find(event_num);
	if (!event) {
		pr_warn("Event %u not registered\n", event_num);
		err = -ENOENT;
		goto unlock;
	}

	spin_lock(&sdei_list_lock);
	event->reregister = false;
	event->reenable = false;
	spin_unlock(&sdei_list_lock);

	if (event->type == SDEI_EVENT_TYPE_SHARED)
		err = sdei_api_event_unregister(event->event_num);
	else
		err = sdei_do_cross_call(_local_event_unregister, event);

	if (err)
		goto unlock;

	sdei_event_destroy(event);
unlock:
	mutex_unlock(&sdei_events_lock);

	return err;
}

/*
 * unregister events, but don't destroy them as they are re-registered by
 * sdei_reregister_shared().
 */
static int sdei_unregister_shared(void)
{
	int err = 0;
	struct sdei_event *event;

	mutex_lock(&sdei_events_lock);
	spin_lock(&sdei_list_lock);
	list_for_each_entry(event, &sdei_list, list) {
		if (event->type != SDEI_EVENT_TYPE_SHARED)
			continue;

		err = sdei_api_event_unregister(event->event_num);
		if (err)
			break;
	}
	spin_unlock(&sdei_list_lock);
	mutex_unlock(&sdei_events_lock);

	return err;
}

static int sdei_api_event_register(u32 event_num, unsigned long entry_point,
				   void *arg, u64 flags, u64 affinity)
{
	return invoke_sdei_fn(SDEI_1_0_FN_SDEI_EVENT_REGISTER, event_num,
			      (unsigned long)entry_point, (unsigned long)arg,
			      flags, affinity, NULL);
}

/* Called directly by the hotplug callbacks */
static void _local_event_register(void *data)
{
	int err;
	struct sdei_registered_event *reg;
	struct sdei_crosscall_args *arg = data;

	reg = per_cpu_ptr(arg->event->private_registered, smp_processor_id());
	err = sdei_api_event_register(arg->event->event_num, sdei_entry_point,
				      reg, 0, 0);

	sdei_cross_call_return(arg, err);
}

int sdei_event_register(u32 event_num, sdei_event_callback *cb, void *arg)
{
	int err;
	struct sdei_event *event;

	WARN_ON(in_nmi());

	mutex_lock(&sdei_events_lock);
	if (sdei_event_find(event_num)) {
		pr_warn("Event %u already registered\n", event_num);
		err = -EBUSY;
		goto unlock;
	}

	event = sdei_event_create(event_num, cb, arg);
	if (IS_ERR(event)) {
		err = PTR_ERR(event);
		pr_warn("Failed to create event %u: %d\n", event_num, err);
		goto unlock;
	}

	cpus_read_lock();
	if (event->type == SDEI_EVENT_TYPE_SHARED) {
		err = sdei_api_event_register(event->event_num,
					      sdei_entry_point,
					      event->registered,
					      SDEI_EVENT_REGISTER_RM_ANY, 0);
	} else {
		err = sdei_do_cross_call(_local_event_register, event);
		if (err)
			sdei_do_cross_call(_local_event_unregister, event);
	}

	if (err) {
		sdei_event_destroy(event);
		pr_warn("Failed to register event %u: %d\n", event_num, err);
		goto cpu_unlock;
	}

	spin_lock(&sdei_list_lock);
	event->reregister = true;
	spin_unlock(&sdei_list_lock);
cpu_unlock:
	cpus_read_unlock();
unlock:
	mutex_unlock(&sdei_events_lock);
	return err;
}

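/*
 * Re-register (and re-enable) shared events on thaw/restore after hibernate;
 * the reregister/reenable flags record what was set up before the freeze.
 */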
static int sdei_reregister_shared(void)
{
	int err = 0;
	struct sdei_event *event;

	mutex_lock(&sdei_events_lock);
	spin_lock(&sdei_list_lock);
	list_for_each_entry(event, &sdei_list, list) {
		if (event->type != SDEI_EVENT_TYPE_SHARED)
			continue;

		if (event->reregister) {
			err = sdei_api_event_register(event->event_num,
					sdei_entry_point, event->registered,
					SDEI_EVENT_REGISTER_RM_ANY, 0);
			if (err) {
				pr_err("Failed to re-register event %u\n",
				       event->event_num);
				sdei_event_destroy_llocked(event);
				break;
			}
		}

		if (event->reenable) {
			err = sdei_api_event_enable(event->event_num);
			if (err) {
				pr_err("Failed to re-enable event %u\n",
				       event->event_num);
				break;
			}
		}
	}
	spin_unlock(&sdei_list_lock);
	mutex_unlock(&sdei_events_lock);

	return err;
}

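/*
 * The CPUHP_AP_ONLINE_DYN callbacks run on the CPU that is coming up or going
 * down, so private events can be (un)registered with a plain local call.
 */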
static int sdei_cpuhp_down(unsigned int cpu)
{
	struct sdei_event *event;
	int err;

	/* un-register private events */
	spin_lock(&sdei_list_lock);
	list_for_each_entry(event, &sdei_list, list) {
		if (event->type == SDEI_EVENT_TYPE_SHARED)
			continue;

		err = sdei_do_local_call(_local_event_unregister, event);
		if (err) {
			pr_err("Failed to unregister event %u: %d\n",
			       event->event_num, err);
		}
	}
	spin_unlock(&sdei_list_lock);

	return sdei_mask_local_cpu();
}

static int sdei_cpuhp_up(unsigned int cpu)
{
	struct sdei_event *event;
	int err;

	/* re-register/enable private events */
	spin_lock(&sdei_list_lock);
	list_for_each_entry(event, &sdei_list, list) {
		if (event->type == SDEI_EVENT_TYPE_SHARED)
			continue;

		if (event->reregister) {
			err = sdei_do_local_call(_local_event_register, event);
			if (err) {
				pr_err("Failed to re-register event %u: %d\n",
				       event->event_num, err);
			}
		}

		if (event->reenable) {
			err = sdei_do_local_call(_local_event_enable, event);
			if (err) {
				pr_err("Failed to re-enable event %u: %d\n",
				       event->event_num, err);
			}
		}
	}
	spin_unlock(&sdei_list_lock);

	return sdei_unmask_local_cpu();
}

/* When entering idle, mask/unmask events for this cpu */
static int sdei_pm_notifier(struct notifier_block *nb, unsigned long action,
			    void *data)
{
	int rv;

	WARN_ON_ONCE(preemptible());

	switch (action) {
	case CPU_PM_ENTER:
		rv = sdei_mask_local_cpu();
		break;
	case CPU_PM_EXIT:
	case CPU_PM_ENTER_FAILED:
		rv = sdei_unmask_local_cpu();
		break;
	default:
		return NOTIFY_DONE;
	}

	if (rv)
		return notifier_from_errno(rv);

	return NOTIFY_OK;
}

static struct notifier_block sdei_pm_nb = {
	.notifier_call = sdei_pm_notifier,
};

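/*
 * Suspend-to-RAM only masks/unmasks events on each CPU; unlike hibernate
 * (see below), event registrations are not torn down.
 */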
static int sdei_device_suspend(struct device *dev)
{
	on_each_cpu(_ipi_mask_cpu, NULL, true);

	return 0;
}

static int sdei_device_resume(struct device *dev)
{
	on_each_cpu(_ipi_unmask_cpu, NULL, true);

	return 0;
}

/*
 * We need all events to be reregistered when we resume from hibernate.
 *
 * The sequence is freeze->thaw. Reboot. freeze->restore. We unregister
 * events during freeze, then re-register and re-enable them during thaw
 * and restore.
 */
static int sdei_device_freeze(struct device *dev)
{
	int err;

	/* unregister private events via the cpuhp teardown callback */
	cpuhp_remove_state(sdei_hp_state);

	err = sdei_unregister_shared();
	if (err)
		return err;

	return 0;
}

static int sdei_device_thaw(struct device *dev)
{
	int err;

	/* re-register shared events */
	err = sdei_reregister_shared();
	if (err) {
		pr_warn("Failed to re-register shared events...\n");
		sdei_mark_interface_broken();
		return err;
	}

	err = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "SDEI",
				&sdei_cpuhp_up, &sdei_cpuhp_down);
	if (err < 0) {
		pr_warn("Failed to re-register CPU hotplug notifier...\n");
		return err;
	}

	sdei_hp_state = err;
	return 0;
}

static int sdei_device_restore(struct device *dev)
{
	int err;

	err = sdei_platform_reset();
	if (err)
		return err;

	return sdei_device_thaw(dev);
}

static const struct dev_pm_ops sdei_pm_ops = {
	.suspend = sdei_device_suspend,
	.resume = sdei_device_resume,
	.freeze = sdei_device_freeze,
	.thaw = sdei_device_thaw,
	.restore = sdei_device_restore,
};

/*
 * Mask all CPUs and unregister all events on panic, reboot or kexec.
 */
static int sdei_reboot_notifier(struct notifier_block *nb, unsigned long action,
				void *data)
{
	/*
	 * We are going to reset the interface, after this there is no point
	 * doing work when we take CPUs offline.
	 */
	cpuhp_remove_state(sdei_hp_state);

	sdei_platform_reset();

	return NOTIFY_OK;
}

static struct notifier_block sdei_reboot_nb = {
	.notifier_call = sdei_reboot_notifier,
};

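/*
 * Firmware is reached via one of the two SMCCC conduits: SMC (EL3 firmware)
 * or HVC (hypervisor), chosen by sdei_get_conduit() below.
 */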
static void sdei_smccc_smc(unsigned long function_id,
			   unsigned long arg0, unsigned long arg1,
			   unsigned long arg2, unsigned long arg3,
			   unsigned long arg4, struct arm_smccc_res *res)
{
	arm_smccc_smc(function_id, arg0, arg1, arg2, arg3, arg4, 0, 0, res);
}
NOKPROBE_SYMBOL(sdei_smccc_smc);

static void sdei_smccc_hvc(unsigned long function_id,
			   unsigned long arg0, unsigned long arg1,
			   unsigned long arg2, unsigned long arg3,
			   unsigned long arg4, struct arm_smccc_res *res)
{
	arm_smccc_hvc(function_id, arg0, arg1, arg2, arg3, arg4, 0, 0, res);
}
NOKPROBE_SYMBOL(sdei_smccc_hvc);

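/*
 * Hook up an APEI GHES error source that is notified via SDEI: the ACPI
 * tables name the event number, and the callback is picked based on whether
 * firmware reports the event as critical or normal priority.
 */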
int sdei_register_ghes(struct ghes *ghes, sdei_event_callback *normal_cb,
		       sdei_event_callback *critical_cb)
{
	int err;
	u64 result;
	u32 event_num;
	sdei_event_callback *cb;

	if (!IS_ENABLED(CONFIG_ACPI_APEI_GHES))
		return -EOPNOTSUPP;

	event_num = ghes->generic->notify.vector;
	if (event_num == 0) {
		/*
		 * Event 0 is reserved by the specification for
		 * SDEI_EVENT_SIGNAL.
		 */
		return -EINVAL;
	}

	err = sdei_api_event_get_info(event_num, SDEI_EVENT_INFO_EV_PRIORITY,
				      &result);
	if (err)
		return err;

	if (result == SDEI_EVENT_PRIORITY_CRITICAL)
		cb = critical_cb;
	else
		cb = normal_cb;

	err = sdei_event_register(event_num, cb, ghes);
	if (!err)
		err = sdei_event_enable(event_num);

	return err;
}

int sdei_unregister_ghes(struct ghes *ghes)
{
	int i;
	int err;
	u32 event_num = ghes->generic->notify.vector;

	might_sleep();

	if (!IS_ENABLED(CONFIG_ACPI_APEI_GHES))
		return -EOPNOTSUPP;

	/*
	 * The event may be running on another CPU. Disable it
	 * to stop new events, then try to unregister a few times.
	 */
	err = sdei_event_disable(event_num);
	if (err)
		return err;

	for (i = 0; i < 3; i++) {
		err = sdei_event_unregister(event_num);
		if (err != -EINPROGRESS)
			break;

		schedule();
	}

	return err;
}

static int sdei_get_conduit(struct platform_device *pdev)
{
	const char *method;
	struct device_node *np = pdev->dev.of_node;

	sdei_firmware_call = NULL;
	if (np) {
		if (of_property_read_string(np, "method", &method)) {
			pr_warn("missing \"method\" property\n");
			return SMCCC_CONDUIT_NONE;
		}

		if (!strcmp("hvc", method)) {
			sdei_firmware_call = &sdei_smccc_hvc;
			return SMCCC_CONDUIT_HVC;
		} else if (!strcmp("smc", method)) {
			sdei_firmware_call = &sdei_smccc_smc;
			return SMCCC_CONDUIT_SMC;
		}

		pr_warn("invalid \"method\" property: %s\n", method);
	} else if (!acpi_disabled) {
		if (acpi_psci_use_hvc()) {
			sdei_firmware_call = &sdei_smccc_hvc;
			return SMCCC_CONDUIT_HVC;
		} else {
			sdei_firmware_call = &sdei_smccc_smc;
			return SMCCC_CONDUIT_SMC;
		}
	}

	return SMCCC_CONDUIT_NONE;
}

static int sdei_probe(struct platform_device *pdev)
{
	int err;
	u64 ver = 0;
	int conduit;

	conduit = sdei_get_conduit(pdev);
	if (!sdei_firmware_call)
		return 0;

	err = sdei_api_get_version(&ver);
	if (err) {
		pr_err("Failed to get SDEI version: %d\n", err);
		sdei_mark_interface_broken();
		return err;
	}

	pr_info("SDEIv%d.%d (0x%x) detected in firmware.\n",
		(int)SDEI_VERSION_MAJOR(ver), (int)SDEI_VERSION_MINOR(ver),
		(int)SDEI_VERSION_VENDOR(ver));

	if (SDEI_VERSION_MAJOR(ver) != 1) {
		pr_warn("Conflicting SDEI version detected.\n");
		sdei_mark_interface_broken();
		return -EINVAL;
	}

	err = sdei_platform_reset();
	if (err)
		return err;

	sdei_entry_point = sdei_arch_get_entry_point(conduit);
	if (!sdei_entry_point) {
		/* Not supported due to hardware or boot configuration */
		sdei_mark_interface_broken();
		return 0;
	}

	err = cpu_pm_register_notifier(&sdei_pm_nb);
	if (err) {
		pr_warn("Failed to register CPU PM notifier...\n");
		goto error;
	}

	err = register_reboot_notifier(&sdei_reboot_nb);
	if (err) {
		pr_warn("Failed to register reboot notifier...\n");
		goto remove_cpupm;
	}

	err = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "SDEI",
				&sdei_cpuhp_up, &sdei_cpuhp_down);
	if (err < 0) {
		pr_warn("Failed to register CPU hotplug notifier...\n");
		goto remove_reboot;
	}

	sdei_hp_state = err;

	return 0;

remove_reboot:
	unregister_reboot_notifier(&sdei_reboot_nb);

remove_cpupm:
	cpu_pm_unregister_notifier(&sdei_pm_nb);

error:
	sdei_mark_interface_broken();
	return err;
}

static const struct of_device_id sdei_of_match[] = {
	{ .compatible = "arm,sdei-1.0" },
	{}
};

static struct platform_driver sdei_driver = {
	.driver = {
		.name = "sdei",
		.pm = &sdei_pm_ops,
		.of_match_table = sdei_of_match,
	},
	.probe = sdei_probe,
};

static bool __init sdei_present_acpi(void)
{
	acpi_status status;
	struct acpi_table_header *sdei_table_header;

	if (acpi_disabled)
		return false;

	status = acpi_get_table(ACPI_SIG_SDEI, 0, &sdei_table_header);
	if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
		const char *msg = acpi_format_exception(status);

		pr_info("Failed to get ACPI:SDEI table, %s\n", msg);
	}
	if (ACPI_FAILURE(status))
		return false;

	acpi_put_table(sdei_table_header);

	return true;
}

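/*
 * On DT systems the platform device is created from the "arm,sdei-1.0" node;
 * with ACPI it has to be created here, once the SDEI table has been found.
 */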
void __init sdei_init(void)
{
	struct platform_device *pdev;
	int ret;

	ret = platform_driver_register(&sdei_driver);
	if (ret || !sdei_present_acpi())
		return;

	pdev = platform_device_register_simple(sdei_driver.driver.name,
					       0, NULL, 0);
	if (IS_ERR(pdev)) {
		ret = PTR_ERR(pdev);
		platform_driver_unregister(&sdei_driver);
		pr_info("Failed to register ACPI:SDEI platform device %d\n",
			ret);
	}
}

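/*
 * Called from the arch asm entry code while the event is being handled, in
 * NMI-like context, with the struct sdei_registered_event that was handed to
 * firmware at registration time.
 */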
int sdei_event_handler(struct pt_regs *regs,
		       struct sdei_registered_event *arg)
{
	int err;
	u32 event_num = arg->event_num;

	err = arg->callback(event_num, regs, arg->callback_arg);
	if (err)
		pr_err_ratelimited("event %u on CPU %u failed with error: %d\n",
				   event_num, smp_processor_id(), err);

	return err;
}
NOKPROBE_SYMBOL(sdei_event_handler);

void sdei_handler_abort(void)
{
	/*
	 * If the crash happened in an SDEI event handler then we need to
	 * finish the handler with the firmware so that we can have working
	 * interrupts in the crash kernel.
	 */
	if (__this_cpu_read(sdei_active_critical_event)) {
		pr_warn("still in SDEI critical event context, attempting to finish handler.\n");
		__sdei_handler_abort();
		__this_cpu_write(sdei_active_critical_event, NULL);
	}
	if (__this_cpu_read(sdei_active_normal_event)) {
		pr_warn("still in SDEI normal event context, attempting to finish handler.\n");
		__sdei_handler_abort();
		__this_cpu_write(sdei_active_normal_event, NULL);
	}
}

/* source: linux/drivers/firmware/arm_sdei.c */