// SPDX-License-Identifier: GPL-2.0
/*
 * Split spinlock implementation out into its own file, so it can be
 * compiled in a FTRACE-compatible way.
 */
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/atomic.h>

#include <asm/paravirt.h>
#include <asm/qspinlock.h>

#include <xen/events.h>

#include "xen-ops.h"

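/*
 * Per-CPU state: the event-channel IRQ used to kick a waiting vCPU
 * (-1 until xen_init_lock_cpu() runs), the name handed to
 * bind_ipi_to_irqhandler(), and a nesting counter for xen_qlock_wait().
 */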
static DEFINE_PER_CPU(int, lock_kicker_irq) = -1;
static DEFINE_PER_CPU(char *, irq_name);
static DEFINE_PER_CPU(atomic_t, xen_qlock_wait_nest);
static bool xen_pvspin = true;

static void xen_qlock_kick(int cpu)
{
	int irq = per_cpu(lock_kicker_irq, cpu);

	/* Don't kick if the target's kicker interrupt is not initialized. */
	if (irq == -1)
		return;

	xen_send_IPI_one(cpu, XEN_SPIN_UNLOCK_VECTOR);
}

/*
 * Halt the current CPU & release it back to the host
 */
static void xen_qlock_wait(u8 *byte, u8 val)
{
	int irq = __this_cpu_read(lock_kicker_irq);
	atomic_t *nest_cnt = this_cpu_ptr(&xen_qlock_wait_nest);

	/* If kicker interrupts not initialized yet, just spin */
	if (irq == -1 || in_nmi())
		return;

	/* Detect reentry (the wait path may be re-entered, e.g. from interrupt context). */
	atomic_inc(nest_cnt);

	/* If the irq is already pending and this is not a nested call, clear it. */
	if (atomic_read(nest_cnt) == 1 && xen_test_irq_pending(irq)) {
		xen_clear_irq_pending(irq);
	} else if (READ_ONCE(*byte) == val) {
		/* Block until irq becomes pending (or a spurious wakeup) */
		xen_poll_irq(irq);
	}

	atomic_dec(nest_cnt);
}

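/*
 * The kicker IRQ is never supposed to be delivered: it is bound only so the
 * event channel exists, and xen_init_lock_cpu() immediately disables it.
 * Waiters block on the pending bit via xen_poll_irq(), so reaching this
 * handler indicates a bug.
 */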
static irqreturn_t dummy_handler(int irq, void *dev_id)
{
	BUG();
	return IRQ_HANDLED;
}

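/* Bind the per-CPU kicker event channel for @cpu and keep it disabled. */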
void xen_init_lock_cpu(int cpu)
{
	int irq;
	char *name;

	if (!xen_pvspin)
		return;

	WARN(per_cpu(lock_kicker_irq, cpu) >= 0, "spinlock on CPU%d exists on IRQ%d!\n",
	     cpu, per_cpu(lock_kicker_irq, cpu));

	name = kasprintf(GFP_KERNEL, "spinlock%d", cpu);
	per_cpu(irq_name, cpu) = name;
	irq = bind_ipi_to_irqhandler(XEN_SPIN_UNLOCK_VECTOR,
				     cpu,
				     dummy_handler,
				     IRQF_PERCPU|IRQF_NOBALANCING,
				     name,
				     NULL);

	if (irq >= 0) {
		disable_irq(irq); /* make sure it's never delivered */
		per_cpu(lock_kicker_irq, cpu) = irq;
	}

	printk("cpu %d spinlock event irq %d\n", cpu, irq);
}

void xen_uninit_lock_cpu(int cpu)
{
	int irq;

	if (!xen_pvspin)
		return;

	kfree(per_cpu(irq_name, cpu));
	per_cpu(irq_name, cpu) = NULL;
	/*
	 * When booting the kernel with 'mitigations=auto,nosmt', the secondary
	 * CPUs are not activated, and lock_kicker_irq is not initialized.
	 */
	irq = per_cpu(lock_kicker_irq, cpu);
	if (irq == -1)
		return;

	unbind_from_irqhandler(irq, NULL);
	per_cpu(lock_kicker_irq, cpu) = -1;
}

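/*
 * Generate the callee-save thunk for xen_vcpu_stolen(), used below when
 * wiring it into pv_ops.lock.vcpu_is_preempted.
 */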
PV_CALLEE_SAVE_REGS_THUNK(xen_vcpu_stolen);

/*
 * Our init of PV spinlocks is split in two init functions because we use
 * both paravirt patching and jump label patching, and all of it has to be
 * done before the SMP code is invoked.
 *
 * The paravirt patching needs to be done _before_ the alternative asm code
 * is started, otherwise we would not patch the core kernel code.
 */
void __init xen_init_spinlocks(void)
{
	/* Don't need to use pvqspinlock code if there is only 1 vCPU. */
	if (num_possible_cpus() == 1 || nopvspin)
		xen_pvspin = false;

	if (!xen_pvspin) {
		printk(KERN_DEBUG "xen: PV spinlocks disabled\n");
		static_branch_disable(&virt_spin_lock_key);
		return;
	}
	printk(KERN_DEBUG "xen: PV spinlocks enabled\n");

	__pv_init_lock_hash();
	pv_ops.lock.queued_spin_lock_slowpath = __pv_queued_spin_lock_slowpath;
	pv_ops.lock.queued_spin_unlock =
		PV_CALLEE_SAVE(__pv_queued_spin_unlock);
	pv_ops.lock.wait = xen_qlock_wait;
	pv_ops.lock.kick = xen_qlock_kick;
	pv_ops.lock.vcpu_is_preempted = PV_CALLEE_SAVE(xen_vcpu_stolen);
}

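/*
 * Legacy "xen_nopvspin" command-line option; kept for compatibility but
 * superseded by the generic "nopvspin" parameter.
 */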
static __init int xen_parse_nopvspin(char *arg)
{
	pr_notice("\"xen_nopvspin\" is deprecated, please use \"nopvspin\" instead\n");
	xen_pvspin = false;
	return 0;
}
early_param("xen_nopvspin", xen_parse_nopvspin);