// SPDX-License-Identifier: GPL-2.0
/*
 * arch/arm64/kvm/fpsimd.c: Guest/host FPSIMD context coordination helpers
 *
 * Copyright 2018 Arm Limited
 * Author: Dave Martin <Dave.Martin@arm.com>
 */
#include <linux/irqflags.h>
#include <linux/sched.h>
#include <linux/kvm_host.h>
#include <asm/fpsimd.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>
#include <asm/sysreg.h>

void kvm_vcpu_unshare_task_fp(struct kvm_vcpu *vcpu)
{
	struct task_struct *p = vcpu->arch.parent_task;
	struct user_fpsimd_state *fpsimd;

	if (!is_protected_kvm_enabled() || !p)
		return;

	fpsimd = &p->thread.uw.fpsimd_state;
	kvm_unshare_hyp(fpsimd, fpsimd + 1);
	put_task_struct(p);
}

/*
 * Called on entry to KVM_RUN unless this vcpu previously ran at least
 * once and the most recent prior KVM_RUN for this vcpu was called from
 * the same task as current (highly likely).
 *
 * This is guaranteed to execute before kvm_arch_vcpu_load_fp(vcpu),
 * such that on entering hyp the relevant parts of current are already
 * mapped.
 */
int kvm_arch_vcpu_run_map_fp(struct kvm_vcpu *vcpu)
{
	int ret;

	struct user_fpsimd_state *fpsimd = &current->thread.uw.fpsimd_state;

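	/* Drop any stale share left over from a previous parent task. */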
	kvm_vcpu_unshare_task_fp(vcpu);

	/* Make sure the host task fpsimd state is visible to hyp: */
	ret = kvm_share_hyp(fpsimd, fpsimd + 1);
	if (ret)
		return ret;

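	/*
	 * Stash the hyp VA of current's FPSIMD state so that hyp can
	 * locate and save the host registers when the guest first
	 * touches the FP unit.
	 */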
	vcpu->arch.host_fpsimd_state = kern_hyp_va(fpsimd);

	/*
	 * We need to keep current's task_struct pinned until its data has been
	 * unshared with the hypervisor to make sure it is not re-used by the
	 * kernel and donated to someone else while already shared -- see
	 * kvm_vcpu_unshare_task_fp() for the matching put_task_struct().
	 */
	if (is_protected_kvm_enabled()) {
		get_task_struct(current);
		vcpu->arch.parent_task = current;
	}

	return 0;
}

/*
 * Prepare vcpu for saving the host's FPSIMD state and loading the guest's.
 * The actual loading is done by the FPSIMD access trap taken to hyp.
 *
 * Here, we just set the correct metadata to indicate that the FPSIMD
 * state in the cpu regs (if any) belongs to current on the host.
 */
void kvm_arch_vcpu_load_fp(struct kvm_vcpu *vcpu)
{
	BUG_ON(!current->mm);

	if (!system_supports_fpsimd())
		return;

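	/*
	 * Convert any live host SVE state to the shared FPSIMD-only
	 * view before handing the regs over; see fpsimd_kvm_prepare().
	 */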
	fpsimd_kvm_prepare();

	/*
	 * We will check TIF_FOREIGN_FPSTATE just before entering the
	 * guest in kvm_arch_vcpu_ctxflush_fp() and override this to
	 * FP_STATE_FREE if the flag is set.
	 */
	vcpu->arch.fp_state = FP_STATE_HOST_OWNED;

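	/*
	 * Record the EL0 trap configuration seen on entry so that
	 * kvm_arch_vcpu_put_fp() can restore it on the way back out.
	 */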
	vcpu_clear_flag(vcpu, HOST_SVE_ENABLED);
	if (read_sysreg(cpacr_el1) & CPACR_EL1_ZEN_EL0EN)
		vcpu_set_flag(vcpu, HOST_SVE_ENABLED);

	if (system_supports_sme()) {
		vcpu_clear_flag(vcpu, HOST_SME_ENABLED);
		if (read_sysreg(cpacr_el1) & CPACR_EL1_SMEN_EL0EN)
			vcpu_set_flag(vcpu, HOST_SME_ENABLED);

		/*
		 * If PSTATE.SM is enabled then save any pending FP
		 * state and disable PSTATE.SM. If we leave PSTATE.SM
		 * enabled and the guest does not enable SME via
		 * CPACR_EL1.SMEN then operations that should be valid
		 * may generate SME traps from EL1 to EL1 which we
		 * can't intercept and which would confuse the guest.
		 *
		 * Do the same for PSTATE.ZA in the case where there
		 * is state in the registers which has not already
		 * been saved; this is very unlikely to happen.
		 */
		if (read_sysreg_s(SYS_SVCR) & (SVCR_SM_MASK | SVCR_ZA_MASK)) {
			vcpu->arch.fp_state = FP_STATE_FREE;
			fpsimd_save_and_flush_cpu_state();
		}
	}
}

/*
 * Called just before entering the guest once we are no longer preemptible
 * and interrupts are disabled. If we have managed to run anything using
 * FP while we were preemptible (such as off the back of an interrupt),
 * then neither the host nor the guest own the FP hardware (and it was the
 * responsibility of the code that used FP to save the existing state).
 */
void kvm_arch_vcpu_ctxflush_fp(struct kvm_vcpu *vcpu)
{
	if (test_thread_flag(TIF_FOREIGN_FPSTATE))
		vcpu->arch.fp_state = FP_STATE_FREE;
}

/*
 * Called just after exiting the guest. If the guest FPSIMD state
 * was loaded, update the host's context tracking data to mark the
 * CPU FPSIMD regs as dirty and belonging to vcpu, so that they will
 * be written back if the kernel clobbers them due to kernel-mode
 * NEON before re-entry into the guest.
 */
void kvm_arch_vcpu_ctxsync_fp(struct kvm_vcpu *vcpu)
{
	struct cpu_fp_state fp_state;

	WARN_ON_ONCE(!irqs_disabled());

	if (vcpu->arch.fp_state == FP_STATE_GUEST_OWNED) {

		/*
		 * Currently we do not support SME guests, so SVCR is
		 * always 0 and we just need a variable to point to.
		 */
		fp_state.st = &vcpu->arch.ctxt.fp_regs;
		fp_state.sve_state = vcpu->arch.sve_state;
		fp_state.sve_vl = vcpu->arch.sve_max_vl;
		fp_state.sme_state = NULL;
		fp_state.svcr = &vcpu->arch.svcr;
		fp_state.fpmr = &vcpu->arch.fpmr;
		fp_state.fp_type = &vcpu->arch.fp_type;

		if (vcpu_has_sve(vcpu))
			fp_state.to_save = FP_STATE_SVE;
		else
			fp_state.to_save = FP_STATE_FPSIMD;

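		/*
		 * Bind the vcpu's state to this CPU so that a later
		 * fpsimd_save() writes back into the vcpu struct rather
		 * than into current's user state.
		 */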
		fpsimd_bind_state_to_cpu(&fp_state);

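		/*
		 * The regs now hold state belonging to this context, so
		 * they are no longer foreign to the host's tracking.
		 */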
		clear_thread_flag(TIF_FOREIGN_FPSTATE);
	}
}

/*
 * Write back the vcpu FPSIMD regs if they are dirty, and invalidate the
 * cpu FPSIMD regs so that they can't be spuriously reused if this vcpu
 * disappears and another task or vcpu appears that recycles the same
 * struct fpsimd_state.
 */
void kvm_arch_vcpu_put_fp(struct kvm_vcpu *vcpu)
{
	unsigned long flags;

	local_irq_save(flags);

	/*
	 * If we have VHE then the Hyp code will reset CPACR_EL1 to
	 * the default value and we need to reenable SME.
	 */
	if (has_vhe() && system_supports_sme()) {
		/* Also restore EL0 state seen on entry */
		if (vcpu_get_flag(vcpu, HOST_SME_ENABLED))
			sysreg_clear_set(CPACR_EL1, 0,
					 CPACR_EL1_SMEN_EL0EN |
					 CPACR_EL1_SMEN_EL1EN);
		else
			sysreg_clear_set(CPACR_EL1,
					 CPACR_EL1_SMEN_EL0EN,
					 CPACR_EL1_SMEN_EL1EN);
		isb();
	}

	if (vcpu->arch.fp_state == FP_STATE_GUEST_OWNED) {
		if (vcpu_has_sve(vcpu)) {
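			/* Capture the guest's live ZCR_EL1 into the vcpu's sysreg file. */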
			__vcpu_sys_reg(vcpu, ZCR_EL1) = read_sysreg_el1(SYS_ZCR);

			/* Restore the VL that was saved when bound to the CPU */
			if (!has_vhe())
				sve_cond_update_zcr_vq(vcpu_sve_max_vq(vcpu) - 1,
						       SYS_ZCR_EL1);
		}

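		/*
		 * Write the guest state back to memory and invalidate
		 * this CPU's binding so that stale regs can't be reused.
		 */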
		fpsimd_save_and_flush_cpu_state();
	} else if (has_vhe() && system_supports_sve()) {
		/*
		 * The FPSIMD/SVE state in the CPU has not been touched, and we
		 * have SVE (and VHE): CPACR_EL1 (alias CPTR_EL2) has been
		 * reset by kvm_reset_cptr_el2() in the Hyp code, disabling SVE
		 * for EL0. To avoid spurious traps, restore the trap state
		 * seen by kvm_arch_vcpu_load_fp():
		 */
		if (vcpu_get_flag(vcpu, HOST_SVE_ENABLED))
			sysreg_clear_set(CPACR_EL1, 0, CPACR_EL1_ZEN_EL0EN);
		else
			sysreg_clear_set(CPACR_EL1, CPACR_EL1_ZEN_EL0EN, 0);
	}

	local_irq_restore(flags);
}