1 | // SPDX-License-Identifier: GPL-2.0 |
2 | /* |
3 | * Copyright (C) 2021 Western Digital Corporation or its affiliates. |
4 | * Copyright (C) 2022 Ventana Micro Systems Inc. |
5 | * |
6 | * Authors: |
7 | * Anup Patel <apatel@ventanamicro.com> |
8 | */ |
9 | |
10 | #include <linux/atomic.h> |
11 | #include <linux/bitmap.h> |
12 | #include <linux/kvm_host.h> |
13 | #include <linux/math.h> |
14 | #include <linux/spinlock.h> |
15 | #include <linux/swab.h> |
16 | #include <kvm/iodev.h> |
17 | #include <asm/csr.h> |
18 | #include <asm/kvm_aia_imsic.h> |
19 | |
/* Number of 64-bit EIP/EIE register pairs needed to cover all MSI IDs */
#define IMSIC_MAX_EIX	(IMSIC_MAX_ID / BITS_PER_TYPE(u64))

/*
 * One external-interrupt index (EIX) pair: a 64-bit window of the
 * interrupt-pending (eip) and interrupt-enabled (eie) bitmaps.
 * On 32-bit hosts each 64-bit window spans two unsigned longs.
 */
struct imsic_mrif_eix {
	unsigned long eip[BITS_PER_TYPE(u64) / BITS_PER_LONG];
	unsigned long eie[BITS_PER_TYPE(u64) / BITS_PER_LONG];
};

/*
 * Memory-resident interrupt file (MRIF): in-memory copy of the IMSIC
 * register state (pending/enabled bitmaps plus eithreshold/eidelivery).
 */
struct imsic_mrif {
	struct imsic_mrif_eix eix[IMSIC_MAX_EIX];
	unsigned long eithreshold;
	unsigned long eidelivery;
};
32 | |
/* Per-VCPU IMSIC emulation state */
struct imsic {
	/* KVM MMIO device backing the guest's IMSIC page */
	struct kvm_io_device iodev;

	u32 nr_msis;	/* upper bound on guest interrupt IDs */
	u32 nr_eix;	/* number of EIX pairs emulated in the SW-file */
	u32 nr_hw_eix;	/* number of EIX pairs backed by the HW VS-file */

	/*
	 * At any point in time, the register state is in
	 * one of the following places:
	 *
	 * 1) Hardware: IMSIC VS-file (vsfile_cpu >= 0)
	 * 2) Software: IMSIC SW-file (vsfile_cpu < 0)
	 */

	/* IMSIC VS-file */
	rwlock_t vsfile_lock;		/* protects the vsfile_* fields below */
	int vsfile_cpu;			/* CPU hosting the VS-file, or -1 */
	int vsfile_hgei;		/* guest external interrupt number, or -1 */
	void __iomem *vsfile_va;	/* kernel mapping of the VS-file page */
	phys_addr_t vsfile_pa;		/* physical address of the VS-file page */

	/* IMSIC SW-file */
	struct imsic_mrif *swfile;	/* memory-resident register state */
	phys_addr_t swfile_pa;		/* physical address of the SW-file */
	spinlock_t swfile_extirq_lock;	/* serializes external IRQ line updates */
};
60 | |
/*
 * Read a VS-level IMSIC register through the indirect VSISELECT/VSIREG
 * CSR pair. Callers first point HSTATUS.VGEIN at the desired VS-file.
 */
#define imsic_vs_csr_read(__c)			\
({						\
	unsigned long __r;			\
	csr_write(CSR_VSISELECT, __c);		\
	__r = csr_read(CSR_VSIREG);		\
	__r;					\
})

/*
 * CSR instructions need compile-time constant register numbers, so
 * expand one switch-case per possible EIP/EIE register (64 of each).
 */
#define imsic_read_switchcase(__ireg)		\
	case __ireg:				\
		return imsic_vs_csr_read(__ireg);
#define imsic_read_switchcase_2(__ireg)		\
	imsic_read_switchcase(__ireg + 0)	\
	imsic_read_switchcase(__ireg + 1)
#define imsic_read_switchcase_4(__ireg)		\
	imsic_read_switchcase_2(__ireg + 0)	\
	imsic_read_switchcase_2(__ireg + 2)
#define imsic_read_switchcase_8(__ireg)		\
	imsic_read_switchcase_4(__ireg + 0)	\
	imsic_read_switchcase_4(__ireg + 4)
#define imsic_read_switchcase_16(__ireg)	\
	imsic_read_switchcase_8(__ireg + 0)	\
	imsic_read_switchcase_8(__ireg + 8)
#define imsic_read_switchcase_32(__ireg)	\
	imsic_read_switchcase_16(__ireg + 0)	\
	imsic_read_switchcase_16(__ireg + 16)
#define imsic_read_switchcase_64(__ireg)	\
	imsic_read_switchcase_32(__ireg + 0)	\
	imsic_read_switchcase_32(__ireg + 32)

/* Read EIPx/EIEx register @ireg of the currently selected VS-file */
static unsigned long imsic_eix_read(int ireg)
{
	switch (ireg) {
	imsic_read_switchcase_64(IMSIC_EIP0)
	imsic_read_switchcase_64(IMSIC_EIE0)
	}

	/* Unknown register numbers read as zero */
	return 0;
}
100 | |
/*
 * Atomically swap a VS-level IMSIC register with @__v via the indirect
 * VSISELECT/VSIREG CSR pair, returning the old value.
 */
#define imsic_vs_csr_swap(__c, __v)		\
({						\
	unsigned long __r;			\
	csr_write(CSR_VSISELECT, __c);		\
	__r = csr_swap(CSR_VSIREG, __v);	\
	__r;					\
})

/* Expand one switch-case per EIP/EIE register number (see read variant) */
#define imsic_swap_switchcase(__ireg, __v)	\
	case __ireg:				\
		return imsic_vs_csr_swap(__ireg, __v);
#define imsic_swap_switchcase_2(__ireg, __v)	\
	imsic_swap_switchcase(__ireg + 0, __v)	\
	imsic_swap_switchcase(__ireg + 1, __v)
#define imsic_swap_switchcase_4(__ireg, __v)	\
	imsic_swap_switchcase_2(__ireg + 0, __v)	\
	imsic_swap_switchcase_2(__ireg + 2, __v)
#define imsic_swap_switchcase_8(__ireg, __v)	\
	imsic_swap_switchcase_4(__ireg + 0, __v)	\
	imsic_swap_switchcase_4(__ireg + 4, __v)
#define imsic_swap_switchcase_16(__ireg, __v)	\
	imsic_swap_switchcase_8(__ireg + 0, __v)	\
	imsic_swap_switchcase_8(__ireg + 8, __v)
#define imsic_swap_switchcase_32(__ireg, __v)	\
	imsic_swap_switchcase_16(__ireg + 0, __v)	\
	imsic_swap_switchcase_16(__ireg + 16, __v)
#define imsic_swap_switchcase_64(__ireg, __v)	\
	imsic_swap_switchcase_32(__ireg + 0, __v)	\
	imsic_swap_switchcase_32(__ireg + 32, __v)

/* Swap EIPx/EIEx register @ireg with @val, returning the previous value */
static unsigned long imsic_eix_swap(int ireg, unsigned long val)
{
	switch (ireg) {
	imsic_swap_switchcase_64(IMSIC_EIP0, val)
	imsic_swap_switchcase_64(IMSIC_EIE0, val)
	}

	/* Unknown register numbers read as zero */
	return 0;
}
140 | |
/* Write a VS-level IMSIC register via the indirect VSISELECT/VSIREG pair */
#define imsic_vs_csr_write(__c, __v)		\
do {						\
	csr_write(CSR_VSISELECT, __c);		\
	csr_write(CSR_VSIREG, __v);		\
} while (0)

/* Expand one switch-case per EIP/EIE register number (see read variant) */
#define imsic_write_switchcase(__ireg, __v)	\
	case __ireg:				\
		imsic_vs_csr_write(__ireg, __v);	\
		break;
#define imsic_write_switchcase_2(__ireg, __v)	\
	imsic_write_switchcase(__ireg + 0, __v)	\
	imsic_write_switchcase(__ireg + 1, __v)
#define imsic_write_switchcase_4(__ireg, __v)	\
	imsic_write_switchcase_2(__ireg + 0, __v)	\
	imsic_write_switchcase_2(__ireg + 2, __v)
#define imsic_write_switchcase_8(__ireg, __v)	\
	imsic_write_switchcase_4(__ireg + 0, __v)	\
	imsic_write_switchcase_4(__ireg + 4, __v)
#define imsic_write_switchcase_16(__ireg, __v)	\
	imsic_write_switchcase_8(__ireg + 0, __v)	\
	imsic_write_switchcase_8(__ireg + 8, __v)
#define imsic_write_switchcase_32(__ireg, __v)	\
	imsic_write_switchcase_16(__ireg + 0, __v)	\
	imsic_write_switchcase_16(__ireg + 16, __v)
#define imsic_write_switchcase_64(__ireg, __v)	\
	imsic_write_switchcase_32(__ireg + 0, __v)	\
	imsic_write_switchcase_32(__ireg + 32, __v)

/* Write @val to EIPx/EIEx register @ireg; unknown registers are ignored */
static void imsic_eix_write(int ireg, unsigned long val)
{
	switch (ireg) {
	imsic_write_switchcase_64(IMSIC_EIP0, val)
	imsic_write_switchcase_64(IMSIC_EIE0, val)
	}
}
177 | |
/* OR bits into a VS-level IMSIC register via VSISELECT/VSIREG (csr_set) */
#define imsic_vs_csr_set(__c, __v)		\
do {						\
	csr_write(CSR_VSISELECT, __c);		\
	csr_set(CSR_VSIREG, __v);		\
} while (0)

/* Expand one switch-case per EIP/EIE register number (see read variant) */
#define imsic_set_switchcase(__ireg, __v)	\
	case __ireg:				\
		imsic_vs_csr_set(__ireg, __v);	\
		break;
#define imsic_set_switchcase_2(__ireg, __v)	\
	imsic_set_switchcase(__ireg + 0, __v)	\
	imsic_set_switchcase(__ireg + 1, __v)
#define imsic_set_switchcase_4(__ireg, __v)	\
	imsic_set_switchcase_2(__ireg + 0, __v)	\
	imsic_set_switchcase_2(__ireg + 2, __v)
#define imsic_set_switchcase_8(__ireg, __v)	\
	imsic_set_switchcase_4(__ireg + 0, __v)	\
	imsic_set_switchcase_4(__ireg + 4, __v)
#define imsic_set_switchcase_16(__ireg, __v)	\
	imsic_set_switchcase_8(__ireg + 0, __v)	\
	imsic_set_switchcase_8(__ireg + 8, __v)
#define imsic_set_switchcase_32(__ireg, __v)	\
	imsic_set_switchcase_16(__ireg + 0, __v)	\
	imsic_set_switchcase_16(__ireg + 16, __v)
#define imsic_set_switchcase_64(__ireg, __v)	\
	imsic_set_switchcase_32(__ireg + 0, __v)	\
	imsic_set_switchcase_32(__ireg + 32, __v)

/* Set (OR) @val bits in EIPx/EIEx register @ireg */
static void imsic_eix_set(int ireg, unsigned long val)
{
	switch (ireg) {
	imsic_set_switchcase_64(IMSIC_EIP0, val)
	imsic_set_switchcase_64(IMSIC_EIE0, val)
	}
}
214 | |
/*
 * Atomically read-modify-write one MRIF word shared with other producers:
 *
 *	old = *ptr;
 *	*ptr = (old & ~wr_mask) | (new_val & wr_mask);
 *	return old;
 *
 * @mrif is unused but kept so all atomic MRIF helpers share one shape.
 */
static unsigned long imsic_mrif_atomic_rmw(struct imsic_mrif *mrif,
					   unsigned long *ptr,
					   unsigned long new_val,
					   unsigned long wr_mask)
{
	unsigned long old_val = 0, tmp = 0;

	/*
	 * The LR/SC access width must match sizeof(unsigned long).
	 * Using lr.w/sc.w on a 64-bit host would only update the low
	 * 32 bits of the MRIF word, corrupting interrupt IDs 32-63 of
	 * each EIX pair, so select lr.d/sc.d there.
	 */
	__asm__ __volatile__ (
#ifdef CONFIG_32BIT
		"0: lr.w.aq %1, %0\n"
		" and %2, %1, %3\n"
		" or %2, %2, %4\n"
		" sc.w.rl %2, %2, %0\n"
#else
		"0: lr.d.aq %1, %0\n"
		" and %2, %1, %3\n"
		" or %2, %2, %4\n"
		" sc.d.rl %2, %2, %0\n"
#endif
		" bnez %2, 0b"
		: "+A" (*ptr), "+r" (old_val), "+r" (tmp)
		: "r" (~wr_mask), "r" (new_val & wr_mask)
		: "memory" );

	return old_val;
}
234 | |
235 | static unsigned long imsic_mrif_atomic_or(struct imsic_mrif *mrif, |
236 | unsigned long *ptr, |
237 | unsigned long val) |
238 | { |
239 | return atomic_long_fetch_or(i: val, v: (atomic_long_t *)ptr); |
240 | } |
241 | |
242 | #define imsic_mrif_atomic_write(__mrif, __ptr, __new_val) \ |
243 | imsic_mrif_atomic_rmw(__mrif, __ptr, __new_val, -1UL) |
244 | #define imsic_mrif_atomic_read(__mrif, __ptr) \ |
245 | imsic_mrif_atomic_or(__mrif, __ptr, 0) |
246 | |
247 | static u32 imsic_mrif_topei(struct imsic_mrif *mrif, u32 nr_eix, u32 nr_msis) |
248 | { |
249 | struct imsic_mrif_eix *eix; |
250 | u32 i, imin, imax, ei, max_msi; |
251 | unsigned long eipend[BITS_PER_TYPE(u64) / BITS_PER_LONG]; |
252 | unsigned long eithreshold = imsic_mrif_atomic_read(mrif, |
253 | &mrif->eithreshold); |
254 | |
255 | max_msi = (eithreshold && (eithreshold <= nr_msis)) ? |
256 | eithreshold : nr_msis; |
257 | for (ei = 0; ei < nr_eix; ei++) { |
258 | eix = &mrif->eix[ei]; |
259 | eipend[0] = imsic_mrif_atomic_read(mrif, &eix->eie[0]) & |
260 | imsic_mrif_atomic_read(mrif, &eix->eip[0]); |
261 | #ifdef CONFIG_32BIT |
262 | eipend[1] = imsic_mrif_atomic_read(mrif, &eix->eie[1]) & |
263 | imsic_mrif_atomic_read(mrif, &eix->eip[1]); |
264 | if (!eipend[0] && !eipend[1]) |
265 | #else |
266 | if (!eipend[0]) |
267 | #endif |
268 | continue; |
269 | |
270 | imin = ei * BITS_PER_TYPE(u64); |
271 | imax = ((imin + BITS_PER_TYPE(u64)) < max_msi) ? |
272 | imin + BITS_PER_TYPE(u64) : max_msi; |
273 | for (i = (!imin) ? 1 : imin; i < imax; i++) { |
274 | if (test_bit(i - imin, eipend)) |
275 | return (i << TOPEI_ID_SHIFT) | i; |
276 | } |
277 | } |
278 | |
279 | return 0; |
280 | } |
281 | |
282 | static int imsic_mrif_isel_check(u32 nr_eix, unsigned long isel) |
283 | { |
284 | u32 num = 0; |
285 | |
286 | switch (isel) { |
287 | case IMSIC_EIDELIVERY: |
288 | case IMSIC_EITHRESHOLD: |
289 | break; |
290 | case IMSIC_EIP0 ... IMSIC_EIP63: |
291 | num = isel - IMSIC_EIP0; |
292 | break; |
293 | case IMSIC_EIE0 ... IMSIC_EIE63: |
294 | num = isel - IMSIC_EIE0; |
295 | break; |
296 | default: |
297 | return -ENOENT; |
298 | } |
299 | #ifndef CONFIG_32BIT |
300 | if (num & 0x1) |
301 | return -EINVAL; |
302 | #endif |
303 | if ((num / 2) >= nr_eix) |
304 | return -EINVAL; |
305 | |
306 | return 0; |
307 | } |
308 | |
309 | static int imsic_mrif_rmw(struct imsic_mrif *mrif, u32 nr_eix, |
310 | unsigned long isel, unsigned long *val, |
311 | unsigned long new_val, unsigned long wr_mask) |
312 | { |
313 | bool pend; |
314 | struct imsic_mrif_eix *eix; |
315 | unsigned long *ei, num, old_val = 0; |
316 | |
317 | switch (isel) { |
318 | case IMSIC_EIDELIVERY: |
319 | old_val = imsic_mrif_atomic_rmw(mrif, ptr: &mrif->eidelivery, |
320 | new_val, wr_mask: wr_mask & 0x1); |
321 | break; |
322 | case IMSIC_EITHRESHOLD: |
323 | old_val = imsic_mrif_atomic_rmw(mrif, &mrif->eithreshold, |
324 | new_val, wr_mask & (IMSIC_MAX_ID - 1)); |
325 | break; |
326 | case IMSIC_EIP0 ... IMSIC_EIP63: |
327 | case IMSIC_EIE0 ... IMSIC_EIE63: |
328 | if (isel >= IMSIC_EIP0 && isel <= IMSIC_EIP63) { |
329 | pend = true; |
330 | num = isel - IMSIC_EIP0; |
331 | } else { |
332 | pend = false; |
333 | num = isel - IMSIC_EIE0; |
334 | } |
335 | |
336 | if ((num / 2) >= nr_eix) |
337 | return -EINVAL; |
338 | eix = &mrif->eix[num / 2]; |
339 | |
340 | #ifndef CONFIG_32BIT |
341 | if (num & 0x1) |
342 | return -EINVAL; |
343 | ei = (pend) ? &eix->eip[0] : &eix->eie[0]; |
344 | #else |
345 | ei = (pend) ? &eix->eip[num & 0x1] : &eix->eie[num & 0x1]; |
346 | #endif |
347 | |
348 | /* Bit0 of EIP0 or EIE0 is read-only */ |
349 | if (!num) |
350 | wr_mask &= ~BIT(0); |
351 | |
352 | old_val = imsic_mrif_atomic_rmw(mrif, ptr: ei, new_val, wr_mask); |
353 | break; |
354 | default: |
355 | return -ENOENT; |
356 | } |
357 | |
358 | if (val) |
359 | *val = old_val; |
360 | |
361 | return 0; |
362 | } |
363 | |
/* Arguments passed to imsic_vsfile_local_read() via on_each_cpu_mask() */
struct imsic_vsfile_read_data {
	int hgei;		/* guest external interrupt number to select */
	u32 nr_eix;		/* number of EIX pairs to read */
	bool clear;		/* also zero the VS-file while reading? */
	struct imsic_mrif *mrif;	/* destination (caller's on-stack MRIF) */
};
370 | |
/*
 * Read (and optionally clear) the VS-file registers into an MRIF.
 * Runs on the CPU hosting the VS-file (invoked via on_each_cpu_mask()
 * from imsic_vsfile_read()). Temporarily retargets HSTATUS.VGEIN at
 * the requested guest interrupt file, then restores HSTATUS and
 * VSISELECT; the restore order mirrors the save order, so keep the
 * CSR sequence intact.
 */
static void imsic_vsfile_local_read(void *data)
{
	u32 i;
	struct imsic_mrif_eix *eix;
	struct imsic_vsfile_read_data *idata = data;
	struct imsic_mrif *mrif = idata->mrif;
	unsigned long new_hstatus, old_hstatus, old_vsiselect;

	/* Save CSR state we clobber, then select the target VS-file */
	old_vsiselect = csr_read(CSR_VSISELECT);
	old_hstatus = csr_read(CSR_HSTATUS);
	new_hstatus = old_hstatus & ~HSTATUS_VGEIN;
	new_hstatus |= ((unsigned long)idata->hgei) << HSTATUS_VGEIN_SHIFT;
	csr_write(CSR_HSTATUS, new_hstatus);

	/*
	 * We don't use imsic_mrif_atomic_xyz() functions to store
	 * values in MRIF because imsic_vsfile_read() is always called
	 * with pointer to temporary MRIF on stack.
	 */

	if (idata->clear) {
		/* Swap with zero: read out state and wipe the VS-file */
		mrif->eidelivery = imsic_vs_csr_swap(IMSIC_EIDELIVERY, 0);
		mrif->eithreshold = imsic_vs_csr_swap(IMSIC_EITHRESHOLD, 0);
		for (i = 0; i < idata->nr_eix; i++) {
			eix = &mrif->eix[i];
			eix->eip[0] = imsic_eix_swap(IMSIC_EIP0 + i * 2, 0);
			eix->eie[0] = imsic_eix_swap(IMSIC_EIE0 + i * 2, 0);
#ifdef CONFIG_32BIT
			/* Odd register numbers hold the upper 32 bits */
			eix->eip[1] = imsic_eix_swap(IMSIC_EIP0 + i * 2 + 1, 0);
			eix->eie[1] = imsic_eix_swap(IMSIC_EIE0 + i * 2 + 1, 0);
#endif
		}
	} else {
		/* Non-destructive read of the VS-file */
		mrif->eidelivery = imsic_vs_csr_read(IMSIC_EIDELIVERY);
		mrif->eithreshold = imsic_vs_csr_read(IMSIC_EITHRESHOLD);
		for (i = 0; i < idata->nr_eix; i++) {
			eix = &mrif->eix[i];
			eix->eip[0] = imsic_eix_read(IMSIC_EIP0 + i * 2);
			eix->eie[0] = imsic_eix_read(IMSIC_EIE0 + i * 2);
#ifdef CONFIG_32BIT
			/* Odd register numbers hold the upper 32 bits */
			eix->eip[1] = imsic_eix_read(IMSIC_EIP0 + i * 2 + 1);
			eix->eie[1] = imsic_eix_read(IMSIC_EIE0 + i * 2 + 1);
#endif
		}
	}

	/* Restore clobbered CSR state */
	csr_write(CSR_HSTATUS, old_hstatus);
	csr_write(CSR_VSISELECT, old_vsiselect);
}
420 | |
421 | static void imsic_vsfile_read(int vsfile_hgei, int vsfile_cpu, u32 nr_eix, |
422 | bool clear, struct imsic_mrif *mrif) |
423 | { |
424 | struct imsic_vsfile_read_data idata; |
425 | |
426 | /* We can only read clear if we have a IMSIC VS-file */ |
427 | if (vsfile_cpu < 0 || vsfile_hgei <= 0) |
428 | return; |
429 | |
430 | /* We can only read clear on local CPU */ |
431 | idata.hgei = vsfile_hgei; |
432 | idata.nr_eix = nr_eix; |
433 | idata.clear = clear; |
434 | idata.mrif = mrif; |
435 | on_each_cpu_mask(cpumask_of(vsfile_cpu), |
436 | func: imsic_vsfile_local_read, info: &idata, wait: 1); |
437 | } |
438 | |
/* Arguments passed to imsic_vsfile_local_rw() via on_each_cpu_mask() */
struct imsic_vsfile_rw_data {
	int hgei;	/* guest external interrupt number to select */
	int isel;	/* indirect IMSIC register number */
	bool write;	/* true = write @val, false = read into @val */
	unsigned long val;	/* value to write, or read result */
};
445 | |
446 | static void imsic_vsfile_local_rw(void *data) |
447 | { |
448 | struct imsic_vsfile_rw_data *idata = data; |
449 | unsigned long new_hstatus, old_hstatus, old_vsiselect; |
450 | |
451 | old_vsiselect = csr_read(CSR_VSISELECT); |
452 | old_hstatus = csr_read(CSR_HSTATUS); |
453 | new_hstatus = old_hstatus & ~HSTATUS_VGEIN; |
454 | new_hstatus |= ((unsigned long)idata->hgei) << HSTATUS_VGEIN_SHIFT; |
455 | csr_write(CSR_HSTATUS, new_hstatus); |
456 | |
457 | switch (idata->isel) { |
458 | case IMSIC_EIDELIVERY: |
459 | if (idata->write) |
460 | imsic_vs_csr_write(IMSIC_EIDELIVERY, idata->val); |
461 | else |
462 | idata->val = imsic_vs_csr_read(IMSIC_EIDELIVERY); |
463 | break; |
464 | case IMSIC_EITHRESHOLD: |
465 | if (idata->write) |
466 | imsic_vs_csr_write(IMSIC_EITHRESHOLD, idata->val); |
467 | else |
468 | idata->val = imsic_vs_csr_read(IMSIC_EITHRESHOLD); |
469 | break; |
470 | case IMSIC_EIP0 ... IMSIC_EIP63: |
471 | case IMSIC_EIE0 ... IMSIC_EIE63: |
472 | #ifndef CONFIG_32BIT |
473 | if (idata->isel & 0x1) |
474 | break; |
475 | #endif |
476 | if (idata->write) |
477 | imsic_eix_write(ireg: idata->isel, val: idata->val); |
478 | else |
479 | idata->val = imsic_eix_read(ireg: idata->isel); |
480 | break; |
481 | default: |
482 | break; |
483 | } |
484 | |
485 | csr_write(CSR_HSTATUS, old_hstatus); |
486 | csr_write(CSR_VSISELECT, old_vsiselect); |
487 | } |
488 | |
489 | static int imsic_vsfile_rw(int vsfile_hgei, int vsfile_cpu, u32 nr_eix, |
490 | unsigned long isel, bool write, |
491 | unsigned long *val) |
492 | { |
493 | int rc; |
494 | struct imsic_vsfile_rw_data rdata; |
495 | |
496 | /* We can only access register if we have a IMSIC VS-file */ |
497 | if (vsfile_cpu < 0 || vsfile_hgei <= 0) |
498 | return -EINVAL; |
499 | |
500 | /* Check IMSIC register iselect */ |
501 | rc = imsic_mrif_isel_check(nr_eix, isel); |
502 | if (rc) |
503 | return rc; |
504 | |
505 | /* We can only access register on local CPU */ |
506 | rdata.hgei = vsfile_hgei; |
507 | rdata.isel = isel; |
508 | rdata.write = write; |
509 | rdata.val = (write) ? *val : 0; |
510 | on_each_cpu_mask(cpumask_of(vsfile_cpu), |
511 | func: imsic_vsfile_local_rw, info: &rdata, wait: 1); |
512 | |
513 | if (!write) |
514 | *val = rdata.val; |
515 | |
516 | return 0; |
517 | } |
518 | |
/*
 * Zero out all registers of a VS-file on the local CPU. Used to
 * sanitize a freshly allocated VS-file before handing it to a guest.
 * Temporarily retargets HSTATUS.VGEIN, then restores HSTATUS and
 * VSISELECT; keep the CSR save/restore order intact.
 */
static void imsic_vsfile_local_clear(int vsfile_hgei, u32 nr_eix)
{
	u32 i;
	unsigned long new_hstatus, old_hstatus, old_vsiselect;

	/* We can only zero-out if we have a IMSIC VS-file */
	if (vsfile_hgei <= 0)
		return;

	/* Save CSR state we clobber, then select the target VS-file */
	old_vsiselect = csr_read(CSR_VSISELECT);
	old_hstatus = csr_read(CSR_HSTATUS);
	new_hstatus = old_hstatus & ~HSTATUS_VGEIN;
	new_hstatus |= ((unsigned long)vsfile_hgei) << HSTATUS_VGEIN_SHIFT;
	csr_write(CSR_HSTATUS, new_hstatus);

	imsic_vs_csr_write(IMSIC_EIDELIVERY, 0);
	imsic_vs_csr_write(IMSIC_EITHRESHOLD, 0);
	for (i = 0; i < nr_eix; i++) {
		imsic_eix_write(IMSIC_EIP0 + i * 2, 0);
		imsic_eix_write(IMSIC_EIE0 + i * 2, 0);
#ifdef CONFIG_32BIT
		/* Odd register numbers hold the upper 32 bits */
		imsic_eix_write(IMSIC_EIP0 + i * 2 + 1, 0);
		imsic_eix_write(IMSIC_EIE0 + i * 2 + 1, 0);
#endif
	}

	/* Restore clobbered CSR state */
	csr_write(CSR_HSTATUS, old_hstatus);
	csr_write(CSR_VSISELECT, old_vsiselect);
}
548 | |
/*
 * Merge register state from @mrif into a VS-file on the local CPU.
 * EIP/EIE bits are OR-ed in (imsic_eix_set), so pending/enabled bits
 * already present in the VS-file are preserved. EIDELIVERY is written
 * last so delivery only starts once the bitmaps are in place.
 */
static void imsic_vsfile_local_update(int vsfile_hgei, u32 nr_eix,
				      struct imsic_mrif *mrif)
{
	u32 i;
	struct imsic_mrif_eix *eix;
	unsigned long new_hstatus, old_hstatus, old_vsiselect;

	/* We can only update if we have a HW IMSIC context */
	if (vsfile_hgei <= 0)
		return;

	/*
	 * We don't use imsic_mrif_atomic_xyz() functions to read values
	 * from MRIF in this function because it is always called with
	 * pointer to temporary MRIF on stack.
	 */

	/* Save CSR state we clobber, then select the target VS-file */
	old_vsiselect = csr_read(CSR_VSISELECT);
	old_hstatus = csr_read(CSR_HSTATUS);
	new_hstatus = old_hstatus & ~HSTATUS_VGEIN;
	new_hstatus |= ((unsigned long)vsfile_hgei) << HSTATUS_VGEIN_SHIFT;
	csr_write(CSR_HSTATUS, new_hstatus);

	for (i = 0; i < nr_eix; i++) {
		eix = &mrif->eix[i];
		imsic_eix_set(IMSIC_EIP0 + i * 2, eix->eip[0]);
		imsic_eix_set(IMSIC_EIE0 + i * 2, eix->eie[0]);
#ifdef CONFIG_32BIT
		/* Odd register numbers hold the upper 32 bits */
		imsic_eix_set(IMSIC_EIP0 + i * 2 + 1, eix->eip[1]);
		imsic_eix_set(IMSIC_EIE0 + i * 2 + 1, eix->eie[1]);
#endif
	}
	imsic_vs_csr_write(IMSIC_EITHRESHOLD, mrif->eithreshold);
	imsic_vs_csr_write(IMSIC_EIDELIVERY, mrif->eidelivery);

	/* Restore clobbered CSR state */
	csr_write(CSR_HSTATUS, old_hstatus);
	csr_write(CSR_VSISELECT, old_vsiselect);
}
587 | |
588 | static void imsic_vsfile_cleanup(struct imsic *imsic) |
589 | { |
590 | int old_vsfile_hgei, old_vsfile_cpu; |
591 | unsigned long flags; |
592 | |
593 | /* |
594 | * We don't use imsic_mrif_atomic_xyz() functions to clear the |
595 | * SW-file in this function because it is always called when the |
596 | * VCPU is being destroyed. |
597 | */ |
598 | |
599 | write_lock_irqsave(&imsic->vsfile_lock, flags); |
600 | old_vsfile_hgei = imsic->vsfile_hgei; |
601 | old_vsfile_cpu = imsic->vsfile_cpu; |
602 | imsic->vsfile_cpu = imsic->vsfile_hgei = -1; |
603 | imsic->vsfile_va = NULL; |
604 | imsic->vsfile_pa = 0; |
605 | write_unlock_irqrestore(&imsic->vsfile_lock, flags); |
606 | |
607 | memset(imsic->swfile, 0, sizeof(*imsic->swfile)); |
608 | |
609 | if (old_vsfile_cpu >= 0) |
610 | kvm_riscv_aia_free_hgei(old_vsfile_cpu, old_vsfile_hgei); |
611 | } |
612 | |
613 | static void imsic_swfile_extirq_update(struct kvm_vcpu *vcpu) |
614 | { |
615 | struct imsic *imsic = vcpu->arch.aia_context.imsic_state; |
616 | struct imsic_mrif *mrif = imsic->swfile; |
617 | unsigned long flags; |
618 | |
619 | /* |
620 | * The critical section is necessary during external interrupt |
621 | * updates to avoid the risk of losing interrupts due to potential |
622 | * interruptions between reading topei and updating pending status. |
623 | */ |
624 | |
625 | spin_lock_irqsave(&imsic->swfile_extirq_lock, flags); |
626 | |
627 | if (imsic_mrif_atomic_read(mrif, &mrif->eidelivery) && |
628 | imsic_mrif_topei(mrif, imsic->nr_eix, imsic->nr_msis)) |
629 | kvm_riscv_vcpu_set_interrupt(vcpu, IRQ_VS_EXT); |
630 | else |
631 | kvm_riscv_vcpu_unset_interrupt(vcpu, IRQ_VS_EXT); |
632 | |
633 | spin_unlock_irqrestore(lock: &imsic->swfile_extirq_lock, flags); |
634 | } |
635 | |
636 | static void imsic_swfile_read(struct kvm_vcpu *vcpu, bool clear, |
637 | struct imsic_mrif *mrif) |
638 | { |
639 | struct imsic *imsic = vcpu->arch.aia_context.imsic_state; |
640 | |
641 | /* |
642 | * We don't use imsic_mrif_atomic_xyz() functions to read and |
643 | * write SW-file and MRIF in this function because it is always |
644 | * called when VCPU is not using SW-file and the MRIF points to |
645 | * a temporary MRIF on stack. |
646 | */ |
647 | |
648 | memcpy(mrif, imsic->swfile, sizeof(*mrif)); |
649 | if (clear) { |
650 | memset(imsic->swfile, 0, sizeof(*imsic->swfile)); |
651 | kvm_riscv_vcpu_unset_interrupt(vcpu, IRQ_VS_EXT); |
652 | } |
653 | } |
654 | |
655 | static void imsic_swfile_update(struct kvm_vcpu *vcpu, |
656 | struct imsic_mrif *mrif) |
657 | { |
658 | u32 i; |
659 | struct imsic_mrif_eix *seix, *eix; |
660 | struct imsic *imsic = vcpu->arch.aia_context.imsic_state; |
661 | struct imsic_mrif *smrif = imsic->swfile; |
662 | |
663 | imsic_mrif_atomic_write(smrif, &smrif->eidelivery, mrif->eidelivery); |
664 | imsic_mrif_atomic_write(smrif, &smrif->eithreshold, mrif->eithreshold); |
665 | for (i = 0; i < imsic->nr_eix; i++) { |
666 | seix = &smrif->eix[i]; |
667 | eix = &mrif->eix[i]; |
668 | imsic_mrif_atomic_or(mrif: smrif, ptr: &seix->eip[0], val: eix->eip[0]); |
669 | imsic_mrif_atomic_or(mrif: smrif, ptr: &seix->eie[0], val: eix->eie[0]); |
670 | #ifdef CONFIG_32BIT |
671 | imsic_mrif_atomic_or(smrif, &seix->eip[1], eix->eip[1]); |
672 | imsic_mrif_atomic_or(smrif, &seix->eie[1], eix->eie[1]); |
673 | #endif |
674 | } |
675 | |
676 | imsic_swfile_extirq_update(vcpu); |
677 | } |
678 | |
679 | void kvm_riscv_vcpu_aia_imsic_release(struct kvm_vcpu *vcpu) |
680 | { |
681 | unsigned long flags; |
682 | struct imsic_mrif tmrif; |
683 | int old_vsfile_hgei, old_vsfile_cpu; |
684 | struct imsic *imsic = vcpu->arch.aia_context.imsic_state; |
685 | |
686 | /* Read and clear IMSIC VS-file details */ |
687 | write_lock_irqsave(&imsic->vsfile_lock, flags); |
688 | old_vsfile_hgei = imsic->vsfile_hgei; |
689 | old_vsfile_cpu = imsic->vsfile_cpu; |
690 | imsic->vsfile_cpu = imsic->vsfile_hgei = -1; |
691 | imsic->vsfile_va = NULL; |
692 | imsic->vsfile_pa = 0; |
693 | write_unlock_irqrestore(&imsic->vsfile_lock, flags); |
694 | |
695 | /* Do nothing, if no IMSIC VS-file to release */ |
696 | if (old_vsfile_cpu < 0) |
697 | return; |
698 | |
699 | /* |
700 | * At this point, all interrupt producers are still using |
701 | * the old IMSIC VS-file so we first re-direct all interrupt |
702 | * producers. |
703 | */ |
704 | |
705 | /* Purge the G-stage mapping */ |
706 | kvm_riscv_gstage_iounmap(vcpu->kvm, |
707 | vcpu->arch.aia_context.imsic_addr, |
708 | IMSIC_MMIO_PAGE_SZ); |
709 | |
710 | /* TODO: Purge the IOMMU mapping ??? */ |
711 | |
712 | /* |
713 | * At this point, all interrupt producers have been re-directed |
714 | * to somewhere else so we move register state from the old IMSIC |
715 | * VS-file to the IMSIC SW-file. |
716 | */ |
717 | |
718 | /* Read and clear register state from old IMSIC VS-file */ |
719 | memset(&tmrif, 0, sizeof(tmrif)); |
720 | imsic_vsfile_read(vsfile_hgei: old_vsfile_hgei, vsfile_cpu: old_vsfile_cpu, nr_eix: imsic->nr_hw_eix, |
721 | clear: true, mrif: &tmrif); |
722 | |
723 | /* Update register state in IMSIC SW-file */ |
724 | imsic_swfile_update(vcpu, mrif: &tmrif); |
725 | |
726 | /* Free-up old IMSIC VS-file */ |
727 | kvm_riscv_aia_free_hgei(old_vsfile_cpu, old_vsfile_hgei); |
728 | } |
729 | |
730 | int kvm_riscv_vcpu_aia_imsic_update(struct kvm_vcpu *vcpu) |
731 | { |
732 | unsigned long flags; |
733 | phys_addr_t new_vsfile_pa; |
734 | struct imsic_mrif tmrif; |
735 | void __iomem *new_vsfile_va; |
736 | struct kvm *kvm = vcpu->kvm; |
737 | struct kvm_run *run = vcpu->run; |
738 | struct kvm_vcpu_aia *vaia = &vcpu->arch.aia_context; |
739 | struct imsic *imsic = vaia->imsic_state; |
740 | int ret = 0, new_vsfile_hgei = -1, old_vsfile_hgei, old_vsfile_cpu; |
741 | |
742 | /* Do nothing for emulation mode */ |
743 | if (kvm->arch.aia.mode == KVM_DEV_RISCV_AIA_MODE_EMUL) |
744 | return 1; |
745 | |
746 | /* Read old IMSIC VS-file details */ |
747 | read_lock_irqsave(&imsic->vsfile_lock, flags); |
748 | old_vsfile_hgei = imsic->vsfile_hgei; |
749 | old_vsfile_cpu = imsic->vsfile_cpu; |
750 | read_unlock_irqrestore(&imsic->vsfile_lock, flags); |
751 | |
752 | /* Do nothing if we are continuing on same CPU */ |
753 | if (old_vsfile_cpu == vcpu->cpu) |
754 | return 1; |
755 | |
756 | /* Allocate new IMSIC VS-file */ |
757 | ret = kvm_riscv_aia_alloc_hgei(vcpu->cpu, vcpu, |
758 | &new_vsfile_va, &new_vsfile_pa); |
759 | if (ret <= 0) { |
760 | /* For HW acceleration mode, we can't continue */ |
761 | if (kvm->arch.aia.mode == KVM_DEV_RISCV_AIA_MODE_HWACCEL) { |
762 | run->fail_entry.hardware_entry_failure_reason = |
763 | CSR_HSTATUS; |
764 | run->fail_entry.cpu = vcpu->cpu; |
765 | run->exit_reason = KVM_EXIT_FAIL_ENTRY; |
766 | return 0; |
767 | } |
768 | |
769 | /* Release old IMSIC VS-file */ |
770 | if (old_vsfile_cpu >= 0) |
771 | kvm_riscv_vcpu_aia_imsic_release(vcpu); |
772 | |
773 | /* For automatic mode, we continue */ |
774 | goto done; |
775 | } |
776 | new_vsfile_hgei = ret; |
777 | |
778 | /* |
779 | * At this point, all interrupt producers are still using |
780 | * to the old IMSIC VS-file so we first move all interrupt |
781 | * producers to the new IMSIC VS-file. |
782 | */ |
783 | |
784 | /* Zero-out new IMSIC VS-file */ |
785 | imsic_vsfile_local_clear(vsfile_hgei: new_vsfile_hgei, nr_eix: imsic->nr_hw_eix); |
786 | |
787 | /* Update G-stage mapping for the new IMSIC VS-file */ |
788 | ret = kvm_riscv_gstage_ioremap(kvm, vcpu->arch.aia_context.imsic_addr, |
789 | new_vsfile_pa, IMSIC_MMIO_PAGE_SZ, |
790 | true, true); |
791 | if (ret) |
792 | goto fail_free_vsfile_hgei; |
793 | |
794 | /* TODO: Update the IOMMU mapping ??? */ |
795 | |
796 | /* Update new IMSIC VS-file details in IMSIC context */ |
797 | write_lock_irqsave(&imsic->vsfile_lock, flags); |
798 | imsic->vsfile_hgei = new_vsfile_hgei; |
799 | imsic->vsfile_cpu = vcpu->cpu; |
800 | imsic->vsfile_va = new_vsfile_va; |
801 | imsic->vsfile_pa = new_vsfile_pa; |
802 | write_unlock_irqrestore(&imsic->vsfile_lock, flags); |
803 | |
804 | /* |
805 | * At this point, all interrupt producers have been moved |
806 | * to the new IMSIC VS-file so we move register state from |
807 | * the old IMSIC VS/SW-file to the new IMSIC VS-file. |
808 | */ |
809 | |
810 | memset(&tmrif, 0, sizeof(tmrif)); |
811 | if (old_vsfile_cpu >= 0) { |
812 | /* Read and clear register state from old IMSIC VS-file */ |
813 | imsic_vsfile_read(vsfile_hgei: old_vsfile_hgei, vsfile_cpu: old_vsfile_cpu, |
814 | nr_eix: imsic->nr_hw_eix, clear: true, mrif: &tmrif); |
815 | |
816 | /* Free-up old IMSIC VS-file */ |
817 | kvm_riscv_aia_free_hgei(old_vsfile_cpu, old_vsfile_hgei); |
818 | } else { |
819 | /* Read and clear register state from IMSIC SW-file */ |
820 | imsic_swfile_read(vcpu, clear: true, mrif: &tmrif); |
821 | } |
822 | |
823 | /* Restore register state in the new IMSIC VS-file */ |
824 | imsic_vsfile_local_update(vsfile_hgei: new_vsfile_hgei, nr_eix: imsic->nr_hw_eix, mrif: &tmrif); |
825 | |
826 | done: |
827 | /* Set VCPU HSTATUS.VGEIN to new IMSIC VS-file */ |
828 | vcpu->arch.guest_context.hstatus &= ~HSTATUS_VGEIN; |
829 | if (new_vsfile_hgei > 0) |
830 | vcpu->arch.guest_context.hstatus |= |
831 | ((unsigned long)new_vsfile_hgei) << HSTATUS_VGEIN_SHIFT; |
832 | |
833 | /* Continue run-loop */ |
834 | return 1; |
835 | |
836 | fail_free_vsfile_hgei: |
837 | kvm_riscv_aia_free_hgei(vcpu->cpu, new_vsfile_hgei); |
838 | return ret; |
839 | } |
840 | |
841 | int kvm_riscv_vcpu_aia_imsic_rmw(struct kvm_vcpu *vcpu, unsigned long isel, |
842 | unsigned long *val, unsigned long new_val, |
843 | unsigned long wr_mask) |
844 | { |
845 | u32 topei; |
846 | struct imsic_mrif_eix *eix; |
847 | int r, rc = KVM_INSN_CONTINUE_NEXT_SEPC; |
848 | struct imsic *imsic = vcpu->arch.aia_context.imsic_state; |
849 | |
850 | if (isel == KVM_RISCV_AIA_IMSIC_TOPEI) { |
851 | /* Read pending and enabled interrupt with highest priority */ |
852 | topei = imsic_mrif_topei(mrif: imsic->swfile, nr_eix: imsic->nr_eix, |
853 | nr_msis: imsic->nr_msis); |
854 | if (val) |
855 | *val = topei; |
856 | |
857 | /* Writes ignore value and clear top pending interrupt */ |
858 | if (topei && wr_mask) { |
859 | topei >>= TOPEI_ID_SHIFT; |
860 | if (topei) { |
861 | eix = &imsic->swfile->eix[topei / |
862 | BITS_PER_TYPE(u64)]; |
863 | clear_bit(nr: topei & (BITS_PER_TYPE(u64) - 1), |
864 | addr: eix->eip); |
865 | } |
866 | } |
867 | } else { |
868 | r = imsic_mrif_rmw(mrif: imsic->swfile, nr_eix: imsic->nr_eix, isel, |
869 | val, new_val, wr_mask); |
870 | /* Forward unknown IMSIC register to user-space */ |
871 | if (r) |
872 | rc = (r == -ENOENT) ? 0 : KVM_INSN_ILLEGAL_TRAP; |
873 | } |
874 | |
875 | if (wr_mask) |
876 | imsic_swfile_extirq_update(vcpu); |
877 | |
878 | return rc; |
879 | } |
880 | |
881 | int kvm_riscv_aia_imsic_rw_attr(struct kvm *kvm, unsigned long type, |
882 | bool write, unsigned long *val) |
883 | { |
884 | u32 isel, vcpu_id; |
885 | unsigned long flags; |
886 | struct imsic *imsic; |
887 | struct kvm_vcpu *vcpu; |
888 | int rc, vsfile_hgei, vsfile_cpu; |
889 | |
890 | if (!kvm_riscv_aia_initialized(kvm)) |
891 | return -ENODEV; |
892 | |
893 | vcpu_id = KVM_DEV_RISCV_AIA_IMSIC_GET_VCPU(type); |
894 | vcpu = kvm_get_vcpu_by_id(kvm, id: vcpu_id); |
895 | if (!vcpu) |
896 | return -ENODEV; |
897 | |
898 | isel = KVM_DEV_RISCV_AIA_IMSIC_GET_ISEL(type); |
899 | imsic = vcpu->arch.aia_context.imsic_state; |
900 | |
901 | read_lock_irqsave(&imsic->vsfile_lock, flags); |
902 | |
903 | rc = 0; |
904 | vsfile_hgei = imsic->vsfile_hgei; |
905 | vsfile_cpu = imsic->vsfile_cpu; |
906 | if (vsfile_cpu < 0) { |
907 | if (write) { |
908 | rc = imsic_mrif_rmw(mrif: imsic->swfile, nr_eix: imsic->nr_eix, |
909 | isel, NULL, new_val: *val, wr_mask: -1UL); |
910 | imsic_swfile_extirq_update(vcpu); |
911 | } else |
912 | rc = imsic_mrif_rmw(mrif: imsic->swfile, nr_eix: imsic->nr_eix, |
913 | isel, val, new_val: 0, wr_mask: 0); |
914 | } |
915 | |
916 | read_unlock_irqrestore(&imsic->vsfile_lock, flags); |
917 | |
918 | if (!rc && vsfile_cpu >= 0) |
919 | rc = imsic_vsfile_rw(vsfile_hgei, vsfile_cpu, nr_eix: imsic->nr_eix, |
920 | isel, write, val); |
921 | |
922 | return rc; |
923 | } |
924 | |
925 | int kvm_riscv_aia_imsic_has_attr(struct kvm *kvm, unsigned long type) |
926 | { |
927 | u32 isel, vcpu_id; |
928 | struct imsic *imsic; |
929 | struct kvm_vcpu *vcpu; |
930 | |
931 | if (!kvm_riscv_aia_initialized(kvm)) |
932 | return -ENODEV; |
933 | |
934 | vcpu_id = KVM_DEV_RISCV_AIA_IMSIC_GET_VCPU(type); |
935 | vcpu = kvm_get_vcpu_by_id(kvm, id: vcpu_id); |
936 | if (!vcpu) |
937 | return -ENODEV; |
938 | |
939 | isel = KVM_DEV_RISCV_AIA_IMSIC_GET_ISEL(type); |
940 | imsic = vcpu->arch.aia_context.imsic_state; |
941 | return imsic_mrif_isel_check(nr_eix: imsic->nr_eix, isel); |
942 | } |
943 | |
944 | void kvm_riscv_vcpu_aia_imsic_reset(struct kvm_vcpu *vcpu) |
945 | { |
946 | struct imsic *imsic = vcpu->arch.aia_context.imsic_state; |
947 | |
948 | if (!imsic) |
949 | return; |
950 | |
951 | kvm_riscv_vcpu_aia_imsic_release(vcpu); |
952 | |
953 | memset(imsic->swfile, 0, sizeof(*imsic->swfile)); |
954 | } |
955 | |
956 | int kvm_riscv_vcpu_aia_imsic_inject(struct kvm_vcpu *vcpu, |
957 | u32 guest_index, u32 offset, u32 iid) |
958 | { |
959 | unsigned long flags; |
960 | struct imsic_mrif_eix *eix; |
961 | struct imsic *imsic = vcpu->arch.aia_context.imsic_state; |
962 | |
963 | /* We only emulate one IMSIC MMIO page for each Guest VCPU */ |
964 | if (!imsic || !iid || guest_index || |
965 | (offset != IMSIC_MMIO_SETIPNUM_LE && |
966 | offset != IMSIC_MMIO_SETIPNUM_BE)) |
967 | return -ENODEV; |
968 | |
969 | iid = (offset == IMSIC_MMIO_SETIPNUM_BE) ? __swab32(iid) : iid; |
970 | if (imsic->nr_msis <= iid) |
971 | return -EINVAL; |
972 | |
973 | read_lock_irqsave(&imsic->vsfile_lock, flags); |
974 | |
975 | if (imsic->vsfile_cpu >= 0) { |
976 | writel(iid, imsic->vsfile_va + IMSIC_MMIO_SETIPNUM_LE); |
977 | kvm_vcpu_kick(vcpu); |
978 | } else { |
979 | eix = &imsic->swfile->eix[iid / BITS_PER_TYPE(u64)]; |
980 | set_bit(nr: iid & (BITS_PER_TYPE(u64) - 1), addr: eix->eip); |
981 | imsic_swfile_extirq_update(vcpu); |
982 | } |
983 | |
984 | read_unlock_irqrestore(&imsic->vsfile_lock, flags); |
985 | |
986 | return 0; |
987 | } |
988 | |
989 | static int imsic_mmio_read(struct kvm_vcpu *vcpu, struct kvm_io_device *dev, |
990 | gpa_t addr, int len, void *val) |
991 | { |
992 | if (len != 4 || (addr & 0x3) != 0) |
993 | return -EOPNOTSUPP; |
994 | |
995 | *((u32 *)val) = 0; |
996 | |
997 | return 0; |
998 | } |
999 | |
1000 | static int imsic_mmio_write(struct kvm_vcpu *vcpu, struct kvm_io_device *dev, |
1001 | gpa_t addr, int len, const void *val) |
1002 | { |
1003 | struct kvm_msi msi = { 0 }; |
1004 | |
1005 | if (len != 4 || (addr & 0x3) != 0) |
1006 | return -EOPNOTSUPP; |
1007 | |
1008 | msi.address_hi = addr >> 32; |
1009 | msi.address_lo = (u32)addr; |
1010 | msi.data = *((const u32 *)val); |
1011 | kvm_riscv_aia_inject_msi(vcpu->kvm, &msi); |
1012 | |
1013 | return 0; |
1014 | }; |
1015 | |
1016 | static struct kvm_io_device_ops imsic_iodoev_ops = { |
1017 | .read = imsic_mmio_read, |
1018 | .write = imsic_mmio_write, |
1019 | }; |
1020 | |
1021 | int kvm_riscv_vcpu_aia_imsic_init(struct kvm_vcpu *vcpu) |
1022 | { |
1023 | int ret = 0; |
1024 | struct imsic *imsic; |
1025 | struct page *swfile_page; |
1026 | struct kvm *kvm = vcpu->kvm; |
1027 | |
1028 | /* Fail if we have zero IDs */ |
1029 | if (!kvm->arch.aia.nr_ids) |
1030 | return -EINVAL; |
1031 | |
1032 | /* Allocate IMSIC context */ |
1033 | imsic = kzalloc(size: sizeof(*imsic), GFP_KERNEL); |
1034 | if (!imsic) |
1035 | return -ENOMEM; |
1036 | vcpu->arch.aia_context.imsic_state = imsic; |
1037 | |
1038 | /* Setup IMSIC context */ |
1039 | imsic->nr_msis = kvm->arch.aia.nr_ids + 1; |
1040 | rwlock_init(&imsic->vsfile_lock); |
1041 | imsic->nr_eix = BITS_TO_U64(imsic->nr_msis); |
1042 | imsic->nr_hw_eix = BITS_TO_U64(kvm_riscv_aia_max_ids); |
1043 | imsic->vsfile_hgei = imsic->vsfile_cpu = -1; |
1044 | |
1045 | /* Setup IMSIC SW-file */ |
1046 | swfile_page = alloc_pages(GFP_KERNEL | __GFP_ZERO, |
1047 | order: get_order(size: sizeof(*imsic->swfile))); |
1048 | if (!swfile_page) { |
1049 | ret = -ENOMEM; |
1050 | goto fail_free_imsic; |
1051 | } |
1052 | imsic->swfile = page_to_virt(swfile_page); |
1053 | imsic->swfile_pa = page_to_phys(swfile_page); |
1054 | spin_lock_init(&imsic->swfile_extirq_lock); |
1055 | |
1056 | /* Setup IO device */ |
1057 | kvm_iodevice_init(dev: &imsic->iodev, ops: &imsic_iodoev_ops); |
1058 | mutex_lock(&kvm->slots_lock); |
1059 | ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, |
1060 | vcpu->arch.aia_context.imsic_addr, |
1061 | KVM_DEV_RISCV_IMSIC_SIZE, |
1062 | &imsic->iodev); |
1063 | mutex_unlock(lock: &kvm->slots_lock); |
1064 | if (ret) |
1065 | goto fail_free_swfile; |
1066 | |
1067 | return 0; |
1068 | |
1069 | fail_free_swfile: |
1070 | free_pages(addr: (unsigned long)imsic->swfile, |
1071 | order: get_order(size: sizeof(*imsic->swfile))); |
1072 | fail_free_imsic: |
1073 | vcpu->arch.aia_context.imsic_state = NULL; |
1074 | kfree(objp: imsic); |
1075 | return ret; |
1076 | } |
1077 | |
1078 | void kvm_riscv_vcpu_aia_imsic_cleanup(struct kvm_vcpu *vcpu) |
1079 | { |
1080 | struct kvm *kvm = vcpu->kvm; |
1081 | struct imsic *imsic = vcpu->arch.aia_context.imsic_state; |
1082 | |
1083 | if (!imsic) |
1084 | return; |
1085 | |
1086 | imsic_vsfile_cleanup(imsic); |
1087 | |
1088 | mutex_lock(&kvm->slots_lock); |
1089 | kvm_io_bus_unregister_dev(kvm, bus_idx: KVM_MMIO_BUS, dev: &imsic->iodev); |
1090 | mutex_unlock(lock: &kvm->slots_lock); |
1091 | |
1092 | free_pages(addr: (unsigned long)imsic->swfile, |
1093 | order: get_order(size: sizeof(*imsic->swfile))); |
1094 | |
1095 | vcpu->arch.aia_context.imsic_state = NULL; |
1096 | kfree(objp: imsic); |
1097 | } |
1098 | |