1 | // SPDX-License-Identifier: GPL-2.0 |
2 | /* |
3 | * Copyright (C) 2021 Western Digital Corporation or its affiliates. |
4 | * Copyright (C) 2022 Ventana Micro Systems Inc. |
5 | * |
6 | * Authors: |
7 | * Anup Patel <apatel@ventanamicro.com> |
8 | */ |
9 | |
10 | #include <linux/kvm_host.h> |
11 | #include <linux/math.h> |
12 | #include <linux/spinlock.h> |
13 | #include <linux/swab.h> |
14 | #include <kvm/iodev.h> |
15 | #include <asm/kvm_aia_aplic.h> |
16 | |
/*
 * Per-source state of the emulated APLIC.
 *
 * @lock protects @sourcecfg, @state and @target against concurrent
 * MMIO accesses and interrupt injection.
 */
struct aplic_irq {
	raw_spinlock_t lock;
	u32 sourcecfg;	/* source mode (APLIC_SOURCECFG_SM_*); D bit forces 0 */
	u32 state;	/* bitmask of the APLIC_IRQ_STATE_* flags below */
#define APLIC_IRQ_STATE_PENDING		BIT(0)
#define APLIC_IRQ_STATE_ENABLED		BIT(1)
#define APLIC_IRQ_STATE_ENPEND		(APLIC_IRQ_STATE_PENDING | \
					 APLIC_IRQ_STATE_ENABLED)
#define APLIC_IRQ_STATE_INPUT		BIT(8)
	u32 target;	/* target hart/guest index and EIID for MSI delivery */
};
28 | |
/*
 * Global state of one emulated APLIC domain, registered as an MMIO
 * device on the KVM I/O bus.
 */
struct aplic {
	struct kvm_io_device iodev;

	u32 domaincfg;	/* only the IE bit is writable by the guest */
	u32 genmsi;	/* last value written to the GENMSI register */

	u32 nr_irqs;	/* number of sources + 1 (source 0 is invalid) */
	u32 nr_words;	/* number of 32-bit words covering all sources */
	struct aplic_irq *irqs;	/* array of nr_irqs entries, indexed by source */
};
39 | |
40 | static u32 aplic_read_sourcecfg(struct aplic *aplic, u32 irq) |
41 | { |
42 | u32 ret; |
43 | unsigned long flags; |
44 | struct aplic_irq *irqd; |
45 | |
46 | if (!irq || aplic->nr_irqs <= irq) |
47 | return 0; |
48 | irqd = &aplic->irqs[irq]; |
49 | |
50 | raw_spin_lock_irqsave(&irqd->lock, flags); |
51 | ret = irqd->sourcecfg; |
52 | raw_spin_unlock_irqrestore(&irqd->lock, flags); |
53 | |
54 | return ret; |
55 | } |
56 | |
57 | static void aplic_write_sourcecfg(struct aplic *aplic, u32 irq, u32 val) |
58 | { |
59 | unsigned long flags; |
60 | struct aplic_irq *irqd; |
61 | |
62 | if (!irq || aplic->nr_irqs <= irq) |
63 | return; |
64 | irqd = &aplic->irqs[irq]; |
65 | |
66 | if (val & APLIC_SOURCECFG_D) |
67 | val = 0; |
68 | else |
69 | val &= APLIC_SOURCECFG_SM_MASK; |
70 | |
71 | raw_spin_lock_irqsave(&irqd->lock, flags); |
72 | irqd->sourcecfg = val; |
73 | raw_spin_unlock_irqrestore(&irqd->lock, flags); |
74 | } |
75 | |
76 | static u32 aplic_read_target(struct aplic *aplic, u32 irq) |
77 | { |
78 | u32 ret; |
79 | unsigned long flags; |
80 | struct aplic_irq *irqd; |
81 | |
82 | if (!irq || aplic->nr_irqs <= irq) |
83 | return 0; |
84 | irqd = &aplic->irqs[irq]; |
85 | |
86 | raw_spin_lock_irqsave(&irqd->lock, flags); |
87 | ret = irqd->target; |
88 | raw_spin_unlock_irqrestore(&irqd->lock, flags); |
89 | |
90 | return ret; |
91 | } |
92 | |
93 | static void aplic_write_target(struct aplic *aplic, u32 irq, u32 val) |
94 | { |
95 | unsigned long flags; |
96 | struct aplic_irq *irqd; |
97 | |
98 | if (!irq || aplic->nr_irqs <= irq) |
99 | return; |
100 | irqd = &aplic->irqs[irq]; |
101 | |
102 | val &= APLIC_TARGET_EIID_MASK | |
103 | (APLIC_TARGET_HART_IDX_MASK << APLIC_TARGET_HART_IDX_SHIFT) | |
104 | (APLIC_TARGET_GUEST_IDX_MASK << APLIC_TARGET_GUEST_IDX_SHIFT); |
105 | |
106 | raw_spin_lock_irqsave(&irqd->lock, flags); |
107 | irqd->target = val; |
108 | raw_spin_unlock_irqrestore(&irqd->lock, flags); |
109 | } |
110 | |
111 | static bool aplic_read_pending(struct aplic *aplic, u32 irq) |
112 | { |
113 | bool ret; |
114 | unsigned long flags; |
115 | struct aplic_irq *irqd; |
116 | |
117 | if (!irq || aplic->nr_irqs <= irq) |
118 | return false; |
119 | irqd = &aplic->irqs[irq]; |
120 | |
121 | raw_spin_lock_irqsave(&irqd->lock, flags); |
122 | ret = (irqd->state & APLIC_IRQ_STATE_PENDING) ? true : false; |
123 | raw_spin_unlock_irqrestore(&irqd->lock, flags); |
124 | |
125 | return ret; |
126 | } |
127 | |
128 | static void aplic_write_pending(struct aplic *aplic, u32 irq, bool pending) |
129 | { |
130 | unsigned long flags, sm; |
131 | struct aplic_irq *irqd; |
132 | |
133 | if (!irq || aplic->nr_irqs <= irq) |
134 | return; |
135 | irqd = &aplic->irqs[irq]; |
136 | |
137 | raw_spin_lock_irqsave(&irqd->lock, flags); |
138 | |
139 | sm = irqd->sourcecfg & APLIC_SOURCECFG_SM_MASK; |
140 | if (sm == APLIC_SOURCECFG_SM_INACTIVE) |
141 | goto skip_write_pending; |
142 | |
143 | if (sm == APLIC_SOURCECFG_SM_LEVEL_HIGH || |
144 | sm == APLIC_SOURCECFG_SM_LEVEL_LOW) { |
145 | if (!pending) |
146 | goto skip_write_pending; |
147 | if ((irqd->state & APLIC_IRQ_STATE_INPUT) && |
148 | sm == APLIC_SOURCECFG_SM_LEVEL_LOW) |
149 | goto skip_write_pending; |
150 | if (!(irqd->state & APLIC_IRQ_STATE_INPUT) && |
151 | sm == APLIC_SOURCECFG_SM_LEVEL_HIGH) |
152 | goto skip_write_pending; |
153 | } |
154 | |
155 | if (pending) |
156 | irqd->state |= APLIC_IRQ_STATE_PENDING; |
157 | else |
158 | irqd->state &= ~APLIC_IRQ_STATE_PENDING; |
159 | |
160 | skip_write_pending: |
161 | raw_spin_unlock_irqrestore(&irqd->lock, flags); |
162 | } |
163 | |
164 | static bool aplic_read_enabled(struct aplic *aplic, u32 irq) |
165 | { |
166 | bool ret; |
167 | unsigned long flags; |
168 | struct aplic_irq *irqd; |
169 | |
170 | if (!irq || aplic->nr_irqs <= irq) |
171 | return false; |
172 | irqd = &aplic->irqs[irq]; |
173 | |
174 | raw_spin_lock_irqsave(&irqd->lock, flags); |
175 | ret = (irqd->state & APLIC_IRQ_STATE_ENABLED) ? true : false; |
176 | raw_spin_unlock_irqrestore(&irqd->lock, flags); |
177 | |
178 | return ret; |
179 | } |
180 | |
181 | static void aplic_write_enabled(struct aplic *aplic, u32 irq, bool enabled) |
182 | { |
183 | unsigned long flags; |
184 | struct aplic_irq *irqd; |
185 | |
186 | if (!irq || aplic->nr_irqs <= irq) |
187 | return; |
188 | irqd = &aplic->irqs[irq]; |
189 | |
190 | raw_spin_lock_irqsave(&irqd->lock, flags); |
191 | if (enabled) |
192 | irqd->state |= APLIC_IRQ_STATE_ENABLED; |
193 | else |
194 | irqd->state &= ~APLIC_IRQ_STATE_ENABLED; |
195 | raw_spin_unlock_irqrestore(&irqd->lock, flags); |
196 | } |
197 | |
198 | static bool aplic_read_input(struct aplic *aplic, u32 irq) |
199 | { |
200 | u32 sourcecfg, sm, raw_input, irq_inverted; |
201 | struct aplic_irq *irqd; |
202 | unsigned long flags; |
203 | bool ret = false; |
204 | |
205 | if (!irq || aplic->nr_irqs <= irq) |
206 | return false; |
207 | irqd = &aplic->irqs[irq]; |
208 | |
209 | raw_spin_lock_irqsave(&irqd->lock, flags); |
210 | |
211 | sourcecfg = irqd->sourcecfg; |
212 | if (sourcecfg & APLIC_SOURCECFG_D) |
213 | goto skip; |
214 | |
215 | sm = sourcecfg & APLIC_SOURCECFG_SM_MASK; |
216 | if (sm == APLIC_SOURCECFG_SM_INACTIVE) |
217 | goto skip; |
218 | |
219 | raw_input = (irqd->state & APLIC_IRQ_STATE_INPUT) ? 1 : 0; |
220 | irq_inverted = (sm == APLIC_SOURCECFG_SM_LEVEL_LOW || |
221 | sm == APLIC_SOURCECFG_SM_EDGE_FALL) ? 1 : 0; |
222 | ret = !!(raw_input ^ irq_inverted); |
223 | |
224 | skip: |
225 | raw_spin_unlock_irqrestore(&irqd->lock, flags); |
226 | |
227 | return ret; |
228 | } |
229 | |
230 | static void aplic_inject_msi(struct kvm *kvm, u32 irq, u32 target) |
231 | { |
232 | u32 hart_idx, guest_idx, eiid; |
233 | |
234 | hart_idx = target >> APLIC_TARGET_HART_IDX_SHIFT; |
235 | hart_idx &= APLIC_TARGET_HART_IDX_MASK; |
236 | guest_idx = target >> APLIC_TARGET_GUEST_IDX_SHIFT; |
237 | guest_idx &= APLIC_TARGET_GUEST_IDX_MASK; |
238 | eiid = target & APLIC_TARGET_EIID_MASK; |
239 | kvm_riscv_aia_inject_msi_by_id(kvm, hart_idx, guest_idx, eiid); |
240 | } |
241 | |
242 | static void aplic_update_irq_range(struct kvm *kvm, u32 first, u32 last) |
243 | { |
244 | bool inject; |
245 | u32 irq, target; |
246 | unsigned long flags; |
247 | struct aplic_irq *irqd; |
248 | struct aplic *aplic = kvm->arch.aia.aplic_state; |
249 | |
250 | if (!(aplic->domaincfg & APLIC_DOMAINCFG_IE)) |
251 | return; |
252 | |
253 | for (irq = first; irq <= last; irq++) { |
254 | if (!irq || aplic->nr_irqs <= irq) |
255 | continue; |
256 | irqd = &aplic->irqs[irq]; |
257 | |
258 | raw_spin_lock_irqsave(&irqd->lock, flags); |
259 | |
260 | inject = false; |
261 | target = irqd->target; |
262 | if ((irqd->state & APLIC_IRQ_STATE_ENPEND) == |
263 | APLIC_IRQ_STATE_ENPEND) { |
264 | irqd->state &= ~APLIC_IRQ_STATE_PENDING; |
265 | inject = true; |
266 | } |
267 | |
268 | raw_spin_unlock_irqrestore(&irqd->lock, flags); |
269 | |
270 | if (inject) |
271 | aplic_inject_msi(kvm, irq, target); |
272 | } |
273 | } |
274 | |
275 | int kvm_riscv_aia_aplic_inject(struct kvm *kvm, u32 source, bool level) |
276 | { |
277 | u32 target; |
278 | bool inject = false, ie; |
279 | unsigned long flags; |
280 | struct aplic_irq *irqd; |
281 | struct aplic *aplic = kvm->arch.aia.aplic_state; |
282 | |
283 | if (!aplic || !source || (aplic->nr_irqs <= source)) |
284 | return -ENODEV; |
285 | irqd = &aplic->irqs[source]; |
286 | ie = (aplic->domaincfg & APLIC_DOMAINCFG_IE) ? true : false; |
287 | |
288 | raw_spin_lock_irqsave(&irqd->lock, flags); |
289 | |
290 | if (irqd->sourcecfg & APLIC_SOURCECFG_D) |
291 | goto skip_unlock; |
292 | |
293 | switch (irqd->sourcecfg & APLIC_SOURCECFG_SM_MASK) { |
294 | case APLIC_SOURCECFG_SM_EDGE_RISE: |
295 | if (level && !(irqd->state & APLIC_IRQ_STATE_INPUT) && |
296 | !(irqd->state & APLIC_IRQ_STATE_PENDING)) |
297 | irqd->state |= APLIC_IRQ_STATE_PENDING; |
298 | break; |
299 | case APLIC_SOURCECFG_SM_EDGE_FALL: |
300 | if (!level && (irqd->state & APLIC_IRQ_STATE_INPUT) && |
301 | !(irqd->state & APLIC_IRQ_STATE_PENDING)) |
302 | irqd->state |= APLIC_IRQ_STATE_PENDING; |
303 | break; |
304 | case APLIC_SOURCECFG_SM_LEVEL_HIGH: |
305 | if (level && !(irqd->state & APLIC_IRQ_STATE_PENDING)) |
306 | irqd->state |= APLIC_IRQ_STATE_PENDING; |
307 | break; |
308 | case APLIC_SOURCECFG_SM_LEVEL_LOW: |
309 | if (!level && !(irqd->state & APLIC_IRQ_STATE_PENDING)) |
310 | irqd->state |= APLIC_IRQ_STATE_PENDING; |
311 | break; |
312 | } |
313 | |
314 | if (level) |
315 | irqd->state |= APLIC_IRQ_STATE_INPUT; |
316 | else |
317 | irqd->state &= ~APLIC_IRQ_STATE_INPUT; |
318 | |
319 | target = irqd->target; |
320 | if (ie && ((irqd->state & APLIC_IRQ_STATE_ENPEND) == |
321 | APLIC_IRQ_STATE_ENPEND)) { |
322 | irqd->state &= ~APLIC_IRQ_STATE_PENDING; |
323 | inject = true; |
324 | } |
325 | |
326 | skip_unlock: |
327 | raw_spin_unlock_irqrestore(&irqd->lock, flags); |
328 | |
329 | if (inject) |
330 | aplic_inject_msi(kvm, irq: source, target); |
331 | |
332 | return 0; |
333 | } |
334 | |
335 | static u32 aplic_read_input_word(struct aplic *aplic, u32 word) |
336 | { |
337 | u32 i, ret = 0; |
338 | |
339 | for (i = 0; i < 32; i++) |
340 | ret |= aplic_read_input(aplic, irq: word * 32 + i) ? BIT(i) : 0; |
341 | |
342 | return ret; |
343 | } |
344 | |
345 | static u32 aplic_read_pending_word(struct aplic *aplic, u32 word) |
346 | { |
347 | u32 i, ret = 0; |
348 | |
349 | for (i = 0; i < 32; i++) |
350 | ret |= aplic_read_pending(aplic, irq: word * 32 + i) ? BIT(i) : 0; |
351 | |
352 | return ret; |
353 | } |
354 | |
355 | static void aplic_write_pending_word(struct aplic *aplic, u32 word, |
356 | u32 val, bool pending) |
357 | { |
358 | u32 i; |
359 | |
360 | for (i = 0; i < 32; i++) { |
361 | if (val & BIT(i)) |
362 | aplic_write_pending(aplic, irq: word * 32 + i, pending); |
363 | } |
364 | } |
365 | |
366 | static u32 aplic_read_enabled_word(struct aplic *aplic, u32 word) |
367 | { |
368 | u32 i, ret = 0; |
369 | |
370 | for (i = 0; i < 32; i++) |
371 | ret |= aplic_read_enabled(aplic, irq: word * 32 + i) ? BIT(i) : 0; |
372 | |
373 | return ret; |
374 | } |
375 | |
376 | static void aplic_write_enabled_word(struct aplic *aplic, u32 word, |
377 | u32 val, bool enabled) |
378 | { |
379 | u32 i; |
380 | |
381 | for (i = 0; i < 32; i++) { |
382 | if (val & BIT(i)) |
383 | aplic_write_enabled(aplic, irq: word * 32 + i, enabled); |
384 | } |
385 | } |
386 | |
387 | static int aplic_mmio_read_offset(struct kvm *kvm, gpa_t off, u32 *val32) |
388 | { |
389 | u32 i; |
390 | struct aplic *aplic = kvm->arch.aia.aplic_state; |
391 | |
392 | if ((off & 0x3) != 0) |
393 | return -EOPNOTSUPP; |
394 | |
395 | if (off == APLIC_DOMAINCFG) { |
396 | *val32 = APLIC_DOMAINCFG_RDONLY | |
397 | aplic->domaincfg | APLIC_DOMAINCFG_DM; |
398 | } else if ((off >= APLIC_SOURCECFG_BASE) && |
399 | (off < (APLIC_SOURCECFG_BASE + (aplic->nr_irqs - 1) * 4))) { |
400 | i = ((off - APLIC_SOURCECFG_BASE) >> 2) + 1; |
401 | *val32 = aplic_read_sourcecfg(aplic, irq: i); |
402 | } else if ((off >= APLIC_SETIP_BASE) && |
403 | (off < (APLIC_SETIP_BASE + aplic->nr_words * 4))) { |
404 | i = (off - APLIC_SETIP_BASE) >> 2; |
405 | *val32 = aplic_read_pending_word(aplic, word: i); |
406 | } else if (off == APLIC_SETIPNUM) { |
407 | *val32 = 0; |
408 | } else if ((off >= APLIC_CLRIP_BASE) && |
409 | (off < (APLIC_CLRIP_BASE + aplic->nr_words * 4))) { |
410 | i = (off - APLIC_CLRIP_BASE) >> 2; |
411 | *val32 = aplic_read_input_word(aplic, word: i); |
412 | } else if (off == APLIC_CLRIPNUM) { |
413 | *val32 = 0; |
414 | } else if ((off >= APLIC_SETIE_BASE) && |
415 | (off < (APLIC_SETIE_BASE + aplic->nr_words * 4))) { |
416 | i = (off - APLIC_SETIE_BASE) >> 2; |
417 | *val32 = aplic_read_enabled_word(aplic, word: i); |
418 | } else if (off == APLIC_SETIENUM) { |
419 | *val32 = 0; |
420 | } else if ((off >= APLIC_CLRIE_BASE) && |
421 | (off < (APLIC_CLRIE_BASE + aplic->nr_words * 4))) { |
422 | *val32 = 0; |
423 | } else if (off == APLIC_CLRIENUM) { |
424 | *val32 = 0; |
425 | } else if (off == APLIC_SETIPNUM_LE) { |
426 | *val32 = 0; |
427 | } else if (off == APLIC_SETIPNUM_BE) { |
428 | *val32 = 0; |
429 | } else if (off == APLIC_GENMSI) { |
430 | *val32 = aplic->genmsi; |
431 | } else if ((off >= APLIC_TARGET_BASE) && |
432 | (off < (APLIC_TARGET_BASE + (aplic->nr_irqs - 1) * 4))) { |
433 | i = ((off - APLIC_TARGET_BASE) >> 2) + 1; |
434 | *val32 = aplic_read_target(aplic, irq: i); |
435 | } else |
436 | return -ENODEV; |
437 | |
438 | return 0; |
439 | } |
440 | |
441 | static int aplic_mmio_read(struct kvm_vcpu *vcpu, struct kvm_io_device *dev, |
442 | gpa_t addr, int len, void *val) |
443 | { |
444 | if (len != 4) |
445 | return -EOPNOTSUPP; |
446 | |
447 | return aplic_mmio_read_offset(kvm: vcpu->kvm, |
448 | off: addr - vcpu->kvm->arch.aia.aplic_addr, |
449 | val32: val); |
450 | } |
451 | |
452 | static int aplic_mmio_write_offset(struct kvm *kvm, gpa_t off, u32 val32) |
453 | { |
454 | u32 i; |
455 | struct aplic *aplic = kvm->arch.aia.aplic_state; |
456 | |
457 | if ((off & 0x3) != 0) |
458 | return -EOPNOTSUPP; |
459 | |
460 | if (off == APLIC_DOMAINCFG) { |
461 | /* Only IE bit writeable */ |
462 | aplic->domaincfg = val32 & APLIC_DOMAINCFG_IE; |
463 | } else if ((off >= APLIC_SOURCECFG_BASE) && |
464 | (off < (APLIC_SOURCECFG_BASE + (aplic->nr_irqs - 1) * 4))) { |
465 | i = ((off - APLIC_SOURCECFG_BASE) >> 2) + 1; |
466 | aplic_write_sourcecfg(aplic, irq: i, val: val32); |
467 | } else if ((off >= APLIC_SETIP_BASE) && |
468 | (off < (APLIC_SETIP_BASE + aplic->nr_words * 4))) { |
469 | i = (off - APLIC_SETIP_BASE) >> 2; |
470 | aplic_write_pending_word(aplic, word: i, val: val32, pending: true); |
471 | } else if (off == APLIC_SETIPNUM) { |
472 | aplic_write_pending(aplic, irq: val32, pending: true); |
473 | } else if ((off >= APLIC_CLRIP_BASE) && |
474 | (off < (APLIC_CLRIP_BASE + aplic->nr_words * 4))) { |
475 | i = (off - APLIC_CLRIP_BASE) >> 2; |
476 | aplic_write_pending_word(aplic, word: i, val: val32, pending: false); |
477 | } else if (off == APLIC_CLRIPNUM) { |
478 | aplic_write_pending(aplic, irq: val32, pending: false); |
479 | } else if ((off >= APLIC_SETIE_BASE) && |
480 | (off < (APLIC_SETIE_BASE + aplic->nr_words * 4))) { |
481 | i = (off - APLIC_SETIE_BASE) >> 2; |
482 | aplic_write_enabled_word(aplic, word: i, val: val32, enabled: true); |
483 | } else if (off == APLIC_SETIENUM) { |
484 | aplic_write_enabled(aplic, irq: val32, enabled: true); |
485 | } else if ((off >= APLIC_CLRIE_BASE) && |
486 | (off < (APLIC_CLRIE_BASE + aplic->nr_words * 4))) { |
487 | i = (off - APLIC_CLRIE_BASE) >> 2; |
488 | aplic_write_enabled_word(aplic, word: i, val: val32, enabled: false); |
489 | } else if (off == APLIC_CLRIENUM) { |
490 | aplic_write_enabled(aplic, irq: val32, enabled: false); |
491 | } else if (off == APLIC_SETIPNUM_LE) { |
492 | aplic_write_pending(aplic, irq: val32, pending: true); |
493 | } else if (off == APLIC_SETIPNUM_BE) { |
494 | aplic_write_pending(aplic, __swab32(val32), pending: true); |
495 | } else if (off == APLIC_GENMSI) { |
496 | aplic->genmsi = val32 & ~(APLIC_TARGET_GUEST_IDX_MASK << |
497 | APLIC_TARGET_GUEST_IDX_SHIFT); |
498 | kvm_riscv_aia_inject_msi_by_id(kvm, |
499 | val32 >> APLIC_TARGET_HART_IDX_SHIFT, 0, |
500 | val32 & APLIC_TARGET_EIID_MASK); |
501 | } else if ((off >= APLIC_TARGET_BASE) && |
502 | (off < (APLIC_TARGET_BASE + (aplic->nr_irqs - 1) * 4))) { |
503 | i = ((off - APLIC_TARGET_BASE) >> 2) + 1; |
504 | aplic_write_target(aplic, irq: i, val: val32); |
505 | } else |
506 | return -ENODEV; |
507 | |
508 | aplic_update_irq_range(kvm, first: 1, last: aplic->nr_irqs - 1); |
509 | |
510 | return 0; |
511 | } |
512 | |
513 | static int aplic_mmio_write(struct kvm_vcpu *vcpu, struct kvm_io_device *dev, |
514 | gpa_t addr, int len, const void *val) |
515 | { |
516 | if (len != 4) |
517 | return -EOPNOTSUPP; |
518 | |
519 | return aplic_mmio_write_offset(kvm: vcpu->kvm, |
520 | off: addr - vcpu->kvm->arch.aia.aplic_addr, |
521 | val32: *((const u32 *)val)); |
522 | } |
523 | |
/* MMIO callbacks used when the APLIC is registered on the KVM I/O bus. */
static struct kvm_io_device_ops aplic_iodoev_ops = {
	.read = aplic_mmio_read,
	.write = aplic_mmio_write,
};
528 | |
529 | int kvm_riscv_aia_aplic_set_attr(struct kvm *kvm, unsigned long type, u32 v) |
530 | { |
531 | int rc; |
532 | |
533 | if (!kvm->arch.aia.aplic_state) |
534 | return -ENODEV; |
535 | |
536 | rc = aplic_mmio_write_offset(kvm, off: type, val32: v); |
537 | if (rc) |
538 | return rc; |
539 | |
540 | return 0; |
541 | } |
542 | |
543 | int kvm_riscv_aia_aplic_get_attr(struct kvm *kvm, unsigned long type, u32 *v) |
544 | { |
545 | int rc; |
546 | |
547 | if (!kvm->arch.aia.aplic_state) |
548 | return -ENODEV; |
549 | |
550 | rc = aplic_mmio_read_offset(kvm, off: type, val32: v); |
551 | if (rc) |
552 | return rc; |
553 | |
554 | return 0; |
555 | } |
556 | |
557 | int kvm_riscv_aia_aplic_has_attr(struct kvm *kvm, unsigned long type) |
558 | { |
559 | int rc; |
560 | u32 val; |
561 | |
562 | if (!kvm->arch.aia.aplic_state) |
563 | return -ENODEV; |
564 | |
565 | rc = aplic_mmio_read_offset(kvm, off: type, val32: &val); |
566 | if (rc) |
567 | return rc; |
568 | |
569 | return 0; |
570 | } |
571 | |
572 | int kvm_riscv_aia_aplic_init(struct kvm *kvm) |
573 | { |
574 | int i, ret = 0; |
575 | struct aplic *aplic; |
576 | |
577 | /* Do nothing if we have zero sources */ |
578 | if (!kvm->arch.aia.nr_sources) |
579 | return 0; |
580 | |
581 | /* Allocate APLIC global state */ |
582 | aplic = kzalloc(size: sizeof(*aplic), GFP_KERNEL); |
583 | if (!aplic) |
584 | return -ENOMEM; |
585 | kvm->arch.aia.aplic_state = aplic; |
586 | |
587 | /* Setup APLIC IRQs */ |
588 | aplic->nr_irqs = kvm->arch.aia.nr_sources + 1; |
589 | aplic->nr_words = DIV_ROUND_UP(aplic->nr_irqs, 32); |
590 | aplic->irqs = kcalloc(n: aplic->nr_irqs, |
591 | size: sizeof(*aplic->irqs), GFP_KERNEL); |
592 | if (!aplic->irqs) { |
593 | ret = -ENOMEM; |
594 | goto fail_free_aplic; |
595 | } |
596 | for (i = 0; i < aplic->nr_irqs; i++) |
597 | raw_spin_lock_init(&aplic->irqs[i].lock); |
598 | |
599 | /* Setup IO device */ |
600 | kvm_iodevice_init(dev: &aplic->iodev, ops: &aplic_iodoev_ops); |
601 | mutex_lock(&kvm->slots_lock); |
602 | ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, |
603 | kvm->arch.aia.aplic_addr, |
604 | KVM_DEV_RISCV_APLIC_SIZE, |
605 | &aplic->iodev); |
606 | mutex_unlock(lock: &kvm->slots_lock); |
607 | if (ret) |
608 | goto fail_free_aplic_irqs; |
609 | |
610 | /* Setup default IRQ routing */ |
611 | ret = kvm_riscv_setup_default_irq_routing(kvm, aplic->nr_irqs); |
612 | if (ret) |
613 | goto fail_unreg_iodev; |
614 | |
615 | return 0; |
616 | |
617 | fail_unreg_iodev: |
618 | mutex_lock(&kvm->slots_lock); |
619 | kvm_io_bus_unregister_dev(kvm, bus_idx: KVM_MMIO_BUS, dev: &aplic->iodev); |
620 | mutex_unlock(lock: &kvm->slots_lock); |
621 | fail_free_aplic_irqs: |
622 | kfree(objp: aplic->irqs); |
623 | fail_free_aplic: |
624 | kvm->arch.aia.aplic_state = NULL; |
625 | kfree(objp: aplic); |
626 | return ret; |
627 | } |
628 | |
629 | void kvm_riscv_aia_aplic_cleanup(struct kvm *kvm) |
630 | { |
631 | struct aplic *aplic = kvm->arch.aia.aplic_state; |
632 | |
633 | if (!aplic) |
634 | return; |
635 | |
636 | mutex_lock(&kvm->slots_lock); |
637 | kvm_io_bus_unregister_dev(kvm, bus_idx: KVM_MMIO_BUS, dev: &aplic->iodev); |
638 | mutex_unlock(lock: &kvm->slots_lock); |
639 | |
640 | kfree(objp: aplic->irqs); |
641 | |
642 | kvm->arch.aia.aplic_state = NULL; |
643 | kfree(objp: aplic); |
644 | } |
645 | |