// SPDX-License-Identifier: GPL-2.0-only
/*
 * VFIO platform devices interrupt handling
 *
 * Copyright (C) 2013 - Virtual Open Systems
 * Author: Antonios Motakis <a.motakis@virtualopensystems.com>
 */

#include <linux/eventfd.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/vfio.h>
#include <linux/irq.h>

#include "vfio_platform_private.h"

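/*
 * Mask the hardware IRQ if it is not already masked.  The per-IRQ
 * spinlock serializes this against the automasked interrupt handler.
 */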
static void vfio_platform_mask(struct vfio_platform_irq *irq_ctx)
{
	unsigned long flags;

	spin_lock_irqsave(&irq_ctx->lock, flags);

	if (!irq_ctx->masked) {
		disable_irq_nosync(irq_ctx->hwirq);
		irq_ctx->masked = true;
	}

	spin_unlock_irqrestore(&irq_ctx->lock, flags);
}

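/* virqfd callback: mask the IRQ when the user signals the mask eventfd. */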
static int vfio_platform_mask_handler(void *opaque, void *unused)
{
	struct vfio_platform_irq *irq_ctx = opaque;

	vfio_platform_mask(irq_ctx);

	return 0;
}

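/*
 * VFIO_IRQ_SET_ACTION_MASK handler.  DATA_EVENTFD wires (fd >= 0) or
 * tears down (fd < 0) a virqfd that masks the IRQ when signaled;
 * DATA_NONE masks immediately; DATA_BOOL masks if the byte is non-zero.
 */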
static int vfio_platform_set_irq_mask(struct vfio_platform_device *vdev,
				      unsigned index, unsigned start,
				      unsigned count, uint32_t flags,
				      void *data)
{
	if (start != 0 || count != 1)
		return -EINVAL;

	if (!(vdev->irqs[index].flags & VFIO_IRQ_INFO_MASKABLE))
		return -EINVAL;

	if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
		int32_t fd = *(int32_t *)data;

		if (fd >= 0)
			return vfio_virqfd_enable((void *) &vdev->irqs[index],
						  vfio_platform_mask_handler,
						  NULL, NULL,
						  &vdev->irqs[index].mask, fd);

		vfio_virqfd_disable(&vdev->irqs[index].mask);
		return 0;
	}

	if (flags & VFIO_IRQ_SET_DATA_NONE) {
		vfio_platform_mask(&vdev->irqs[index]);

	} else if (flags & VFIO_IRQ_SET_DATA_BOOL) {
		uint8_t mask = *(uint8_t *)data;

		if (mask)
			vfio_platform_mask(&vdev->irqs[index]);
	}

	return 0;
}

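/*
 * Unmask the hardware IRQ if it is currently masked, under the same
 * per-IRQ spinlock as the mask path.
 */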
static void vfio_platform_unmask(struct vfio_platform_irq *irq_ctx)
{
	unsigned long flags;

	spin_lock_irqsave(&irq_ctx->lock, flags);

	if (irq_ctx->masked) {
		enable_irq(irq_ctx->hwirq);
		irq_ctx->masked = false;
	}

	spin_unlock_irqrestore(&irq_ctx->lock, flags);
}

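/* virqfd callback: unmask the IRQ when the user signals the unmask eventfd. */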
static int vfio_platform_unmask_handler(void *opaque, void *unused)
{
	struct vfio_platform_irq *irq_ctx = opaque;

	vfio_platform_unmask(irq_ctx);

	return 0;
}

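/*
 * VFIO_IRQ_SET_ACTION_UNMASK handler, symmetric with the mask path:
 * DATA_EVENTFD wires or tears down an unmask virqfd, DATA_NONE unmasks
 * immediately, DATA_BOOL unmasks if the byte is non-zero.
 */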
static int vfio_platform_set_irq_unmask(struct vfio_platform_device *vdev,
					unsigned index, unsigned start,
					unsigned count, uint32_t flags,
					void *data)
{
	if (start != 0 || count != 1)
		return -EINVAL;

	if (!(vdev->irqs[index].flags & VFIO_IRQ_INFO_MASKABLE))
		return -EINVAL;

	if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
		int32_t fd = *(int32_t *)data;

		if (fd >= 0)
			return vfio_virqfd_enable((void *) &vdev->irqs[index],
						  vfio_platform_unmask_handler,
						  NULL, NULL,
						  &vdev->irqs[index].unmask,
						  fd);

		vfio_virqfd_disable(&vdev->irqs[index].unmask);
		return 0;
	}

	if (flags & VFIO_IRQ_SET_DATA_NONE) {
		vfio_platform_unmask(&vdev->irqs[index]);

	} else if (flags & VFIO_IRQ_SET_DATA_BOOL) {
		uint8_t unmask = *(uint8_t *)data;

		if (unmask)
			vfio_platform_unmask(&vdev->irqs[index]);
	}

	return 0;
}

/*
 * The trigger eventfd is guaranteed valid in the interrupt path
 * and protected by the igate mutex when triggered via ioctl.
 */
static void vfio_send_eventfd(struct vfio_platform_irq *irq_ctx)
{
	if (likely(irq_ctx->trigger))
		eventfd_signal(irq_ctx->trigger);
}

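/*
 * Handler for level-triggered (AUTOMASKED) interrupts: disable the line
 * before signaling userspace so a still-asserted level IRQ cannot storm.
 * The user re-enables it with an UNMASK operation once serviced.  An
 * interrupt that arrives while already masked is reported as IRQ_NONE.
 */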
static irqreturn_t vfio_automasked_irq_handler(int irq, void *dev_id)
{
	struct vfio_platform_irq *irq_ctx = dev_id;
	unsigned long flags;
	int ret = IRQ_NONE;

	spin_lock_irqsave(&irq_ctx->lock, flags);

	if (!irq_ctx->masked) {
		ret = IRQ_HANDLED;

		/* automask maskable interrupts */
		disable_irq_nosync(irq_ctx->hwirq);
		irq_ctx->masked = true;
	}

	spin_unlock_irqrestore(&irq_ctx->lock, flags);

	if (ret == IRQ_HANDLED)
		vfio_send_eventfd(irq_ctx);

	return ret;
}

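/* Handler for edge-triggered interrupts: simply forward to the eventfd. */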
static irqreturn_t vfio_irq_handler(int irq, void *dev_id)
{
	struct vfio_platform_irq *irq_ctx = dev_id;

	vfio_send_eventfd(irq_ctx);

	return IRQ_HANDLED;
}

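/*
 * Install or tear down the trigger eventfd for one IRQ.  Any existing
 * trigger is released with the IRQ disabled; a negative fd leaves the
 * IRQ disabled, otherwise the new eventfd is installed and the IRQ
 * (re)enabled.
 */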
static int vfio_set_trigger(struct vfio_platform_device *vdev, int index,
			    int fd)
{
	struct vfio_platform_irq *irq = &vdev->irqs[index];
	struct eventfd_ctx *trigger;

	if (irq->trigger) {
		disable_irq(irq->hwirq);
		eventfd_ctx_put(irq->trigger);
		irq->trigger = NULL;
	}

	if (fd < 0) /* Disable only */
		return 0;

	trigger = eventfd_ctx_fdget(fd);
	if (IS_ERR(trigger))
		return PTR_ERR(trigger);

	irq->trigger = trigger;

	/*
	 * irq->masked effectively provides nested disables within the overall
	 * enable relative to trigger. Specifically, request_irq() is called
	 * with NO_AUTOEN, therefore the IRQ is initially disabled. The user
	 * may only further disable the IRQ with a MASK operation because
	 * irq->masked is initially false.
	 */
	enable_irq(irq->hwirq);

	return 0;
}

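/*
 * VFIO_IRQ_SET_ACTION_TRIGGER handler.  count == 0 with DATA_NONE tears
 * the trigger down; DATA_EVENTFD installs a new trigger eventfd; DATA_NONE
 * and a non-zero DATA_BOOL inject an interrupt by invoking the handler
 * directly, as if the hardware had fired.
 */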
static int vfio_platform_set_irq_trigger(struct vfio_platform_device *vdev,
					 unsigned index, unsigned start,
					 unsigned count, uint32_t flags,
					 void *data)
{
	struct vfio_platform_irq *irq = &vdev->irqs[index];
	irq_handler_t handler;

	if (vdev->irqs[index].flags & VFIO_IRQ_INFO_AUTOMASKED)
		handler = vfio_automasked_irq_handler;
	else
		handler = vfio_irq_handler;

	if (!count && (flags & VFIO_IRQ_SET_DATA_NONE))
		return vfio_set_trigger(vdev, index, -1);

	if (start != 0 || count != 1)
		return -EINVAL;

	if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
		int32_t fd = *(int32_t *)data;

		return vfio_set_trigger(vdev, index, fd);
	}

	if (flags & VFIO_IRQ_SET_DATA_NONE) {
		handler(irq->hwirq, irq);

	} else if (flags & VFIO_IRQ_SET_DATA_BOOL) {
		uint8_t trigger = *(uint8_t *)data;

		if (trigger)
			handler(irq->hwirq, irq);
	}

	return 0;
}

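/*
 * Dispatch a VFIO_DEVICE_SET_IRQS request to the mask, unmask or
 * trigger handler selected by the action flags.
 */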
int vfio_platform_set_irqs_ioctl(struct vfio_platform_device *vdev,
				 uint32_t flags, unsigned index, unsigned start,
				 unsigned count, void *data)
{
	int (*func)(struct vfio_platform_device *vdev, unsigned index,
		    unsigned start, unsigned count, uint32_t flags,
		    void *data) = NULL;

	/*
	 * For compatibility, errors from request_irq() are local to the
	 * SET_IRQS path and reflected in the name pointer. This allows,
	 * for example, polling mode fallback for an exclusive IRQ failure.
	 */
	if (IS_ERR(vdev->irqs[index].name))
		return PTR_ERR(vdev->irqs[index].name);

	switch (flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) {
	case VFIO_IRQ_SET_ACTION_MASK:
		func = vfio_platform_set_irq_mask;
		break;
	case VFIO_IRQ_SET_ACTION_UNMASK:
		func = vfio_platform_set_irq_unmask;
		break;
	case VFIO_IRQ_SET_ACTION_TRIGGER:
		func = vfio_platform_set_irq_trigger;
		break;
	}

	if (!func)
		return -ENOTTY;

	return func(vdev, index, start, count, flags, data);
}
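
/*
 * Example (userspace, illustrative only): wiring an eventfd as the
 * trigger for IRQ index 0 via VFIO_DEVICE_SET_IRQS.  The "device" fd
 * is a hypothetical, already-open VFIO device file descriptor, and
 * error handling is omitted.
 *
 *	struct vfio_irq_set *irq_set;
 *	int32_t *pfd;
 *	int efd = eventfd(0, EFD_CLOEXEC);
 *
 *	irq_set = malloc(sizeof(*irq_set) + sizeof(int32_t));
 *	irq_set->argsz = sizeof(*irq_set) + sizeof(int32_t);
 *	irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD |
 *			 VFIO_IRQ_SET_ACTION_TRIGGER;
 *	irq_set->index = 0;
 *	irq_set->start = 0;
 *	irq_set->count = 1;
 *	pfd = (int32_t *)&irq_set->data;
 *	*pfd = efd;
 *	ioctl(device, VFIO_DEVICE_SET_IRQS, irq_set);
 *
 * Interrupts are then consumed by read(2)ing the eventfd.
 */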
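/*
 * Discover the device's IRQs, allocate per-IRQ state and request each
 * line with IRQF_NO_AUTOEN so it stays disabled until a trigger eventfd
 * is installed.  Level-triggered lines are advertised as MASKABLE and
 * AUTOMASKED and use the automasking handler.
 */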
int vfio_platform_irq_init(struct vfio_platform_device *vdev)
{
	int cnt = 0, i, ret = 0;

	while (vdev->get_irq(vdev, cnt) >= 0)
		cnt++;

	vdev->irqs = kcalloc(cnt, sizeof(struct vfio_platform_irq),
			     GFP_KERNEL_ACCOUNT);
	if (!vdev->irqs)
		return -ENOMEM;

	for (i = 0; i < cnt; i++) {
		int hwirq = vdev->get_irq(vdev, i);
		irq_handler_t handler = vfio_irq_handler;

		if (hwirq < 0) {
			ret = -EINVAL;
			goto err;
		}

		spin_lock_init(&vdev->irqs[i].lock);

		vdev->irqs[i].flags = VFIO_IRQ_INFO_EVENTFD;

		if (irq_get_trigger_type(hwirq) & IRQ_TYPE_LEVEL_MASK) {
			vdev->irqs[i].flags |= VFIO_IRQ_INFO_MASKABLE
						| VFIO_IRQ_INFO_AUTOMASKED;
			handler = vfio_automasked_irq_handler;
		}

		vdev->irqs[i].count = 1;
		vdev->irqs[i].hwirq = hwirq;
		vdev->irqs[i].masked = false;
		vdev->irqs[i].name = kasprintf(GFP_KERNEL_ACCOUNT,
					       "vfio-irq[%d](%s)", hwirq,
					       vdev->name);
		if (!vdev->irqs[i].name) {
			ret = -ENOMEM;
			goto err;
		}

		ret = request_irq(hwirq, handler, IRQF_NO_AUTOEN,
				  vdev->irqs[i].name, &vdev->irqs[i]);
		if (ret) {
			kfree(vdev->irqs[i].name);
			vdev->irqs[i].name = ERR_PTR(ret);
		}
	}

	vdev->num_irqs = cnt;

	return 0;
err:
	for (--i; i >= 0; i--) {
		if (!IS_ERR(vdev->irqs[i].name)) {
			free_irq(vdev->irqs[i].hwirq, &vdev->irqs[i]);
			kfree(vdev->irqs[i].name);
		}
	}
	kfree(vdev->irqs);
	return ret;
}

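/*
 * Release all per-IRQ resources: mask/unmask virqfds, the requested
 * IRQ lines, trigger eventfds and names.
 */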
void vfio_platform_irq_cleanup(struct vfio_platform_device *vdev)
{
	int i;

	for (i = 0; i < vdev->num_irqs; i++) {
		vfio_virqfd_disable(&vdev->irqs[i].mask);
		vfio_virqfd_disable(&vdev->irqs[i].unmask);
		if (!IS_ERR(vdev->irqs[i].name)) {
			free_irq(vdev->irqs[i].hwirq, &vdev->irqs[i]);
			if (vdev->irqs[i].trigger)
				eventfd_ctx_put(vdev->irqs[i].trigger);
			kfree(vdev->irqs[i].name);
		}
	}

	vdev->num_irqs = 0;
	kfree(vdev->irqs);
}

