// SPDX-License-Identifier: GPL-2.0+
/*
 * GPIO driver for virtio-based virtual GPIO controllers
 *
 * Copyright (C) 2021 metux IT consult
 * Enrico Weigelt, metux IT consult <info@metux.net>
 *
 * Copyright (C) 2021 Linaro.
 * Viresh Kumar <viresh.kumar@linaro.org>
 */

#include <linux/completion.h>
#include <linux/err.h>
#include <linux/gpio/driver.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/virtio_config.h>
#include <uapi/linux/virtio_gpio.h>
#include <uapi/linux/virtio_ids.h>

struct virtio_gpio_line {
	struct mutex lock; /* Protects line operation */
	struct completion completion;
	struct virtio_gpio_request req ____cacheline_aligned;
	struct virtio_gpio_response res ____cacheline_aligned;
	unsigned int rxlen;
};

struct vgpio_irq_line {
	u8 type;
	bool disabled;
	bool masked;
	bool queued;
	bool update_pending;
	bool queue_pending;

	struct virtio_gpio_irq_request ireq ____cacheline_aligned;
	struct virtio_gpio_irq_response ires ____cacheline_aligned;
};

struct virtio_gpio {
	struct virtio_device *vdev;
	struct mutex lock; /* Protects virtqueue operation */
	struct gpio_chip gc;
	struct virtio_gpio_line *lines;
	struct virtqueue *request_vq;

	/* irq support */
	struct virtqueue *event_vq;
	struct mutex irq_lock; /* Protects irq operation */
	raw_spinlock_t eventq_lock; /* Protects queuing of the buffer */
	struct vgpio_irq_line *irq_lines;
};

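/*
 * Send a request to the device over the request virtqueue and sleep until
 * its response arrives. The per-line pre-allocated request buffer is used
 * for transmission, while 'response'/'rxlen' describe the receive buffer,
 * which may be larger than the per-line one (e.g. for GET_NAMES).
 */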
static int _virtio_gpio_req(struct virtio_gpio *vgpio, u16 type, u16 gpio,
			    u8 txvalue, u8 *rxvalue, void *response, u32 rxlen)
{
	struct virtio_gpio_line *line = &vgpio->lines[gpio];
	struct virtio_gpio_request *req = &line->req;
	struct virtio_gpio_response *res = response;
	struct scatterlist *sgs[2], req_sg, res_sg;
	struct device *dev = &vgpio->vdev->dev;
	int ret;

	/*
	 * Prevent concurrent requests for the same line since we have
	 * pre-allocated request/response buffers for each GPIO line. Moreover
	 * Linux always accesses a GPIO line sequentially, so this locking shall
	 * always go through without any delays.
	 */
	mutex_lock(&line->lock);

	req->type = cpu_to_le16(type);
	req->gpio = cpu_to_le16(gpio);
	req->value = cpu_to_le32(txvalue);

	sg_init_one(&req_sg, req, sizeof(*req));
	sg_init_one(&res_sg, res, rxlen);
	sgs[0] = &req_sg;
	sgs[1] = &res_sg;

	line->rxlen = 0;
	reinit_completion(&line->completion);

	/*
	 * Virtqueue callers need to ensure they don't call its APIs with other
	 * virtqueue operations at the same time.
	 */
	mutex_lock(&vgpio->lock);
	ret = virtqueue_add_sgs(vgpio->request_vq, sgs, 1, 1, line, GFP_KERNEL);
	if (ret) {
		dev_err(dev, "failed to add request to vq\n");
		mutex_unlock(&vgpio->lock);
		goto out;
	}

	virtqueue_kick(vgpio->request_vq);
	mutex_unlock(&vgpio->lock);

	wait_for_completion(&line->completion);

	if (unlikely(res->status != VIRTIO_GPIO_STATUS_OK)) {
		dev_err(dev, "GPIO request failed: %d\n", gpio);
		ret = -EINVAL;
		goto out;
	}

	if (unlikely(line->rxlen != rxlen)) {
		dev_err(dev, "GPIO operation returned incorrect len (%u : %u)\n",
			rxlen, line->rxlen);
		ret = -EINVAL;
		goto out;
	}

	if (rxvalue)
		*rxvalue = res->value;

out:
	mutex_unlock(&line->lock);
	return ret;
}

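/* Common case: exchange the fixed-size per-line request/response buffers */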
static int virtio_gpio_req(struct virtio_gpio *vgpio, u16 type, u16 gpio,
			   u8 txvalue, u8 *rxvalue)
{
	struct virtio_gpio_line *line = &vgpio->lines[gpio];
	struct virtio_gpio_response *res = &line->res;

	return _virtio_gpio_req(vgpio, type, gpio, txvalue, rxvalue, res,
				sizeof(*res));
}

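/*
 * On line release, set the direction to NONE so the device can return the
 * line to its default state.
 */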
static void virtio_gpio_free(struct gpio_chip *gc, unsigned int gpio)
{
	struct virtio_gpio *vgpio = gpiochip_get_data(gc);

	virtio_gpio_req(vgpio, VIRTIO_GPIO_MSG_SET_DIRECTION, gpio,
			VIRTIO_GPIO_DIRECTION_NONE, NULL);
}

static int virtio_gpio_get_direction(struct gpio_chip *gc, unsigned int gpio)
{
	struct virtio_gpio *vgpio = gpiochip_get_data(gc);
	u8 direction;
	int ret;

	ret = virtio_gpio_req(vgpio, VIRTIO_GPIO_MSG_GET_DIRECTION, gpio, 0,
			      &direction);
	if (ret)
		return ret;

	switch (direction) {
	case VIRTIO_GPIO_DIRECTION_IN:
		return GPIO_LINE_DIRECTION_IN;
	case VIRTIO_GPIO_DIRECTION_OUT:
		return GPIO_LINE_DIRECTION_OUT;
	default:
		return -EINVAL;
	}
}

static int virtio_gpio_direction_input(struct gpio_chip *gc, unsigned int gpio)
{
	struct virtio_gpio *vgpio = gpiochip_get_data(gc);

	return virtio_gpio_req(vgpio, VIRTIO_GPIO_MSG_SET_DIRECTION, gpio,
			       VIRTIO_GPIO_DIRECTION_IN, NULL);
}

static int virtio_gpio_direction_output(struct gpio_chip *gc, unsigned int gpio,
					int value)
{
	struct virtio_gpio *vgpio = gpiochip_get_data(gc);
	int ret;

	ret = virtio_gpio_req(vgpio, VIRTIO_GPIO_MSG_SET_VALUE, gpio, value, NULL);
	if (ret)
		return ret;

	return virtio_gpio_req(vgpio, VIRTIO_GPIO_MSG_SET_DIRECTION, gpio,
			       VIRTIO_GPIO_DIRECTION_OUT, NULL);
}

static int virtio_gpio_get(struct gpio_chip *gc, unsigned int gpio)
{
	struct virtio_gpio *vgpio = gpiochip_get_data(gc);
	u8 value;
	int ret;

	ret = virtio_gpio_req(vgpio, VIRTIO_GPIO_MSG_GET_VALUE, gpio, 0, &value);
	return ret ? ret : value;
}

static void virtio_gpio_set(struct gpio_chip *gc, unsigned int gpio, int value)
{
	struct virtio_gpio *vgpio = gpiochip_get_data(gc);

	virtio_gpio_req(vgpio, VIRTIO_GPIO_MSG_SET_VALUE, gpio, value, NULL);
}

/* Interrupt handling */
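/*
 * Queue a request/response buffer pair for a line on the event virtqueue.
 * The device holds the buffer until the line triggers an interrupt (or the
 * interrupt gets disabled meanwhile), so at most one buffer is in flight
 * per line. Called with eventq_lock held.
 */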
static void virtio_gpio_irq_prepare(struct virtio_gpio *vgpio, u16 gpio)
{
	struct vgpio_irq_line *irq_line = &vgpio->irq_lines[gpio];
	struct virtio_gpio_irq_request *ireq = &irq_line->ireq;
	struct virtio_gpio_irq_response *ires = &irq_line->ires;
	struct scatterlist *sgs[2], req_sg, res_sg;
	int ret;

	if (WARN_ON(irq_line->queued || irq_line->masked || irq_line->disabled))
		return;

	ireq->gpio = cpu_to_le16(gpio);
	sg_init_one(&req_sg, ireq, sizeof(*ireq));
	sg_init_one(&res_sg, ires, sizeof(*ires));
	sgs[0] = &req_sg;
	sgs[1] = &res_sg;

	ret = virtqueue_add_sgs(vgpio->event_vq, sgs, 1, 1, irq_line, GFP_ATOMIC);
	if (ret) {
		dev_err(&vgpio->vdev->dev, "failed to add request to eventq\n");
		return;
	}

	irq_line->queued = true;
	virtqueue_kick(vgpio->event_vq);
}

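/*
 * irq_enable()/irq_disable() run in atomic context and only record the
 * desired state; the VIRTIO_GPIO_MSG_IRQ_TYPE message is sent later, from
 * irq_bus_sync_unlock() where sleeping is allowed.
 */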
static void virtio_gpio_irq_enable(struct irq_data *d)
{
	struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
	struct virtio_gpio *vgpio = gpiochip_get_data(gc);
	struct vgpio_irq_line *irq_line = &vgpio->irq_lines[d->hwirq];

	raw_spin_lock(&vgpio->eventq_lock);
	irq_line->disabled = false;
	irq_line->masked = false;
	irq_line->queue_pending = true;
	raw_spin_unlock(&vgpio->eventq_lock);

	irq_line->update_pending = true;
}

static void virtio_gpio_irq_disable(struct irq_data *d)
{
	struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
	struct virtio_gpio *vgpio = gpiochip_get_data(gc);
	struct vgpio_irq_line *irq_line = &vgpio->irq_lines[d->hwirq];

	raw_spin_lock(&vgpio->eventq_lock);
	irq_line->disabled = true;
	irq_line->masked = true;
	irq_line->queue_pending = false;
	raw_spin_unlock(&vgpio->eventq_lock);

	irq_line->update_pending = true;
}

static void virtio_gpio_irq_mask(struct irq_data *d)
{
	struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
	struct virtio_gpio *vgpio = gpiochip_get_data(gc);
	struct vgpio_irq_line *irq_line = &vgpio->irq_lines[d->hwirq];

	raw_spin_lock(&vgpio->eventq_lock);
	irq_line->masked = true;
	raw_spin_unlock(&vgpio->eventq_lock);
}

static void virtio_gpio_irq_unmask(struct irq_data *d)
{
	struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
	struct virtio_gpio *vgpio = gpiochip_get_data(gc);
	struct vgpio_irq_line *irq_line = &vgpio->irq_lines[d->hwirq];

	raw_spin_lock(&vgpio->eventq_lock);
	irq_line->masked = false;

	/* Queue the buffer unconditionally on unmask */
	virtio_gpio_irq_prepare(vgpio, d->hwirq);
	raw_spin_unlock(&vgpio->eventq_lock);
}

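/*
 * Translate the Linux trigger type to its virtio equivalent; the device is
 * updated later from irq_bus_sync_unlock() via update_pending.
 */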
static int virtio_gpio_irq_set_type(struct irq_data *d, unsigned int type)
{
	struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
	struct virtio_gpio *vgpio = gpiochip_get_data(gc);
	struct vgpio_irq_line *irq_line = &vgpio->irq_lines[d->hwirq];

	switch (type) {
	case IRQ_TYPE_EDGE_RISING:
		type = VIRTIO_GPIO_IRQ_TYPE_EDGE_RISING;
		break;
	case IRQ_TYPE_EDGE_FALLING:
		type = VIRTIO_GPIO_IRQ_TYPE_EDGE_FALLING;
		break;
	case IRQ_TYPE_EDGE_BOTH:
		type = VIRTIO_GPIO_IRQ_TYPE_EDGE_BOTH;
		break;
	case IRQ_TYPE_LEVEL_LOW:
		type = VIRTIO_GPIO_IRQ_TYPE_LEVEL_LOW;
		break;
	case IRQ_TYPE_LEVEL_HIGH:
		type = VIRTIO_GPIO_IRQ_TYPE_LEVEL_HIGH;
		break;
	default:
		dev_err(&vgpio->vdev->dev, "unsupported irq type: %u\n", type);
		return -EINVAL;
	}

	irq_line->type = type;
	irq_line->update_pending = true;

	return 0;
}

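/*
 * Talking to the device requires sleeping virtqueue operations, which the
 * atomic irq_chip callbacks can't do. The slow-bus lock pair below batches
 * up any pending update and sends it from irq_bus_sync_unlock(), where
 * sleeping is fine.
 */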
static void virtio_gpio_irq_bus_lock(struct irq_data *d)
{
	struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
	struct virtio_gpio *vgpio = gpiochip_get_data(gc);

	mutex_lock(&vgpio->irq_lock);
}

static void virtio_gpio_irq_bus_sync_unlock(struct irq_data *d)
{
	struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
	struct virtio_gpio *vgpio = gpiochip_get_data(gc);
	struct vgpio_irq_line *irq_line = &vgpio->irq_lines[d->hwirq];
	u8 type = irq_line->disabled ? VIRTIO_GPIO_IRQ_TYPE_NONE : irq_line->type;
	unsigned long flags;

	if (irq_line->update_pending) {
		irq_line->update_pending = false;
		virtio_gpio_req(vgpio, VIRTIO_GPIO_MSG_IRQ_TYPE, d->hwirq, type,
				NULL);

		/* Queue the buffer only after interrupt is enabled */
		raw_spin_lock_irqsave(&vgpio->eventq_lock, flags);
		if (irq_line->queue_pending) {
			irq_line->queue_pending = false;
			virtio_gpio_irq_prepare(vgpio, d->hwirq);
		}
		raw_spin_unlock_irqrestore(&vgpio->eventq_lock, flags);
	}

	mutex_unlock(&vgpio->irq_lock);
}

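/*
 * Decide whether a returned event buffer should be ignored: the interrupt
 * may have been masked or disabled meanwhile, or the device may have marked
 * the buffer invalid because of a disable/enable race, in which case it is
 * simply requeued.
 */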
static bool ignore_irq(struct virtio_gpio *vgpio, int gpio,
		       struct vgpio_irq_line *irq_line)
{
	bool ignore = false;

	raw_spin_lock(&vgpio->eventq_lock);
	irq_line->queued = false;

	/* Interrupt is disabled currently */
	if (irq_line->masked || irq_line->disabled) {
		ignore = true;
		goto unlock;
	}

	/*
	 * Buffer is returned as the interrupt was disabled earlier, but is
	 * enabled again now. Requeue the buffers.
	 */
	if (irq_line->ires.status == VIRTIO_GPIO_IRQ_STATUS_INVALID) {
		virtio_gpio_irq_prepare(vgpio, gpio);
		ignore = true;
		goto unlock;
	}

	if (WARN_ON(irq_line->ires.status != VIRTIO_GPIO_IRQ_STATUS_VALID))
		ignore = true;

unlock:
	raw_spin_unlock(&vgpio->eventq_lock);

	return ignore;
}

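/* Event virtqueue callback: each returned buffer signals one interrupt */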
static void virtio_gpio_event_vq(struct virtqueue *vq)
{
	struct virtio_gpio *vgpio = vq->vdev->priv;
	struct device *dev = &vgpio->vdev->dev;
	struct vgpio_irq_line *irq_line;
	int gpio, ret;
	unsigned int len;

	while (true) {
		irq_line = virtqueue_get_buf(vgpio->event_vq, &len);
		if (!irq_line)
			break;

		if (len != sizeof(irq_line->ires)) {
			dev_err(dev, "irq with incorrect length (%u : %u)\n",
				len, (unsigned int)sizeof(irq_line->ires));
			continue;
		}

		/*
		 * Find GPIO line number from the offset of irq_line within the
		 * irq_lines block. We can also get GPIO number from
		 * irq-request, but better not to rely on a buffer returned by
		 * remote.
		 */
		gpio = irq_line - vgpio->irq_lines;
		WARN_ON(gpio >= vgpio->gc.ngpio);

		if (unlikely(ignore_irq(vgpio, gpio, irq_line)))
			continue;

		ret = generic_handle_domain_irq(vgpio->gc.irq.domain, gpio);
		if (ret)
			dev_err(dev, "failed to handle interrupt: %d\n", ret);
	}
}

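/* Request virtqueue callback: wake up the waiter in _virtio_gpio_req() */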
static void virtio_gpio_request_vq(struct virtqueue *vq)
{
	struct virtio_gpio_line *line;
	unsigned int len;

	do {
		line = virtqueue_get_buf(vq, &len);
		if (!line)
			return;

		line->rxlen = len;
		complete(&line->completion);
	} while (1);
}

static void virtio_gpio_free_vqs(struct virtio_device *vdev)
{
	virtio_reset_device(vdev);
	vdev->config->del_vqs(vdev);
}

static int virtio_gpio_alloc_vqs(struct virtio_gpio *vgpio,
				 struct virtio_device *vdev)
{
	struct virtqueue_info vqs_info[] = {
		{ "requestq", virtio_gpio_request_vq },
		{ "eventq", virtio_gpio_event_vq },
	};
	struct virtqueue *vqs[2] = { NULL, NULL };
	int ret;

	ret = virtio_find_vqs(vdev, vgpio->irq_lines ? 2 : 1, vqs,
			      vqs_info, NULL);
	if (ret) {
		dev_err(&vdev->dev, "failed to find vqs: %d\n", ret);
		return ret;
	}

	if (!vqs[0]) {
		dev_err(&vdev->dev, "failed to find requestq vq\n");
		goto out;
	}
	vgpio->request_vq = vqs[0];

	if (vgpio->irq_lines && !vqs[1]) {
		dev_err(&vdev->dev, "failed to find eventq vq\n");
		goto out;
	}
	vgpio->event_vq = vqs[1];

	return 0;

out:
	if (vqs[0] || vqs[1])
		virtio_gpio_free_vqs(vdev);

	return -ENODEV;
}

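/*
 * The device returns all line names back-to-back in one buffer, each name
 * NUL-terminated (zero-length names are allowed). Build an array of
 * pointers into that buffer for gpio_chip->names.
 */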
static const char **virtio_gpio_get_names(struct virtio_gpio *vgpio,
					  u32 gpio_names_size, u16 ngpio)
{
	struct virtio_gpio_response_get_names *res;
	struct device *dev = &vgpio->vdev->dev;
	u8 *gpio_names, *str;
	const char **names;
	int i, ret, len;

	if (!gpio_names_size)
		return NULL;

	len = sizeof(*res) + gpio_names_size;
	res = devm_kzalloc(dev, len, GFP_KERNEL);
	if (!res)
		return NULL;
	gpio_names = res->value;

	ret = _virtio_gpio_req(vgpio, VIRTIO_GPIO_MSG_GET_NAMES, 0, 0, NULL,
			       res, len);
	if (ret) {
		dev_err(dev, "Failed to get GPIO names: %d\n", ret);
		return NULL;
	}

	names = devm_kcalloc(dev, ngpio, sizeof(*names), GFP_KERNEL);
	if (!names)
		return NULL;

	/* NUL terminate the last string instead of checking it */
	gpio_names[gpio_names_size - 1] = '\0';

	for (i = 0, str = gpio_names; i < ngpio; i++) {
		names[i] = str;
		str += strlen(str) + 1; /* zero-length strings are allowed */

		if (str > gpio_names + gpio_names_size) {
			dev_err(dev, "gpio_names block is too short (%d)\n", i);
			return NULL;
		}
	}

	return names;
}

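/*
 * Probe: read the (little-endian) config space, register the gpio_chip and,
 * if VIRTIO_GPIO_F_IRQ was negotiated, the irq_chip on top of it.
 */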
static int virtio_gpio_probe(struct virtio_device *vdev)
{
	struct virtio_gpio_config config;
	struct device *dev = &vdev->dev;
	struct virtio_gpio *vgpio;
	struct irq_chip *gpio_irq_chip;
	u32 gpio_names_size;
	u16 ngpio;
	int ret, i;

	vgpio = devm_kzalloc(dev, sizeof(*vgpio), GFP_KERNEL);
	if (!vgpio)
		return -ENOMEM;

	/* Read configuration */
	virtio_cread_bytes(vdev, 0, &config, sizeof(config));
	gpio_names_size = le32_to_cpu(config.gpio_names_size);
	ngpio = le16_to_cpu(config.ngpio);
	if (!ngpio) {
		dev_err(dev, "Number of GPIOs can't be zero\n");
		return -EINVAL;
	}

	vgpio->lines = devm_kcalloc(dev, ngpio, sizeof(*vgpio->lines), GFP_KERNEL);
	if (!vgpio->lines)
		return -ENOMEM;

	for (i = 0; i < ngpio; i++) {
		mutex_init(&vgpio->lines[i].lock);
		init_completion(&vgpio->lines[i].completion);
	}

	mutex_init(&vgpio->lock);
	vdev->priv = vgpio;

	vgpio->vdev = vdev;
	vgpio->gc.free = virtio_gpio_free;
	vgpio->gc.get_direction = virtio_gpio_get_direction;
	vgpio->gc.direction_input = virtio_gpio_direction_input;
	vgpio->gc.direction_output = virtio_gpio_direction_output;
	vgpio->gc.get = virtio_gpio_get;
	vgpio->gc.set = virtio_gpio_set;
	vgpio->gc.ngpio = ngpio;
	vgpio->gc.base = -1; /* Allocate base dynamically */
	vgpio->gc.label = dev_name(dev);
	vgpio->gc.parent = dev;
	vgpio->gc.owner = THIS_MODULE;
	vgpio->gc.can_sleep = true;

	/* Interrupt support */
	if (virtio_has_feature(vdev, VIRTIO_GPIO_F_IRQ)) {
		vgpio->irq_lines = devm_kcalloc(dev, ngpio, sizeof(*vgpio->irq_lines), GFP_KERNEL);
		if (!vgpio->irq_lines)
			return -ENOMEM;

		gpio_irq_chip = devm_kzalloc(dev, sizeof(*gpio_irq_chip), GFP_KERNEL);
		if (!gpio_irq_chip)
			return -ENOMEM;

		gpio_irq_chip->name = dev_name(dev);
		gpio_irq_chip->irq_enable = virtio_gpio_irq_enable;
		gpio_irq_chip->irq_disable = virtio_gpio_irq_disable;
		gpio_irq_chip->irq_mask = virtio_gpio_irq_mask;
		gpio_irq_chip->irq_unmask = virtio_gpio_irq_unmask;
		gpio_irq_chip->irq_set_type = virtio_gpio_irq_set_type;
		gpio_irq_chip->irq_bus_lock = virtio_gpio_irq_bus_lock;
		gpio_irq_chip->irq_bus_sync_unlock = virtio_gpio_irq_bus_sync_unlock;

		/* The event comes from the outside so no parent handler */
		vgpio->gc.irq.parent_handler = NULL;
		vgpio->gc.irq.num_parents = 0;
		vgpio->gc.irq.parents = NULL;
		vgpio->gc.irq.default_type = IRQ_TYPE_NONE;
		vgpio->gc.irq.handler = handle_level_irq;
		vgpio->gc.irq.chip = gpio_irq_chip;

		for (i = 0; i < ngpio; i++) {
			vgpio->irq_lines[i].type = VIRTIO_GPIO_IRQ_TYPE_NONE;
			vgpio->irq_lines[i].disabled = true;
			vgpio->irq_lines[i].masked = true;
		}

		mutex_init(&vgpio->irq_lock);
		raw_spin_lock_init(&vgpio->eventq_lock);
	}

	ret = virtio_gpio_alloc_vqs(vgpio, vdev);
	if (ret)
		return ret;

	/* Mark the device ready to perform operations from within probe() */
	virtio_device_ready(vdev);

	vgpio->gc.names = virtio_gpio_get_names(vgpio, gpio_names_size, ngpio);

	ret = gpiochip_add_data(&vgpio->gc, vgpio);
	if (ret) {
		virtio_gpio_free_vqs(vdev);
		dev_err(dev, "Failed to add virtio-gpio controller\n");
	}

	return ret;
}

static void virtio_gpio_remove(struct virtio_device *vdev)
{
	struct virtio_gpio *vgpio = vdev->priv;

	gpiochip_remove(&vgpio->gc);
	virtio_gpio_free_vqs(vdev);
}

static const struct virtio_device_id id_table[] = {
	{ VIRTIO_ID_GPIO, VIRTIO_DEV_ANY_ID },
	{},
};
MODULE_DEVICE_TABLE(virtio, id_table);

static const unsigned int features[] = {
	VIRTIO_GPIO_F_IRQ,
};

static struct virtio_driver virtio_gpio_driver = {
	.feature_table = features,
	.feature_table_size = ARRAY_SIZE(features),
	.id_table = id_table,
	.probe = virtio_gpio_probe,
	.remove = virtio_gpio_remove,
	.driver = {
		.name = KBUILD_MODNAME,
	},
};
module_virtio_driver(virtio_gpio_driver);

MODULE_AUTHOR("Enrico Weigelt, metux IT consult <info@metux.net>");
MODULE_AUTHOR("Viresh Kumar <viresh.kumar@linaro.org>");
MODULE_DESCRIPTION("VirtIO GPIO driver");
MODULE_LICENSE("GPL");