/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_VIRTIO_CONFIG_H
#define _LINUX_VIRTIO_CONFIG_H

#include <linux/err.h>
#include <linux/bug.h>
#include <linux/virtio.h>
#include <linux/virtio_byteorder.h>
#include <linux/compiler_types.h>
#include <uapi/linux/virtio_config.h>

struct irq_affinity;

struct virtio_shm_region {
        u64 addr;
        u64 len;
};

typedef void vq_callback_t(struct virtqueue *);

/**
 * struct virtqueue_info - Info for a virtqueue passed to find_vqs().
 * @name: virtqueue description. Used mainly for debugging, NULL for
 *        a virtqueue unused by the driver.
 * @callback: A callback to invoke on a used buffer notification.
 *            NULL for a virtqueue that does not need a callback.
 * @ctx: A flag indicating whether an extra context should be maintained
 *       per virtqueue.
 */
struct virtqueue_info {
        const char *name;
        vq_callback_t *callback;
        bool ctx;
};

/**
 * struct virtio_config_ops - operations for configuring a virtio device
 * Note: Do not assume that a transport implements the operations that
 *       get/set a value as a simple read/write! Generally speaking,
 *       any of @get/@set, @get_status/@set_status, or @get_features/
 *       @finalize_features are NOT safe to be called from an atomic
 *       context.
 * @get: read the value of a configuration field
 *      vdev: the virtio_device
 *      offset: the offset of the configuration field
 *      buf: the buffer to write the field value into.
 *      len: the length of the buffer
 * @set: write the value of a configuration field
 *      vdev: the virtio_device
 *      offset: the offset of the configuration field
 *      buf: the buffer to read the field value from.
 *      len: the length of the buffer
 * @generation: config generation counter (optional)
 *      vdev: the virtio_device
 *      Returns the config generation counter
 * @get_status: read the status byte
 *      vdev: the virtio_device
 *      Returns the status byte
 * @set_status: write the status byte
 *      vdev: the virtio_device
 *      status: the new status byte
 * @reset: reset the device
 *      vdev: the virtio device
 *      After this, status and feature negotiation must be done again
 *      Device must not be reset from its vq/config callbacks, or in
 *      parallel with being added/removed.
 * @find_vqs: find virtqueues and instantiate them.
 *      vdev: the virtio_device
 *      nvqs: the number of virtqueues to find
 *      vqs: on success, includes new virtqueues
 *      vqs_info: array of virtqueue info structures
 *      desc: interrupt affinity descriptor (optional, may be NULL)
 *      Returns 0 on success or error status
 * @del_vqs: free virtqueues found by find_vqs().
 * @synchronize_cbs: synchronize with the virtqueue callbacks (optional)
 *      The function guarantees that all memory operations on the
 *      queue before it are visible to the vring_interrupt() that is
 *      called after it.
 *      vdev: the virtio_device
 * @get_features: get the array of feature bits for this device.
 *      vdev: the virtio_device
 *      Returns the first 64 feature bits (all we currently need).
 * @finalize_features: confirm what device features we'll be using.
 *      vdev: the virtio_device
 *      This sends the driver feature bits to the device: it can change
 *      the dev->feature bits if it wants.
 *      Note that despite the name this can be called any number of
 *      times.
 *      Returns 0 on success or error status
 * @bus_name: return the bus name associated with the device (optional)
 *      vdev: the virtio_device
 *      This returns a pointer to the bus name a la pci_name from which
 *      the caller can then copy.
 * @set_vq_affinity: set the affinity for a virtqueue (optional).
 * @get_vq_affinity: get the affinity for a virtqueue (optional).
 * @get_shm_region: get a shared memory region based on the index.
 * @disable_vq_and_reset: reset a queue individually (optional).
 *      vq: the virtqueue
 *      Returns 0 on success or error status
 *      disable_vq_and_reset guarantees that the callbacks are disabled and
 *      synchronized.
 *      Except for the callback, the caller must guarantee that the vring is
 *      not accessed by any virtqueue function.
 * @enable_vq_after_reset: re-enable a virtqueue after it has been reset by
 *      @disable_vq_and_reset.
 *      vq: the virtqueue
 *      Returns 0 on success or error status
 *      If disable_vq_and_reset is set, then enable_vq_after_reset must also be
 *      set.
 */
struct virtio_config_ops {
        void (*get)(struct virtio_device *vdev, unsigned offset,
                    void *buf, unsigned len);
        void (*set)(struct virtio_device *vdev, unsigned offset,
                    const void *buf, unsigned len);
        u32 (*generation)(struct virtio_device *vdev);
        u8 (*get_status)(struct virtio_device *vdev);
        void (*set_status)(struct virtio_device *vdev, u8 status);
        void (*reset)(struct virtio_device *vdev);
        int (*find_vqs)(struct virtio_device *vdev, unsigned int nvqs,
                        struct virtqueue *vqs[],
                        struct virtqueue_info vqs_info[],
                        struct irq_affinity *desc);
        void (*del_vqs)(struct virtio_device *);
        void (*synchronize_cbs)(struct virtio_device *);
        u64 (*get_features)(struct virtio_device *vdev);
        int (*finalize_features)(struct virtio_device *vdev);
        const char *(*bus_name)(struct virtio_device *vdev);
        int (*set_vq_affinity)(struct virtqueue *vq,
                               const struct cpumask *cpu_mask);
        const struct cpumask *(*get_vq_affinity)(struct virtio_device *vdev,
                                                 int index);
        bool (*get_shm_region)(struct virtio_device *vdev,
                               struct virtio_shm_region *region, u8 id);
        int (*disable_vq_and_reset)(struct virtqueue *vq);
        int (*enable_vq_after_reset)(struct virtqueue *vq);
};
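
/*
 * Example (illustrative only, not part of this header): a transport fills in
 * a struct virtio_config_ops and points its virtio_device at it.  The names
 * foo_get, foo_set, etc. below are hypothetical; see
 * drivers/virtio/virtio_pci_modern.c or drivers/virtio/virtio_mmio.c for
 * real implementations.
 *
 *      static const struct virtio_config_ops foo_config_ops = {
 *              .get                    = foo_get,
 *              .set                    = foo_set,
 *              .generation             = foo_generation,
 *              .get_status             = foo_get_status,
 *              .set_status             = foo_set_status,
 *              .reset                  = foo_reset,
 *              .find_vqs               = foo_find_vqs,
 *              .del_vqs                = foo_del_vqs,
 *              .get_features           = foo_get_features,
 *              .finalize_features      = foo_finalize_features,
 *              .bus_name               = foo_bus_name,
 *      };
 */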

/* If driver didn't advertise the feature, it will never appear. */
void virtio_check_driver_offered_feature(const struct virtio_device *vdev,
                                         unsigned int fbit);

/**
 * __virtio_test_bit - helper to test feature bits. For use by transports.
 *                     Devices should normally use virtio_has_feature,
 *                     which includes more checks.
 * @vdev: the device
 * @fbit: the feature bit
 */
static inline bool __virtio_test_bit(const struct virtio_device *vdev,
                                     unsigned int fbit)
{
        /* Did you forget to fix assumptions on max features? */
        if (__builtin_constant_p(fbit))
                BUILD_BUG_ON(fbit >= 64);
        else
                BUG_ON(fbit >= 64);

        return vdev->features & BIT_ULL(fbit);
}

/**
 * __virtio_set_bit - helper to set feature bits. For use by transports.
 * @vdev: the device
 * @fbit: the feature bit
 */
static inline void __virtio_set_bit(struct virtio_device *vdev,
                                    unsigned int fbit)
{
        /* Did you forget to fix assumptions on max features? */
        if (__builtin_constant_p(fbit))
                BUILD_BUG_ON(fbit >= 64);
        else
                BUG_ON(fbit >= 64);

        vdev->features |= BIT_ULL(fbit);
}

/**
 * __virtio_clear_bit - helper to clear feature bits. For use by transports.
 * @vdev: the device
 * @fbit: the feature bit
 */
static inline void __virtio_clear_bit(struct virtio_device *vdev,
                                      unsigned int fbit)
{
        /* Did you forget to fix assumptions on max features? */
        if (__builtin_constant_p(fbit))
                BUILD_BUG_ON(fbit >= 64);
        else
                BUG_ON(fbit >= 64);

        vdev->features &= ~BIT_ULL(fbit);
}

/**
 * virtio_has_feature - helper to determine if this device has this feature.
 * @vdev: the device
 * @fbit: the feature bit
 */
static inline bool virtio_has_feature(const struct virtio_device *vdev,
                                      unsigned int fbit)
{
        if (fbit < VIRTIO_TRANSPORT_F_START)
                virtio_check_driver_offered_feature(vdev, fbit);

        return __virtio_test_bit(vdev, fbit);
}
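
/*
 * Example (illustrative only): a driver typically gates optional behaviour
 * on a negotiated feature bit, as virtio-net does for multiqueue support.
 * The surrounding variables are hypothetical.
 *
 *      if (virtio_has_feature(vdev, VIRTIO_NET_F_MQ))
 *              max_pairs = virtio_cread16(vdev,
 *                              offsetof(struct virtio_net_config,
 *                                       max_virtqueue_pairs));
 */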

/**
 * virtio_has_dma_quirk - determine whether this device has the DMA quirk
 * @vdev: the device
 */
static inline bool virtio_has_dma_quirk(const struct virtio_device *vdev)
{
        /*
         * Note the reverse polarity of the quirk feature (compared to most
         * other features); this is for compatibility with legacy systems.
         */
        return !virtio_has_feature(vdev, VIRTIO_F_ACCESS_PLATFORM);
}

static inline
int virtio_find_vqs(struct virtio_device *vdev, unsigned int nvqs,
                    struct virtqueue *vqs[],
                    struct virtqueue_info vqs_info[],
                    struct irq_affinity *desc)
{
        return vdev->config->find_vqs(vdev, nvqs, vqs, vqs_info, desc);
}
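
/*
 * Example (illustrative only): finding several virtqueues at once.  The
 * queue names, callbacks and the two-queue layout below are hypothetical;
 * a real driver builds the same kind of virtqueue_info array from its own
 * queue set.
 *
 *      struct virtqueue_info vqs_info[] = {
 *              { "rx", foo_rx_done },
 *              { "tx", foo_tx_done },
 *      };
 *      struct virtqueue *vqs[2];
 *      int err;
 *
 *      err = virtio_find_vqs(vdev, 2, vqs, vqs_info, NULL);
 *      if (err)
 *              return err;
 */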

static inline
struct virtqueue *virtio_find_single_vq(struct virtio_device *vdev,
                                        vq_callback_t *c, const char *n)
{
        struct virtqueue_info vqs_info[] = {
                { n, c },
        };
        struct virtqueue *vq;
        int err = virtio_find_vqs(vdev, 1, &vq, vqs_info, NULL);

        if (err < 0)
                return ERR_PTR(err);
        return vq;
}
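
/*
 * Example (illustrative only): on failure virtio_find_single_vq() returns an
 * ERR_PTR() value rather than NULL, so callers check it with IS_ERR().  The
 * callback and queue name are hypothetical.
 *
 *      vq = virtio_find_single_vq(vdev, foo_done, "requests");
 *      if (IS_ERR(vq))
 *              return PTR_ERR(vq);
 */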

/**
 * virtio_synchronize_cbs - synchronize with virtqueue callbacks
 * @dev: the virtio device
 */
static inline
void virtio_synchronize_cbs(struct virtio_device *dev)
{
        if (dev->config->synchronize_cbs) {
                dev->config->synchronize_cbs(dev);
        } else {
                /*
                 * A best effort fallback to synchronize with
                 * interrupts, preemption and softirq disabled
                 * regions. See comment above synchronize_rcu().
                 */
                synchronize_rcu();
        }
}

/**
 * virtio_device_ready - enable vq use in probe function
 * @dev: the virtio device
 *
 * Driver must call this to use vqs in the probe function.
 *
 * Note: vqs are enabled automatically after probe returns.
 */
static inline
void virtio_device_ready(struct virtio_device *dev)
{
        unsigned status = dev->config->get_status(dev);

        WARN_ON(status & VIRTIO_CONFIG_S_DRIVER_OK);

#ifdef CONFIG_VIRTIO_HARDEN_NOTIFICATION
        /*
         * The virtio_synchronize_cbs() makes sure vring_interrupt()
         * will see the driver specific setup if it sees vq->broken
         * as false (even if the notifications come before DRIVER_OK).
         */
        virtio_synchronize_cbs(dev);
        __virtio_unbreak_device(dev);
#endif
        /*
         * The transport should ensure the visibility of vq->broken
         * before setting DRIVER_OK. See the comments for the transport
         * specific set_status() method.
         *
         * A well behaved device will only notify a virtqueue after
         * DRIVER_OK, which means the device should "see" the coherent
         * memory write that sets vq->broken to false (done by the
         * driver when it sees DRIVER_OK).  The driver's subsequent
         * vring_interrupt() will then see vq->broken as false, so no
         * notification is lost.
         */
        dev->config->set_status(dev, status | VIRTIO_CONFIG_S_DRIVER_OK);
}
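
/*
 * Example (illustrative only): the expected ordering in a driver's probe
 * routine.  All virtqueue and device state must be set up before
 * virtio_device_ready(); buffers may only be added and kicked afterwards.
 * foo_probe(), foo_setup_vqs() and foo_fill_rx() are hypothetical names.
 *
 *      static int foo_probe(struct virtio_device *vdev)
 *      {
 *              int err = foo_setup_vqs(vdev);  // find_vqs() and friends
 *
 *              if (err)
 *                      return err;
 *
 *              virtio_device_ready(vdev);      // from here on, vqs may be used
 *              foo_fill_rx(vdev);              // e.g. post receive buffers
 *              return 0;
 *      }
 */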

static inline
const char *virtio_bus_name(struct virtio_device *vdev)
{
        if (!vdev->config->bus_name)
                return "virtio";
        return vdev->config->bus_name(vdev);
}

/**
 * virtqueue_set_affinity - set the affinity for a virtqueue
 * @vq: the virtqueue
 * @cpu_mask: the cpu mask
 *
 * Note that this function is best-effort: the affinity hint may not be
 * honoured, depending on transport support, the irq type and irq sharing.
 */
static inline
int virtqueue_set_affinity(struct virtqueue *vq, const struct cpumask *cpu_mask)
{
        struct virtio_device *vdev = vq->vdev;
        if (vdev->config->set_vq_affinity)
                return vdev->config->set_vq_affinity(vq, cpu_mask);
        return 0;
}
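
/*
 * Example (illustrative only): spreading per-queue interrupts across online
 * CPUs.  The return value is deliberately ignored because the call is only
 * a hint; nvqs and vqs[] are hypothetical driver state.
 *
 *      for (i = 0; i < nvqs; i++)
 *              virtqueue_set_affinity(vqs[i],
 *                                     cpumask_of(i % num_online_cpus()));
 */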

static inline
bool virtio_get_shm_region(struct virtio_device *vdev,
                           struct virtio_shm_region *region, u8 id)
{
        if (!region->len)
                return false;
        if (!vdev->config->get_shm_region)
                return false;
        return vdev->config->get_shm_region(vdev, region, id);
}

static inline bool virtio_is_little_endian(struct virtio_device *vdev)
{
        return virtio_has_feature(vdev, VIRTIO_F_VERSION_1) ||
                virtio_legacy_is_little_endian();
}

/* Memory accessors */
static inline u16 virtio16_to_cpu(struct virtio_device *vdev, __virtio16 val)
{
        return __virtio16_to_cpu(virtio_is_little_endian(vdev), val);
}

static inline __virtio16 cpu_to_virtio16(struct virtio_device *vdev, u16 val)
{
        return __cpu_to_virtio16(virtio_is_little_endian(vdev), val);
}

static inline u32 virtio32_to_cpu(struct virtio_device *vdev, __virtio32 val)
{
        return __virtio32_to_cpu(virtio_is_little_endian(vdev), val);
}

static inline __virtio32 cpu_to_virtio32(struct virtio_device *vdev, u32 val)
{
        return __cpu_to_virtio32(virtio_is_little_endian(vdev), val);
}

static inline u64 virtio64_to_cpu(struct virtio_device *vdev, __virtio64 val)
{
        return __virtio64_to_cpu(virtio_is_little_endian(vdev), val);
}

static inline __virtio64 cpu_to_virtio64(struct virtio_device *vdev, u64 val)
{
        return __cpu_to_virtio64(virtio_is_little_endian(vdev), val);
}
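
/*
 * Example (illustrative only): converting a "virtio endian" value shared
 * with the device, i.e. little-endian for VIRTIO 1.0+ devices and
 * guest-native for legacy ones.  The hdr pointer and new_size variable are
 * hypothetical.
 *
 *      u16 gso_size = virtio16_to_cpu(vdev, hdr->gso_size);
 *      hdr->gso_size = cpu_to_virtio16(vdev, new_size);
 */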

#define virtio_to_cpu(vdev, x) \
        _Generic((x), \
                __u8: (x), \
                __virtio16: virtio16_to_cpu((vdev), (x)), \
                __virtio32: virtio32_to_cpu((vdev), (x)), \
                __virtio64: virtio64_to_cpu((vdev), (x)) \
        )

#define cpu_to_virtio(vdev, x, m) \
        _Generic((m), \
                __u8: (x), \
                __virtio16: cpu_to_virtio16((vdev), (x)), \
                __virtio32: cpu_to_virtio32((vdev), (x)), \
                __virtio64: cpu_to_virtio64((vdev), (x)) \
        )

#define __virtio_native_type(structname, member) \
        typeof(virtio_to_cpu(NULL, ((structname*)0)->member))

/* Config space accessors. */
#define virtio_cread(vdev, structname, member, ptr) \
        do { \
                typeof(((structname*)0)->member) virtio_cread_v; \
 \
                might_sleep(); \
                /* Sanity check: must match the member's type */ \
                typecheck(typeof(virtio_to_cpu((vdev), virtio_cread_v)), *(ptr)); \
 \
                switch (sizeof(virtio_cread_v)) { \
                case 1: \
                case 2: \
                case 4: \
                        vdev->config->get((vdev), \
                                          offsetof(structname, member), \
                                          &virtio_cread_v, \
                                          sizeof(virtio_cread_v)); \
                        break; \
                default: \
                        __virtio_cread_many((vdev), \
                                            offsetof(structname, member), \
                                            &virtio_cread_v, \
                                            1, \
                                            sizeof(virtio_cread_v)); \
                        break; \
                } \
                *(ptr) = virtio_to_cpu(vdev, virtio_cread_v); \
        } while(0)

/* Config space accessors. */
#define virtio_cwrite(vdev, structname, member, ptr) \
        do { \
                typeof(((structname*)0)->member) virtio_cwrite_v = \
                        cpu_to_virtio(vdev, *(ptr), ((structname*)0)->member); \
 \
                might_sleep(); \
                /* Sanity check: must match the member's type */ \
                typecheck(typeof(virtio_to_cpu((vdev), virtio_cwrite_v)), *(ptr)); \
 \
                vdev->config->set((vdev), offsetof(structname, member), \
                                  &virtio_cwrite_v, \
                                  sizeof(virtio_cwrite_v)); \
        } while(0)
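
/*
 * Example (illustrative only): reading and writing typed config space
 * fields.  struct virtio_blk_config and its capacity/wce members come from
 * the virtio-blk specification; the surrounding driver code is hypothetical.
 *
 *      u64 capacity;
 *      u8 wb = 1;
 *
 *      virtio_cread(vdev, struct virtio_blk_config, capacity, &capacity);
 *      virtio_cwrite(vdev, struct virtio_blk_config, wce, &wb);
 */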

/*
 * Nothing virtio-specific about these, but let's worry about generalizing
 * these later.
 */
#define virtio_le_to_cpu(x) \
        _Generic((x), \
                __u8: (u8)(x), \
                __le16: (u16)le16_to_cpu(x), \
                __le32: (u32)le32_to_cpu(x), \
                __le64: (u64)le64_to_cpu(x) \
        )

#define virtio_cpu_to_le(x, m) \
        _Generic((m), \
                __u8: (x), \
                __le16: cpu_to_le16(x), \
                __le32: cpu_to_le32(x), \
                __le64: cpu_to_le64(x) \
        )

/* LE (e.g. modern) Config space accessors. */
#define virtio_cread_le(vdev, structname, member, ptr) \
        do { \
                typeof(((structname*)0)->member) virtio_cread_v; \
 \
                might_sleep(); \
                /* Sanity check: must match the member's type */ \
                typecheck(typeof(virtio_le_to_cpu(virtio_cread_v)), *(ptr)); \
 \
                switch (sizeof(virtio_cread_v)) { \
                case 1: \
                case 2: \
                case 4: \
                        vdev->config->get((vdev), \
                                          offsetof(structname, member), \
                                          &virtio_cread_v, \
                                          sizeof(virtio_cread_v)); \
                        break; \
                default: \
                        __virtio_cread_many((vdev), \
                                            offsetof(structname, member), \
                                            &virtio_cread_v, \
                                            1, \
                                            sizeof(virtio_cread_v)); \
                        break; \
                } \
                *(ptr) = virtio_le_to_cpu(virtio_cread_v); \
        } while(0)

#define virtio_cwrite_le(vdev, structname, member, ptr) \
        do { \
                typeof(((structname*)0)->member) virtio_cwrite_v = \
                        virtio_cpu_to_le(*(ptr), ((structname*)0)->member); \
 \
                might_sleep(); \
                /* Sanity check: must match the member's type */ \
                typecheck(typeof(virtio_le_to_cpu(virtio_cwrite_v)), *(ptr)); \
 \
                vdev->config->set((vdev), offsetof(structname, member), \
                                  &virtio_cwrite_v, \
                                  sizeof(virtio_cwrite_v)); \
        } while(0)
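
/*
 * Example (illustrative only): the _le variants are for config structures
 * whose fields are declared __le16/__le32/__le64, i.e. VIRTIO 1.0+ devices
 * without a legacy layout.  virtio-fs reads its queue count this way; the
 * local variable is hypothetical.
 *
 *      u32 num_request_queues;
 *
 *      virtio_cread_le(vdev, struct virtio_fs_config, num_request_queues,
 *                      &num_request_queues);
 */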


/* Read @count fields, @bytes each. */
static inline void __virtio_cread_many(struct virtio_device *vdev,
                                       unsigned int offset,
                                       void *buf, size_t count, size_t bytes)
{
        u32 old, gen = vdev->config->generation ?
                vdev->config->generation(vdev) : 0;
        int i;

        might_sleep();
        /*
         * Re-read until the config generation is stable, so a multi-field
         * read cannot observe a config space update that happened in the
         * middle of it.
         */
        do {
                old = gen;

                for (i = 0; i < count; i++)
                        vdev->config->get(vdev, offset + bytes * i,
                                          buf + i * bytes, bytes);

                gen = vdev->config->generation ?
                        vdev->config->generation(vdev) : 0;
        } while (gen != old);
}

static inline void virtio_cread_bytes(struct virtio_device *vdev,
                                      unsigned int offset,
                                      void *buf, size_t len)
{
        __virtio_cread_many(vdev, offset, buf, len, 1);
}

static inline u8 virtio_cread8(struct virtio_device *vdev, unsigned int offset)
{
        u8 ret;

        might_sleep();
        vdev->config->get(vdev, offset, &ret, sizeof(ret));
        return ret;
}

static inline void virtio_cwrite8(struct virtio_device *vdev,
                                  unsigned int offset, u8 val)
{
        might_sleep();
        vdev->config->set(vdev, offset, &val, sizeof(val));
}

static inline u16 virtio_cread16(struct virtio_device *vdev,
                                 unsigned int offset)
{
        __virtio16 ret;

        might_sleep();
        vdev->config->get(vdev, offset, &ret, sizeof(ret));
        return virtio16_to_cpu(vdev, ret);
}

static inline void virtio_cwrite16(struct virtio_device *vdev,
                                   unsigned int offset, u16 val)
{
        __virtio16 v;

        might_sleep();
        v = cpu_to_virtio16(vdev, val);
        vdev->config->set(vdev, offset, &v, sizeof(v));
}

static inline u32 virtio_cread32(struct virtio_device *vdev,
                                 unsigned int offset)
{
        __virtio32 ret;

        might_sleep();
        vdev->config->get(vdev, offset, &ret, sizeof(ret));
        return virtio32_to_cpu(vdev, ret);
}

static inline void virtio_cwrite32(struct virtio_device *vdev,
                                   unsigned int offset, u32 val)
{
        __virtio32 v;

        might_sleep();
        v = cpu_to_virtio32(vdev, val);
        vdev->config->set(vdev, offset, &v, sizeof(v));
}

static inline u64 virtio_cread64(struct virtio_device *vdev,
                                 unsigned int offset)
{
        __virtio64 ret;

        __virtio_cread_many(vdev, offset, &ret, 1, sizeof(ret));
        return virtio64_to_cpu(vdev, ret);
}

static inline void virtio_cwrite64(struct virtio_device *vdev,
                                   unsigned int offset, u64 val)
{
        __virtio64 v;

        might_sleep();
        v = cpu_to_virtio64(vdev, val);
        vdev->config->set(vdev, offset, &v, sizeof(v));
}

/* Conditional config space accessors. */
#define virtio_cread_feature(vdev, fbit, structname, member, ptr) \
        ({ \
                int _r = 0; \
                if (!virtio_has_feature(vdev, fbit)) \
                        _r = -ENOENT; \
                else \
                        virtio_cread((vdev), structname, member, ptr); \
                _r; \
        })

/* Conditional config space accessors. */
#define virtio_cread_le_feature(vdev, fbit, structname, member, ptr) \
        ({ \
                int _r = 0; \
                if (!virtio_has_feature(vdev, fbit)) \
                        _r = -ENOENT; \
                else \
                        virtio_cread_le((vdev), structname, member, ptr); \
                _r; \
        })
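
/*
 * Example (illustrative only): reading a config field only when the
 * corresponding feature was negotiated, as virtio-blk does for its block
 * size.  The blk_size local and the 512-byte fallback are hypothetical
 * driver policy.
 *
 *      u32 blk_size;
 *      int err = virtio_cread_feature(vdev, VIRTIO_BLK_F_BLK_SIZE,
 *                                     struct virtio_blk_config, blk_size,
 *                                     &blk_size);
 *      if (err)
 *              blk_size = 512;         // feature not negotiated: use default
 */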

#endif /* _LINUX_VIRTIO_CONFIG_H */