1 | /* SPDX-License-Identifier: GPL-2.0 */ |
2 | #ifndef _LINUX_VDPA_H |
3 | #define _LINUX_VDPA_H |
4 | |
5 | #include <linux/kernel.h> |
6 | #include <linux/device.h> |
7 | #include <linux/interrupt.h> |
8 | #include <linux/vhost_iotlb.h> |
9 | #include <linux/virtio_net.h> |
10 | #include <linux/if_ether.h> |
11 | |
/**
 * struct vdpa_callback - vDPA callback definition.
 * @callback: interrupt callback function
 * @private: the data passed to the callback function
 * @trigger: the eventfd for the callback (Optional).
 *           When it is set, the vDPA driver must guarantee that
 *           signaling it is functionally equivalent to triggering
 *           the callback. Then the vDPA parent can signal it directly
 *           instead of triggering the callback.
 */
struct vdpa_callback {
	irqreturn_t (*callback)(void *data);
	void *private;
	struct eventfd_ctx *trigger;
};
27 | |
/**
 * struct vdpa_notification_area - vDPA notification area
 * @addr: base address of the notification area
 * @size: size of the notification area in bytes
 */
struct vdpa_notification_area {
	resource_size_t addr;
	resource_size_t size;
};
37 | |
/**
 * struct vdpa_vq_state_split - vDPA split virtqueue state
 * @avail_index: available index
 */
struct vdpa_vq_state_split {
	u16 avail_index;
};
45 | |
/**
 * struct vdpa_vq_state_packed - vDPA packed virtqueue state
 * @last_avail_counter: last driver ring wrap counter observed by device
 * @last_avail_idx: device available index
 * @last_used_counter: device ring wrap counter
 * @last_used_idx: used index
 *
 * Counter and index are packed into a single u16 using bitfields,
 * matching the packed virtqueue event suppression layout.
 */
struct vdpa_vq_state_packed {
	u16 last_avail_counter:1;
	u16 last_avail_idx:15;
	u16 last_used_counter:1;
	u16 last_used_idx:15;
};
59 | |
/**
 * struct vdpa_vq_state - vDPA virtqueue state
 * @split: state for a split virtqueue
 * @packed: state for a packed virtqueue
 *
 * Which member of the anonymous union is valid depends on the
 * negotiated ring format (split vs. packed).
 */
struct vdpa_vq_state {
	union {
		struct vdpa_vq_state_split split;
		struct vdpa_vq_state_packed packed;
	};
};
66 | |
67 | struct vdpa_mgmt_dev; |
68 | |
/**
 * struct vdpa_device - representation of a vDPA device
 * @dev: underlying device
 * @dma_dev: the actual device that is performing DMA
 * @driver_override: driver name to force a match; do not set directly,
 *                   because core frees it; use driver_set_override() to
 *                   set or clear it.
 * @config: the configuration ops for this device.
 * @cf_lock: Protects get and set access to configuration layout.
 * @index: device index
 * @features_valid: were features initialized? for legacy guests
 * @use_va: indicate whether virtual address must be used by this device
 * @nvqs: maximum number of supported virtqueues
 * @mdev: management device pointer; caller must setup when registering device as part
 *        of dev_add() mgmtdev ops callback before invoking _vdpa_register_device().
 * @ngroups: the number of virtqueue groups
 * @nas: the number of address spaces
 */
struct vdpa_device {
	struct device dev;
	struct device *dma_dev;
	const char *driver_override;
	const struct vdpa_config_ops *config;
	struct rw_semaphore cf_lock; /* Protects get/set config */
	unsigned int index;
	bool features_valid;
	bool use_va;
	u32 nvqs;
	struct vdpa_mgmt_dev *mdev;
	unsigned int ngroups;
	unsigned int nas;
};
101 | |
/**
 * struct vdpa_iova_range - the IOVA range support by the device
 * @first: start of the IOVA range
 * @last: end of the IOVA range (inclusive)
 */
struct vdpa_iova_range {
	u64 first;
	u64 last;
};
111 | |
/**
 * struct vdpa_dev_set_config - config attributes to apply to a device
 *                              under creation
 * @device_features: virtio features to be provisioned to the device
 * @net: virtio-net specific attributes
 * @net.mac: MAC address to assign
 * @net.mtu: MTU to assign
 * @net.max_vq_pairs: maximum number of virtqueue pairs
 * @mask: bitmask of the attributes that were actually provided
 *        (presumably bits of enum vdpa_attr — confirm against callers)
 */
struct vdpa_dev_set_config {
	u64 device_features;
	struct {
		u8 mac[ETH_ALEN];
		u16 mtu;
		u16 max_vq_pairs;
	} net;
	u64 mask;
};
121 | |
/**
 * struct vdpa_map_file - file area for device memory mapping
 * @file: vma->vm_file for the mapping
 * @offset: mapping offset in the vm_file
 */
struct vdpa_map_file {
	struct file *file;
	u64 offset;
};
131 | |
132 | /** |
133 | * struct vdpa_config_ops - operations for configuring a vDPA device. |
134 | * Note: vDPA device drivers are required to implement all of the |
135 | * operations unless it is mentioned to be optional in the following |
136 | * list. |
137 | * |
138 | * @set_vq_address: Set the address of virtqueue |
139 | * @vdev: vdpa device |
140 | * @idx: virtqueue index |
141 | * @desc_area: address of desc area |
142 | * @driver_area: address of driver area |
143 | * @device_area: address of device area |
144 | * Returns integer: success (0) or error (< 0) |
145 | * @set_vq_num: Set the size of virtqueue |
146 | * @vdev: vdpa device |
147 | * @idx: virtqueue index |
148 | * @num: the size of virtqueue |
149 | * @kick_vq: Kick the virtqueue |
150 | * @vdev: vdpa device |
151 | * @idx: virtqueue index |
152 | * @kick_vq_with_data: Kick the virtqueue and supply extra data |
153 | * (only if VIRTIO_F_NOTIFICATION_DATA is negotiated) |
154 | * @vdev: vdpa device |
155 | * @data for split virtqueue: |
156 | * 16 bits vqn and 16 bits next available index. |
157 | * @data for packed virtqueue: |
158 | * 16 bits vqn, 15 least significant bits of |
159 | * next available index and 1 bit next_wrap. |
160 | * @set_vq_cb: Set the interrupt callback function for |
161 | * a virtqueue |
162 | * @vdev: vdpa device |
163 | * @idx: virtqueue index |
164 | * @cb: virtio-vdev interrupt callback structure |
165 | * @set_vq_ready: Set ready status for a virtqueue |
166 | * @vdev: vdpa device |
167 | * @idx: virtqueue index |
168 | * @ready: ready (true) not ready(false) |
169 | * @get_vq_ready: Get ready status for a virtqueue |
170 | * @vdev: vdpa device |
171 | * @idx: virtqueue index |
172 | * Returns boolean: ready (true) or not (false) |
173 | * @set_vq_state: Set the state for a virtqueue |
174 | * @vdev: vdpa device |
175 | * @idx: virtqueue index |
176 | * @state: pointer to set virtqueue state (last_avail_idx) |
177 | * Returns integer: success (0) or error (< 0) |
178 | * @get_vq_state: Get the state for a virtqueue |
179 | * @vdev: vdpa device |
180 | * @idx: virtqueue index |
181 | * @state: pointer to returned state (last_avail_idx) |
182 | * @get_vendor_vq_stats: Get the vendor statistics of a device. |
183 | * @vdev: vdpa device |
184 | * @idx: virtqueue index |
185 | * @msg: socket buffer holding stats message |
186 | * @extack: extack for reporting error messages |
187 | * Returns integer: success (0) or error (< 0) |
188 | * @get_vq_notification: Get the notification area for a virtqueue (optional) |
189 | * @vdev: vdpa device |
190 | * @idx: virtqueue index |
191 | * Returns the notification area |
 * @get_vq_irq:			Get the irq number of a virtqueue (optional,
 *				but must be implemented if vq irq offloading
 *				is required)
194 | * @vdev: vdpa device |
195 | * @idx: virtqueue index |
196 | * Returns int: irq number of a virtqueue, |
197 | * negative number if no irq assigned. |
 * @get_vq_align:		Get the virtqueue align requirement
 *				for the device
 *				@vdev: vdpa device
 *				Returns virtqueue align requirement
202 | * @get_vq_group: Get the group id for a specific |
203 | * virtqueue (optional) |
204 | * @vdev: vdpa device |
205 | * @idx: virtqueue index |
206 | * Returns u32: group id for this virtqueue |
207 | * @get_vq_desc_group: Get the group id for the descriptor table of |
208 | * a specific virtqueue (optional) |
209 | * @vdev: vdpa device |
210 | * @idx: virtqueue index |
211 | * Returns u32: group id for the descriptor table |
212 | * portion of this virtqueue. Could be different |
213 | * than the one from @get_vq_group, in which case |
214 | * the access to the descriptor table can be |
215 | * confined to a separate asid, isolating from |
216 | * the virtqueue's buffer address access. |
217 | * @get_device_features: Get virtio features supported by the device |
218 | * @vdev: vdpa device |
219 | * Returns the virtio features support by the |
220 | * device |
221 | * @get_backend_features: Get parent-specific backend features (optional) |
222 | * Returns the vdpa features supported by the |
223 | * device. |
224 | * @set_driver_features: Set virtio features supported by the driver |
225 | * @vdev: vdpa device |
226 | * @features: feature support by the driver |
227 | * Returns integer: success (0) or error (< 0) |
228 | * @get_driver_features: Get the virtio driver features in action |
229 | * @vdev: vdpa device |
230 | * Returns the virtio features accepted |
231 | * @set_config_cb: Set the config interrupt callback |
232 | * @vdev: vdpa device |
233 | * @cb: virtio-vdev interrupt callback structure |
234 | * @get_vq_num_max: Get the max size of virtqueue |
235 | * @vdev: vdpa device |
236 | * Returns u16: max size of virtqueue |
237 | * @get_vq_num_min: Get the min size of virtqueue (optional) |
238 | * @vdev: vdpa device |
239 | * Returns u16: min size of virtqueue |
240 | * @get_device_id: Get virtio device id |
241 | * @vdev: vdpa device |
242 | * Returns u32: virtio device id |
243 | * @get_vendor_id: Get id for the vendor that provides this device |
244 | * @vdev: vdpa device |
245 | * Returns u32: virtio vendor id |
246 | * @get_status: Get the device status |
247 | * @vdev: vdpa device |
248 | * Returns u8: virtio device status |
249 | * @set_status: Set the device status |
250 | * @vdev: vdpa device |
251 | * @status: virtio device status |
252 | * @reset: Reset device |
253 | * @vdev: vdpa device |
254 | * Returns integer: success (0) or error (< 0) |
255 | * @compat_reset: Reset device with compatibility quirks to |
256 | * accommodate older userspace. Only needed by |
257 | * parent driver which used to have bogus reset |
258 | * behaviour, and has to maintain such behaviour |
259 | * for compatibility with older userspace. |
260 | * Historically compliant driver only has to |
261 | * implement .reset, Historically non-compliant |
262 | * driver should implement both. |
263 | * @vdev: vdpa device |
264 | * @flags: compatibility quirks for reset |
265 | * Returns integer: success (0) or error (< 0) |
266 | * @suspend: Suspend the device (optional) |
267 | * @vdev: vdpa device |
268 | * Returns integer: success (0) or error (< 0) |
269 | * @resume: Resume the device (optional) |
270 | * @vdev: vdpa device |
271 | * Returns integer: success (0) or error (< 0) |
272 | * @get_config_size: Get the size of the configuration space includes |
273 | * fields that are conditional on feature bits. |
274 | * @vdev: vdpa device |
275 | * Returns size_t: configuration size |
276 | * @get_config: Read from device specific configuration space |
277 | * @vdev: vdpa device |
278 | * @offset: offset from the beginning of |
279 | * configuration space |
280 | * @buf: buffer used to read to |
281 | * @len: the length to read from |
282 | * configuration space |
283 | * @set_config: Write to device specific configuration space |
284 | * @vdev: vdpa device |
285 | * @offset: offset from the beginning of |
286 | * configuration space |
287 | * @buf: buffer used to write from |
288 | * @len: the length to write to |
289 | * configuration space |
290 | * @get_generation: Get device config generation (optional) |
291 | * @vdev: vdpa device |
292 | * Returns u32: device generation |
293 | * @get_iova_range: Get supported iova range (optional) |
294 | * @vdev: vdpa device |
295 | * Returns the iova range supported by |
296 | * the device. |
297 | * @set_vq_affinity: Set the affinity of virtqueue (optional) |
298 | * @vdev: vdpa device |
299 | * @idx: virtqueue index |
300 | * @cpu_mask: the affinity mask |
301 | * Returns integer: success (0) or error (< 0) |
302 | * @get_vq_affinity: Get the affinity of virtqueue (optional) |
303 | * @vdev: vdpa device |
304 | * @idx: virtqueue index |
305 | * Returns the affinity mask |
306 | * @set_group_asid: Set address space identifier for a |
307 | * virtqueue group (optional) |
308 | * @vdev: vdpa device |
309 | * @group: virtqueue group |
310 | * @asid: address space id for this group |
311 | * Returns integer: success (0) or error (< 0) |
 * @set_map:			Set device memory mapping (optional)
 *				Needed for devices that use device-
 *				specific DMA translation (on-chip IOMMU)
315 | * @vdev: vdpa device |
316 | * @asid: address space identifier |
317 | * @iotlb: vhost memory mapping to be |
318 | * used by the vDPA |
319 | * Returns integer: success (0) or error (< 0) |
320 | * @dma_map: Map an area of PA to IOVA (optional) |
321 | * Needed for device that using device |
322 | * specific DMA translation (on-chip IOMMU) |
323 | * and preferring incremental map. |
324 | * @vdev: vdpa device |
325 | * @asid: address space identifier |
326 | * @iova: iova to be mapped |
327 | * @size: size of the area |
328 | * @pa: physical address for the map |
329 | * @perm: device access permission (VHOST_MAP_XX) |
330 | * Returns integer: success (0) or error (< 0) |
331 | * @dma_unmap: Unmap an area of IOVA (optional but |
332 | * must be implemented with dma_map) |
333 | * Needed for device that using device |
334 | * specific DMA translation (on-chip IOMMU) |
335 | * and preferring incremental unmap. |
336 | * @vdev: vdpa device |
337 | * @asid: address space identifier |
338 | * @iova: iova to be unmapped |
339 | * @size: size of the area |
340 | * Returns integer: success (0) or error (< 0) |
341 | * @reset_map: Reset device memory mapping to the default |
342 | * state (optional) |
343 | * Needed for devices that are using device |
344 | * specific DMA translation and prefer mapping |
345 | * to be decoupled from the virtio life cycle, |
346 | * i.e. device .reset op does not reset mapping |
347 | * @vdev: vdpa device |
348 | * @asid: address space identifier |
349 | * Returns integer: success (0) or error (< 0) |
350 | * @get_vq_dma_dev: Get the dma device for a specific |
351 | * virtqueue (optional) |
352 | * @vdev: vdpa device |
353 | * @idx: virtqueue index |
354 | * Returns pointer to structure device or error (NULL) |
355 | * @bind_mm: Bind the device to a specific address space |
356 | * so the vDPA framework can use VA when this |
357 | * callback is implemented. (optional) |
358 | * @vdev: vdpa device |
359 | * @mm: address space to bind |
360 | * @unbind_mm: Unbind the device from the address space |
361 | * bound using the bind_mm callback. (optional) |
362 | * @vdev: vdpa device |
363 | * @free: Free resources that belongs to vDPA (optional) |
364 | * @vdev: vdpa device |
365 | */ |
struct vdpa_config_ops {
	/* Virtqueue ops */
	int (*set_vq_address)(struct vdpa_device *vdev,
			      u16 idx, u64 desc_area, u64 driver_area,
			      u64 device_area);
	void (*set_vq_num)(struct vdpa_device *vdev, u16 idx, u32 num);
	void (*kick_vq)(struct vdpa_device *vdev, u16 idx);
	void (*kick_vq_with_data)(struct vdpa_device *vdev, u32 data);
	void (*set_vq_cb)(struct vdpa_device *vdev, u16 idx,
			  struct vdpa_callback *cb);
	void (*set_vq_ready)(struct vdpa_device *vdev, u16 idx, bool ready);
	bool (*get_vq_ready)(struct vdpa_device *vdev, u16 idx);
	int (*set_vq_state)(struct vdpa_device *vdev, u16 idx,
			    const struct vdpa_vq_state *state);
	int (*get_vq_state)(struct vdpa_device *vdev, u16 idx,
			    struct vdpa_vq_state *state);
	int (*get_vendor_vq_stats)(struct vdpa_device *vdev, u16 idx,
				   struct sk_buff *msg,
				   struct netlink_ext_ack *extack);
	struct vdpa_notification_area
	(*get_vq_notification)(struct vdpa_device *vdev, u16 idx);
	/* vq irq is not expected to be changed once DRIVER_OK is set */
	int (*get_vq_irq)(struct vdpa_device *vdev, u16 idx);

	/* Device ops */
	u32 (*get_vq_align)(struct vdpa_device *vdev);
	u32 (*get_vq_group)(struct vdpa_device *vdev, u16 idx);
	u32 (*get_vq_desc_group)(struct vdpa_device *vdev, u16 idx);
	u64 (*get_device_features)(struct vdpa_device *vdev);
	u64 (*get_backend_features)(const struct vdpa_device *vdev);
	int (*set_driver_features)(struct vdpa_device *vdev, u64 features);
	u64 (*get_driver_features)(struct vdpa_device *vdev);
	void (*set_config_cb)(struct vdpa_device *vdev,
			      struct vdpa_callback *cb);
	u16 (*get_vq_num_max)(struct vdpa_device *vdev);
	u16 (*get_vq_num_min)(struct vdpa_device *vdev);
	u32 (*get_device_id)(struct vdpa_device *vdev);
	u32 (*get_vendor_id)(struct vdpa_device *vdev);
	u8 (*get_status)(struct vdpa_device *vdev);
	void (*set_status)(struct vdpa_device *vdev, u8 status);
	int (*reset)(struct vdpa_device *vdev);
	int (*compat_reset)(struct vdpa_device *vdev, u32 flags);
/* @compat_reset flag: also reset/clean the device mapping (cf. @reset_map) */
#define VDPA_RESET_F_CLEAN_MAP 1
	int (*suspend)(struct vdpa_device *vdev);
	int (*resume)(struct vdpa_device *vdev);
	size_t (*get_config_size)(struct vdpa_device *vdev);
	void (*get_config)(struct vdpa_device *vdev, unsigned int offset,
			   void *buf, unsigned int len);
	void (*set_config)(struct vdpa_device *vdev, unsigned int offset,
			   const void *buf, unsigned int len);
	u32 (*get_generation)(struct vdpa_device *vdev);
	struct vdpa_iova_range (*get_iova_range)(struct vdpa_device *vdev);
	int (*set_vq_affinity)(struct vdpa_device *vdev, u16 idx,
			       const struct cpumask *cpu_mask);
	const struct cpumask *(*get_vq_affinity)(struct vdpa_device *vdev,
						 u16 idx);

	/* DMA ops */
	int (*set_map)(struct vdpa_device *vdev, unsigned int asid,
		       struct vhost_iotlb *iotlb);
	int (*dma_map)(struct vdpa_device *vdev, unsigned int asid,
		       u64 iova, u64 size, u64 pa, u32 perm, void *opaque);
	int (*dma_unmap)(struct vdpa_device *vdev, unsigned int asid,
			 u64 iova, u64 size);
	int (*reset_map)(struct vdpa_device *vdev, unsigned int asid);
	int (*set_group_asid)(struct vdpa_device *vdev, unsigned int group,
			      unsigned int asid);
	struct device *(*get_vq_dma_dev)(struct vdpa_device *vdev, u16 idx);
	int (*bind_mm)(struct vdpa_device *vdev, struct mm_struct *mm);
	void (*unbind_mm)(struct vdpa_device *vdev);

	/* Free device resources */
	void (*free)(struct vdpa_device *vdev);
};
440 | |
/*
 * Low-level allocator; prefer the type-safe vdpa_alloc_device() macro
 * below, which computes @size from the driver's container structure.
 */
struct vdpa_device *__vdpa_alloc_device(struct device *parent,
					const struct vdpa_config_ops *config,
					unsigned int ngroups, unsigned int nas,
					size_t size, const char *name,
					bool use_va);
446 | |
/**
 * vdpa_alloc_device - allocate and initialize a vDPA device
 *
 * @dev_struct: the type of the parent structure
 * @member: the name of struct vdpa_device within the @dev_struct
 * @parent: the parent device
 * @config: the bus operations that is supported by this device
 * @ngroups: the number of virtqueue groups supported by this device
 * @nas: the number of address spaces
 * @name: name of the vdpa device
 * @use_va: indicate whether virtual address must be used by this device
 *
 * The BUILD_BUG_ON_ZERO() enforces that @member is the first field of
 * @dev_struct, which container_of() relies on here.
 *
 * Return allocated data structure or ERR_PTR upon error
 */
#define vdpa_alloc_device(dev_struct, member, parent, config, ngroups, nas, \
			  name, use_va) \
	container_of((__vdpa_alloc_device( \
		       parent, config, ngroups, nas, \
		       (sizeof(dev_struct) + \
			BUILD_BUG_ON_ZERO(offsetof( \
				dev_struct, member))), name, use_va)), \
		     dev_struct, member)
469 | |
int vdpa_register_device(struct vdpa_device *vdev, u32 nvqs);
void vdpa_unregister_device(struct vdpa_device *vdev);

/*
 * Underscore-prefixed variants are for use from the vdpa_mgmtdev_ops
 * dev_add()/dev_del() callbacks (see struct vdpa_mgmtdev_ops below).
 */
int _vdpa_register_device(struct vdpa_device *vdev, u32 nvqs);
void _vdpa_unregister_device(struct vdpa_device *vdev);
475 | |
/**
 * struct vdpa_driver - operations for a vDPA driver
 * @driver: underlying device driver
 * @probe: the function to call when a device is found. Returns 0 or -errno.
 * @remove: the function to call when a device is removed.
 */
struct vdpa_driver {
	struct device_driver driver;
	int (*probe)(struct vdpa_device *vdev);
	void (*remove)(struct vdpa_device *vdev);
};
487 | |
/* Register a vDPA driver owned by the calling module. */
#define vdpa_register_driver(drv) \
	__vdpa_register_driver(drv, THIS_MODULE)
int __vdpa_register_driver(struct vdpa_driver *drv, struct module *owner);
void vdpa_unregister_driver(struct vdpa_driver *drv);
492 | |
/**
 * module_vdpa_driver - Helper macro for registering a vDPA driver
 * @__vdpa_driver: vdpa_driver struct
 *
 * Helper macro for vDPA drivers which do not do anything special in
 * module init/exit: registration happens in module init and
 * unregistration in module exit via module_driver().
 */
#define module_vdpa_driver(__vdpa_driver) \
	module_driver(__vdpa_driver, vdpa_register_driver,	\
		      vdpa_unregister_driver)
496 | |
/* Convert a generic device_driver to its containing vdpa_driver. */
static inline struct vdpa_driver *drv_to_vdpa(struct device_driver *driver)
{
	return container_of(driver, struct vdpa_driver, driver);
}
501 | |
/* Convert a generic device to its containing vdpa_device. */
static inline struct vdpa_device *dev_to_vdpa(struct device *_dev)
{
	return container_of(_dev, struct vdpa_device, dev);
}
506 | |
507 | static inline void *vdpa_get_drvdata(const struct vdpa_device *vdev) |
508 | { |
509 | return dev_get_drvdata(dev: &vdev->dev); |
510 | } |
511 | |
512 | static inline void vdpa_set_drvdata(struct vdpa_device *vdev, void *data) |
513 | { |
514 | dev_set_drvdata(dev: &vdev->dev, data); |
515 | } |
516 | |
/* Return the device that actually performs DMA on behalf of @vdev. */
static inline struct device *vdpa_get_dma_dev(struct vdpa_device *vdev)
{
	return vdev->dma_dev;
}
521 | |
522 | static inline int vdpa_reset(struct vdpa_device *vdev, u32 flags) |
523 | { |
524 | const struct vdpa_config_ops *ops = vdev->config; |
525 | int ret; |
526 | |
527 | down_write(sem: &vdev->cf_lock); |
528 | vdev->features_valid = false; |
529 | if (ops->compat_reset && flags) |
530 | ret = ops->compat_reset(vdev, flags); |
531 | else |
532 | ret = ops->reset(vdev); |
533 | up_write(sem: &vdev->cf_lock); |
534 | return ret; |
535 | } |
536 | |
537 | static inline int vdpa_set_features_unlocked(struct vdpa_device *vdev, u64 features) |
538 | { |
539 | const struct vdpa_config_ops *ops = vdev->config; |
540 | int ret; |
541 | |
542 | vdev->features_valid = true; |
543 | ret = ops->set_driver_features(vdev, features); |
544 | |
545 | return ret; |
546 | } |
547 | |
548 | static inline int vdpa_set_features(struct vdpa_device *vdev, u64 features) |
549 | { |
550 | int ret; |
551 | |
552 | down_write(sem: &vdev->cf_lock); |
553 | ret = vdpa_set_features_unlocked(vdev, features); |
554 | up_write(sem: &vdev->cf_lock); |
555 | |
556 | return ret; |
557 | } |
558 | |
/* Config-space accessors; implemented in drivers/vdpa/vdpa core with locking. */
void vdpa_get_config(struct vdpa_device *vdev, unsigned int offset,
		     void *buf, unsigned int len);
void vdpa_set_config(struct vdpa_device *dev, unsigned int offset,
		     const void *buf, unsigned int length);
void vdpa_set_status(struct vdpa_device *vdev, u8 status);
564 | |
/**
 * struct vdpa_mgmtdev_ops - vdpa device ops
 * @dev_add: Add a vdpa device using alloc and register
 *	     @mdev: parent device to use for device addition
 *	     @name: name of the new vdpa device
 *	     @config: config attributes to apply to the device under creation
 *	     Driver needs to add a new device using _vdpa_register_device()
 *	     after fully initializing the vdpa device. Driver must return 0
 *	     on success or appropriate error code.
 * @dev_del: Remove a vdpa device using unregister
 *	     @mdev: parent device to use for device removal
 *	     @dev: vdpa device to remove
 *	     Driver needs to remove the specified device by calling
 *	     _vdpa_unregister_device().
 */
struct vdpa_mgmtdev_ops {
	int (*dev_add)(struct vdpa_mgmt_dev *mdev, const char *name,
		       const struct vdpa_dev_set_config *config);
	void (*dev_del)(struct vdpa_mgmt_dev *mdev, struct vdpa_device *dev);
};
585 | |
/**
 * struct vdpa_mgmt_dev - vdpa management device
 * @device: Management parent device
 * @ops: operations supported by management device
 * @id_table: Pointer to device id table of supported ids
 * @config_attr_mask: bit mask of attributes of type enum vdpa_attr that
 *		      management device support during dev_add callback
 * @list: list entry
 * @supported_features: features supported by device
 * @max_supported_vqs: maximum number of virtqueues supported by device
 */
struct vdpa_mgmt_dev {
	struct device *device;
	const struct vdpa_mgmtdev_ops *ops;
	struct virtio_device_id *id_table;
	u64 config_attr_mask;
	struct list_head list;
	u64 supported_features;
	u32 max_supported_vqs;
};
606 | |
/* Register/unregister a management device with the vDPA core. */
int vdpa_mgmtdev_register(struct vdpa_mgmt_dev *mdev);
void vdpa_mgmtdev_unregister(struct vdpa_mgmt_dev *mdev);
609 | |
610 | #endif /* _LINUX_VDPA_H */ |
611 | |