// SPDX-License-Identifier: GPL-2.0-only
/*
 * vDPA bridge driver for modern virtio-pci device
 *
 * Copyright (c) 2020, Red Hat Inc. All rights reserved.
 * Author: Jason Wang <jasowang@redhat.com>
 *
 * Based on virtio_pci_modern.c.
 */

#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/vdpa.h>
#include <linux/virtio.h>
#include <linux/virtio_config.h>
#include <linux/virtio_ring.h>
#include <linux/virtio_pci.h>
#include <linux/virtio_pci_modern.h>
#include <uapi/linux/vdpa.h>

#define VP_VDPA_QUEUE_MAX	256
#define VP_VDPA_DRIVER_NAME	"vp_vdpa"
#define VP_VDPA_NAME_SIZE	256

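/* Per-virtqueue state: mapped notify register, MSI-X irq and driver callback */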
struct vp_vring {
	void __iomem *notify;
	char msix_name[VP_VDPA_NAME_SIZE];
	struct vdpa_callback cb;
	resource_size_t notify_pa;
	int irq;
};

struct vp_vdpa {
	struct vdpa_device vdpa;
	struct virtio_pci_modern_device *mdev;
	struct vp_vring *vring;
	struct vdpa_callback config_cb;
	u64 device_features;
	char msix_name[VP_VDPA_NAME_SIZE];
	int config_irq;
	int queues;
	int vectors;
};

struct vp_vdpa_mgmtdev {
	struct vdpa_mgmt_dev mgtdev;
	struct virtio_pci_modern_device *mdev;
	struct vp_vdpa *vp_vdpa;
};

static struct vp_vdpa *vdpa_to_vp(struct vdpa_device *vdpa)
{
	return container_of(vdpa, struct vp_vdpa, vdpa);
}

static struct virtio_pci_modern_device *vdpa_to_mdev(struct vdpa_device *vdpa)
{
	struct vp_vdpa *vp_vdpa = vdpa_to_vp(vdpa);

	return vp_vdpa->mdev;
}

static struct virtio_pci_modern_device *vp_vdpa_to_mdev(struct vp_vdpa *vp_vdpa)
{
	return vp_vdpa->mdev;
}

static u64 vp_vdpa_get_device_features(struct vdpa_device *vdpa)
{
	struct vp_vdpa *vp_vdpa = vdpa_to_vp(vdpa);

	return vp_vdpa->device_features;
}

static int vp_vdpa_set_driver_features(struct vdpa_device *vdpa, u64 features)
{
	struct virtio_pci_modern_device *mdev = vdpa_to_mdev(vdpa);

	vp_modern_set_features(mdev, features);

	return 0;
}

static u64 vp_vdpa_get_driver_features(struct vdpa_device *vdpa)
{
	struct virtio_pci_modern_device *mdev = vdpa_to_mdev(vdpa);

	return vp_modern_get_driver_features(mdev);
}

static u8 vp_vdpa_get_status(struct vdpa_device *vdpa)
{
	struct virtio_pci_modern_device *mdev = vdpa_to_mdev(vdpa);

	return vp_modern_get_status(mdev);
}

static int vp_vdpa_get_vq_irq(struct vdpa_device *vdpa, u16 idx)
{
	struct vp_vdpa *vp_vdpa = vdpa_to_vp(vdpa);
	int irq = vp_vdpa->vring[idx].irq;

	if (irq == VIRTIO_MSI_NO_VECTOR)
		return -EINVAL;

	return irq;
}

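/*
 * Tear down interrupts: detach each vector from its virtqueue (and from the
 * config space) before freeing the irq, then release all MSI-X vectors.
 */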
static void vp_vdpa_free_irq(struct vp_vdpa *vp_vdpa)
{
	struct virtio_pci_modern_device *mdev = vp_vdpa_to_mdev(vp_vdpa);
	struct pci_dev *pdev = mdev->pci_dev;
	int i;

	for (i = 0; i < vp_vdpa->queues; i++) {
		if (vp_vdpa->vring[i].irq != VIRTIO_MSI_NO_VECTOR) {
			vp_modern_queue_vector(mdev, i, VIRTIO_MSI_NO_VECTOR);
			devm_free_irq(&pdev->dev, vp_vdpa->vring[i].irq,
				      &vp_vdpa->vring[i]);
			vp_vdpa->vring[i].irq = VIRTIO_MSI_NO_VECTOR;
		}
	}

	if (vp_vdpa->config_irq != VIRTIO_MSI_NO_VECTOR) {
		vp_modern_config_vector(mdev, VIRTIO_MSI_NO_VECTOR);
		devm_free_irq(&pdev->dev, vp_vdpa->config_irq, vp_vdpa);
		vp_vdpa->config_irq = VIRTIO_MSI_NO_VECTOR;
	}

	if (vp_vdpa->vectors) {
		pci_free_irq_vectors(pdev);
		vp_vdpa->vectors = 0;
	}
}

static irqreturn_t vp_vdpa_vq_handler(int irq, void *arg)
{
	struct vp_vring *vring = arg;

	if (vring->cb.callback)
		return vring->cb.callback(vring->cb.private);

	return IRQ_HANDLED;
}

static irqreturn_t vp_vdpa_config_handler(int irq, void *arg)
{
	struct vp_vdpa *vp_vdpa = arg;

	if (vp_vdpa->config_cb.callback)
		return vp_vdpa->config_cb.callback(vp_vdpa->config_cb.private);

	return IRQ_HANDLED;
}

static int vp_vdpa_request_irq(struct vp_vdpa *vp_vdpa)
{
	struct virtio_pci_modern_device *mdev = vp_vdpa_to_mdev(vp_vdpa);
	struct pci_dev *pdev = mdev->pci_dev;
	int i, ret, irq;
	int queues = vp_vdpa->queues;
	int vectors = queues + 1;

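	/* Allocate one MSI-X vector per virtqueue plus one for config changes */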
	ret = pci_alloc_irq_vectors(pdev, vectors, vectors, PCI_IRQ_MSIX);
	if (ret != vectors) {
		dev_err(&pdev->dev,
			"vp_vdpa: failed to allocate irq vectors: want %d, got %d\n",
			vectors, ret);
		return ret;
	}

	vp_vdpa->vectors = vectors;

	for (i = 0; i < queues; i++) {
		snprintf(vp_vdpa->vring[i].msix_name, VP_VDPA_NAME_SIZE,
			 "vp-vdpa[%s]-%d\n", pci_name(pdev), i);
		irq = pci_irq_vector(pdev, i);
		ret = devm_request_irq(&pdev->dev, irq,
				       vp_vdpa_vq_handler,
				       0, vp_vdpa->vring[i].msix_name,
				       &vp_vdpa->vring[i]);
		if (ret) {
			dev_err(&pdev->dev,
				"vp_vdpa: failed to request irq for vq %d\n", i);
			goto err;
		}
		vp_modern_queue_vector(mdev, i, i);
		vp_vdpa->vring[i].irq = irq;
	}

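	/* The last vector is dedicated to config change notifications */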
	snprintf(vp_vdpa->msix_name, VP_VDPA_NAME_SIZE, "vp-vdpa[%s]-config\n",
		 pci_name(pdev));
	irq = pci_irq_vector(pdev, queues);
	ret = devm_request_irq(&pdev->dev, irq, vp_vdpa_config_handler, 0,
			       vp_vdpa->msix_name, vp_vdpa);
	if (ret) {
		dev_err(&pdev->dev,
			"vp_vdpa: failed to request irq for config\n");
		goto err;
	}
	vp_modern_config_vector(mdev, queues);
	vp_vdpa->config_irq = irq;

	return 0;
err:
	vp_vdpa_free_irq(vp_vdpa);
	return ret;
}

static void vp_vdpa_set_status(struct vdpa_device *vdpa, u8 status)
{
	struct vp_vdpa *vp_vdpa = vdpa_to_vp(vdpa);
	struct virtio_pci_modern_device *mdev = vp_vdpa_to_mdev(vp_vdpa);
	u8 s = vp_vdpa_get_status(vdpa);

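	/*
	 * Request the irqs on the transition to DRIVER_OK, so the vectors
	 * are in place before the device starts processing the virtqueues.
	 */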
	if (status & VIRTIO_CONFIG_S_DRIVER_OK &&
	    !(s & VIRTIO_CONFIG_S_DRIVER_OK)) {
		vp_vdpa_request_irq(vp_vdpa);
	}

	vp_modern_set_status(mdev, status);
}

static int vp_vdpa_reset(struct vdpa_device *vdpa)
{
	struct vp_vdpa *vp_vdpa = vdpa_to_vp(vdpa);
	struct virtio_pci_modern_device *mdev = vp_vdpa_to_mdev(vp_vdpa);
	u8 s = vp_vdpa_get_status(vdpa);

	vp_modern_set_status(mdev, 0);

	if (s & VIRTIO_CONFIG_S_DRIVER_OK)
		vp_vdpa_free_irq(vp_vdpa);

	return 0;
}

static u16 vp_vdpa_get_vq_num_max(struct vdpa_device *vdpa)
{
	return VP_VDPA_QUEUE_MAX;
}

static int vp_vdpa_get_vq_state(struct vdpa_device *vdpa, u16 qid,
				struct vdpa_vq_state *state)
{
	/* Note that this is not supported by the virtio specification, so
	 * we return -EOPNOTSUPP here. This means we can't support live
	 * migration or vhost device start/stop.
	 */
	return -EOPNOTSUPP;
}

static int vp_vdpa_set_vq_state_split(struct vdpa_device *vdpa,
				      const struct vdpa_vq_state *state)
{
	const struct vdpa_vq_state_split *split = &state->split;

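	/*
	 * Only the device's initial state can be "restored": a split ring
	 * starts with an available index of 0.
	 */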
	if (split->avail_index == 0)
		return 0;

	return -EOPNOTSUPP;
}

static int vp_vdpa_set_vq_state_packed(struct vdpa_device *vdpa,
				       const struct vdpa_vq_state *state)
{
	const struct vdpa_vq_state_packed *packed = &state->packed;

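	/*
	 * A packed ring starts with both wrap counters set to 1 and both
	 * ring indices set to 0, so only that exact state is accepted.
	 */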
	if (packed->last_avail_counter == 1 &&
	    packed->last_avail_idx == 0 &&
	    packed->last_used_counter == 1 &&
	    packed->last_used_idx == 0)
		return 0;

	return -EOPNOTSUPP;
}

static int vp_vdpa_set_vq_state(struct vdpa_device *vdpa, u16 qid,
				const struct vdpa_vq_state *state)
{
	struct virtio_pci_modern_device *mdev = vdpa_to_mdev(vdpa);

	/* Note that this is not supported by the virtio specification.
	 * But if the state is by chance equal to the device's initial
	 * state, we can let it go.
	 */
	if ((vp_modern_get_status(mdev) & VIRTIO_CONFIG_S_FEATURES_OK) &&
	    !vp_modern_get_queue_enable(mdev, qid)) {
		if (vp_modern_get_driver_features(mdev) &
		    BIT_ULL(VIRTIO_F_RING_PACKED))
			return vp_vdpa_set_vq_state_packed(vdpa, state);
		else
			return vp_vdpa_set_vq_state_split(vdpa, state);
	}

	return -EOPNOTSUPP;
}

static void vp_vdpa_set_vq_cb(struct vdpa_device *vdpa, u16 qid,
			      struct vdpa_callback *cb)
{
	struct vp_vdpa *vp_vdpa = vdpa_to_vp(vdpa);

	vp_vdpa->vring[qid].cb = *cb;
}

static void vp_vdpa_set_vq_ready(struct vdpa_device *vdpa,
				 u16 qid, bool ready)
{
	struct virtio_pci_modern_device *mdev = vdpa_to_mdev(vdpa);

	vp_modern_set_queue_enable(mdev, qid, ready);
}

static bool vp_vdpa_get_vq_ready(struct vdpa_device *vdpa, u16 qid)
{
	struct virtio_pci_modern_device *mdev = vdpa_to_mdev(vdpa);

	return vp_modern_get_queue_enable(mdev, qid);
}

static void vp_vdpa_set_vq_num(struct vdpa_device *vdpa, u16 qid,
			       u32 num)
{
	struct virtio_pci_modern_device *mdev = vdpa_to_mdev(vdpa);

	vp_modern_set_queue_size(mdev, qid, num);
}

static u16 vp_vdpa_get_vq_size(struct vdpa_device *vdpa, u16 qid)
{
	struct virtio_pci_modern_device *mdev = vdpa_to_mdev(vdpa);

	return vp_modern_get_queue_size(mdev, qid);
}

static int vp_vdpa_set_vq_address(struct vdpa_device *vdpa, u16 qid,
				  u64 desc_area, u64 driver_area,
				  u64 device_area)
{
	struct virtio_pci_modern_device *mdev = vdpa_to_mdev(vdpa);

	vp_modern_queue_address(mdev, qid, desc_area,
				driver_area, device_area);

	return 0;
}

static void vp_vdpa_kick_vq(struct vdpa_device *vdpa, u16 qid)
{
	struct vp_vdpa *vp_vdpa = vdpa_to_vp(vdpa);

	vp_iowrite16(qid, vp_vdpa->vring[qid].notify);
}

static u32 vp_vdpa_get_generation(struct vdpa_device *vdpa)
{
	struct virtio_pci_modern_device *mdev = vdpa_to_mdev(vdpa);

	return vp_modern_generation(mdev);
}

static u32 vp_vdpa_get_device_id(struct vdpa_device *vdpa)
{
	struct virtio_pci_modern_device *mdev = vdpa_to_mdev(vdpa);

	return mdev->id.device;
}

static u32 vp_vdpa_get_vendor_id(struct vdpa_device *vdpa)
{
	struct virtio_pci_modern_device *mdev = vdpa_to_mdev(vdpa);

	return mdev->id.vendor;
}

static u32 vp_vdpa_get_vq_align(struct vdpa_device *vdpa)
{
	return PAGE_SIZE;
}

static size_t vp_vdpa_get_config_size(struct vdpa_device *vdpa)
{
	struct virtio_pci_modern_device *mdev = vdpa_to_mdev(vdpa);

	return mdev->device_len;
}

static void vp_vdpa_get_config(struct vdpa_device *vdpa,
			       unsigned int offset,
			       void *buf, unsigned int len)
{
	struct vp_vdpa *vp_vdpa = vdpa_to_vp(vdpa);
	struct virtio_pci_modern_device *mdev = vp_vdpa_to_mdev(vp_vdpa);
	u8 old, new;
	u8 *p;
	int i;

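	/*
	 * Re-read until the generation counter is stable, so a concurrent
	 * config update by the device cannot result in a torn read.
	 */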
	do {
		old = vp_ioread8(&mdev->common->config_generation);
		p = buf;
		for (i = 0; i < len; i++)
			*p++ = vp_ioread8(mdev->device + offset + i);

		new = vp_ioread8(&mdev->common->config_generation);
	} while (old != new);
}

static void vp_vdpa_set_config(struct vdpa_device *vdpa,
			       unsigned int offset, const void *buf,
			       unsigned int len)
{
	struct vp_vdpa *vp_vdpa = vdpa_to_vp(vdpa);
	struct virtio_pci_modern_device *mdev = vp_vdpa_to_mdev(vp_vdpa);
	const u8 *p = buf;
	int i;

	for (i = 0; i < len; i++)
		vp_iowrite8(*p++, mdev->device + offset + i);
}

static void vp_vdpa_set_config_cb(struct vdpa_device *vdpa,
				  struct vdpa_callback *cb)
{
	struct vp_vdpa *vp_vdpa = vdpa_to_vp(vdpa);

	vp_vdpa->config_cb = *cb;
}

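/*
 * Report the physical address and size of the vq notify register, so the
 * doorbell page can be mapped directly (e.g. into userspace via vhost-vDPA).
 */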
static struct vdpa_notification_area
vp_vdpa_get_vq_notification(struct vdpa_device *vdpa, u16 qid)
{
	struct vp_vdpa *vp_vdpa = vdpa_to_vp(vdpa);
	struct virtio_pci_modern_device *mdev = vp_vdpa_to_mdev(vp_vdpa);
	struct vdpa_notification_area notify;

	notify.addr = vp_vdpa->vring[qid].notify_pa;
	notify.size = mdev->notify_offset_multiplier;

	return notify;
}

static const struct vdpa_config_ops vp_vdpa_ops = {
	.get_device_features = vp_vdpa_get_device_features,
	.set_driver_features = vp_vdpa_set_driver_features,
	.get_driver_features = vp_vdpa_get_driver_features,
	.get_status	= vp_vdpa_get_status,
	.set_status	= vp_vdpa_set_status,
	.reset		= vp_vdpa_reset,
	.get_vq_num_max	= vp_vdpa_get_vq_num_max,
	.get_vq_state	= vp_vdpa_get_vq_state,
	.get_vq_notification = vp_vdpa_get_vq_notification,
	.set_vq_state	= vp_vdpa_set_vq_state,
	.set_vq_cb	= vp_vdpa_set_vq_cb,
	.set_vq_ready	= vp_vdpa_set_vq_ready,
	.get_vq_ready	= vp_vdpa_get_vq_ready,
	.set_vq_num	= vp_vdpa_set_vq_num,
	.get_vq_size	= vp_vdpa_get_vq_size,
	.set_vq_address	= vp_vdpa_set_vq_address,
	.kick_vq	= vp_vdpa_kick_vq,
	.get_generation	= vp_vdpa_get_generation,
	.get_device_id	= vp_vdpa_get_device_id,
	.get_vendor_id	= vp_vdpa_get_vendor_id,
	.get_vq_align	= vp_vdpa_get_vq_align,
	.get_config_size = vp_vdpa_get_config_size,
	.get_config	= vp_vdpa_get_config,
	.set_config	= vp_vdpa_set_config,
	.set_config_cb  = vp_vdpa_set_config_cb,
	.get_vq_irq	= vp_vdpa_get_vq_irq,
};

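/* devres action used in vp_vdpa_dev_add() to release the MSI-X vectors */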
static void vp_vdpa_free_irq_vectors(void *data)
{
	pci_free_irq_vectors(data);
}

static int vp_vdpa_dev_add(struct vdpa_mgmt_dev *v_mdev, const char *name,
			   const struct vdpa_dev_set_config *add_config)
{
	struct vp_vdpa_mgmtdev *vp_vdpa_mgtdev =
		container_of(v_mdev, struct vp_vdpa_mgmtdev, mgtdev);

	struct virtio_pci_modern_device *mdev = vp_vdpa_mgtdev->mdev;
	struct pci_dev *pdev = mdev->pci_dev;
	struct device *dev = &pdev->dev;
	struct vp_vdpa *vp_vdpa = NULL;
	u64 device_features;
	int ret, i;

	vp_vdpa = vdpa_alloc_device(struct vp_vdpa, vdpa,
				    dev, &vp_vdpa_ops, 1, 1, name, false);

	if (IS_ERR(vp_vdpa)) {
		dev_err(dev, "vp_vdpa: Failed to allocate vDPA structure\n");
		return PTR_ERR(vp_vdpa);
	}

	vp_vdpa_mgtdev->vp_vdpa = vp_vdpa;

	vp_vdpa->vdpa.dma_dev = &pdev->dev;
	vp_vdpa->queues = vp_modern_get_num_queues(mdev);
	vp_vdpa->mdev = mdev;

	device_features = vp_modern_get_features(mdev);
	if (add_config->mask & BIT_ULL(VDPA_ATTR_DEV_FEATURES)) {
		if (add_config->device_features & ~device_features) {
			ret = -EINVAL;
			dev_err(&pdev->dev, "Tried to provision features "
				"that are not supported by the device: "
				"device_features 0x%llx provisioned 0x%llx\n",
				device_features, add_config->device_features);
			goto err;
		}
		device_features = add_config->device_features;
	}
	vp_vdpa->device_features = device_features;

	ret = devm_add_action_or_reset(dev, vp_vdpa_free_irq_vectors, pdev);
	if (ret) {
		dev_err(&pdev->dev,
			"Failed to add devres for freeing irq vectors\n");
		goto err;
	}

	vp_vdpa->vring = devm_kcalloc(&pdev->dev, vp_vdpa->queues,
				      sizeof(*vp_vdpa->vring),
				      GFP_KERNEL);
	if (!vp_vdpa->vring) {
		ret = -ENOMEM;
		dev_err(&pdev->dev, "Failed to allocate virtqueues\n");
		goto err;
	}

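	/*
	 * Map the notify register of each virtqueue and remember its
	 * physical address for vp_vdpa_get_vq_notification().
	 */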
	for (i = 0; i < vp_vdpa->queues; i++) {
		vp_vdpa->vring[i].irq = VIRTIO_MSI_NO_VECTOR;
		vp_vdpa->vring[i].notify =
			vp_modern_map_vq_notify(mdev, i,
						&vp_vdpa->vring[i].notify_pa);
		if (!vp_vdpa->vring[i].notify) {
			ret = -EINVAL;
			dev_warn(&pdev->dev, "Failed to map vq notify %d\n", i);
			goto err;
		}
	}
	vp_vdpa->config_irq = VIRTIO_MSI_NO_VECTOR;

	vp_vdpa->vdpa.mdev = &vp_vdpa_mgtdev->mgtdev;
	ret = _vdpa_register_device(&vp_vdpa->vdpa, vp_vdpa->queues);
	if (ret) {
		dev_err(&pdev->dev, "Failed to register to vdpa bus\n");
		goto err;
	}

	return 0;

err:
	put_device(&vp_vdpa->vdpa.dev);
	return ret;
}

static void vp_vdpa_dev_del(struct vdpa_mgmt_dev *v_mdev,
			    struct vdpa_device *dev)
{
	struct vp_vdpa_mgmtdev *vp_vdpa_mgtdev =
		container_of(v_mdev, struct vp_vdpa_mgmtdev, mgtdev);

	struct vp_vdpa *vp_vdpa = vp_vdpa_mgtdev->vp_vdpa;

	_vdpa_unregister_device(&vp_vdpa->vdpa);
	vp_vdpa_mgtdev->vp_vdpa = NULL;
}

static const struct vdpa_mgmtdev_ops vp_vdpa_mdev_ops = {
	.dev_add = vp_vdpa_dev_add,
	.dev_del = vp_vdpa_dev_del,
};

static int vp_vdpa_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct vp_vdpa_mgmtdev *vp_vdpa_mgtdev = NULL;
	struct vdpa_mgmt_dev *mgtdev;
	struct device *dev = &pdev->dev;
	struct virtio_pci_modern_device *mdev = NULL;
	struct virtio_device_id *mdev_id = NULL;
	int err;

	vp_vdpa_mgtdev = kzalloc(sizeof(*vp_vdpa_mgtdev), GFP_KERNEL);
	if (!vp_vdpa_mgtdev)
		return -ENOMEM;

	mgtdev = &vp_vdpa_mgtdev->mgtdev;
	mgtdev->ops = &vp_vdpa_mdev_ops;
	mgtdev->device = dev;

	mdev = kzalloc(sizeof(struct virtio_pci_modern_device), GFP_KERNEL);
	if (!mdev) {
		err = -ENOMEM;
		goto mdev_err;
	}

	mdev_id = kzalloc(sizeof(struct virtio_device_id), GFP_KERNEL);
	if (!mdev_id) {
		err = -ENOMEM;
		goto mdev_id_err;
	}

	vp_vdpa_mgtdev->mdev = mdev;
	mdev->pci_dev = pdev;

	err = pcim_enable_device(pdev);
	if (err)
		goto probe_err;

	err = vp_modern_probe(mdev);
	if (err) {
		dev_err(&pdev->dev, "Failed to probe modern PCI device\n");
		goto probe_err;
	}

	mdev_id->device = mdev->id.device;
	mdev_id->vendor = mdev->id.vendor;
	mgtdev->id_table = mdev_id;
	mgtdev->max_supported_vqs = vp_modern_get_num_queues(mdev);
	mgtdev->supported_features = vp_modern_get_features(mdev);
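	/* Advertise that device features can be provisioned at dev_add time */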
	mgtdev->config_attr_mask = (1 << VDPA_ATTR_DEV_FEATURES);
	pci_set_master(pdev);
	pci_set_drvdata(pdev, vp_vdpa_mgtdev);

	err = vdpa_mgmtdev_register(mgtdev);
	if (err) {
		dev_err(&pdev->dev, "Failed to register vdpa mgmtdev device\n");
		goto register_err;
	}

	return 0;

register_err:
	vp_modern_remove(vp_vdpa_mgtdev->mdev);
probe_err:
	kfree(mdev_id);
mdev_id_err:
	kfree(mdev);
mdev_err:
	kfree(vp_vdpa_mgtdev);
	return err;
}

static void vp_vdpa_remove(struct pci_dev *pdev)
{
	struct vp_vdpa_mgmtdev *vp_vdpa_mgtdev = pci_get_drvdata(pdev);
	struct virtio_pci_modern_device *mdev = NULL;

	mdev = vp_vdpa_mgtdev->mdev;
	vdpa_mgmtdev_unregister(&vp_vdpa_mgtdev->mgtdev);
	vp_modern_remove(mdev);
	kfree(vp_vdpa_mgtdev->mgtdev.id_table);
	kfree(mdev);
	kfree(vp_vdpa_mgtdev);
}

static struct pci_driver vp_vdpa_driver = {
	.name		= "vp-vdpa",
	.id_table	= NULL, /* only dynamic ids */
	.probe		= vp_vdpa_probe,
	.remove		= vp_vdpa_remove,
};

module_pci_driver(vp_vdpa_driver);

MODULE_AUTHOR("Jason Wang <jasowang@redhat.com>");
MODULE_DESCRIPTION("vp-vdpa");
MODULE_LICENSE("GPL");
MODULE_VERSION("1");