// SPDX-License-Identifier: GPL-2.0-only
/*
 * vDPA bridge driver for Alibaba ENI (Elastic Network Interface)
 *
 * Copyright (c) 2021, Alibaba Inc. All rights reserved.
 * Author: Wu Zongyong <wuzongyong@linux.alibaba.com>
 *
 */

#include <linux/bits.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/vdpa.h>
#include <linux/virtio.h>
#include <linux/virtio_config.h>
#include <linux/virtio_ring.h>
#include <linux/virtio_pci.h>
#include <linux/virtio_pci_legacy.h>
#include <uapi/linux/virtio_net.h>

#define ENI_MSIX_NAME_SIZE 256

#define ENI_ERR(pdev, fmt, ...) \
	dev_err(&pdev->dev, "%s"fmt, "eni_vdpa: ", ##__VA_ARGS__)
#define ENI_DBG(pdev, fmt, ...) \
	dev_dbg(&pdev->dev, "%s"fmt, "eni_vdpa: ", ##__VA_ARGS__)
#define ENI_INFO(pdev, fmt, ...) \
	dev_info(&pdev->dev, "%s"fmt, "eni_vdpa: ", ##__VA_ARGS__)

struct eni_vring {
	void __iomem *notify;
	char msix_name[ENI_MSIX_NAME_SIZE];
	struct vdpa_callback cb;
	int irq;
};

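/* Per-device state: the vDPA device is embedded first so container_of()
 * works, followed by the legacy virtio-pci transport, the per-virtqueue
 * state, and the MSI-X bookkeeping (one config vector plus one vector per
 * virtqueue).
 */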
struct eni_vdpa {
	struct vdpa_device vdpa;
	struct virtio_pci_legacy_device ldev;
	struct eni_vring *vring;
	struct vdpa_callback config_cb;
	char msix_name[ENI_MSIX_NAME_SIZE];
	int config_irq;
	int queues;
	int vectors;
};

static struct eni_vdpa *vdpa_to_eni(struct vdpa_device *vdpa)
{
	return container_of(vdpa, struct eni_vdpa, vdpa);
}

static struct virtio_pci_legacy_device *vdpa_to_ldev(struct vdpa_device *vdpa)
{
	struct eni_vdpa *eni_vdpa = vdpa_to_eni(vdpa);

	return &eni_vdpa->ldev;
}

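/* The legacy device only advertises 32-bit legacy feature bits;
 * VIRTIO_F_ACCESS_PLATFORM and VIRTIO_F_ORDER_PLATFORM are added on top,
 * presumably so that upper layers translate addresses through the DMA API
 * and use platform-appropriate memory barriers.
 */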
static u64 eni_vdpa_get_device_features(struct vdpa_device *vdpa)
{
	struct virtio_pci_legacy_device *ldev = vdpa_to_ldev(vdpa);
	u64 features = vp_legacy_get_features(ldev);

	features |= BIT_ULL(VIRTIO_F_ACCESS_PLATFORM);
	features |= BIT_ULL(VIRTIO_F_ORDER_PLATFORM);

	return features;
}

static int eni_vdpa_set_driver_features(struct vdpa_device *vdpa, u64 features)
{
	struct virtio_pci_legacy_device *ldev = vdpa_to_ldev(vdpa);

	if (!(features & BIT_ULL(VIRTIO_NET_F_MRG_RXBUF)) && features) {
		ENI_ERR(ldev->pci_dev,
			"VIRTIO_NET_F_MRG_RXBUF is not negotiated\n");
		return -EINVAL;
	}

	vp_legacy_set_features(ldev, (u32)features);

	return 0;
}

static u64 eni_vdpa_get_driver_features(struct vdpa_device *vdpa)
{
	struct virtio_pci_legacy_device *ldev = vdpa_to_ldev(vdpa);

	return vp_legacy_get_driver_features(ldev);
}

static u8 eni_vdpa_get_status(struct vdpa_device *vdpa)
{
	struct virtio_pci_legacy_device *ldev = vdpa_to_ldev(vdpa);

	return vp_legacy_get_status(ldev);
}

static int eni_vdpa_get_vq_irq(struct vdpa_device *vdpa, u16 idx)
{
	struct eni_vdpa *eni_vdpa = vdpa_to_eni(vdpa);
	int irq = eni_vdpa->vring[idx].irq;

	if (irq == VIRTIO_MSI_NO_VECTOR)
		return -EINVAL;

	return irq;
}

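/* Tear down interrupts in the reverse order of eni_vdpa_request_irq():
 * detach each virtqueue from its MSI-X vector and free its IRQ, then do
 * the same for the config interrupt, and finally release the vectors.
 */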
static void eni_vdpa_free_irq(struct eni_vdpa *eni_vdpa)
{
	struct virtio_pci_legacy_device *ldev = &eni_vdpa->ldev;
	struct pci_dev *pdev = ldev->pci_dev;
	int i;

	for (i = 0; i < eni_vdpa->queues; i++) {
		if (eni_vdpa->vring[i].irq != VIRTIO_MSI_NO_VECTOR) {
			vp_legacy_queue_vector(ldev, i, VIRTIO_MSI_NO_VECTOR);
			devm_free_irq(&pdev->dev, eni_vdpa->vring[i].irq,
				      &eni_vdpa->vring[i]);
			eni_vdpa->vring[i].irq = VIRTIO_MSI_NO_VECTOR;
		}
	}

	if (eni_vdpa->config_irq != VIRTIO_MSI_NO_VECTOR) {
		vp_legacy_config_vector(ldev, VIRTIO_MSI_NO_VECTOR);
		devm_free_irq(&pdev->dev, eni_vdpa->config_irq, eni_vdpa);
		eni_vdpa->config_irq = VIRTIO_MSI_NO_VECTOR;
	}

	if (eni_vdpa->vectors) {
		pci_free_irq_vectors(pdev);
		eni_vdpa->vectors = 0;
	}
}

static irqreturn_t eni_vdpa_vq_handler(int irq, void *arg)
{
	struct eni_vring *vring = arg;

	if (vring->cb.callback)
		return vring->cb.callback(vring->cb.private);

	return IRQ_HANDLED;
}

static irqreturn_t eni_vdpa_config_handler(int irq, void *arg)
{
	struct eni_vdpa *eni_vdpa = arg;

	if (eni_vdpa->config_cb.callback)
		return eni_vdpa->config_cb.callback(eni_vdpa->config_cb.private);

	return IRQ_HANDLED;
}

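/* One MSI-X vector is requested per virtqueue plus one for config change
 * notifications. The allocation is exact (min_vecs == max_vecs), so setup
 * fails if the device cannot provide queues + 1 vectors.
 */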
static int eni_vdpa_request_irq(struct eni_vdpa *eni_vdpa)
{
	struct virtio_pci_legacy_device *ldev = &eni_vdpa->ldev;
	struct pci_dev *pdev = ldev->pci_dev;
	int i, ret, irq;
	int queues = eni_vdpa->queues;
	int vectors = queues + 1;

	ret = pci_alloc_irq_vectors(pdev, vectors, vectors, PCI_IRQ_MSIX);
	if (ret != vectors) {
		ENI_ERR(pdev,
			"failed to allocate irq vectors want %d but %d\n",
			vectors, ret);
		return ret;
	}

	eni_vdpa->vectors = vectors;

	for (i = 0; i < queues; i++) {
		snprintf(eni_vdpa->vring[i].msix_name, ENI_MSIX_NAME_SIZE,
			 "eni-vdpa[%s]-%d\n", pci_name(pdev), i);
		irq = pci_irq_vector(pdev, i);
		ret = devm_request_irq(&pdev->dev, irq,
				       eni_vdpa_vq_handler,
				       0, eni_vdpa->vring[i].msix_name,
				       &eni_vdpa->vring[i]);
		if (ret) {
			ENI_ERR(pdev, "failed to request irq for vq %d\n", i);
			goto err;
		}
		vp_legacy_queue_vector(ldev, i, i);
		eni_vdpa->vring[i].irq = irq;
	}

	snprintf(eni_vdpa->msix_name, ENI_MSIX_NAME_SIZE, "eni-vdpa[%s]-config\n",
		 pci_name(pdev));
	irq = pci_irq_vector(pdev, queues);
	ret = devm_request_irq(&pdev->dev, irq, eni_vdpa_config_handler, 0,
			       eni_vdpa->msix_name, eni_vdpa);
	if (ret) {
		ENI_ERR(pdev, "failed to request irq for config vq %d\n", i);
		goto err;
	}
	vp_legacy_config_vector(ldev, queues);
	eni_vdpa->config_irq = irq;

	return 0;
err:
	eni_vdpa_free_irq(eni_vdpa);
	return ret;
}

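/* Interrupts are only requested on the status transition into DRIVER_OK
 * and are released again when DRIVER_OK is cleared (or on reset), so no
 * vectors are held while the device is not being driven.
 */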
static void eni_vdpa_set_status(struct vdpa_device *vdpa, u8 status)
{
	struct eni_vdpa *eni_vdpa = vdpa_to_eni(vdpa);
	struct virtio_pci_legacy_device *ldev = &eni_vdpa->ldev;
	u8 s = eni_vdpa_get_status(vdpa);

	if (status & VIRTIO_CONFIG_S_DRIVER_OK &&
	    !(s & VIRTIO_CONFIG_S_DRIVER_OK)) {
		eni_vdpa_request_irq(eni_vdpa);
	}

	vp_legacy_set_status(ldev, status);

	if (!(status & VIRTIO_CONFIG_S_DRIVER_OK) &&
	    (s & VIRTIO_CONFIG_S_DRIVER_OK))
		eni_vdpa_free_irq(eni_vdpa);
}

static int eni_vdpa_reset(struct vdpa_device *vdpa)
{
	struct eni_vdpa *eni_vdpa = vdpa_to_eni(vdpa);
	struct virtio_pci_legacy_device *ldev = &eni_vdpa->ldev;
	u8 s = eni_vdpa_get_status(vdpa);

	vp_legacy_set_status(ldev, 0);

	if (s & VIRTIO_CONFIG_S_DRIVER_OK)
		eni_vdpa_free_irq(eni_vdpa);

	return 0;
}

static u16 eni_vdpa_get_vq_num_max(struct vdpa_device *vdpa)
{
	struct virtio_pci_legacy_device *ldev = vdpa_to_ldev(vdpa);

	return vp_legacy_get_queue_size(ldev, 0);
}

static u16 eni_vdpa_get_vq_num_min(struct vdpa_device *vdpa)
{
	struct virtio_pci_legacy_device *ldev = vdpa_to_ldev(vdpa);

	return vp_legacy_get_queue_size(ldev, 0);
}

static u16 eni_vdpa_get_vq_size(struct vdpa_device *vdpa, u16 qid)
{
	struct virtio_pci_legacy_device *ldev = vdpa_to_ldev(vdpa);

	return vp_legacy_get_queue_size(ldev, qid);
}

static int eni_vdpa_get_vq_state(struct vdpa_device *vdpa, u16 qid,
				 struct vdpa_vq_state *state)
{
	return -EOPNOTSUPP;
}

static int eni_vdpa_set_vq_state(struct vdpa_device *vdpa, u16 qid,
				 const struct vdpa_vq_state *state)
{
	struct virtio_pci_legacy_device *ldev = vdpa_to_ldev(vdpa);
	const struct vdpa_vq_state_split *split = &state->split;

	/* ENI is built upon the virtio-pci specification, which does not
	 * support setting the state of a virtqueue. But if the requested
	 * state happens to equal the device's initial state, we can let
	 * it go.
	 */
	if (!vp_legacy_get_queue_enable(ldev, qid) &&
	    split->avail_index == 0)
		return 0;

	return -EOPNOTSUPP;
}


static void eni_vdpa_set_vq_cb(struct vdpa_device *vdpa, u16 qid,
			       struct vdpa_callback *cb)
{
	struct eni_vdpa *eni_vdpa = vdpa_to_eni(vdpa);

	eni_vdpa->vring[qid].cb = *cb;
}

static void eni_vdpa_set_vq_ready(struct vdpa_device *vdpa, u16 qid,
				  bool ready)
{
	struct virtio_pci_legacy_device *ldev = vdpa_to_ldev(vdpa);

	/* ENI is a legacy virtio-pci device, and the legacy interface has
	 * no way to enable or disable an individual virtqueue. But we can
	 * disable a virtqueue by setting its address to 0.
	 */
	if (!ready)
		vp_legacy_set_queue_address(ldev, qid, 0);
}

static bool eni_vdpa_get_vq_ready(struct vdpa_device *vdpa, u16 qid)
{
	struct virtio_pci_legacy_device *ldev = vdpa_to_ldev(vdpa);

	return vp_legacy_get_queue_enable(ldev, qid);
}

static void eni_vdpa_set_vq_num(struct vdpa_device *vdpa, u16 qid,
				u32 num)
{
	struct virtio_pci_legacy_device *ldev = vdpa_to_ldev(vdpa);
	struct pci_dev *pdev = ldev->pci_dev;
	u16 n = vp_legacy_get_queue_size(ldev, qid);

	/* ENI is a legacy virtio-pci device, which does not allow the
	 * virtqueue size to be changed. Just report an error if someone
	 * tries to change it.
	 */
	if (num != n)
		ENI_ERR(pdev,
			"not support to set vq %u fixed num %u to %u\n",
			qid, n, num);
}

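/* A legacy virtio-pci device locates a ring by a single page frame number
 * written to VIRTIO_PCI_QUEUE_PFN; the available and used rings follow the
 * descriptor table at fixed, aligned offsets, so only desc_area is
 * programmed and driver_area/device_area are ignored.
 */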
static int eni_vdpa_set_vq_address(struct vdpa_device *vdpa, u16 qid,
				   u64 desc_area, u64 driver_area,
				   u64 device_area)
{
	struct virtio_pci_legacy_device *ldev = vdpa_to_ldev(vdpa);
	u32 pfn = desc_area >> VIRTIO_PCI_QUEUE_ADDR_SHIFT;

	vp_legacy_set_queue_address(ldev, qid, pfn);

	return 0;
}

static void eni_vdpa_kick_vq(struct vdpa_device *vdpa, u16 qid)
{
	struct eni_vdpa *eni_vdpa = vdpa_to_eni(vdpa);

	iowrite16(qid, eni_vdpa->vring[qid].notify);
}

static u32 eni_vdpa_get_device_id(struct vdpa_device *vdpa)
{
	struct virtio_pci_legacy_device *ldev = vdpa_to_ldev(vdpa);

	return ldev->id.device;
}

static u32 eni_vdpa_get_vendor_id(struct vdpa_device *vdpa)
{
	struct virtio_pci_legacy_device *ldev = vdpa_to_ldev(vdpa);

	return ldev->id.vendor;
}

static u32 eni_vdpa_get_vq_align(struct vdpa_device *vdpa)
{
	return VIRTIO_PCI_VRING_ALIGN;
}

static size_t eni_vdpa_get_config_size(struct vdpa_device *vdpa)
{
	return sizeof(struct virtio_net_config);
}


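/* The legacy device-specific config space sits after the common header,
 * whose size depends on whether MSI-X is enabled; that is why
 * VIRTIO_PCI_CONFIG_OFF() is passed eni_vdpa->vectors. Accesses are
 * performed one byte at a time with ioread8()/iowrite8().
 */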
static void eni_vdpa_get_config(struct vdpa_device *vdpa,
				unsigned int offset,
				void *buf, unsigned int len)
{
	struct eni_vdpa *eni_vdpa = vdpa_to_eni(vdpa);
	struct virtio_pci_legacy_device *ldev = &eni_vdpa->ldev;
	void __iomem *ioaddr = ldev->ioaddr +
		VIRTIO_PCI_CONFIG_OFF(eni_vdpa->vectors) +
		offset;
	u8 *p = buf;
	int i;

	for (i = 0; i < len; i++)
		*p++ = ioread8(ioaddr + i);
}

static void eni_vdpa_set_config(struct vdpa_device *vdpa,
				unsigned int offset, const void *buf,
				unsigned int len)
{
	struct eni_vdpa *eni_vdpa = vdpa_to_eni(vdpa);
	struct virtio_pci_legacy_device *ldev = &eni_vdpa->ldev;
	void __iomem *ioaddr = ldev->ioaddr +
		VIRTIO_PCI_CONFIG_OFF(eni_vdpa->vectors) +
		offset;
	const u8 *p = buf;
	int i;

	for (i = 0; i < len; i++)
		iowrite8(*p++, ioaddr + i);
}

static void eni_vdpa_set_config_cb(struct vdpa_device *vdpa,
				   struct vdpa_callback *cb)
{
	struct eni_vdpa *eni_vdpa = vdpa_to_eni(vdpa);

	eni_vdpa->config_cb = *cb;
}

static const struct vdpa_config_ops eni_vdpa_ops = {
	.get_device_features = eni_vdpa_get_device_features,
	.set_driver_features = eni_vdpa_set_driver_features,
	.get_driver_features = eni_vdpa_get_driver_features,
	.get_status = eni_vdpa_get_status,
	.set_status = eni_vdpa_set_status,
	.reset = eni_vdpa_reset,
	.get_vq_num_max = eni_vdpa_get_vq_num_max,
	.get_vq_num_min = eni_vdpa_get_vq_num_min,
	.get_vq_size = eni_vdpa_get_vq_size,
	.get_vq_state = eni_vdpa_get_vq_state,
	.set_vq_state = eni_vdpa_set_vq_state,
	.set_vq_cb = eni_vdpa_set_vq_cb,
	.set_vq_ready = eni_vdpa_set_vq_ready,
	.get_vq_ready = eni_vdpa_get_vq_ready,
	.set_vq_num = eni_vdpa_set_vq_num,
	.set_vq_address = eni_vdpa_set_vq_address,
	.kick_vq = eni_vdpa_kick_vq,
	.get_device_id = eni_vdpa_get_device_id,
	.get_vendor_id = eni_vdpa_get_vendor_id,
	.get_vq_align = eni_vdpa_get_vq_align,
	.get_config_size = eni_vdpa_get_config_size,
	.get_config = eni_vdpa_get_config,
	.set_config = eni_vdpa_set_config,
	.set_config_cb = eni_vdpa_set_config_cb,
	.get_vq_irq = eni_vdpa_get_vq_irq,
};


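/* The virtqueue count is derived from the device feature bits: two queues
 * per RX/TX pair (max_virtqueue_pairs read from config space when
 * VIRTIO_NET_F_MQ is offered, otherwise a single pair), plus one control
 * virtqueue when VIRTIO_NET_F_CTRL_VQ is offered.
 */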
static u16 eni_vdpa_get_num_queues(struct eni_vdpa *eni_vdpa)
{
	struct virtio_pci_legacy_device *ldev = &eni_vdpa->ldev;
	u32 features = vp_legacy_get_features(ldev);
	u16 num = 2;

	if (features & BIT_ULL(VIRTIO_NET_F_MQ)) {
		__virtio16 max_virtqueue_pairs;

		eni_vdpa_get_config(&eni_vdpa->vdpa,
			offsetof(struct virtio_net_config, max_virtqueue_pairs),
			&max_virtqueue_pairs,
			sizeof(max_virtqueue_pairs));
		num = 2 * __virtio16_to_cpu(virtio_legacy_is_little_endian(),
				max_virtqueue_pairs);
	}

	if (features & BIT_ULL(VIRTIO_NET_F_CTRL_VQ))
		num += 1;

	return num;
}

static int eni_vdpa_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct device *dev = &pdev->dev;
	struct eni_vdpa *eni_vdpa;
	struct virtio_pci_legacy_device *ldev;
	int ret, i;

	ret = pcim_enable_device(pdev);
	if (ret)
		return ret;

	eni_vdpa = vdpa_alloc_device(struct eni_vdpa, vdpa,
				     dev, &eni_vdpa_ops, 1, 1, NULL, false);
	if (IS_ERR(eni_vdpa)) {
		ENI_ERR(pdev, "failed to allocate vDPA structure\n");
		return PTR_ERR(eni_vdpa);
	}

	ldev = &eni_vdpa->ldev;
	ldev->pci_dev = pdev;

	ret = vp_legacy_probe(ldev);
	if (ret) {
		ENI_ERR(pdev, "failed to probe legacy PCI device\n");
		goto err;
	}

	pci_set_master(pdev);
	pci_set_drvdata(pdev, eni_vdpa);

	eni_vdpa->vdpa.dma_dev = &pdev->dev;
	eni_vdpa->queues = eni_vdpa_get_num_queues(eni_vdpa);

	eni_vdpa->vring = devm_kcalloc(&pdev->dev, eni_vdpa->queues,
				       sizeof(*eni_vdpa->vring),
				       GFP_KERNEL);
	if (!eni_vdpa->vring) {
		ret = -ENOMEM;
		ENI_ERR(pdev, "failed to allocate virtqueues\n");
		goto err_remove_vp_legacy;
	}

	for (i = 0; i < eni_vdpa->queues; i++) {
		eni_vdpa->vring[i].irq = VIRTIO_MSI_NO_VECTOR;
		eni_vdpa->vring[i].notify = ldev->ioaddr + VIRTIO_PCI_QUEUE_NOTIFY;
	}
	eni_vdpa->config_irq = VIRTIO_MSI_NO_VECTOR;

	ret = vdpa_register_device(&eni_vdpa->vdpa, eni_vdpa->queues);
	if (ret) {
		ENI_ERR(pdev, "failed to register to vdpa bus\n");
		goto err_remove_vp_legacy;
	}

	return 0;

err_remove_vp_legacy:
	vp_legacy_remove(&eni_vdpa->ldev);
err:
	put_device(&eni_vdpa->vdpa.dev);
	return ret;
}

static void eni_vdpa_remove(struct pci_dev *pdev)
{
	struct eni_vdpa *eni_vdpa = pci_get_drvdata(pdev);

	vdpa_unregister_device(&eni_vdpa->vdpa);
	vp_legacy_remove(&eni_vdpa->ldev);
}

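/* ENI shows up on the PCI bus with the transitional virtio-net device ID
 * and the Red Hat/Qumranet vendor ID, so the driver matches on that pair
 * together with the virtio-net subsystem device ID.
 */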
static struct pci_device_id eni_pci_ids[] = {
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_REDHAT_QUMRANET,
			 VIRTIO_TRANS_ID_NET,
			 PCI_SUBVENDOR_ID_REDHAT_QUMRANET,
			 VIRTIO_ID_NET) },
	{ 0 },
};

static struct pci_driver eni_vdpa_driver = {
	.name = "alibaba-eni-vdpa",
	.id_table = eni_pci_ids,
	.probe = eni_vdpa_probe,
	.remove = eni_vdpa_remove,
};

module_pci_driver(eni_vdpa_driver);

MODULE_AUTHOR("Wu Zongyong <wuzongyong@linux.alibaba.com>");
MODULE_DESCRIPTION("Alibaba ENI vDPA driver");
MODULE_LICENSE("GPL v2");
560 | |