// SPDX-License-Identifier: GPL-2.0-only
/*
 * Intel IFC VF NIC driver for virtio dataplane offloading
 *
 * Copyright (C) 2020 Intel Corporation.
 *
 * Author: Zhu Lingshan <lingshan.zhu@intel.com>
 *
 */

#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/sysfs.h>
#include "ifcvf_base.h"

#define DRIVER_AUTHOR "Intel Corporation"
#define IFCVF_DRIVER_NAME "ifcvf"

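/*
 * Interrupt dispatch: depending on how many MSI-X vectors the device
 * provides, the driver installs one of the handler flavors below --
 * a dedicated config-change handler, a per-virtqueue handler, a
 * handler shared by all virtqueues, or one handler for the whole device.
 */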
static irqreturn_t ifcvf_config_changed(int irq, void *arg)
{
	struct ifcvf_hw *vf = arg;

	if (vf->config_cb.callback)
		return vf->config_cb.callback(vf->config_cb.private);

	return IRQ_HANDLED;
}

static irqreturn_t ifcvf_vq_intr_handler(int irq, void *arg)
{
	struct vring_info *vring = arg;

	if (vring->cb.callback)
		return vring->cb.callback(vring->cb.private);

	return IRQ_HANDLED;
}

static irqreturn_t ifcvf_vqs_reused_intr_handler(int irq, void *arg)
{
	struct ifcvf_hw *vf = arg;
	struct vring_info *vring;
	int i;

	for (i = 0; i < vf->nr_vring; i++) {
		vring = &vf->vring[i];
		if (vring->cb.callback)
			vring->cb.callback(vring->cb.private);
	}

	return IRQ_HANDLED;
}

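/*
 * Device-shared mode: one interrupt covers both the config space and
 * all virtqueues. The ISR status byte (read-to-clear per the virtio
 * PCI spec) tells us whether a config change is pending; the virtqueue
 * callbacks are then serviced unconditionally.
 */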
static irqreturn_t ifcvf_dev_intr_handler(int irq, void *arg)
{
	struct ifcvf_hw *vf = arg;
	u8 isr;

	isr = vp_ioread8(vf->isr);
	if (isr & VIRTIO_PCI_ISR_CONFIG)
		ifcvf_config_changed(irq, arg);

	return ifcvf_vqs_reused_intr_handler(irq, arg);
}

static void ifcvf_free_irq_vectors(void *data)
{
	pci_free_irq_vectors(data);
}

static void ifcvf_free_per_vq_irq(struct ifcvf_hw *vf)
{
	struct pci_dev *pdev = vf->pdev;
	int i;

	for (i = 0; i < vf->nr_vring; i++) {
		if (vf->vring[i].irq != -EINVAL) {
			devm_free_irq(&pdev->dev, vf->vring[i].irq, &vf->vring[i]);
			vf->vring[i].irq = -EINVAL;
		}
	}
}

static void ifcvf_free_vqs_reused_irq(struct ifcvf_hw *vf)
{
	struct pci_dev *pdev = vf->pdev;

	if (vf->vqs_reused_irq != -EINVAL) {
		devm_free_irq(&pdev->dev, vf->vqs_reused_irq, vf);
		vf->vqs_reused_irq = -EINVAL;
	}
}

static void ifcvf_free_vq_irq(struct ifcvf_hw *vf)
{
	if (vf->msix_vector_status == MSIX_VECTOR_PER_VQ_AND_CONFIG)
		ifcvf_free_per_vq_irq(vf);
	else
		ifcvf_free_vqs_reused_irq(vf);
}

static void ifcvf_free_config_irq(struct ifcvf_hw *vf)
{
	struct pci_dev *pdev = vf->pdev;

	if (vf->config_irq == -EINVAL)
		return;

	/* If the irq is shared by all vqs and the config interrupt,
	 * it was already freed in ifcvf_free_vq_irq(), so the config irq
	 * only needs freeing when msix_vector_status != MSIX_VECTOR_DEV_SHARED.
	 */
	if (vf->msix_vector_status != MSIX_VECTOR_DEV_SHARED) {
		devm_free_irq(&pdev->dev, vf->config_irq, vf);
		vf->config_irq = -EINVAL;
	}
}

static void ifcvf_free_irq(struct ifcvf_hw *vf)
{
	struct pci_dev *pdev = vf->pdev;

	ifcvf_free_vq_irq(vf);
	ifcvf_free_config_irq(vf);
	ifcvf_free_irq_vectors(pdev);
	vf->num_msix_vectors = 0;
}

/* ifcvf MSI-X vector allocator: this helper tries to allocate
 * vectors for all virtqueues and the config interrupt.
 * It returns the number of allocated vectors on success,
 * or a negative error code on failure.
 */
static int ifcvf_alloc_vectors(struct ifcvf_hw *vf)
{
	struct pci_dev *pdev = vf->pdev;
	int max_intr, ret;

	/* all queues and config interrupt */
	max_intr = vf->nr_vring + 1;
	ret = pci_alloc_irq_vectors(pdev, 1, max_intr, PCI_IRQ_MSIX | PCI_IRQ_AFFINITY);
	if (ret < 0) {
		IFCVF_ERR(pdev, "Failed to alloc IRQ vectors\n");
		return ret;
	}

	if (ret < max_intr)
		IFCVF_INFO(pdev,
			   "Requested %u vectors, however only %u allocated, lower performance\n",
			   max_intr, ret);

	return ret;
}

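/*
 * Per-vq mode (MSIX_VECTOR_PER_VQ_AND_CONFIG): virtqueue i is bound to
 * MSI-X vector i, and the config interrupt takes the last vector. For
 * example, with nr_vring == 4:
 *
 *	vector 0..3 -> vq 0..3
 *	vector 4    -> config change
 */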
static int ifcvf_request_per_vq_irq(struct ifcvf_hw *vf)
{
	struct pci_dev *pdev = vf->pdev;
	int i, vector, ret, irq;

	vf->vqs_reused_irq = -EINVAL;
	for (i = 0; i < vf->nr_vring; i++) {
		snprintf(vf->vring[i].msix_name, 256, "ifcvf[%s]-%d\n", pci_name(pdev), i);
		vector = i;
		irq = pci_irq_vector(pdev, vector);
		ret = devm_request_irq(&pdev->dev, irq,
				       ifcvf_vq_intr_handler, 0,
				       vf->vring[i].msix_name,
				       &vf->vring[i]);
		if (ret) {
			IFCVF_ERR(pdev, "Failed to request irq for vq %d\n", i);
			goto err;
		}

		vf->vring[i].irq = irq;
		ret = ifcvf_set_vq_vector(vf, i, vector);
		if (ret == VIRTIO_MSI_NO_VECTOR) {
			IFCVF_ERR(pdev, "No msix vector for vq %u\n", i);
			goto err;
		}
	}

	return 0;
err:
	ifcvf_free_irq(vf);

	return -EFAULT;
}

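/*
 * Shared-vq mode (MSIX_VECTOR_SHARED_VQ_AND_CONFIG): vector 0 is shared
 * by all virtqueues, leaving vector 1 for the config interrupt. The
 * per-vq irq fields stay -EINVAL so that get_vq_irq() reports no
 * dedicated irq for any queue.
 */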
static int ifcvf_request_vqs_reused_irq(struct ifcvf_hw *vf)
{
	struct pci_dev *pdev = vf->pdev;
	int i, vector, ret, irq;

	vector = 0;
	snprintf(vf->vring[0].msix_name, 256, "ifcvf[%s]-vqs-reused-irq\n", pci_name(pdev));
	irq = pci_irq_vector(pdev, vector);
	ret = devm_request_irq(&pdev->dev, irq,
			       ifcvf_vqs_reused_intr_handler, 0,
			       vf->vring[0].msix_name, vf);
	if (ret) {
		IFCVF_ERR(pdev, "Failed to request reused irq for the device\n");
		goto err;
	}

	vf->vqs_reused_irq = irq;
	for (i = 0; i < vf->nr_vring; i++) {
		vf->vring[i].irq = -EINVAL;
		ret = ifcvf_set_vq_vector(vf, i, vector);
		if (ret == VIRTIO_MSI_NO_VECTOR) {
			IFCVF_ERR(pdev, "No msix vector for vq %u\n", i);
			goto err;
		}
	}

	return 0;
err:
	ifcvf_free_irq(vf);

	return -EFAULT;
}

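/*
 * Device-shared mode (MSIX_VECTOR_DEV_SHARED): only one vector is
 * available, so vector 0 is programmed for every virtqueue and for the
 * config interrupt alike, with ifcvf_dev_intr_handler() doing the
 * demultiplexing.
 */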
static int ifcvf_request_dev_irq(struct ifcvf_hw *vf)
{
	struct pci_dev *pdev = vf->pdev;
	int i, vector, ret, irq;

	vector = 0;
	snprintf(vf->vring[0].msix_name, 256, "ifcvf[%s]-dev-irq\n", pci_name(pdev));
	irq = pci_irq_vector(pdev, vector);
	ret = devm_request_irq(&pdev->dev, irq,
			       ifcvf_dev_intr_handler, 0,
			       vf->vring[0].msix_name, vf);
	if (ret) {
		IFCVF_ERR(pdev, "Failed to request irq for the device\n");
		goto err;
	}

	vf->vqs_reused_irq = irq;
	for (i = 0; i < vf->nr_vring; i++) {
		vf->vring[i].irq = -EINVAL;
		ret = ifcvf_set_vq_vector(vf, i, vector);
		if (ret == VIRTIO_MSI_NO_VECTOR) {
			IFCVF_ERR(pdev, "No msix vector for vq %u\n", i);
			goto err;
		}
	}

	vf->config_irq = irq;
	ret = ifcvf_set_config_vector(vf, vector);
	if (ret == VIRTIO_MSI_NO_VECTOR) {
		IFCVF_ERR(pdev, "No msix vector for device config\n");
		goto err;
	}

	return 0;
err:
	ifcvf_free_irq(vf);

	return -EFAULT;
}

static int ifcvf_request_vq_irq(struct ifcvf_hw *vf)
{
	int ret;

	if (vf->msix_vector_status == MSIX_VECTOR_PER_VQ_AND_CONFIG)
		ret = ifcvf_request_per_vq_irq(vf);
	else
		ret = ifcvf_request_vqs_reused_irq(vf);

	return ret;
}

static int ifcvf_request_config_irq(struct ifcvf_hw *vf)
{
	struct pci_dev *pdev = vf->pdev;
	int config_vector, ret;

	if (vf->msix_vector_status == MSIX_VECTOR_PER_VQ_AND_CONFIG)
		config_vector = vf->nr_vring;
	else if (vf->msix_vector_status == MSIX_VECTOR_SHARED_VQ_AND_CONFIG)
		/* vector 0 for vqs and 1 for config interrupt */
		config_vector = 1;
	else if (vf->msix_vector_status == MSIX_VECTOR_DEV_SHARED)
		/* re-use the vqs vector */
		return 0;
	else
		return -EINVAL;

	snprintf(vf->config_msix_name, 256, "ifcvf[%s]-config\n",
		 pci_name(pdev));
	vf->config_irq = pci_irq_vector(pdev, config_vector);
	ret = devm_request_irq(&pdev->dev, vf->config_irq,
			       ifcvf_config_changed, 0,
			       vf->config_msix_name, vf);
	if (ret) {
		IFCVF_ERR(pdev, "Failed to request config irq\n");
		goto err;
	}

	ret = ifcvf_set_config_vector(vf, config_vector);
	if (ret == VIRTIO_MSI_NO_VECTOR) {
		IFCVF_ERR(pdev, "No msix vector for device config\n");
		goto err;
	}

	return 0;
err:
	ifcvf_free_irq(vf);

	return -EFAULT;
}

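/*
 * Pick the interrupt mode from the number of vectors we actually got:
 *
 *	nvectors == nr_vring + 1	-> MSIX_VECTOR_PER_VQ_AND_CONFIG
 *	1 < nvectors < nr_vring + 1	-> MSIX_VECTOR_SHARED_VQ_AND_CONFIG
 *	nvectors == 1			-> MSIX_VECTOR_DEV_SHARED
 */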
static int ifcvf_request_irq(struct ifcvf_hw *vf)
{
	int nvectors, ret, max_intr;

	nvectors = ifcvf_alloc_vectors(vf);
	if (nvectors <= 0)
		return -EFAULT;

	vf->msix_vector_status = MSIX_VECTOR_PER_VQ_AND_CONFIG;
	max_intr = vf->nr_vring + 1;
	if (nvectors < max_intr)
		vf->msix_vector_status = MSIX_VECTOR_SHARED_VQ_AND_CONFIG;

	if (nvectors == 1) {
		vf->msix_vector_status = MSIX_VECTOR_DEV_SHARED;
		ret = ifcvf_request_dev_irq(vf);

		return ret;
	}

	ret = ifcvf_request_vq_irq(vf);
	if (ret)
		return ret;

	ret = ifcvf_request_config_irq(vf);
	if (ret)
		return ret;

	vf->num_msix_vectors = nvectors;

	return 0;
}

static struct ifcvf_adapter *vdpa_to_adapter(struct vdpa_device *vdpa_dev)
{
	return container_of(vdpa_dev, struct ifcvf_adapter, vdpa);
}

static struct ifcvf_hw *vdpa_to_vf(struct vdpa_device *vdpa_dev)
{
	struct ifcvf_adapter *adapter = vdpa_to_adapter(vdpa_dev);

	return adapter->vf;
}

static u64 ifcvf_vdpa_get_device_features(struct vdpa_device *vdpa_dev)
{
	struct ifcvf_adapter *adapter = vdpa_to_adapter(vdpa_dev);
	struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
	struct pci_dev *pdev = adapter->pdev;
	u32 type = vf->dev_type;
	u64 features;

	if (type == VIRTIO_ID_NET || type == VIRTIO_ID_BLOCK)
		features = ifcvf_get_dev_features(vf);
	else {
		features = 0;
		IFCVF_ERR(pdev, "VIRTIO ID %u not supported\n", vf->dev_type);
	}

	return features;
}

static int ifcvf_vdpa_set_driver_features(struct vdpa_device *vdpa_dev, u64 features)
{
	struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
	int ret;

	ret = ifcvf_verify_min_features(vf, features);
	if (ret)
		return ret;

	ifcvf_set_driver_features(vf, features);

	return 0;
}

static u64 ifcvf_vdpa_get_driver_features(struct vdpa_device *vdpa_dev)
{
	struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
	u64 features;

	features = ifcvf_get_driver_features(vf);

	return features;
}

static u8 ifcvf_vdpa_get_status(struct vdpa_device *vdpa_dev)
{
	struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

	return ifcvf_get_status(vf);
}

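/*
 * Interrupts are requested lazily, on the status transition to
 * DRIVER_OK, rather than at probe time; ifcvf_vdpa_reset() frees them
 * again, so every DRIVER_OK cycle re-runs the vector allocation.
 */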
static void ifcvf_vdpa_set_status(struct vdpa_device *vdpa_dev, u8 status)
{
	struct ifcvf_hw *vf;
	u8 status_old;
	int ret;

	vf = vdpa_to_vf(vdpa_dev);
	status_old = ifcvf_get_status(vf);

	if (status_old == status)
		return;

	if ((status & VIRTIO_CONFIG_S_DRIVER_OK) &&
	    !(status_old & VIRTIO_CONFIG_S_DRIVER_OK)) {
		ret = ifcvf_request_irq(vf);
		if (ret) {
			IFCVF_ERR(vf->pdev, "failed to request irq with error %d\n", ret);
			return;
		}
	}

	ifcvf_set_status(vf, status);
}

static int ifcvf_vdpa_reset(struct vdpa_device *vdpa_dev)
{
	struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
	u8 status = ifcvf_get_status(vf);

	ifcvf_stop(vf);

	if (status & VIRTIO_CONFIG_S_DRIVER_OK)
		ifcvf_free_irq(vf);

	ifcvf_reset(vf);

	return 0;
}

static u16 ifcvf_vdpa_get_vq_num_max(struct vdpa_device *vdpa_dev)
{
	struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

	return ifcvf_get_max_vq_size(vf);
}

static u16 ifcvf_vdpa_get_vq_num_min(struct vdpa_device *vdpa_dev)
{
	return IFCVF_MIN_VQ_SIZE;
}

static int ifcvf_vdpa_get_vq_state(struct vdpa_device *vdpa_dev, u16 qid,
				   struct vdpa_vq_state *state)
{
	struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

	state->split.avail_index = ifcvf_get_vq_state(vf, qid);
	return 0;
}

static int ifcvf_vdpa_set_vq_state(struct vdpa_device *vdpa_dev, u16 qid,
				   const struct vdpa_vq_state *state)
{
	struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

	return ifcvf_set_vq_state(vf, qid, state->split.avail_index);
}

static void ifcvf_vdpa_set_vq_cb(struct vdpa_device *vdpa_dev, u16 qid,
				 struct vdpa_callback *cb)
{
	struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

	vf->vring[qid].cb = *cb;
}

static void ifcvf_vdpa_set_vq_ready(struct vdpa_device *vdpa_dev,
				    u16 qid, bool ready)
{
	struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

	ifcvf_set_vq_ready(vf, qid, ready);
}

static bool ifcvf_vdpa_get_vq_ready(struct vdpa_device *vdpa_dev, u16 qid)
{
	struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

	return ifcvf_get_vq_ready(vf, qid);
}

static void ifcvf_vdpa_set_vq_num(struct vdpa_device *vdpa_dev, u16 qid,
				  u32 num)
{
	struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

	ifcvf_set_vq_num(vf, qid, num);
}

static int ifcvf_vdpa_set_vq_address(struct vdpa_device *vdpa_dev, u16 qid,
				     u64 desc_area, u64 driver_area,
				     u64 device_area)
{
	struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

	return ifcvf_set_vq_address(vf, qid, desc_area, driver_area, device_area);
}

static void ifcvf_vdpa_kick_vq(struct vdpa_device *vdpa_dev, u16 qid)
{
	struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

	ifcvf_notify_queue(vf, qid);
}

static u32 ifcvf_vdpa_get_generation(struct vdpa_device *vdpa_dev)
{
	struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

	return vp_ioread8(&vf->common_cfg->config_generation);
}

static u32 ifcvf_vdpa_get_device_id(struct vdpa_device *vdpa_dev)
{
	struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

	return vf->dev_type;
}

static u32 ifcvf_vdpa_get_vendor_id(struct vdpa_device *vdpa_dev)
{
	struct ifcvf_adapter *adapter = vdpa_to_adapter(vdpa_dev);
	struct pci_dev *pdev = adapter->pdev;

	return pdev->subsystem_vendor;
}

static u32 ifcvf_vdpa_get_vq_align(struct vdpa_device *vdpa_dev)
{
	return IFCVF_QUEUE_ALIGNMENT;
}

static size_t ifcvf_vdpa_get_config_size(struct vdpa_device *vdpa_dev)
{
	struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

	return vf->config_size;
}

static u32 ifcvf_vdpa_get_vq_group(struct vdpa_device *vdpa, u16 idx)
{
	return 0;
}

static void ifcvf_vdpa_get_config(struct vdpa_device *vdpa_dev,
				  unsigned int offset,
				  void *buf, unsigned int len)
{
	struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

	ifcvf_read_dev_config(vf, offset, buf, len);
}

static void ifcvf_vdpa_set_config(struct vdpa_device *vdpa_dev,
				  unsigned int offset, const void *buf,
				  unsigned int len)
{
	struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

	ifcvf_write_dev_config(vf, offset, buf, len);
}

static void ifcvf_vdpa_set_config_cb(struct vdpa_device *vdpa_dev,
				     struct vdpa_callback *cb)
{
	struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

	vf->config_cb.callback = cb->callback;
	vf->config_cb.private = cb->private;
}

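/*
 * A dedicated per-vq irq exists only in per-vq vector mode, where the
 * upper layer can use it e.g. for direct interrupt injection; with a
 * shared vector there is no queue-specific irq to expose, so return
 * -EINVAL and let callers fall back to the callback path.
 */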
static int ifcvf_vdpa_get_vq_irq(struct vdpa_device *vdpa_dev,
				 u16 qid)
{
	struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

	if (vf->vqs_reused_irq < 0)
		return vf->vring[qid].irq;
	else
		return -EINVAL;
}

static u16 ifcvf_vdpa_get_vq_size(struct vdpa_device *vdpa_dev,
				  u16 qid)
{
	struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

	return ifcvf_get_vq_size(vf, qid);
}

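/*
 * Expose the queue notify (doorbell) register so it can be mapped by
 * the upper layer. When notify_off_multiplier is 0 all queues share a
 * single notify address, so advertise a full page; otherwise each
 * queue's doorbell is notify_off_multiplier bytes apart.
 */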
static struct vdpa_notification_area ifcvf_get_vq_notification(struct vdpa_device *vdpa_dev,
							       u16 idx)
{
	struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
	struct vdpa_notification_area area;

	area.addr = vf->vring[idx].notify_pa;
	if (!vf->notify_off_multiplier)
		area.size = PAGE_SIZE;
	else
		area.size = vf->notify_off_multiplier;

	return area;
}

/*
 * IFCVF currently has no on-chip IOMMU, so set_map()/dma_map()/
 * dma_unmap() are not implemented.
 */
static const struct vdpa_config_ops ifc_vdpa_ops = {
	.get_device_features = ifcvf_vdpa_get_device_features,
	.set_driver_features = ifcvf_vdpa_set_driver_features,
	.get_driver_features = ifcvf_vdpa_get_driver_features,
	.get_status = ifcvf_vdpa_get_status,
	.set_status = ifcvf_vdpa_set_status,
	.reset = ifcvf_vdpa_reset,
	.get_vq_num_max = ifcvf_vdpa_get_vq_num_max,
	.get_vq_num_min = ifcvf_vdpa_get_vq_num_min,
	.get_vq_state = ifcvf_vdpa_get_vq_state,
	.set_vq_state = ifcvf_vdpa_set_vq_state,
	.set_vq_cb = ifcvf_vdpa_set_vq_cb,
	.set_vq_ready = ifcvf_vdpa_set_vq_ready,
	.get_vq_ready = ifcvf_vdpa_get_vq_ready,
	.set_vq_num = ifcvf_vdpa_set_vq_num,
	.set_vq_address = ifcvf_vdpa_set_vq_address,
	.get_vq_irq = ifcvf_vdpa_get_vq_irq,
	.get_vq_size = ifcvf_vdpa_get_vq_size,
	.kick_vq = ifcvf_vdpa_kick_vq,
	.get_generation = ifcvf_vdpa_get_generation,
	.get_device_id = ifcvf_vdpa_get_device_id,
	.get_vendor_id = ifcvf_vdpa_get_vendor_id,
	.get_vq_align = ifcvf_vdpa_get_vq_align,
	.get_vq_group = ifcvf_vdpa_get_vq_group,
	.get_config_size = ifcvf_vdpa_get_config_size,
	.get_config = ifcvf_vdpa_get_config,
	.set_config = ifcvf_vdpa_set_config,
	.set_config_cb = ifcvf_vdpa_set_config_cb,
	.get_vq_notification = ifcvf_get_vq_notification,
};

static struct virtio_device_id id_table_net[] = {
	{VIRTIO_ID_NET, VIRTIO_DEV_ANY_ID},
	{0},
};

static struct virtio_device_id id_table_blk[] = {
	{VIRTIO_ID_BLOCK, VIRTIO_DEV_ANY_ID},
	{0},
};

static u32 get_dev_type(struct pci_dev *pdev)
{
	u32 dev_type;

	/* This driver drives both modern virtio devices and transitional
	 * devices in modern mode.
	 * vDPA requires feature bit VIRTIO_F_ACCESS_PLATFORM, so legacy
	 * devices and transitional devices in legacy mode will not work
	 * for vDPA; this driver does not drive devices with the legacy
	 * interface.
	 */
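	/*
	 * Per the virtio 1.x spec, modern devices use PCI device IDs of
	 * 0x1040 + <virtio device type>, e.g. 0x1041 for virtio-net
	 * (VIRTIO_ID_NET == 1); transitional devices (IDs below 0x1040)
	 * carry the device type in the PCI subsystem device ID instead.
	 */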

	if (pdev->device < 0x1040)
		dev_type = pdev->subsystem_device;
	else
		dev_type = pdev->device - 0x1040;

	return dev_type;
}

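/*
 * Management-device callback that creates the vDPA device backed by
 * this VF: vdpa_alloc_device() allocates the adapter with its embedded
 * struct vdpa_device, any provisioned feature mask (e.g. from
 * "vdpa dev add ... device_features ...") is validated against the
 * hardware features, and _vdpa_register_device() publishes the device
 * on the vDPA bus.
 */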
static int ifcvf_vdpa_dev_add(struct vdpa_mgmt_dev *mdev, const char *name,
			      const struct vdpa_dev_set_config *config)
{
	struct ifcvf_vdpa_mgmt_dev *ifcvf_mgmt_dev;
	struct ifcvf_adapter *adapter;
	struct vdpa_device *vdpa_dev;
	struct pci_dev *pdev;
	struct ifcvf_hw *vf;
	u64 device_features;
	int ret;

	ifcvf_mgmt_dev = container_of(mdev, struct ifcvf_vdpa_mgmt_dev, mdev);
	vf = &ifcvf_mgmt_dev->vf;
	pdev = vf->pdev;
	adapter = vdpa_alloc_device(struct ifcvf_adapter, vdpa,
				    &pdev->dev, &ifc_vdpa_ops, 1, 1, NULL, false);
	if (IS_ERR(adapter)) {
		IFCVF_ERR(pdev, "Failed to allocate vDPA structure");
		return PTR_ERR(adapter);
	}

	ifcvf_mgmt_dev->adapter = adapter;
	adapter->pdev = pdev;
	adapter->vdpa.dma_dev = &pdev->dev;
	adapter->vdpa.mdev = mdev;
	adapter->vf = vf;
	vdpa_dev = &adapter->vdpa;

	device_features = vf->hw_features;
	if (config->mask & BIT_ULL(VDPA_ATTR_DEV_FEATURES)) {
		if (config->device_features & ~device_features) {
			IFCVF_ERR(pdev, "The provisioned features 0x%llx are not supported by this device with features 0x%llx\n",
				  config->device_features, device_features);
			/* drop the vdpa device allocated above on this error path */
			put_device(&adapter->vdpa.dev);
			return -EINVAL;
		}
		device_features &= config->device_features;
	}
	vf->dev_features = device_features;

	if (name)
		ret = dev_set_name(&vdpa_dev->dev, "%s", name);
	else
		ret = dev_set_name(&vdpa_dev->dev, "vdpa%u", vdpa_dev->index);

	ret = _vdpa_register_device(&adapter->vdpa, vf->nr_vring);
	if (ret) {
		put_device(&adapter->vdpa.dev);
		IFCVF_ERR(pdev, "Failed to register to vDPA bus");
		return ret;
	}

	return 0;
}

static void ifcvf_vdpa_dev_del(struct vdpa_mgmt_dev *mdev, struct vdpa_device *dev)
{
	struct ifcvf_vdpa_mgmt_dev *ifcvf_mgmt_dev;

	ifcvf_mgmt_dev = container_of(mdev, struct ifcvf_vdpa_mgmt_dev, mdev);
	_vdpa_unregister_device(dev);
	ifcvf_mgmt_dev->adapter = NULL;
}

static const struct vdpa_mgmtdev_ops ifcvf_vdpa_mgmt_dev_ops = {
	.dev_add = ifcvf_vdpa_dev_add,
	.dev_del = ifcvf_vdpa_dev_del
};

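/*
 * PCI probe: maps the BARs the VF uses (0, 2 and 4), initializes the
 * hardware layer and registers the management device. The vDPA device
 * itself is only created later, via ifcvf_vdpa_dev_add().
 */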
static int ifcvf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct ifcvf_vdpa_mgmt_dev *ifcvf_mgmt_dev;
	struct device *dev = &pdev->dev;
	struct ifcvf_hw *vf;
	u32 dev_type;
	int ret, i;

	ret = pcim_enable_device(pdev);
	if (ret) {
		IFCVF_ERR(pdev, "Failed to enable device\n");
		return ret;
	}

	ret = pcim_iomap_regions(pdev, BIT(0) | BIT(2) | BIT(4),
				 IFCVF_DRIVER_NAME);
	if (ret) {
		IFCVF_ERR(pdev, "Failed to request MMIO region\n");
		return ret;
	}

	ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
	if (ret) {
		IFCVF_ERR(pdev, "No usable DMA configuration\n");
		return ret;
	}

	ret = devm_add_action_or_reset(dev, ifcvf_free_irq_vectors, pdev);
	if (ret) {
		IFCVF_ERR(pdev,
			  "Failed to add devres for freeing irq vectors\n");
		return ret;
	}

	pci_set_master(pdev);
	ifcvf_mgmt_dev = kzalloc(sizeof(struct ifcvf_vdpa_mgmt_dev), GFP_KERNEL);
	if (!ifcvf_mgmt_dev) {
		IFCVF_ERR(pdev, "Failed to alloc memory for the vDPA management device\n");
		return -ENOMEM;
	}

	vf = &ifcvf_mgmt_dev->vf;
	vf->dev_type = get_dev_type(pdev);
	vf->base = pcim_iomap_table(pdev);
	vf->pdev = pdev;

	ret = ifcvf_init_hw(vf, pdev);
	if (ret) {
		IFCVF_ERR(pdev, "Failed to init IFCVF hw\n");
		goto err;
	}

	for (i = 0; i < vf->nr_vring; i++)
		vf->vring[i].irq = -EINVAL;

	vf->hw_features = ifcvf_get_hw_features(vf);
	vf->config_size = ifcvf_get_config_size(vf);

	dev_type = get_dev_type(pdev);
	switch (dev_type) {
	case VIRTIO_ID_NET:
		ifcvf_mgmt_dev->mdev.id_table = id_table_net;
		break;
	case VIRTIO_ID_BLOCK:
		ifcvf_mgmt_dev->mdev.id_table = id_table_blk;
		break;
	default:
		IFCVF_ERR(pdev, "VIRTIO ID %u not supported\n", dev_type);
		ret = -EOPNOTSUPP;
		goto err;
	}

	ifcvf_mgmt_dev->mdev.ops = &ifcvf_vdpa_mgmt_dev_ops;
	ifcvf_mgmt_dev->mdev.device = dev;
	ifcvf_mgmt_dev->mdev.max_supported_vqs = vf->nr_vring;
	ifcvf_mgmt_dev->mdev.supported_features = vf->hw_features;
	ifcvf_mgmt_dev->mdev.config_attr_mask = (1 << VDPA_ATTR_DEV_FEATURES);

	ret = vdpa_mgmtdev_register(&ifcvf_mgmt_dev->mdev);
	if (ret) {
		IFCVF_ERR(pdev,
			  "Failed to initialize the management interfaces\n");
		goto err;
	}

	pci_set_drvdata(pdev, ifcvf_mgmt_dev);

	return 0;

err:
	kfree(ifcvf_mgmt_dev->vf.vring);
	kfree(ifcvf_mgmt_dev);
	return ret;
}

static void ifcvf_remove(struct pci_dev *pdev)
{
	struct ifcvf_vdpa_mgmt_dev *ifcvf_mgmt_dev;

	ifcvf_mgmt_dev = pci_get_drvdata(pdev);
	vdpa_mgmtdev_unregister(&ifcvf_mgmt_dev->mdev);
	kfree(ifcvf_mgmt_dev->vf.vring);
	kfree(ifcvf_mgmt_dev);
}

static struct pci_device_id ifcvf_pci_ids[] = {
	/* N3000 network device */
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_REDHAT_QUMRANET,
			 N3000_DEVICE_ID,
			 PCI_VENDOR_ID_INTEL,
			 N3000_SUBSYS_DEVICE_ID) },
	/* C5000X-PL network device
	 * F2000X-PL network device
	 */
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_REDHAT_QUMRANET,
			 VIRTIO_TRANS_ID_NET,
			 PCI_VENDOR_ID_INTEL,
			 VIRTIO_ID_NET) },
	/* C5000X-PL block device */
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_REDHAT_QUMRANET,
			 VIRTIO_TRANS_ID_BLOCK,
			 PCI_VENDOR_ID_INTEL,
			 VIRTIO_ID_BLOCK) },

	{ 0 },
};
MODULE_DEVICE_TABLE(pci, ifcvf_pci_ids);

static struct pci_driver ifcvf_driver = {
	.name     = IFCVF_DRIVER_NAME,
	.id_table = ifcvf_pci_ids,
	.probe    = ifcvf_probe,
	.remove   = ifcvf_remove,
};

module_pci_driver(ifcvf_driver);

MODULE_LICENSE("GPL v2");