// SPDX-License-Identifier: GPL-2.0-only
/* Copyright(c) 2023 Advanced Micro Devices, Inc */

#include <linux/pci.h>
#include <linux/vdpa.h>
#include <uapi/linux/vdpa.h>
#include <linux/virtio_pci_modern.h>

#include <linux/pds/pds_common.h>
#include <linux/pds/pds_core_if.h>
#include <linux/pds/pds_adminq.h>
#include <linux/pds/pds_auxbus.h>

#include "vdpa_dev.h"
#include "aux_drv.h"
#include "cmds.h"
#include "debugfs.h"

static u64 pds_vdpa_get_driver_features(struct vdpa_device *vdpa_dev);

static struct pds_vdpa_device *vdpa_to_pdsv(struct vdpa_device *vdpa_dev)
{
	return container_of(vdpa_dev, struct pds_vdpa_device, vdpa_dev);
}

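/* Event notification from the PDS core: on a device reset or link change
 * we poke the vDPA config callback so the guest's virtio driver knows to
 * re-read the device config space.
 */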
static int pds_vdpa_notify_handler(struct notifier_block *nb,
				   unsigned long ecode,
				   void *data)
{
	struct pds_vdpa_device *pdsv = container_of(nb, struct pds_vdpa_device, nb);
	struct device *dev = &pdsv->vdpa_aux->padev->aux_dev.dev;

	dev_dbg(dev, "%s: event code %lu\n", __func__, ecode);

	if (ecode == PDS_EVENT_RESET || ecode == PDS_EVENT_LINK_CHANGE) {
		if (pdsv->config_cb.callback)
			pdsv->config_cb.callback(pdsv->config_cb.private);
	}

	return 0;
}

static int pds_vdpa_register_event_handler(struct pds_vdpa_device *pdsv)
{
	struct device *dev = &pdsv->vdpa_aux->padev->aux_dev.dev;
	struct notifier_block *nb = &pdsv->nb;
	int err;

	if (!nb->notifier_call) {
		nb->notifier_call = pds_vdpa_notify_handler;
		err = pdsc_register_notify(nb);
		if (err) {
			nb->notifier_call = NULL;
			dev_err(dev, "failed to register pds event handler: %pe\n",
				ERR_PTR(err));
			return -EINVAL;
		}
		dev_dbg(dev, "pds event handler registered\n");
	}

	return 0;
}

static void pds_vdpa_unregister_event_handler(struct pds_vdpa_device *pdsv)
{
	if (pdsv->nb.notifier_call) {
		pdsc_unregister_notify(&pdsv->nb);
		pdsv->nb.notifier_call = NULL;
	}
}

static int pds_vdpa_set_vq_address(struct vdpa_device *vdpa_dev, u16 qid,
				   u64 desc_addr, u64 driver_addr, u64 device_addr)
{
	struct pds_vdpa_device *pdsv = vdpa_to_pdsv(vdpa_dev);

	pdsv->vqs[qid].desc_addr = desc_addr;
	pdsv->vqs[qid].avail_addr = driver_addr;
	pdsv->vqs[qid].used_addr = device_addr;

	return 0;
}

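/* The vq setters below only cache the ring parameters in pdsv->vqs[];
 * nothing is written to the device until set_vq_ready(..., true) pushes
 * the whole vq description to the firmware in one adminq init_vq command.
 */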
static void pds_vdpa_set_vq_num(struct vdpa_device *vdpa_dev, u16 qid, u32 num)
{
	struct pds_vdpa_device *pdsv = vdpa_to_pdsv(vdpa_dev);

	pdsv->vqs[qid].q_len = num;
}

static void pds_vdpa_kick_vq(struct vdpa_device *vdpa_dev, u16 qid)
{
	struct pds_vdpa_device *pdsv = vdpa_to_pdsv(vdpa_dev);

	iowrite16(qid, pdsv->vqs[qid].notify);
}

static void pds_vdpa_set_vq_cb(struct vdpa_device *vdpa_dev, u16 qid,
			       struct vdpa_callback *cb)
{
	struct pds_vdpa_device *pdsv = vdpa_to_pdsv(vdpa_dev);

	pdsv->vqs[qid].event_cb = *cb;
}

static irqreturn_t pds_vdpa_isr(int irq, void *data)
{
	struct pds_vdpa_vq_info *vq;

	vq = data;
	if (vq->event_cb.callback)
		vq->event_cb.callback(vq->event_cb.private);

	return IRQ_HANDLED;
}

static void pds_vdpa_release_irq(struct pds_vdpa_device *pdsv, int qid)
{
	if (pdsv->vqs[qid].irq == VIRTIO_MSI_NO_VECTOR)
		return;

	free_irq(pdsv->vqs[qid].irq, &pdsv->vqs[qid]);
	pdsv->vqs[qid].irq = VIRTIO_MSI_NO_VECTOR;
}

static void pds_vdpa_set_vq_ready(struct vdpa_device *vdpa_dev, u16 qid, bool ready)
{
	struct pds_vdpa_device *pdsv = vdpa_to_pdsv(vdpa_dev);
	struct device *dev = &pdsv->vdpa_dev.dev;
	u64 driver_features;
	u16 invert_idx = 0;
	int err;

	dev_dbg(dev, "%s: qid %d ready %d => %d\n",
		__func__, qid, pdsv->vqs[qid].ready, ready);
	if (ready == pdsv->vqs[qid].ready)
		return;

	driver_features = pds_vdpa_get_driver_features(vdpa_dev);
	if (driver_features & BIT_ULL(VIRTIO_F_RING_PACKED))
		invert_idx = PDS_VDPA_PACKED_INVERT_IDX;

	if (ready) {
		/* Pass the accumulated vq setup info to the DSC in a single
		 * adminq command, so the firmware can do its full vq setup
		 * in one operation.
		 */
		err = pds_vdpa_cmd_init_vq(pdsv, qid, invert_idx, &pdsv->vqs[qid]);
		if (err) {
			dev_err(dev, "Failed to init vq %d: %pe\n",
				qid, ERR_PTR(err));
			ready = false;
		}
	} else {
		err = pds_vdpa_cmd_reset_vq(pdsv, qid, invert_idx, &pdsv->vqs[qid]);
		if (err)
			dev_err(dev, "%s: reset_vq failed qid %d: %pe\n",
				__func__, qid, ERR_PTR(err));
	}

	pdsv->vqs[qid].ready = ready;
}

static bool pds_vdpa_get_vq_ready(struct vdpa_device *vdpa_dev, u16 qid)
{
	struct pds_vdpa_device *pdsv = vdpa_to_pdsv(vdpa_dev);

	return pdsv->vqs[qid].ready;
}

static int pds_vdpa_set_vq_state(struct vdpa_device *vdpa_dev, u16 qid,
				 const struct vdpa_vq_state *state)
{
	struct pds_vdpa_device *pdsv = vdpa_to_pdsv(vdpa_dev);
	struct pds_auxiliary_dev *padev = pdsv->vdpa_aux->padev;
	struct device *dev = &padev->aux_dev.dev;
	u64 driver_features;
	u16 avail;
	u16 used;

	if (pdsv->vqs[qid].ready) {
		dev_err(dev, "Setting device position is denied while vq is enabled\n");
		return -EINVAL;
	}

	driver_features = pds_vdpa_get_driver_features(vdpa_dev);
	if (driver_features & BIT_ULL(VIRTIO_F_RING_PACKED)) {
		avail = state->packed.last_avail_idx |
			(state->packed.last_avail_counter << 15);
		used = state->packed.last_used_idx |
		       (state->packed.last_used_counter << 15);

		/* The avail and used index are stored with the packed wrap
		 * counter bit inverted. This way, in case set_vq_state is
		 * not called, the initial value can be set to zero prior to
		 * feature negotiation, and it is good for both packed and
		 * split vq.
		 */
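		/* A sketch of the encoding, assuming PDS_VDPA_PACKED_INVERT_IDX
		 * is the wrap-counter bit (bit 15), as the shifts above imply:
		 * last_avail_idx = 5 with last_avail_counter = 1 packs to
		 * 0x0005 | (1 << 15) = 0x8005, and the XOR below flips the
		 * wrap bit so 0x0005 is stored; an untouched all-zero value
		 * then reads back as index 0 with the wrap counter at its
		 * initial value of 1.
		 */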
		avail ^= PDS_VDPA_PACKED_INVERT_IDX;
		used ^= PDS_VDPA_PACKED_INVERT_IDX;
	} else {
		avail = state->split.avail_index;
		/* state->split does not provide a used_index:
		 * the vq will be set to "empty" here, and the vq will read
		 * the current used index the next time the vq is kicked.
		 */
		used = avail;
	}

	if (used != avail) {
		dev_dbg(dev, "Setting used equal to avail, for interoperability\n");
		used = avail;
	}

	pdsv->vqs[qid].avail_idx = avail;
	pdsv->vqs[qid].used_idx = used;

	return 0;
}

static int pds_vdpa_get_vq_state(struct vdpa_device *vdpa_dev, u16 qid,
				 struct vdpa_vq_state *state)
{
	struct pds_vdpa_device *pdsv = vdpa_to_pdsv(vdpa_dev);
	struct pds_auxiliary_dev *padev = pdsv->vdpa_aux->padev;
	struct device *dev = &padev->aux_dev.dev;
	u64 driver_features;
	u16 avail;
	u16 used;

	if (pdsv->vqs[qid].ready) {
		dev_err(dev, "Getting device position is denied while vq is enabled\n");
		return -EINVAL;
	}

	avail = pdsv->vqs[qid].avail_idx;
	used = pdsv->vqs[qid].used_idx;

	driver_features = pds_vdpa_get_driver_features(vdpa_dev);
	if (driver_features & BIT_ULL(VIRTIO_F_RING_PACKED)) {
		avail ^= PDS_VDPA_PACKED_INVERT_IDX;
		used ^= PDS_VDPA_PACKED_INVERT_IDX;

		state->packed.last_avail_idx = avail & 0x7fff;
		state->packed.last_avail_counter = avail >> 15;
		state->packed.last_used_idx = used & 0x7fff;
		state->packed.last_used_counter = used >> 15;
	} else {
		state->split.avail_index = avail;
		/* state->split does not provide a used_index. */
	}

	return 0;
}

static struct vdpa_notification_area
pds_vdpa_get_vq_notification(struct vdpa_device *vdpa_dev, u16 qid)
{
	struct pds_vdpa_device *pdsv = vdpa_to_pdsv(vdpa_dev);
	struct virtio_pci_modern_device *vd_mdev;
	struct vdpa_notification_area area;

	area.addr = pdsv->vqs[qid].notify_pa;

	vd_mdev = &pdsv->vdpa_aux->vd_mdev;
	if (!vd_mdev->notify_offset_multiplier)
		area.size = PDS_PAGE_SIZE;
	else
		area.size = vd_mdev->notify_offset_multiplier;

	return area;
}

static int pds_vdpa_get_vq_irq(struct vdpa_device *vdpa_dev, u16 qid)
{
	struct pds_vdpa_device *pdsv = vdpa_to_pdsv(vdpa_dev);

	return pdsv->vqs[qid].irq;
}

static u32 pds_vdpa_get_vq_align(struct vdpa_device *vdpa_dev)
{
	return PDS_PAGE_SIZE;
}

static u32 pds_vdpa_get_vq_group(struct vdpa_device *vdpa_dev, u16 idx)
{
	return 0;
}

static u64 pds_vdpa_get_device_features(struct vdpa_device *vdpa_dev)
{
	struct pds_vdpa_device *pdsv = vdpa_to_pdsv(vdpa_dev);

	return pdsv->supported_features;
}

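/* Feature negotiation: the requested features must be a subset of what we
 * advertised, and VIRTIO_F_ACCESS_PLATFORM must be among them, presumably
 * because the device DMAs through the host platform's IOMMU and can't
 * assume untranslated access to guest memory.
 */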
static int pds_vdpa_set_driver_features(struct vdpa_device *vdpa_dev, u64 features)
{
	struct pds_vdpa_device *pdsv = vdpa_to_pdsv(vdpa_dev);
	struct device *dev = &pdsv->vdpa_dev.dev;
	u64 driver_features;
	u64 nego_features;
	u64 hw_features;
	u64 missing;

	if (!(features & BIT_ULL(VIRTIO_F_ACCESS_PLATFORM)) && features) {
		dev_err(dev, "VIRTIO_F_ACCESS_PLATFORM is not negotiated\n");
		return -EOPNOTSUPP;
	}

	/* Check for valid feature bits */
	nego_features = features & pdsv->supported_features;
	missing = features & ~nego_features;
	if (missing) {
		dev_err(dev, "Can't support all requested features in %#llx, missing %#llx features\n",
			features, missing);
		return -EOPNOTSUPP;
	}

	driver_features = pds_vdpa_get_driver_features(vdpa_dev);
	pdsv->negotiated_features = nego_features;
	dev_dbg(dev, "%s: %#llx => %#llx\n",
		__func__, driver_features, nego_features);

	/* if we're faking the F_MAC, strip it before writing to the device */
	hw_features = le64_to_cpu(pdsv->vdpa_aux->ident.hw_features);
	if (!(hw_features & BIT_ULL(VIRTIO_NET_F_MAC)))
		nego_features &= ~BIT_ULL(VIRTIO_NET_F_MAC);

	if (driver_features == nego_features)
		return 0;

	vp_modern_set_features(&pdsv->vdpa_aux->vd_mdev, nego_features);

	return 0;
}

static u64 pds_vdpa_get_driver_features(struct vdpa_device *vdpa_dev)
{
	struct pds_vdpa_device *pdsv = vdpa_to_pdsv(vdpa_dev);

	return pdsv->negotiated_features;
}

static void pds_vdpa_set_config_cb(struct vdpa_device *vdpa_dev,
				   struct vdpa_callback *cb)
{
	struct pds_vdpa_device *pdsv = vdpa_to_pdsv(vdpa_dev);

	pdsv->config_cb.callback = cb->callback;
	pdsv->config_cb.private = cb->private;
}

static u16 pds_vdpa_get_vq_num_max(struct vdpa_device *vdpa_dev)
{
	struct pds_vdpa_device *pdsv = vdpa_to_pdsv(vdpa_dev);

	/* qemu has assert() that vq_num_max <= VIRTQUEUE_MAX_SIZE (1024) */
	return min_t(u16, 1024, BIT(le16_to_cpu(pdsv->vdpa_aux->ident.max_qlen)));
}

static u32 pds_vdpa_get_device_id(struct vdpa_device *vdpa_dev)
{
	return VIRTIO_ID_NET;
}

static u32 pds_vdpa_get_vendor_id(struct vdpa_device *vdpa_dev)
{
	return PCI_VENDOR_ID_PENSANDO;
}

static u8 pds_vdpa_get_status(struct vdpa_device *vdpa_dev)
{
	struct pds_vdpa_device *pdsv = vdpa_to_pdsv(vdpa_dev);

	return vp_modern_get_status(&pdsv->vdpa_aux->vd_mdev);
}

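/* MSI-X vectors are allocated and hooked up only when the driver sets
 * VIRTIO_CONFIG_S_DRIVER_OK, and are released again when DRIVER_OK is
 * cleared or the device is reset; see pds_vdpa_set_status() below.
 */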
static int pds_vdpa_request_irqs(struct pds_vdpa_device *pdsv)
{
	struct pci_dev *pdev = pdsv->vdpa_aux->padev->vf_pdev;
	struct pds_vdpa_aux *vdpa_aux = pdsv->vdpa_aux;
	struct device *dev = &pdsv->vdpa_dev.dev;
	int max_vq, nintrs, qid, err;

	max_vq = vdpa_aux->vdpa_mdev.max_supported_vqs;

	nintrs = pci_alloc_irq_vectors(pdev, max_vq, max_vq, PCI_IRQ_MSIX);
	if (nintrs < 0) {
		dev_err(dev, "Couldn't get %d msix vectors: %pe\n",
			max_vq, ERR_PTR(nintrs));
		return nintrs;
	}

	for (qid = 0; qid < pdsv->num_vqs; ++qid) {
		int irq = pci_irq_vector(pdev, qid);

		snprintf(pdsv->vqs[qid].irq_name, sizeof(pdsv->vqs[qid].irq_name),
			 "vdpa-%s-%d", dev_name(dev), qid);

		err = request_irq(irq, pds_vdpa_isr, 0,
				  pdsv->vqs[qid].irq_name,
				  &pdsv->vqs[qid]);
		if (err) {
			dev_err(dev, "%s: no irq for qid %d: %pe\n",
				__func__, qid, ERR_PTR(err));
			goto err_release;
		}

		pdsv->vqs[qid].irq = irq;
	}

	vdpa_aux->nintrs = nintrs;

	return 0;

err_release:
	while (qid--)
		pds_vdpa_release_irq(pdsv, qid);

	pci_free_irq_vectors(pdev);

	vdpa_aux->nintrs = 0;

	return err;
}

void pds_vdpa_release_irqs(struct pds_vdpa_device *pdsv)
{
	struct pds_vdpa_aux *vdpa_aux;
	struct pci_dev *pdev;
	int qid;

	if (!pdsv)
		return;

	pdev = pdsv->vdpa_aux->padev->vf_pdev;
	vdpa_aux = pdsv->vdpa_aux;

	if (!vdpa_aux->nintrs)
		return;

	for (qid = 0; qid < pdsv->num_vqs; qid++)
		pds_vdpa_release_irq(pdsv, qid);

	pci_free_irq_vectors(pdev);

	vdpa_aux->nintrs = 0;
}

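/* Status transitions handled below:
 *   0 -> DRIVER_OK:    allocate and request the per-vq interrupts
 *   any -> 0:          full device reset, clear the cached vq indices,
 *                      drop the config callback, restore the mac
 *   0 -> FEATURES_OK:  (re)map the vq notification regions
 *   DRIVER_OK -> 0:    release the per-vq interrupts
 */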
static void pds_vdpa_set_status(struct vdpa_device *vdpa_dev, u8 status)
{
	struct pds_vdpa_device *pdsv = vdpa_to_pdsv(vdpa_dev);
	struct device *dev = &pdsv->vdpa_dev.dev;
	u8 old_status;
	int i;

	old_status = pds_vdpa_get_status(vdpa_dev);
	dev_dbg(dev, "%s: old %#x new %#x\n", __func__, old_status, status);

	if (status & ~old_status & VIRTIO_CONFIG_S_DRIVER_OK) {
		if (pds_vdpa_request_irqs(pdsv))
			status = old_status | VIRTIO_CONFIG_S_FAILED;
	}

	pds_vdpa_cmd_set_status(pdsv, status);

	if (status == 0) {
		struct vdpa_callback null_cb = { };

		pds_vdpa_set_config_cb(vdpa_dev, &null_cb);
		pds_vdpa_cmd_reset(pdsv);

		for (i = 0; i < pdsv->num_vqs; i++) {
			pdsv->vqs[i].avail_idx = 0;
			pdsv->vqs[i].used_idx = 0;
		}

		pds_vdpa_cmd_set_mac(pdsv, pdsv->mac);
	}

	if (status & ~old_status & VIRTIO_CONFIG_S_FEATURES_OK) {
		for (i = 0; i < pdsv->num_vqs; i++) {
			pdsv->vqs[i].notify =
				vp_modern_map_vq_notify(&pdsv->vdpa_aux->vd_mdev,
							i, &pdsv->vqs[i].notify_pa);
		}
	}

	if (old_status & ~status & VIRTIO_CONFIG_S_DRIVER_OK)
		pds_vdpa_release_irqs(pdsv);
}

static void pds_vdpa_init_vqs_entry(struct pds_vdpa_device *pdsv, int qid,
				    void __iomem *notify)
{
	memset(&pdsv->vqs[qid], 0, sizeof(pdsv->vqs[0]));
	pdsv->vqs[qid].qid = qid;
	pdsv->vqs[qid].pdsv = pdsv;
	pdsv->vqs[qid].ready = false;
	pdsv->vqs[qid].irq = VIRTIO_MSI_NO_VECTOR;
	pdsv->vqs[qid].notify = notify;
}

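/* vDPA device reset: quiesce any running vqs through the adminq, clear
 * the virtio status, then re-init the local vq bookkeeping while keeping
 * the already-mapped notify addresses.
 */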
static int pds_vdpa_reset(struct vdpa_device *vdpa_dev)
{
	struct pds_vdpa_device *pdsv = vdpa_to_pdsv(vdpa_dev);
	struct device *dev;
	int err = 0;
	u8 status;
	int i;

	dev = &pdsv->vdpa_aux->padev->aux_dev.dev;
	status = pds_vdpa_get_status(vdpa_dev);

	if (status == 0)
		return 0;

	if (status & VIRTIO_CONFIG_S_DRIVER_OK) {
		/* Reset the vqs */
		for (i = 0; i < pdsv->num_vqs && !err; i++) {
			err = pds_vdpa_cmd_reset_vq(pdsv, i, 0, &pdsv->vqs[i]);
			if (err)
				dev_err(dev, "%s: reset_vq failed qid %d: %pe\n",
					__func__, i, ERR_PTR(err));
		}
	}

	pds_vdpa_set_status(vdpa_dev, 0);

	if (status & VIRTIO_CONFIG_S_DRIVER_OK) {
		/* Reset the vq info */
		for (i = 0; i < pdsv->num_vqs && !err; i++)
			pds_vdpa_init_vqs_entry(pdsv, i, pdsv->vqs[i].notify);
	}

	return 0;
}

static size_t pds_vdpa_get_config_size(struct vdpa_device *vdpa_dev)
{
	return sizeof(struct virtio_net_config);
}

static void pds_vdpa_get_config(struct vdpa_device *vdpa_dev,
				unsigned int offset,
				void *buf, unsigned int len)
{
	struct pds_vdpa_device *pdsv = vdpa_to_pdsv(vdpa_dev);
	void __iomem *device;

	if (offset + len > sizeof(struct virtio_net_config)) {
		WARN(true, "%s: bad read, offset %d len %d\n", __func__, offset, len);
		return;
	}

	device = pdsv->vdpa_aux->vd_mdev.device;
	memcpy_fromio(buf, device + offset, len);
}

static void pds_vdpa_set_config(struct vdpa_device *vdpa_dev,
				unsigned int offset, const void *buf,
				unsigned int len)
{
	struct pds_vdpa_device *pdsv = vdpa_to_pdsv(vdpa_dev);
	void __iomem *device;

	if (offset + len > sizeof(struct virtio_net_config)) {
		WARN(true, "%s: bad write, offset %d len %d\n", __func__, offset, len);
		return;
	}

	device = pdsv->vdpa_aux->vd_mdev.device;
	memcpy_toio(device + offset, buf, len);
}

static const struct vdpa_config_ops pds_vdpa_ops = {
	.set_vq_address = pds_vdpa_set_vq_address,
	.set_vq_num = pds_vdpa_set_vq_num,
	.kick_vq = pds_vdpa_kick_vq,
	.set_vq_cb = pds_vdpa_set_vq_cb,
	.set_vq_ready = pds_vdpa_set_vq_ready,
	.get_vq_ready = pds_vdpa_get_vq_ready,
	.set_vq_state = pds_vdpa_set_vq_state,
	.get_vq_state = pds_vdpa_get_vq_state,
	.get_vq_notification = pds_vdpa_get_vq_notification,
	.get_vq_irq = pds_vdpa_get_vq_irq,
	.get_vq_align = pds_vdpa_get_vq_align,
	.get_vq_group = pds_vdpa_get_vq_group,

	.get_device_features = pds_vdpa_get_device_features,
	.set_driver_features = pds_vdpa_set_driver_features,
	.get_driver_features = pds_vdpa_get_driver_features,
	.set_config_cb = pds_vdpa_set_config_cb,
	.get_vq_num_max = pds_vdpa_get_vq_num_max,
	.get_device_id = pds_vdpa_get_device_id,
	.get_vendor_id = pds_vdpa_get_vendor_id,
	.get_status = pds_vdpa_get_status,
	.set_status = pds_vdpa_set_status,
	.reset = pds_vdpa_reset,
	.get_config_size = pds_vdpa_get_config_size,
	.get_config = pds_vdpa_get_config,
	.set_config = pds_vdpa_set_config,
};

static struct virtio_device_id pds_vdpa_id_table[] = {
	{VIRTIO_ID_NET, VIRTIO_DEV_ANY_ID},
	{0},
};

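/* Management-device .dev_add: invoked when userspace creates a vDPA device
 * (e.g. "vdpa dev add name vdpa0 mgmtdev ...").  Allocates the device,
 * resets and identifies the hardware, sizes the vq set, picks a mac, and
 * registers the new device on the vDPA bus.
 */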
static int pds_vdpa_dev_add(struct vdpa_mgmt_dev *mdev, const char *name,
			    const struct vdpa_dev_set_config *add_config)
{
	struct pds_vdpa_aux *vdpa_aux;
	struct pds_vdpa_device *pdsv;
	struct vdpa_mgmt_dev *mgmt;
	u16 fw_max_vqs, vq_pairs;
	struct device *dma_dev;
	struct pci_dev *pdev;
	struct device *dev;
	u8 status;
	int err;
	int i;

	vdpa_aux = container_of(mdev, struct pds_vdpa_aux, vdpa_mdev);
	dev = &vdpa_aux->padev->aux_dev.dev;
	mgmt = &vdpa_aux->vdpa_mdev;

	if (vdpa_aux->pdsv) {
		dev_warn(dev, "Multiple vDPA devices on a VF is not supported.\n");
		return -EOPNOTSUPP;
	}

	pdsv = vdpa_alloc_device(struct pds_vdpa_device, vdpa_dev,
				 dev, &pds_vdpa_ops, 1, 1, name, false);
	if (IS_ERR(pdsv)) {
		dev_err(dev, "Failed to allocate vDPA structure: %pe\n", pdsv);
		return PTR_ERR(pdsv);
	}

	vdpa_aux->pdsv = pdsv;
	pdsv->vdpa_aux = vdpa_aux;

	pdev = vdpa_aux->padev->vf_pdev;
	dma_dev = &pdev->dev;
	pdsv->vdpa_dev.dma_dev = dma_dev;

	status = pds_vdpa_get_status(&pdsv->vdpa_dev);
	if (status == 0xff) {
		dev_err(dev, "Broken PCI - status %#x\n", status);
		err = -ENXIO;
		goto err_unmap;
	}

	pdsv->supported_features = mgmt->supported_features;

	if (add_config->mask & BIT_ULL(VDPA_ATTR_DEV_FEATURES)) {
		u64 unsupp_features =
			add_config->device_features & ~pdsv->supported_features;

		if (unsupp_features) {
			dev_err(dev, "Unsupported features: %#llx\n", unsupp_features);
			err = -EOPNOTSUPP;
			goto err_unmap;
		}

		pdsv->supported_features = add_config->device_features;
	}

	err = pds_vdpa_cmd_reset(pdsv);
	if (err) {
		dev_err(dev, "Failed to reset hw: %pe\n", ERR_PTR(err));
		goto err_unmap;
	}

	err = pds_vdpa_init_hw(pdsv);
	if (err) {
		dev_err(dev, "Failed to init hw: %pe\n", ERR_PTR(err));
		goto err_unmap;
	}

	fw_max_vqs = le16_to_cpu(pdsv->vdpa_aux->ident.max_vqs);
	vq_pairs = fw_max_vqs / 2;

	/* Make sure we have the queues being requested */
	if (add_config->mask & BIT_ULL(VDPA_ATTR_DEV_NET_CFG_MAX_VQP))
		vq_pairs = add_config->net.max_vq_pairs;

	pdsv->num_vqs = 2 * vq_pairs;
	if (pdsv->supported_features & BIT_ULL(VIRTIO_NET_F_CTRL_VQ))
		pdsv->num_vqs++;

	if (pdsv->num_vqs > fw_max_vqs) {
		dev_err(dev, "%s: queue count requested %u greater than max %u\n",
			__func__, pdsv->num_vqs, fw_max_vqs);
		err = -ENOSPC;
		goto err_unmap;
	}

	if (pdsv->num_vqs != fw_max_vqs) {
		err = pds_vdpa_cmd_set_max_vq_pairs(pdsv, vq_pairs);
		if (err) {
			dev_err(dev, "Failed to set max_vq_pairs: %pe\n",
				ERR_PTR(err));
			goto err_unmap;
		}
	}

708 | |
709 | /* Set a mac, either from the user config if provided |
710 | * or use the device's mac if not 00:..:00 |
711 | * or set a random mac |
712 | */ |
713 | if (add_config->mask & BIT_ULL(VDPA_ATTR_DEV_NET_CFG_MACADDR)) { |
714 | ether_addr_copy(dst: pdsv->mac, src: add_config->net.mac); |
715 | } else { |
716 | struct virtio_net_config __iomem *vc; |
717 | |
718 | vc = pdsv->vdpa_aux->vd_mdev.device; |
719 | memcpy_fromio(pdsv->mac, vc->mac, sizeof(pdsv->mac)); |
720 | if (is_zero_ether_addr(addr: pdsv->mac) && |
721 | (pdsv->supported_features & BIT_ULL(VIRTIO_NET_F_MAC))) { |
722 | eth_random_addr(addr: pdsv->mac); |
723 | dev_info(dev, "setting random mac %pM\n" , pdsv->mac); |
724 | } |
725 | } |
726 | pds_vdpa_cmd_set_mac(pdsv, mac: pdsv->mac); |
727 | |
	for (i = 0; i < pdsv->num_vqs; i++) {
		void __iomem *notify;

		notify = vp_modern_map_vq_notify(&pdsv->vdpa_aux->vd_mdev,
						 i, &pdsv->vqs[i].notify_pa);
		pds_vdpa_init_vqs_entry(pdsv, i, notify);
	}

	pdsv->vdpa_dev.mdev = &vdpa_aux->vdpa_mdev;

	err = pds_vdpa_register_event_handler(pdsv);
	if (err) {
		dev_err(dev, "Failed to register for PDS events: %pe\n", ERR_PTR(err));
		goto err_unmap;
	}

	/* We use the _vdpa_register_device() call rather than
	 * vdpa_register_device() to avoid a deadlock: our dev_add() is
	 * called with the vdpa_dev_lock already held by
	 * vdpa_nl_cmd_dev_add_set_doit().
	 */
	err = _vdpa_register_device(&pdsv->vdpa_dev, pdsv->num_vqs);
	if (err) {
		dev_err(dev, "Failed to register to vDPA bus: %pe\n", ERR_PTR(err));
		goto err_unevent;
	}

	pds_vdpa_debugfs_add_vdpadev(vdpa_aux);

	return 0;

err_unevent:
	pds_vdpa_unregister_event_handler(pdsv);
err_unmap:
	put_device(&pdsv->vdpa_dev.dev);
	vdpa_aux->pdsv = NULL;
	return err;
}

static void pds_vdpa_dev_del(struct vdpa_mgmt_dev *mdev,
			     struct vdpa_device *vdpa_dev)
{
	struct pds_vdpa_device *pdsv = vdpa_to_pdsv(vdpa_dev);
	struct pds_vdpa_aux *vdpa_aux;

	pds_vdpa_unregister_event_handler(pdsv);

	vdpa_aux = container_of(mdev, struct pds_vdpa_aux, vdpa_mdev);
	_vdpa_unregister_device(vdpa_dev);

	pds_vdpa_cmd_reset(vdpa_aux->pdsv);
	pds_vdpa_debugfs_reset_vdpadev(vdpa_aux);

	vdpa_aux->pdsv = NULL;

	dev_info(&vdpa_aux->padev->aux_dev.dev, "Removed vdpa device\n");
}

static const struct vdpa_mgmtdev_ops pds_vdpa_mgmt_dev_ops = {
	.dev_add = pds_vdpa_dev_add,
	.dev_del = pds_vdpa_dev_del,
};

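/* Identify the vDPA capabilities through the PF's adminq and use the
 * result to size and describe the management device that userspace (e.g.
 * "vdpa mgmtdev show") and .dev_add will work from.
 */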
int pds_vdpa_get_mgmt_info(struct pds_vdpa_aux *vdpa_aux)
{
	union pds_core_adminq_cmd cmd = {
		.vdpa_ident.opcode = PDS_VDPA_CMD_IDENT,
		.vdpa_ident.vf_id = cpu_to_le16(vdpa_aux->vf_id),
	};
	union pds_core_adminq_comp comp = {};
	struct vdpa_mgmt_dev *mgmt;
	struct pci_dev *pf_pdev;
	struct device *pf_dev;
	struct pci_dev *pdev;
	dma_addr_t ident_pa;
	struct device *dev;
	u16 dev_intrs;
	u16 max_vqs;
	int err;

	dev = &vdpa_aux->padev->aux_dev.dev;
	pdev = vdpa_aux->padev->vf_pdev;
	mgmt = &vdpa_aux->vdpa_mdev;

	/* Get resource info through the PF's adminq.  It is a block of info,
	 * so we need to map some memory for the PF to make available to the
	 * firmware for writing the data.
	 */
	pf_pdev = pci_physfn(vdpa_aux->padev->vf_pdev);
	pf_dev = &pf_pdev->dev;
	ident_pa = dma_map_single(pf_dev, &vdpa_aux->ident,
				  sizeof(vdpa_aux->ident), DMA_FROM_DEVICE);
	if (dma_mapping_error(pf_dev, ident_pa)) {
		dev_err(dev, "Failed to map ident space\n");
		return -ENOMEM;
	}

	cmd.vdpa_ident.ident_pa = cpu_to_le64(ident_pa);
	cmd.vdpa_ident.len = cpu_to_le32(sizeof(vdpa_aux->ident));
	err = pds_client_adminq_cmd(vdpa_aux->padev, &cmd,
				    sizeof(cmd.vdpa_ident), &comp, 0);
	dma_unmap_single(pf_dev, ident_pa,
			 sizeof(vdpa_aux->ident), DMA_FROM_DEVICE);
	if (err) {
		dev_err(dev, "Failed to ident hw, status %d: %pe\n",
			comp.status, ERR_PTR(err));
		return err;
	}

	max_vqs = le16_to_cpu(vdpa_aux->ident.max_vqs);
	dev_intrs = pci_msix_vec_count(pdev);
	dev_dbg(dev, "ident.max_vqs %d dev_intrs %d\n", max_vqs, dev_intrs);

	max_vqs = min_t(u16, dev_intrs, max_vqs);
	mgmt->max_supported_vqs = min_t(u16, PDS_VDPA_MAX_QUEUES, max_vqs);
	vdpa_aux->nintrs = 0;

	mgmt->ops = &pds_vdpa_mgmt_dev_ops;
	mgmt->id_table = pds_vdpa_id_table;
	mgmt->device = dev;
	mgmt->supported_features = le64_to_cpu(vdpa_aux->ident.hw_features);

	/* advertise F_MAC even if the device doesn't */
	mgmt->supported_features |= BIT_ULL(VIRTIO_NET_F_MAC);

	mgmt->config_attr_mask = BIT_ULL(VDPA_ATTR_DEV_NET_CFG_MACADDR);
	mgmt->config_attr_mask |= BIT_ULL(VDPA_ATTR_DEV_NET_CFG_MAX_VQP);
	mgmt->config_attr_mask |= BIT_ULL(VDPA_ATTR_DEV_FEATURES);

	return 0;
}