// SPDX-License-Identifier: GPL-2.0-or-later

#include <linux/virtio_pci_modern.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/delay.h>

/*
 * vp_modern_map_capability - map part of a virtio pci capability
 * @mdev: the modern virtio-pci device
 * @off: offset of the capability
 * @minlen: minimal length of the capability
 * @align: alignment requirement
 * @start: start from the capability
 * @size: map size
 * @len: the length that is actually mapped
 * @pa: physical address of the capability
 *
 * Returns the ioremapped address for the part of the capability
 */
static void __iomem *
vp_modern_map_capability(struct virtio_pci_modern_device *mdev, int off,
			 size_t minlen, u32 align, u32 start, u32 size,
			 size_t *len, resource_size_t *pa)
{
	struct pci_dev *dev = mdev->pci_dev;
	u8 bar;
	u32 offset, length;
	void __iomem *p;

	pci_read_config_byte(dev, off + offsetof(struct virtio_pci_cap,
						 bar),
			     &bar);
	pci_read_config_dword(dev, off + offsetof(struct virtio_pci_cap, offset),
			      &offset);
	pci_read_config_dword(dev, off + offsetof(struct virtio_pci_cap, length),
			      &length);

	/* Check if the BAR may have changed since we requested the region. */
	if (bar >= PCI_STD_NUM_BARS || !(mdev->modern_bars & (1 << bar))) {
		dev_err(&dev->dev,
			"virtio_pci: bar unexpectedly changed to %u\n", bar);
		return NULL;
	}

	if (length <= start) {
		dev_err(&dev->dev,
			"virtio_pci: bad capability len %u (>%u expected)\n",
			length, start);
		return NULL;
	}

	if (length - start < minlen) {
		dev_err(&dev->dev,
			"virtio_pci: bad capability len %u (>=%zu expected)\n",
			length, minlen);
		return NULL;
	}

	length -= start;

	if (start + offset < offset) {
		dev_err(&dev->dev,
			"virtio_pci: map wrap-around %u+%u\n",
			start, offset);
		return NULL;
	}

	offset += start;

	if (offset & (align - 1)) {
		dev_err(&dev->dev,
			"virtio_pci: offset %u not aligned to %u\n",
			offset, align);
		return NULL;
	}

	if (length > size)
		length = size;

	if (len)
		*len = length;

	if (minlen + offset < minlen ||
	    minlen + offset > pci_resource_len(dev, bar)) {
		dev_err(&dev->dev,
			"virtio_pci: map virtio %zu@%u out of range on bar %i length %lu\n",
			minlen, offset,
			bar, (unsigned long)pci_resource_len(dev, bar));
		return NULL;
	}

	p = pci_iomap_range(dev, bar, offset, length);
	if (!p)
		dev_err(&dev->dev,
			"virtio_pci: unable to map virtio %u@%u on bar %i\n",
			length, offset, bar);
	else if (pa)
		*pa = pci_resource_start(dev, bar) + offset;

	return p;
}

/**
 * virtio_pci_find_capability - walk capabilities to find device info.
 * @dev: the PCI device
 * @cfg_type: the VIRTIO_PCI_CAP_* value we seek
 * @ioresource_types: IORESOURCE_MEM and/or IORESOURCE_IO.
 * @bars: the bitmask of BARs
 *
 * Returns offset of the capability, or 0.
 */
static inline int virtio_pci_find_capability(struct pci_dev *dev, u8 cfg_type,
					     u32 ioresource_types, int *bars)
{
	int pos;

	for (pos = pci_find_capability(dev, PCI_CAP_ID_VNDR);
	     pos > 0;
	     pos = pci_find_next_capability(dev, pos, PCI_CAP_ID_VNDR)) {
		u8 type, bar;

		pci_read_config_byte(dev, pos + offsetof(struct virtio_pci_cap,
							 cfg_type),
				     &type);
		pci_read_config_byte(dev, pos + offsetof(struct virtio_pci_cap,
							 bar),
				     &bar);

		/* Ignore structures with reserved BAR values */
		if (bar >= PCI_STD_NUM_BARS)
			continue;

		if (type == cfg_type) {
			if (pci_resource_len(dev, bar) &&
			    pci_resource_flags(dev, bar) & ioresource_types) {
				*bars |= (1 << bar);
				return pos;
			}
		}
	}
	return 0;
}

/* This is part of the ABI. Don't screw with it. */
static inline void check_offsets(void)
{
	/* Note: disk space was harmed in compilation of this function. */
	BUILD_BUG_ON(VIRTIO_PCI_CAP_VNDR !=
		     offsetof(struct virtio_pci_cap, cap_vndr));
	BUILD_BUG_ON(VIRTIO_PCI_CAP_NEXT !=
		     offsetof(struct virtio_pci_cap, cap_next));
	BUILD_BUG_ON(VIRTIO_PCI_CAP_LEN !=
		     offsetof(struct virtio_pci_cap, cap_len));
	BUILD_BUG_ON(VIRTIO_PCI_CAP_CFG_TYPE !=
		     offsetof(struct virtio_pci_cap, cfg_type));
	BUILD_BUG_ON(VIRTIO_PCI_CAP_BAR !=
		     offsetof(struct virtio_pci_cap, bar));
	BUILD_BUG_ON(VIRTIO_PCI_CAP_OFFSET !=
		     offsetof(struct virtio_pci_cap, offset));
	BUILD_BUG_ON(VIRTIO_PCI_CAP_LENGTH !=
		     offsetof(struct virtio_pci_cap, length));
	BUILD_BUG_ON(VIRTIO_PCI_NOTIFY_CAP_MULT !=
		     offsetof(struct virtio_pci_notify_cap,
			      notify_off_multiplier));
	BUILD_BUG_ON(VIRTIO_PCI_COMMON_DFSELECT !=
		     offsetof(struct virtio_pci_common_cfg,
			      device_feature_select));
	BUILD_BUG_ON(VIRTIO_PCI_COMMON_DF !=
		     offsetof(struct virtio_pci_common_cfg, device_feature));
	BUILD_BUG_ON(VIRTIO_PCI_COMMON_GFSELECT !=
		     offsetof(struct virtio_pci_common_cfg,
			      guest_feature_select));
	BUILD_BUG_ON(VIRTIO_PCI_COMMON_GF !=
		     offsetof(struct virtio_pci_common_cfg, guest_feature));
	BUILD_BUG_ON(VIRTIO_PCI_COMMON_MSIX !=
		     offsetof(struct virtio_pci_common_cfg, msix_config));
	BUILD_BUG_ON(VIRTIO_PCI_COMMON_NUMQ !=
		     offsetof(struct virtio_pci_common_cfg, num_queues));
	BUILD_BUG_ON(VIRTIO_PCI_COMMON_STATUS !=
		     offsetof(struct virtio_pci_common_cfg, device_status));
	BUILD_BUG_ON(VIRTIO_PCI_COMMON_CFGGENERATION !=
		     offsetof(struct virtio_pci_common_cfg, config_generation));
	BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_SELECT !=
		     offsetof(struct virtio_pci_common_cfg, queue_select));
	BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_SIZE !=
		     offsetof(struct virtio_pci_common_cfg, queue_size));
	BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_MSIX !=
		     offsetof(struct virtio_pci_common_cfg, queue_msix_vector));
	BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_ENABLE !=
		     offsetof(struct virtio_pci_common_cfg, queue_enable));
	BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_NOFF !=
		     offsetof(struct virtio_pci_common_cfg, queue_notify_off));
	BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_DESCLO !=
		     offsetof(struct virtio_pci_common_cfg, queue_desc_lo));
	BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_DESCHI !=
		     offsetof(struct virtio_pci_common_cfg, queue_desc_hi));
	BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_AVAILLO !=
		     offsetof(struct virtio_pci_common_cfg, queue_avail_lo));
	BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_AVAILHI !=
		     offsetof(struct virtio_pci_common_cfg, queue_avail_hi));
	BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_USEDLO !=
		     offsetof(struct virtio_pci_common_cfg, queue_used_lo));
	BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_USEDHI !=
		     offsetof(struct virtio_pci_common_cfg, queue_used_hi));
	BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_NDATA !=
		     offsetof(struct virtio_pci_modern_common_cfg, queue_notify_data));
	BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_RESET !=
		     offsetof(struct virtio_pci_modern_common_cfg, queue_reset));
}

/*
 * vp_modern_probe - probe the modern virtio pci device; note that the
 * caller is required to enable the PCI device before calling this function.
 * @mdev: the modern virtio-pci device
 *
 * Returns 0 on success, a negative error code on failure
 */
int vp_modern_probe(struct virtio_pci_modern_device *mdev)
{
	struct pci_dev *pci_dev = mdev->pci_dev;
	int err, common, isr, notify, device;
	u32 notify_length;
	u32 notify_offset;
	int devid;

	check_offsets();

	if (mdev->device_id_check) {
		devid = mdev->device_id_check(pci_dev);
		if (devid < 0)
			return devid;
		mdev->id.device = devid;
	} else {
		/* We only own devices >= 0x1000 and <= 0x107f: leave the rest. */
		if (pci_dev->device < 0x1000 || pci_dev->device > 0x107f)
			return -ENODEV;

		if (pci_dev->device < 0x1040) {
			/* Transitional devices: use the PCI subsystem device id as
			 * virtio device id, same as the legacy driver always did.
			 */
			mdev->id.device = pci_dev->subsystem_device;
		} else {
			/* Modern devices: simply use PCI device id, but start from 0x1040. */
			mdev->id.device = pci_dev->device - 0x1040;
		}
	}
	mdev->id.vendor = pci_dev->subsystem_vendor;

	/* check for a common config: if not, use legacy mode (bar 0). */
	common = virtio_pci_find_capability(pci_dev, VIRTIO_PCI_CAP_COMMON_CFG,
					    IORESOURCE_IO | IORESOURCE_MEM,
					    &mdev->modern_bars);
	if (!common) {
		dev_info(&pci_dev->dev,
			 "virtio_pci: leaving for legacy driver\n");
		return -ENODEV;
	}

	/* If common is there, these should be too... */
	isr = virtio_pci_find_capability(pci_dev, VIRTIO_PCI_CAP_ISR_CFG,
					 IORESOURCE_IO | IORESOURCE_MEM,
					 &mdev->modern_bars);
	notify = virtio_pci_find_capability(pci_dev, VIRTIO_PCI_CAP_NOTIFY_CFG,
					    IORESOURCE_IO | IORESOURCE_MEM,
					    &mdev->modern_bars);
	if (!isr || !notify) {
		dev_err(&pci_dev->dev,
			"virtio_pci: missing capabilities %i/%i/%i\n",
			common, isr, notify);
		return -EINVAL;
	}

	err = dma_set_mask_and_coherent(&pci_dev->dev,
					mdev->dma_mask ?: DMA_BIT_MASK(64));
	if (err)
		err = dma_set_mask_and_coherent(&pci_dev->dev,
						DMA_BIT_MASK(32));
	if (err)
		dev_warn(&pci_dev->dev, "Failed to enable 64-bit or 32-bit DMA. Trying to continue, but this might not work.\n");

	/* Device capability is only mandatory for devices that have
	 * device-specific configuration.
	 */
	device = virtio_pci_find_capability(pci_dev, VIRTIO_PCI_CAP_DEVICE_CFG,
					    IORESOURCE_IO | IORESOURCE_MEM,
					    &mdev->modern_bars);

	err = pci_request_selected_regions(pci_dev, mdev->modern_bars,
					   "virtio-pci-modern");
	if (err)
		return err;

	err = -EINVAL;
	mdev->common = vp_modern_map_capability(mdev, common,
						sizeof(struct virtio_pci_common_cfg), 4,
						0, sizeof(struct virtio_pci_modern_common_cfg),
						&mdev->common_len, NULL);
	if (!mdev->common)
		goto err_map_common;
	mdev->isr = vp_modern_map_capability(mdev, isr, sizeof(u8), 1,
					     0, 1,
					     NULL, NULL);
	if (!mdev->isr)
		goto err_map_isr;

	/* Read notify_off_multiplier from config space. */
	pci_read_config_dword(pci_dev,
			      notify + offsetof(struct virtio_pci_notify_cap,
						notify_off_multiplier),
			      &mdev->notify_offset_multiplier);
	/* Read notify length and offset from config space. */
	pci_read_config_dword(pci_dev,
			      notify + offsetof(struct virtio_pci_notify_cap,
						cap.length),
			      &notify_length);

	pci_read_config_dword(pci_dev,
			      notify + offsetof(struct virtio_pci_notify_cap,
						cap.offset),
			      &notify_offset);

	/* We don't know how many VQs we'll map ahead of time.
	 * If notify length is small, map it all now.
	 * Otherwise, map each VQ individually later.
	 */
	if ((u64)notify_length + (notify_offset % PAGE_SIZE) <= PAGE_SIZE) {
		mdev->notify_base = vp_modern_map_capability(mdev, notify,
							     2, 2,
							     0, notify_length,
							     &mdev->notify_len,
							     &mdev->notify_pa);
		if (!mdev->notify_base)
			goto err_map_notify;
	} else {
		mdev->notify_map_cap = notify;
	}

	/* Again, we don't know how much we should map, but PAGE_SIZE
	 * is more than enough for all existing devices.
	 */
	if (device) {
		mdev->device = vp_modern_map_capability(mdev, device, 0, 4,
							0, PAGE_SIZE,
							&mdev->device_len,
							NULL);
		if (!mdev->device)
			goto err_map_device;
	}

	return 0;

err_map_device:
	if (mdev->notify_base)
		pci_iounmap(pci_dev, mdev->notify_base);
err_map_notify:
	pci_iounmap(pci_dev, mdev->isr);
err_map_isr:
	pci_iounmap(pci_dev, mdev->common);
err_map_common:
	pci_release_selected_regions(pci_dev, mdev->modern_bars);
	return err;
}
EXPORT_SYMBOL_GPL(vp_modern_probe);
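
/*
 * A minimal usage sketch (hypothetical driver, not part of this file):
 * a PCI driver embeds a struct virtio_pci_modern_device, fills in
 * @pci_dev, and calls vp_modern_probe() from its probe callback, undoing
 * it later with vp_modern_remove(). Everything named my_* below is an
 * illustrative assumption; only the vp_modern_* calls are this API.
 *
 *	struct my_dev {
 *		struct virtio_pci_modern_device mdev;
 *	};
 *
 *	static int my_probe(struct pci_dev *pci_dev,
 *			    const struct pci_device_id *id)
 *	{
 *		struct my_dev *d;
 *		int err;
 *
 *		err = pcim_enable_device(pci_dev);  // caller enables first
 *		if (err)
 *			return err;
 *
 *		d = kzalloc(sizeof(*d), GFP_KERNEL);
 *		if (!d)
 *			return -ENOMEM;
 *
 *		d->mdev.pci_dev = pci_dev;
 *		err = vp_modern_probe(&d->mdev);    // maps common/isr/notify
 *		if (err) {
 *			kfree(d);
 *			return err;
 *		}
 *		pci_set_drvdata(pci_dev, d);
 *		return 0;
 *	}
 */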

/*
 * vp_modern_remove - remove and clean up the modern virtio pci device
 * @mdev: the modern virtio-pci device
 */
void vp_modern_remove(struct virtio_pci_modern_device *mdev)
{
	struct pci_dev *pci_dev = mdev->pci_dev;

	if (mdev->device)
		pci_iounmap(pci_dev, mdev->device);
	if (mdev->notify_base)
		pci_iounmap(pci_dev, mdev->notify_base);
	pci_iounmap(pci_dev, mdev->isr);
	pci_iounmap(pci_dev, mdev->common);
	pci_release_selected_regions(pci_dev, mdev->modern_bars);
}
EXPORT_SYMBOL_GPL(vp_modern_remove);

/*
 * vp_modern_get_features - get features from the device
 * @mdev: the modern virtio-pci device
 *
 * Returns the features read from the device
 */
u64 vp_modern_get_features(struct virtio_pci_modern_device *mdev)
{
	struct virtio_pci_common_cfg __iomem *cfg = mdev->common;
	u64 features;

	vp_iowrite32(0, &cfg->device_feature_select);
	features = vp_ioread32(&cfg->device_feature);
	vp_iowrite32(1, &cfg->device_feature_select);
	features |= ((u64)vp_ioread32(&cfg->device_feature) << 32);

	return features;
}
EXPORT_SYMBOL_GPL(vp_modern_get_features);

/*
 * vp_modern_get_driver_features - get driver features from the device
 * @mdev: the modern virtio-pci device
 *
 * Returns the driver features read back from the device
 */
u64 vp_modern_get_driver_features(struct virtio_pci_modern_device *mdev)
{
	struct virtio_pci_common_cfg __iomem *cfg = mdev->common;
	u64 features;

	vp_iowrite32(0, &cfg->guest_feature_select);
	features = vp_ioread32(&cfg->guest_feature);
	vp_iowrite32(1, &cfg->guest_feature_select);
	features |= ((u64)vp_ioread32(&cfg->guest_feature) << 32);

	return features;
}
EXPORT_SYMBOL_GPL(vp_modern_get_driver_features);

/*
 * vp_modern_set_features - set features to the device
 * @mdev: the modern virtio-pci device
 * @features: the features to set
 */
void vp_modern_set_features(struct virtio_pci_modern_device *mdev,
			    u64 features)
{
	struct virtio_pci_common_cfg __iomem *cfg = mdev->common;

	vp_iowrite32(0, &cfg->guest_feature_select);
	vp_iowrite32((u32)features, &cfg->guest_feature);
	vp_iowrite32(1, &cfg->guest_feature_select);
	vp_iowrite32(features >> 32, &cfg->guest_feature);
}
EXPORT_SYMBOL_GPL(vp_modern_set_features);
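
/*
 * A hedged sketch of 64-bit feature negotiation with the three helpers
 * above (illustrative only; the feature bit chosen here is an example):
 *
 *	u64 features = vp_modern_get_features(mdev);
 *
 *	// Keep only the bits the driver understands.
 *	features &= BIT_ULL(VIRTIO_F_VERSION_1);
 *	vp_modern_set_features(mdev, features);
 *
 *	// Read back what the device actually latched.
 *	if (vp_modern_get_driver_features(mdev) != features)
 *		;// negotiation failed; the driver should back off
 */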

/*
 * vp_modern_generation - get the device generation
 * @mdev: the modern virtio-pci device
 *
 * Returns the generation read from the device
 */
u32 vp_modern_generation(struct virtio_pci_modern_device *mdev)
{
	struct virtio_pci_common_cfg __iomem *cfg = mdev->common;

	return vp_ioread8(&cfg->config_generation);
}
EXPORT_SYMBOL_GPL(vp_modern_generation);

/*
 * vp_modern_get_status - get the device status
 * @mdev: the modern virtio-pci device
 *
 * Returns the status read from the device
 */
u8 vp_modern_get_status(struct virtio_pci_modern_device *mdev)
{
	struct virtio_pci_common_cfg __iomem *cfg = mdev->common;

	return vp_ioread8(&cfg->device_status);
}
EXPORT_SYMBOL_GPL(vp_modern_get_status);

/*
 * vp_modern_set_status - set the device status
 * @mdev: the modern virtio-pci device
 * @status: the status to set
 */
void vp_modern_set_status(struct virtio_pci_modern_device *mdev,
			  u8 status)
{
	struct virtio_pci_common_cfg __iomem *cfg = mdev->common;

	/*
	 * Per memory-barriers.txt, wmb() is not needed to guarantee
	 * that the cache coherent memory writes have completed
	 * before writing to the MMIO region.
	 */
	vp_iowrite8(status, &cfg->device_status);
}
EXPORT_SYMBOL_GPL(vp_modern_set_status);
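
/*
 * A sketch of the virtio status handshake these accessors support, per
 * the virtio spec's device initialization sequence (illustrative; error
 * handling and the feature/queue steps are elided):
 *
 *	vp_modern_set_status(mdev, 0);			// reset
 *	while (vp_modern_get_status(mdev))		// wait for reset
 *		msleep(1);
 *	vp_modern_set_status(mdev, VIRTIO_CONFIG_S_ACKNOWLEDGE);
 *	vp_modern_set_status(mdev, VIRTIO_CONFIG_S_ACKNOWLEDGE |
 *				   VIRTIO_CONFIG_S_DRIVER);
 *	// ... negotiate features, set FEATURES_OK, set up queues ...
 *	vp_modern_set_status(mdev, vp_modern_get_status(mdev) |
 *				   VIRTIO_CONFIG_S_DRIVER_OK);
 */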

/*
 * vp_modern_get_queue_reset - get the queue reset status
 * @mdev: the modern virtio-pci device
 * @index: queue index
 */
int vp_modern_get_queue_reset(struct virtio_pci_modern_device *mdev, u16 index)
{
	struct virtio_pci_modern_common_cfg __iomem *cfg;

	cfg = (struct virtio_pci_modern_common_cfg __iomem *)mdev->common;

	vp_iowrite16(index, &cfg->cfg.queue_select);
	return vp_ioread16(&cfg->queue_reset);
}
EXPORT_SYMBOL_GPL(vp_modern_get_queue_reset);

/*
 * vp_modern_set_queue_reset - reset the queue
 * @mdev: the modern virtio-pci device
 * @index: queue index
 */
void vp_modern_set_queue_reset(struct virtio_pci_modern_device *mdev, u16 index)
{
	struct virtio_pci_modern_common_cfg __iomem *cfg;

	cfg = (struct virtio_pci_modern_common_cfg __iomem *)mdev->common;

	vp_iowrite16(index, &cfg->cfg.queue_select);
	vp_iowrite16(1, &cfg->queue_reset);

	while (vp_ioread16(&cfg->queue_reset))
		msleep(1);

	while (vp_ioread16(&cfg->cfg.queue_enable))
		msleep(1);
}
EXPORT_SYMBOL_GPL(vp_modern_set_queue_reset);
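
/*
 * Sketch of resetting and re-enabling one virtqueue with the two helpers
 * above (assumes VIRTIO_F_RING_RESET was negotiated; illustrative only,
 * and the addresses come from the driver's own ring allocation):
 *
 *	vp_modern_set_queue_reset(mdev, idx);	// blocks until reset done
 *	// ... re-allocate or re-program the ring here ...
 *	vp_modern_queue_address(mdev, idx, desc_pa, driver_pa, device_pa);
 *	vp_modern_set_queue_enable(mdev, idx, true);
 */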

/*
 * vp_modern_queue_vector - set the MSI-X vector for a specific virtqueue
 * @mdev: the modern virtio-pci device
 * @index: queue index
 * @vector: the config vector
 *
 * Returns the config vector read from the device
 */
u16 vp_modern_queue_vector(struct virtio_pci_modern_device *mdev,
			   u16 index, u16 vector)
{
	struct virtio_pci_common_cfg __iomem *cfg = mdev->common;

	vp_iowrite16(index, &cfg->queue_select);
	vp_iowrite16(vector, &cfg->queue_msix_vector);
	/* Flush the write out to the device */
	return vp_ioread16(&cfg->queue_msix_vector);
}
EXPORT_SYMBOL_GPL(vp_modern_queue_vector);

/*
 * vp_modern_config_vector - set the vector for config interrupt
 * @mdev: the modern virtio-pci device
 * @vector: the config vector
 *
 * Returns the config vector read from the device
 */
u16 vp_modern_config_vector(struct virtio_pci_modern_device *mdev,
			    u16 vector)
{
	struct virtio_pci_common_cfg __iomem *cfg = mdev->common;

	/* Setup the vector used for configuration events */
	vp_iowrite16(vector, &cfg->msix_config);
	/* Verify we had enough resources to assign the vector */
	/* Will also flush the write out to the device */
	return vp_ioread16(&cfg->msix_config);
}
EXPORT_SYMBOL_GPL(vp_modern_config_vector);

/*
 * vp_modern_queue_address - set the virtqueue address
 * @mdev: the modern virtio-pci device
 * @index: the queue index
 * @desc_addr: address of the descriptor area
 * @driver_addr: address of the driver area
 * @device_addr: address of the device area
 */
void vp_modern_queue_address(struct virtio_pci_modern_device *mdev,
			     u16 index, u64 desc_addr, u64 driver_addr,
			     u64 device_addr)
{
	struct virtio_pci_common_cfg __iomem *cfg = mdev->common;

	vp_iowrite16(index, &cfg->queue_select);

	vp_iowrite64_twopart(desc_addr, &cfg->queue_desc_lo,
			     &cfg->queue_desc_hi);
	vp_iowrite64_twopart(driver_addr, &cfg->queue_avail_lo,
			     &cfg->queue_avail_hi);
	vp_iowrite64_twopart(device_addr, &cfg->queue_used_lo,
			     &cfg->queue_used_hi);
}
EXPORT_SYMBOL_GPL(vp_modern_queue_address);
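
/*
 * Putting the pieces together: a hedged sketch of bringing up one
 * virtqueue with these helpers (illustrative; the DMA addresses are
 * assumed to come from a prior dma_alloc_coherent() of the ring):
 *
 *	u16 num = min(vp_modern_get_queue_size(mdev, idx), wanted);
 *
 *	vp_modern_set_queue_size(mdev, idx, num);
 *	vp_modern_queue_address(mdev, idx, desc_pa, driver_pa, device_pa);
 *	if (vp_modern_queue_vector(mdev, idx, msix_vec) != msix_vec)
 *		;// device rejected the vector (VIRTIO_MSI_NO_VECTOR)
 *	vp_modern_set_queue_enable(mdev, idx, true);
 */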

/*
 * vp_modern_set_queue_enable - enable/disable a virtqueue
 * @mdev: the modern virtio-pci device
 * @index: the queue index
 * @enable: whether to enable the virtqueue
 */
void vp_modern_set_queue_enable(struct virtio_pci_modern_device *mdev,
				u16 index, bool enable)
{
	vp_iowrite16(index, &mdev->common->queue_select);
	vp_iowrite16(enable, &mdev->common->queue_enable);
}
EXPORT_SYMBOL_GPL(vp_modern_set_queue_enable);

/*
 * vp_modern_get_queue_enable - query whether a virtqueue is enabled
 * @mdev: the modern virtio-pci device
 * @index: the queue index
 *
 * Returns whether the virtqueue is enabled or not
 */
bool vp_modern_get_queue_enable(struct virtio_pci_modern_device *mdev,
				u16 index)
{
	vp_iowrite16(index, &mdev->common->queue_select);

	return vp_ioread16(&mdev->common->queue_enable);
}
EXPORT_SYMBOL_GPL(vp_modern_get_queue_enable);

/*
 * vp_modern_set_queue_size - set the size of a virtqueue
 * @mdev: the modern virtio-pci device
 * @index: the queue index
 * @size: the size of the virtqueue
 */
void vp_modern_set_queue_size(struct virtio_pci_modern_device *mdev,
			      u16 index, u16 size)
{
	vp_iowrite16(index, &mdev->common->queue_select);
	vp_iowrite16(size, &mdev->common->queue_size);
}
EXPORT_SYMBOL_GPL(vp_modern_set_queue_size);

/*
 * vp_modern_get_queue_size - get the size of a virtqueue
 * @mdev: the modern virtio-pci device
 * @index: the queue index
 *
 * Returns the size of the virtqueue
 */
u16 vp_modern_get_queue_size(struct virtio_pci_modern_device *mdev,
			     u16 index)
{
	vp_iowrite16(index, &mdev->common->queue_select);

	return vp_ioread16(&mdev->common->queue_size);
}
EXPORT_SYMBOL_GPL(vp_modern_get_queue_size);

/*
 * vp_modern_get_num_queues - get the number of virtqueues
 * @mdev: the modern virtio-pci device
 *
 * Returns the number of virtqueues
 */
u16 vp_modern_get_num_queues(struct virtio_pci_modern_device *mdev)
{
	return vp_ioread16(&mdev->common->num_queues);
}
EXPORT_SYMBOL_GPL(vp_modern_get_num_queues);

/*
 * vp_modern_get_queue_notify_off - get notification offset for a virtqueue
 * @mdev: the modern virtio-pci device
 * @index: the queue index
 *
 * Returns the notification offset for a virtqueue
 */
static u16 vp_modern_get_queue_notify_off(struct virtio_pci_modern_device *mdev,
					  u16 index)
{
	vp_iowrite16(index, &mdev->common->queue_select);

	return vp_ioread16(&mdev->common->queue_notify_off);
}

/*
 * vp_modern_map_vq_notify - map the notification area for a
 * specific virtqueue
 * @mdev: the modern virtio-pci device
 * @index: the queue index
 * @pa: the pointer to the physical address of the notify area
 *
 * Returns the address of the notification area
 */
void __iomem *vp_modern_map_vq_notify(struct virtio_pci_modern_device *mdev,
				      u16 index, resource_size_t *pa)
{
	u16 off = vp_modern_get_queue_notify_off(mdev, index);

	if (mdev->notify_base) {
		/* offset should not wrap */
		if ((u64)off * mdev->notify_offset_multiplier + 2
		    > mdev->notify_len) {
			dev_warn(&mdev->pci_dev->dev,
				 "bad notification offset %u (x %u) for queue %u > %zd\n",
				 off, mdev->notify_offset_multiplier,
				 index, mdev->notify_len);
			return NULL;
		}
		if (pa)
			*pa = mdev->notify_pa +
			      off * mdev->notify_offset_multiplier;
		return mdev->notify_base + off * mdev->notify_offset_multiplier;
	} else {
		return vp_modern_map_capability(mdev,
						mdev->notify_map_cap, 2, 2,
						off * mdev->notify_offset_multiplier, 2,
						NULL, pa);
	}
}
EXPORT_SYMBOL_GPL(vp_modern_map_vq_notify);
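
/*
 * A minimal kick sketch using the mapped notify area (illustrative;
 * assumes VIRTIO_F_NOTIFICATION_DATA was not negotiated, in which case
 * the driver notifies by writing the queue index to the notify address):
 *
 *	void __iomem *notify = vp_modern_map_vq_notify(mdev, idx, NULL);
 *
 *	if (notify)
 *		vp_iowrite16(idx, notify);	// tell the device to poll idx
 */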

MODULE_VERSION("0.1");
MODULE_DESCRIPTION("Modern Virtio PCI Device");
MODULE_AUTHOR("Jason Wang <jasowang@redhat.com>");
MODULE_LICENSE("GPL");