1 | // SPDX-License-Identifier: GPL-2.0-or-later |
2 | /* |
3 | * IBM PowerPC Virtual I/O Infrastructure Support. |
4 | * |
5 | * Copyright (c) 2003,2008 IBM Corp. |
6 | * Dave Engebretsen engebret@us.ibm.com |
7 | * Santiago Leon santil@us.ibm.com |
8 | * Hollis Blanchard <hollisb@us.ibm.com> |
9 | * Stephen Rothwell |
10 | * Robert Jennings <rcjenn@us.ibm.com> |
11 | */ |
12 | |
13 | #include <linux/cpu.h> |
14 | #include <linux/types.h> |
15 | #include <linux/delay.h> |
16 | #include <linux/stat.h> |
17 | #include <linux/device.h> |
18 | #include <linux/init.h> |
19 | #include <linux/slab.h> |
20 | #include <linux/console.h> |
21 | #include <linux/export.h> |
22 | #include <linux/mm.h> |
23 | #include <linux/dma-map-ops.h> |
24 | #include <linux/kobject.h> |
25 | #include <linux/kexec.h> |
26 | #include <linux/of_irq.h> |
27 | |
28 | #include <asm/iommu.h> |
29 | #include <asm/dma.h> |
30 | #include <asm/vio.h> |
31 | #include <asm/prom.h> |
32 | #include <asm/firmware.h> |
33 | #include <asm/tce.h> |
34 | #include <asm/page.h> |
35 | #include <asm/hvcall.h> |
36 | #include <asm/machdep.h> |
37 | |
38 | static struct vio_dev vio_bus_device = { /* fake "parent" device */ |
39 | .name = "vio" , |
40 | .type = "" , |
41 | .dev.init_name = "vio" , |
42 | .dev.bus = &vio_bus_type, |
43 | }; |
44 | |
45 | #ifdef CONFIG_PPC_SMLPAR |
46 | /** |
47 | * vio_cmo_pool - A pool of IO memory for CMO use |
48 | * |
49 | * @size: The size of the pool in bytes |
50 | * @free: The amount of free memory in the pool |
51 | */ |
52 | struct vio_cmo_pool { |
53 | size_t size; |
54 | size_t free; |
55 | }; |
56 | |
57 | /* How many ms to delay queued balance work */ |
58 | #define VIO_CMO_BALANCE_DELAY 100 |
59 | |
/* Portion out IO memory to CMO devices by this chunk size (128KB) */
61 | #define VIO_CMO_BALANCE_CHUNK 131072 |
62 | |
63 | /** |
64 | * vio_cmo_dev_entry - A device that is CMO-enabled and requires entitlement |
65 | * |
66 | * @vio_dev: struct vio_dev pointer |
67 | * @list: pointer to other devices on bus that are being tracked |
68 | */ |
69 | struct vio_cmo_dev_entry { |
70 | struct vio_dev *viodev; |
71 | struct list_head list; |
72 | }; |
73 | |
74 | /** |
75 | * vio_cmo - VIO bus accounting structure for CMO entitlement |
76 | * |
77 | * @lock: spinlock for entire structure |
78 | * @balance_q: work queue for balancing system entitlement |
79 | * @device_list: list of CMO-enabled devices requiring entitlement |
80 | * @entitled: total system entitlement in bytes |
81 | * @reserve: pool of memory from which devices reserve entitlement, incl. spare |
82 | * @excess: pool of excess entitlement not needed for device reserves or spare |
83 | * @spare: IO memory for device hotplug functionality |
84 | * @min: minimum necessary for system operation |
85 | * @desired: desired memory for system operation |
86 | * @curr: bytes currently allocated |
87 | * @high: high water mark for IO data usage |
88 | */ |
89 | static struct vio_cmo { |
90 | spinlock_t lock; |
91 | struct delayed_work balance_q; |
92 | struct list_head device_list; |
93 | size_t entitled; |
94 | struct vio_cmo_pool reserve; |
95 | struct vio_cmo_pool excess; |
96 | size_t spare; |
97 | size_t min; |
98 | size_t desired; |
99 | size_t curr; |
100 | size_t high; |
101 | } vio_cmo; |
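
/*
 * Accounting sketch (illustrative): after vio_cmo_balance() runs,
 * vio_cmo.entitled == vio_cmo.reserve.size + vio_cmo.excess.size.
 * The reserve pool holds the spare (VIO_CMO_MIN_ENT) plus each
 * device's entitlement; whatever is left over forms the excess pool,
 * which devices may draw from once their reserve is exhausted.
 */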
102 | |
103 | /** |
 * vio_cmo_num_OF_devs - Count the number of OF devices that have DMA windows
105 | */ |
106 | static int vio_cmo_num_OF_devs(void) |
107 | { |
108 | struct device_node *node_vroot; |
109 | int count = 0; |
110 | |
111 | /* |
112 | * Count the number of vdevice entries with an |
113 | * ibm,my-dma-window OF property |
114 | */ |
115 | node_vroot = of_find_node_by_name(NULL, "vdevice" ); |
116 | if (node_vroot) { |
117 | struct device_node *of_node; |
118 | struct property *prop; |
119 | |
120 | for_each_child_of_node(node_vroot, of_node) { |
121 | prop = of_find_property(of_node, "ibm,my-dma-window" , |
122 | NULL); |
123 | if (prop) |
124 | count++; |
125 | } |
126 | } |
127 | of_node_put(node_vroot); |
128 | return count; |
129 | } |
130 | |
131 | /** |
 * vio_cmo_alloc - allocate IO memory for CMO-enabled devices
133 | * |
134 | * @viodev: VIO device requesting IO memory |
135 | * @size: size of allocation requested |
136 | * |
137 | * Allocations come from memory reserved for the devices and any excess |
138 | * IO memory available to all devices. The spare pool used to service |
139 | * hotplug must be equal to %VIO_CMO_MIN_ENT for the excess pool to be |
140 | * made available. |
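 *
 * As a worked example (numbers are illustrative): a device entitled to
 * 64KB with 48KB already allocated has reserve_free = 16KB; a 20KB
 * request then consumes that 16KB and draws the remaining 4KB from the
 * excess pool, provided the spare pool is fully funded.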
141 | * |
142 | * Return codes: |
143 | * 0 for successful allocation and -ENOMEM for a failure |
144 | */ |
145 | static inline int vio_cmo_alloc(struct vio_dev *viodev, size_t size) |
146 | { |
147 | unsigned long flags; |
148 | size_t reserve_free = 0; |
149 | size_t excess_free = 0; |
150 | int ret = -ENOMEM; |
151 | |
152 | spin_lock_irqsave(&vio_cmo.lock, flags); |
153 | |
154 | /* Determine the amount of free entitlement available in reserve */ |
155 | if (viodev->cmo.entitled > viodev->cmo.allocated) |
156 | reserve_free = viodev->cmo.entitled - viodev->cmo.allocated; |
157 | |
	/* If the spare pool is not fully funded, the excess pool cannot be used. */
159 | if (vio_cmo.spare >= VIO_CMO_MIN_ENT) |
160 | excess_free = vio_cmo.excess.free; |
161 | |
162 | /* The request can be satisfied */ |
163 | if ((reserve_free + excess_free) >= size) { |
164 | vio_cmo.curr += size; |
165 | if (vio_cmo.curr > vio_cmo.high) |
166 | vio_cmo.high = vio_cmo.curr; |
167 | viodev->cmo.allocated += size; |
168 | size -= min(reserve_free, size); |
169 | vio_cmo.excess.free -= size; |
170 | ret = 0; |
171 | } |
172 | |
173 | spin_unlock_irqrestore(&vio_cmo.lock, flags); |
174 | return ret; |
175 | } |
176 | |
177 | /** |
 * vio_cmo_dealloc - deallocate IO memory from CMO-enabled devices
179 | * @viodev: VIO device freeing IO memory |
180 | * @size: size of deallocation |
181 | * |
182 | * IO memory is freed by the device back to the correct memory pools. |
183 | * The spare pool is replenished first from either memory pool, then |
184 | * the reserve pool is used to reduce device entitlement, the excess |
185 | * pool is used to increase the reserve pool toward the desired entitlement |
186 | * target, and then the remaining memory is returned to the pools. |
187 | * |
188 | */ |
189 | static inline void vio_cmo_dealloc(struct vio_dev *viodev, size_t size) |
190 | { |
191 | unsigned long flags; |
192 | size_t spare_needed = 0; |
193 | size_t excess_freed = 0; |
194 | size_t reserve_freed = size; |
195 | size_t tmp; |
196 | int balance = 0; |
197 | |
198 | spin_lock_irqsave(&vio_cmo.lock, flags); |
199 | vio_cmo.curr -= size; |
200 | |
201 | /* Amount of memory freed from the excess pool */ |
202 | if (viodev->cmo.allocated > viodev->cmo.entitled) { |
203 | excess_freed = min(reserve_freed, (viodev->cmo.allocated - |
204 | viodev->cmo.entitled)); |
205 | reserve_freed -= excess_freed; |
206 | } |
207 | |
208 | /* Remove allocation from device */ |
209 | viodev->cmo.allocated -= (reserve_freed + excess_freed); |
210 | |
211 | /* Spare is a subset of the reserve pool, replenish it first. */ |
212 | spare_needed = VIO_CMO_MIN_ENT - vio_cmo.spare; |
213 | |
214 | /* |
215 | * Replenish the spare in the reserve pool from the excess pool. |
216 | * This moves entitlement into the reserve pool. |
217 | */ |
218 | if (spare_needed && excess_freed) { |
219 | tmp = min(excess_freed, spare_needed); |
220 | vio_cmo.excess.size -= tmp; |
221 | vio_cmo.reserve.size += tmp; |
222 | vio_cmo.spare += tmp; |
223 | excess_freed -= tmp; |
224 | spare_needed -= tmp; |
225 | balance = 1; |
226 | } |
227 | |
228 | /* |
229 | * Replenish the spare in the reserve pool from the reserve pool. |
230 | * This removes entitlement from the device down to VIO_CMO_MIN_ENT, |
231 | * if needed, and gives it to the spare pool. The amount of used |
232 | * memory in this pool does not change. |
233 | */ |
234 | if (spare_needed && reserve_freed) { |
235 | tmp = min3(spare_needed, reserve_freed, (viodev->cmo.entitled - VIO_CMO_MIN_ENT)); |
236 | |
237 | vio_cmo.spare += tmp; |
238 | viodev->cmo.entitled -= tmp; |
239 | reserve_freed -= tmp; |
240 | spare_needed -= tmp; |
241 | balance = 1; |
242 | } |
243 | |
244 | /* |
245 | * Increase the reserve pool until the desired allocation is met. |
246 | * Move an allocation freed from the excess pool into the reserve |
247 | * pool and schedule a balance operation. |
248 | */ |
249 | if (excess_freed && (vio_cmo.desired > vio_cmo.reserve.size)) { |
250 | tmp = min(excess_freed, (vio_cmo.desired - vio_cmo.reserve.size)); |
251 | |
252 | vio_cmo.excess.size -= tmp; |
253 | vio_cmo.reserve.size += tmp; |
254 | excess_freed -= tmp; |
255 | balance = 1; |
256 | } |
257 | |
258 | /* Return memory from the excess pool to that pool */ |
259 | if (excess_freed) |
260 | vio_cmo.excess.free += excess_freed; |
261 | |
262 | if (balance) |
263 | schedule_delayed_work(&vio_cmo.balance_q, VIO_CMO_BALANCE_DELAY); |
264 | spin_unlock_irqrestore(&vio_cmo.lock, flags); |
265 | } |
266 | |
267 | /** |
268 | * vio_cmo_entitlement_update - Manage system entitlement changes |
269 | * |
270 | * @new_entitlement: new system entitlement to attempt to accommodate |
271 | * |
272 | * Increases in entitlement will be used to fulfill the spare entitlement |
273 | * and the rest is given to the excess pool. Decreases, if they are |
 * possible, come from the excess pool and from unused device entitlement.
275 | * |
276 | * Returns: 0 on success, -ENOMEM when change can not be made |
277 | */ |
278 | int vio_cmo_entitlement_update(size_t new_entitlement) |
279 | { |
280 | struct vio_dev *viodev; |
281 | struct vio_cmo_dev_entry *dev_ent; |
282 | unsigned long flags; |
283 | size_t avail, delta, tmp; |
284 | |
285 | spin_lock_irqsave(&vio_cmo.lock, flags); |
286 | |
287 | /* Entitlement increases */ |
288 | if (new_entitlement > vio_cmo.entitled) { |
289 | delta = new_entitlement - vio_cmo.entitled; |
290 | |
291 | /* Fulfill spare allocation */ |
292 | if (vio_cmo.spare < VIO_CMO_MIN_ENT) { |
293 | tmp = min(delta, (VIO_CMO_MIN_ENT - vio_cmo.spare)); |
294 | vio_cmo.spare += tmp; |
295 | vio_cmo.reserve.size += tmp; |
296 | delta -= tmp; |
297 | } |
298 | |
299 | /* Remaining new allocation goes to the excess pool */ |
300 | vio_cmo.entitled += delta; |
301 | vio_cmo.excess.size += delta; |
302 | vio_cmo.excess.free += delta; |
303 | |
304 | goto out; |
305 | } |
306 | |
307 | /* Entitlement decreases */ |
308 | delta = vio_cmo.entitled - new_entitlement; |
309 | avail = vio_cmo.excess.free; |
310 | |
311 | /* |
312 | * Need to check how much unused entitlement each device can |
313 | * sacrifice to fulfill entitlement change. |
314 | */ |
315 | list_for_each_entry(dev_ent, &vio_cmo.device_list, list) { |
316 | if (avail >= delta) |
317 | break; |
318 | |
319 | viodev = dev_ent->viodev; |
320 | if ((viodev->cmo.entitled > viodev->cmo.allocated) && |
321 | (viodev->cmo.entitled > VIO_CMO_MIN_ENT)) |
322 | avail += viodev->cmo.entitled - |
323 | max_t(size_t, viodev->cmo.allocated, |
324 | VIO_CMO_MIN_ENT); |
325 | } |
326 | |
327 | if (delta <= avail) { |
328 | vio_cmo.entitled -= delta; |
329 | |
330 | /* Take entitlement from the excess pool first */ |
331 | tmp = min(vio_cmo.excess.free, delta); |
332 | vio_cmo.excess.size -= tmp; |
333 | vio_cmo.excess.free -= tmp; |
334 | delta -= tmp; |
335 | |
336 | /* |
337 | * Remove all but VIO_CMO_MIN_ENT bytes from devices |
338 | * until entitlement change is served |
339 | */ |
340 | list_for_each_entry(dev_ent, &vio_cmo.device_list, list) { |
341 | if (!delta) |
342 | break; |
343 | |
344 | viodev = dev_ent->viodev; |
345 | tmp = 0; |
346 | if ((viodev->cmo.entitled > viodev->cmo.allocated) && |
347 | (viodev->cmo.entitled > VIO_CMO_MIN_ENT)) |
348 | tmp = viodev->cmo.entitled - |
349 | max_t(size_t, viodev->cmo.allocated, |
350 | VIO_CMO_MIN_ENT); |
351 | viodev->cmo.entitled -= min(tmp, delta); |
352 | delta -= min(tmp, delta); |
353 | } |
354 | } else { |
355 | spin_unlock_irqrestore(&vio_cmo.lock, flags); |
356 | return -ENOMEM; |
357 | } |
358 | |
359 | out: |
360 | schedule_delayed_work(&vio_cmo.balance_q, 0); |
361 | spin_unlock_irqrestore(&vio_cmo.lock, flags); |
362 | return 0; |
363 | } |
364 | |
365 | /** |
366 | * vio_cmo_balance - Balance entitlement among devices |
367 | * |
368 | * @work: work queue structure for this operation |
369 | * |
370 | * Any system entitlement above the minimum needed for devices, or |
371 | * already allocated to devices, can be distributed to the devices. |
372 | * The list of devices is iterated through to recalculate the desired |
373 | * entitlement level and to determine how much entitlement above the |
374 | * minimum entitlement is allocated to devices. |
375 | * |
376 | * Small chunks of the available entitlement are given to devices until |
377 | * their requirements are fulfilled or there is no entitlement left to give. |
378 | * Upon completion sizes of the reserve and excess pools are calculated. |
379 | * |
380 | * The system minimum entitlement level is also recalculated here. |
381 | * Entitlement will be reserved for devices even after vio_bus_remove to |
382 | * accommodate reloading the driver. The OF tree is walked to count the |
383 | * number of devices present and this will remove entitlement for devices |
384 | * that have actually left the system after having vio_bus_remove called. |
385 | */ |
386 | static void vio_cmo_balance(struct work_struct *work) |
387 | { |
388 | struct vio_cmo *cmo; |
389 | struct vio_dev *viodev; |
390 | struct vio_cmo_dev_entry *dev_ent; |
391 | unsigned long flags; |
392 | size_t avail = 0, level, chunk, need; |
393 | int devcount = 0, fulfilled; |
394 | |
395 | cmo = container_of(work, struct vio_cmo, balance_q.work); |
396 | |
397 | spin_lock_irqsave(&vio_cmo.lock, flags); |
398 | |
399 | /* Calculate minimum entitlement and fulfill spare */ |
400 | cmo->min = vio_cmo_num_OF_devs() * VIO_CMO_MIN_ENT; |
401 | BUG_ON(cmo->min > cmo->entitled); |
402 | cmo->spare = min_t(size_t, VIO_CMO_MIN_ENT, (cmo->entitled - cmo->min)); |
403 | cmo->min += cmo->spare; |
404 | cmo->desired = cmo->min; |
405 | |
406 | /* |
407 | * Determine how much entitlement is available and reset device |
408 | * entitlements |
409 | */ |
410 | avail = cmo->entitled - cmo->spare; |
411 | list_for_each_entry(dev_ent, &vio_cmo.device_list, list) { |
412 | viodev = dev_ent->viodev; |
413 | devcount++; |
414 | viodev->cmo.entitled = VIO_CMO_MIN_ENT; |
415 | cmo->desired += (viodev->cmo.desired - VIO_CMO_MIN_ENT); |
416 | avail -= max_t(size_t, viodev->cmo.allocated, VIO_CMO_MIN_ENT); |
417 | } |
418 | |
419 | /* |
420 | * Having provided each device with the minimum entitlement, loop |
421 | * over the devices portioning out the remaining entitlement |
422 | * until there is nothing left. |
423 | */ |
424 | level = VIO_CMO_MIN_ENT; |
425 | while (avail) { |
426 | fulfilled = 0; |
427 | list_for_each_entry(dev_ent, &vio_cmo.device_list, list) { |
428 | viodev = dev_ent->viodev; |
429 | |
430 | if (viodev->cmo.desired <= level) { |
431 | fulfilled++; |
432 | continue; |
433 | } |
434 | |
435 | /* |
436 | * Give the device up to VIO_CMO_BALANCE_CHUNK |
437 | * bytes of entitlement, but do not exceed the |
438 | * desired level of entitlement for the device. |
439 | */ |
440 | chunk = min_t(size_t, avail, VIO_CMO_BALANCE_CHUNK); |
441 | chunk = min(chunk, (viodev->cmo.desired - |
442 | viodev->cmo.entitled)); |
443 | viodev->cmo.entitled += chunk; |
444 | |
445 | /* |
446 | * If the memory for this entitlement increase was |
447 | * already allocated to the device it does not come |
448 | * from the available pool being portioned out. |
449 | */ |
			need = max(viodev->cmo.allocated, viodev->cmo.entitled) -
			       max(viodev->cmo.allocated, level);
			avail -= need;
		}
455 | if (fulfilled == devcount) |
456 | break; |
457 | level += VIO_CMO_BALANCE_CHUNK; |
458 | } |
459 | |
460 | /* Calculate new reserve and excess pool sizes */ |
461 | cmo->reserve.size = cmo->min; |
462 | cmo->excess.free = 0; |
463 | cmo->excess.size = 0; |
464 | need = 0; |
465 | list_for_each_entry(dev_ent, &vio_cmo.device_list, list) { |
466 | viodev = dev_ent->viodev; |
467 | /* Calculated reserve size above the minimum entitlement */ |
468 | if (viodev->cmo.entitled) |
469 | cmo->reserve.size += (viodev->cmo.entitled - |
470 | VIO_CMO_MIN_ENT); |
471 | /* Calculated used excess entitlement */ |
472 | if (viodev->cmo.allocated > viodev->cmo.entitled) |
473 | need += viodev->cmo.allocated - viodev->cmo.entitled; |
474 | } |
475 | cmo->excess.size = cmo->entitled - cmo->reserve.size; |
476 | cmo->excess.free = cmo->excess.size - need; |
477 | |
478 | cancel_delayed_work(to_delayed_work(work)); |
479 | spin_unlock_irqrestore(&vio_cmo.lock, flags); |
480 | } |
481 | |
482 | static void *vio_dma_iommu_alloc_coherent(struct device *dev, size_t size, |
483 | dma_addr_t *dma_handle, gfp_t flag, |
484 | unsigned long attrs) |
485 | { |
486 | struct vio_dev *viodev = to_vio_dev(dev); |
487 | void *ret; |
488 | |
489 | if (vio_cmo_alloc(viodev, roundup(size, PAGE_SIZE))) { |
490 | atomic_inc(&viodev->cmo.allocs_failed); |
491 | return NULL; |
492 | } |
493 | |
494 | ret = iommu_alloc_coherent(dev, get_iommu_table_base(dev), size, |
495 | dma_handle, dev->coherent_dma_mask, flag, |
496 | dev_to_node(dev)); |
497 | if (unlikely(ret == NULL)) { |
498 | vio_cmo_dealloc(viodev, roundup(size, PAGE_SIZE)); |
499 | atomic_inc(&viodev->cmo.allocs_failed); |
500 | } |
501 | |
502 | return ret; |
503 | } |
504 | |
505 | static void vio_dma_iommu_free_coherent(struct device *dev, size_t size, |
506 | void *vaddr, dma_addr_t dma_handle, |
507 | unsigned long attrs) |
508 | { |
509 | struct vio_dev *viodev = to_vio_dev(dev); |
510 | |
511 | iommu_free_coherent(get_iommu_table_base(dev), size, vaddr, dma_handle); |
512 | vio_cmo_dealloc(viodev, roundup(size, PAGE_SIZE)); |
513 | } |
514 | |
515 | static dma_addr_t vio_dma_iommu_map_page(struct device *dev, struct page *page, |
516 | unsigned long offset, size_t size, |
517 | enum dma_data_direction direction, |
518 | unsigned long attrs) |
519 | { |
520 | struct vio_dev *viodev = to_vio_dev(dev); |
521 | struct iommu_table *tbl = get_iommu_table_base(dev); |
522 | dma_addr_t ret = DMA_MAPPING_ERROR; |
523 | |
524 | if (vio_cmo_alloc(viodev, roundup(size, IOMMU_PAGE_SIZE(tbl)))) |
525 | goto out_fail; |
526 | ret = iommu_map_page(dev, tbl, page, offset, size, dma_get_mask(dev), |
527 | direction, attrs); |
528 | if (unlikely(ret == DMA_MAPPING_ERROR)) |
529 | goto out_deallocate; |
530 | return ret; |
531 | |
532 | out_deallocate: |
533 | vio_cmo_dealloc(viodev, roundup(size, IOMMU_PAGE_SIZE(tbl))); |
534 | out_fail: |
535 | atomic_inc(&viodev->cmo.allocs_failed); |
536 | return DMA_MAPPING_ERROR; |
537 | } |
538 | |
539 | static void vio_dma_iommu_unmap_page(struct device *dev, dma_addr_t dma_handle, |
540 | size_t size, |
541 | enum dma_data_direction direction, |
542 | unsigned long attrs) |
543 | { |
544 | struct vio_dev *viodev = to_vio_dev(dev); |
545 | struct iommu_table *tbl = get_iommu_table_base(dev); |
546 | |
547 | iommu_unmap_page(tbl, dma_handle, size, direction, attrs); |
548 | vio_cmo_dealloc(viodev, roundup(size, IOMMU_PAGE_SIZE(tbl))); |
549 | } |
550 | |
551 | static int vio_dma_iommu_map_sg(struct device *dev, struct scatterlist *sglist, |
552 | int nelems, enum dma_data_direction direction, |
553 | unsigned long attrs) |
554 | { |
555 | struct vio_dev *viodev = to_vio_dev(dev); |
556 | struct iommu_table *tbl = get_iommu_table_base(dev); |
557 | struct scatterlist *sgl; |
558 | int ret, count; |
559 | size_t alloc_size = 0; |
560 | |
561 | for_each_sg(sglist, sgl, nelems, count) |
562 | alloc_size += roundup(sgl->length, IOMMU_PAGE_SIZE(tbl)); |
563 | |
564 | ret = vio_cmo_alloc(viodev, alloc_size); |
565 | if (ret) |
566 | goto out_fail; |
567 | ret = ppc_iommu_map_sg(dev, tbl, sglist, nelems, dma_get_mask(dev), |
568 | direction, attrs); |
569 | if (unlikely(!ret)) |
570 | goto out_deallocate; |
571 | |
572 | for_each_sg(sglist, sgl, ret, count) |
573 | alloc_size -= roundup(sgl->dma_length, IOMMU_PAGE_SIZE(tbl)); |
574 | if (alloc_size) |
575 | vio_cmo_dealloc(viodev, alloc_size); |
576 | return ret; |
577 | |
578 | out_deallocate: |
579 | vio_cmo_dealloc(viodev, alloc_size); |
580 | out_fail: |
581 | atomic_inc(&viodev->cmo.allocs_failed); |
582 | return ret; |
583 | } |
584 | |
585 | static void vio_dma_iommu_unmap_sg(struct device *dev, |
586 | struct scatterlist *sglist, int nelems, |
587 | enum dma_data_direction direction, |
588 | unsigned long attrs) |
589 | { |
590 | struct vio_dev *viodev = to_vio_dev(dev); |
591 | struct iommu_table *tbl = get_iommu_table_base(dev); |
592 | struct scatterlist *sgl; |
593 | size_t alloc_size = 0; |
594 | int count; |
595 | |
596 | for_each_sg(sglist, sgl, nelems, count) |
597 | alloc_size += roundup(sgl->dma_length, IOMMU_PAGE_SIZE(tbl)); |
598 | |
599 | ppc_iommu_unmap_sg(tbl, sglist, nelems, direction, attrs); |
600 | vio_cmo_dealloc(viodev, alloc_size); |
601 | } |
602 | |
603 | static const struct dma_map_ops vio_dma_mapping_ops = { |
604 | .alloc = vio_dma_iommu_alloc_coherent, |
605 | .free = vio_dma_iommu_free_coherent, |
606 | .map_sg = vio_dma_iommu_map_sg, |
607 | .unmap_sg = vio_dma_iommu_unmap_sg, |
608 | .map_page = vio_dma_iommu_map_page, |
609 | .unmap_page = vio_dma_iommu_unmap_page, |
610 | .dma_supported = dma_iommu_dma_supported, |
611 | .get_required_mask = dma_iommu_get_required_mask, |
612 | .mmap = dma_common_mmap, |
613 | .get_sgtable = dma_common_get_sgtable, |
614 | .alloc_pages = dma_common_alloc_pages, |
615 | .free_pages = dma_common_free_pages, |
616 | }; |
617 | |
618 | /** |
619 | * vio_cmo_set_dev_desired - Set desired entitlement for a device |
620 | * |
621 | * @viodev: struct vio_dev for device to alter |
622 | * @desired: new desired entitlement level in bytes |
623 | * |
624 | * For use by devices to request a change to their entitlement at runtime or |
625 | * through sysfs. The desired entitlement level is changed and a balancing |
626 | * of system resources is scheduled to run in the future. |
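 *
 * For example, user space can request 1MB of desired entitlement through
 * the sysfs cmo_desired attribute that is backed by this function (the
 * device path shown is illustrative):
 *
 *	echo 1048576 > /sys/bus/vio/devices/30000002/cmo_desired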
627 | */ |
628 | void vio_cmo_set_dev_desired(struct vio_dev *viodev, size_t desired) |
629 | { |
630 | unsigned long flags; |
631 | struct vio_cmo_dev_entry *dev_ent; |
632 | int found = 0; |
633 | |
634 | if (!firmware_has_feature(FW_FEATURE_CMO)) |
635 | return; |
636 | |
637 | spin_lock_irqsave(&vio_cmo.lock, flags); |
638 | if (desired < VIO_CMO_MIN_ENT) |
639 | desired = VIO_CMO_MIN_ENT; |
640 | |
641 | /* |
642 | * Changes will not be made for devices not in the device list. |
643 | * If it is not in the device list, then no driver is loaded |
644 | * for the device and it can not receive entitlement. |
645 | */ |
646 | list_for_each_entry(dev_ent, &vio_cmo.device_list, list) |
647 | if (viodev == dev_ent->viodev) { |
648 | found = 1; |
649 | break; |
650 | } |
651 | if (!found) { |
652 | spin_unlock_irqrestore(&vio_cmo.lock, flags); |
653 | return; |
654 | } |
655 | |
656 | /* Increase/decrease in desired device entitlement */ |
657 | if (desired >= viodev->cmo.desired) { |
		/* Just bump the bus and device values prior to a balance */
659 | vio_cmo.desired += desired - viodev->cmo.desired; |
660 | viodev->cmo.desired = desired; |
661 | } else { |
662 | /* Decrease bus and device values for desired entitlement */ |
663 | vio_cmo.desired -= viodev->cmo.desired - desired; |
664 | viodev->cmo.desired = desired; |
665 | /* |
666 | * If less entitlement is desired than current entitlement, move |
667 | * any reserve memory in the change region to the excess pool. |
668 | */ |
669 | if (viodev->cmo.entitled > desired) { |
670 | vio_cmo.reserve.size -= viodev->cmo.entitled - desired; |
671 | vio_cmo.excess.size += viodev->cmo.entitled - desired; |
672 | /* |
673 | * If entitlement moving from the reserve pool to the |
674 | * excess pool is currently unused, add to the excess |
675 | * free counter. |
676 | */ |
677 | if (viodev->cmo.allocated < viodev->cmo.entitled) |
678 | vio_cmo.excess.free += viodev->cmo.entitled - |
679 | max(viodev->cmo.allocated, desired); |
680 | viodev->cmo.entitled = desired; |
681 | } |
682 | } |
683 | schedule_delayed_work(&vio_cmo.balance_q, 0); |
684 | spin_unlock_irqrestore(&vio_cmo.lock, flags); |
685 | } |
686 | |
687 | /** |
688 | * vio_cmo_bus_probe - Handle CMO specific bus probe activities |
689 | * |
690 | * @viodev - Pointer to struct vio_dev for device |
691 | * |
 * Determine the device's IO memory entitlement needs, attempting
693 | * to satisfy the system minimum entitlement at first and scheduling |
694 | * a balance operation to take care of the rest at a later time. |
695 | * |
696 | * Returns: 0 on success, -EINVAL when device doesn't support CMO, and |
697 | * -ENOMEM when entitlement is not available for device or |
698 | * device entry. |
699 | * |
700 | */ |
701 | static int vio_cmo_bus_probe(struct vio_dev *viodev) |
702 | { |
703 | struct vio_cmo_dev_entry *dev_ent; |
704 | struct device *dev = &viodev->dev; |
705 | struct iommu_table *tbl; |
706 | struct vio_driver *viodrv = to_vio_driver(dev->driver); |
707 | unsigned long flags; |
708 | size_t size; |
709 | bool dma_capable = false; |
710 | |
711 | tbl = get_iommu_table_base(dev); |
712 | |
713 | /* A device requires entitlement if it has a DMA window property */ |
714 | switch (viodev->family) { |
715 | case VDEVICE: |
716 | if (of_get_property(viodev->dev.of_node, |
717 | "ibm,my-dma-window" , NULL)) |
718 | dma_capable = true; |
719 | break; |
720 | case PFO: |
721 | dma_capable = false; |
722 | break; |
723 | default: |
724 | dev_warn(dev, "unknown device family: %d\n" , viodev->family); |
725 | BUG(); |
726 | break; |
727 | } |
728 | |
729 | /* Configure entitlement for the device. */ |
730 | if (dma_capable) { |
731 | /* Check that the driver is CMO enabled and get desired DMA */ |
732 | if (!viodrv->get_desired_dma) { |
733 | dev_err(dev, "%s: device driver does not support CMO\n" , |
734 | __func__); |
735 | return -EINVAL; |
736 | } |
737 | |
738 | viodev->cmo.desired = |
739 | IOMMU_PAGE_ALIGN(viodrv->get_desired_dma(viodev), tbl); |
740 | if (viodev->cmo.desired < VIO_CMO_MIN_ENT) |
741 | viodev->cmo.desired = VIO_CMO_MIN_ENT; |
742 | size = VIO_CMO_MIN_ENT; |
743 | |
744 | dev_ent = kmalloc(sizeof(struct vio_cmo_dev_entry), |
745 | GFP_KERNEL); |
746 | if (!dev_ent) |
747 | return -ENOMEM; |
748 | |
749 | dev_ent->viodev = viodev; |
750 | spin_lock_irqsave(&vio_cmo.lock, flags); |
751 | list_add(&dev_ent->list, &vio_cmo.device_list); |
752 | } else { |
753 | viodev->cmo.desired = 0; |
754 | size = 0; |
755 | spin_lock_irqsave(&vio_cmo.lock, flags); |
756 | } |
757 | |
758 | /* |
759 | * If the needs for vio_cmo.min have not changed since they |
760 | * were last set, the number of devices in the OF tree has |
761 | * been constant and the IO memory for this is already in |
762 | * the reserve pool. |
763 | */ |
764 | if (vio_cmo.min == ((vio_cmo_num_OF_devs() + 1) * |
765 | VIO_CMO_MIN_ENT)) { |
		/* Update desired entitlement if device requires it */
767 | if (size) |
768 | vio_cmo.desired += (viodev->cmo.desired - |
769 | VIO_CMO_MIN_ENT); |
770 | } else { |
771 | size_t tmp; |
772 | |
773 | tmp = vio_cmo.spare + vio_cmo.excess.free; |
774 | if (tmp < size) { |
775 | dev_err(dev, "%s: insufficient free " |
776 | "entitlement to add device. " |
777 | "Need %lu, have %lu\n" , __func__, |
778 | size, (vio_cmo.spare + tmp)); |
779 | spin_unlock_irqrestore(&vio_cmo.lock, flags); |
780 | return -ENOMEM; |
781 | } |
782 | |
783 | /* Use excess pool first to fulfill request */ |
784 | tmp = min(size, vio_cmo.excess.free); |
785 | vio_cmo.excess.free -= tmp; |
786 | vio_cmo.excess.size -= tmp; |
787 | vio_cmo.reserve.size += tmp; |
788 | |
789 | /* Use spare if excess pool was insufficient */ |
790 | vio_cmo.spare -= size - tmp; |
791 | |
792 | /* Update bus accounting */ |
793 | vio_cmo.min += size; |
794 | vio_cmo.desired += viodev->cmo.desired; |
795 | } |
796 | spin_unlock_irqrestore(&vio_cmo.lock, flags); |
797 | return 0; |
798 | } |
799 | |
800 | /** |
801 | * vio_cmo_bus_remove - Handle CMO specific bus removal activities |
802 | * |
803 | * @viodev - Pointer to struct vio_dev for device |
804 | * |
805 | * Remove the device from the cmo device list. The minimum entitlement |
806 | * will be reserved for the device as long as it is in the system. The |
807 | * rest of the entitlement the device had been allocated will be returned |
808 | * to the system. |
809 | */ |
810 | static void vio_cmo_bus_remove(struct vio_dev *viodev) |
811 | { |
812 | struct vio_cmo_dev_entry *dev_ent; |
813 | unsigned long flags; |
814 | size_t tmp; |
815 | |
816 | spin_lock_irqsave(&vio_cmo.lock, flags); |
817 | if (viodev->cmo.allocated) { |
818 | dev_err(&viodev->dev, "%s: device had %lu bytes of IO " |
819 | "allocated after remove operation.\n" , |
820 | __func__, viodev->cmo.allocated); |
821 | BUG(); |
822 | } |
823 | |
824 | /* |
825 | * Remove the device from the device list being maintained for |
826 | * CMO enabled devices. |
827 | */ |
828 | list_for_each_entry(dev_ent, &vio_cmo.device_list, list) |
829 | if (viodev == dev_ent->viodev) { |
830 | list_del(&dev_ent->list); |
831 | kfree(dev_ent); |
832 | break; |
833 | } |
834 | |
835 | /* |
836 | * Devices may not require any entitlement and they do not need |
837 | * to be processed. Otherwise, return the device's entitlement |
838 | * back to the pools. |
839 | */ |
840 | if (viodev->cmo.entitled) { |
841 | /* |
		 * This device has not yet left the OF tree, its
843 | * minimum entitlement remains in vio_cmo.min and |
844 | * vio_cmo.desired |
845 | */ |
846 | vio_cmo.desired -= (viodev->cmo.desired - VIO_CMO_MIN_ENT); |
847 | |
848 | /* |
849 | * Save min allocation for device in reserve as long |
850 | * as it exists in OF tree as determined by later |
851 | * balance operation |
852 | */ |
853 | viodev->cmo.entitled -= VIO_CMO_MIN_ENT; |
854 | |
855 | /* Replenish spare from freed reserve pool */ |
856 | if (viodev->cmo.entitled && (vio_cmo.spare < VIO_CMO_MIN_ENT)) { |
857 | tmp = min(viodev->cmo.entitled, (VIO_CMO_MIN_ENT - |
858 | vio_cmo.spare)); |
859 | vio_cmo.spare += tmp; |
860 | viodev->cmo.entitled -= tmp; |
861 | } |
862 | |
863 | /* Remaining reserve goes to excess pool */ |
864 | vio_cmo.excess.size += viodev->cmo.entitled; |
865 | vio_cmo.excess.free += viodev->cmo.entitled; |
866 | vio_cmo.reserve.size -= viodev->cmo.entitled; |
867 | |
868 | /* |
869 | * Until the device is removed it will keep a |
870 | * minimum entitlement; this will guarantee that |
		 * a module unload/load cycle succeeds.
872 | */ |
873 | viodev->cmo.entitled = VIO_CMO_MIN_ENT; |
874 | viodev->cmo.desired = VIO_CMO_MIN_ENT; |
875 | atomic_set(&viodev->cmo.allocs_failed, 0); |
876 | } |
877 | |
878 | spin_unlock_irqrestore(&vio_cmo.lock, flags); |
879 | } |
880 | |
881 | static void vio_cmo_set_dma_ops(struct vio_dev *viodev) |
882 | { |
883 | set_dma_ops(&viodev->dev, &vio_dma_mapping_ops); |
884 | } |
885 | |
886 | /** |
887 | * vio_cmo_bus_init - CMO entitlement initialization at bus init time |
888 | * |
889 | * Set up the reserve and excess entitlement pools based on available |
890 | * system entitlement and the number of devices in the OF tree that |
891 | * require entitlement in the reserve pool. |
892 | */ |
893 | static void vio_cmo_bus_init(void) |
894 | { |
895 | struct hvcall_mpp_data mpp_data; |
896 | int err; |
897 | |
898 | memset(&vio_cmo, 0, sizeof(struct vio_cmo)); |
899 | spin_lock_init(&vio_cmo.lock); |
900 | INIT_LIST_HEAD(&vio_cmo.device_list); |
901 | INIT_DELAYED_WORK(&vio_cmo.balance_q, vio_cmo_balance); |
902 | |
903 | /* Get current system entitlement */ |
904 | err = h_get_mpp(&mpp_data); |
905 | |
906 | /* |
907 | * On failure, continue with entitlement set to 0, will panic() |
908 | * later when spare is reserved. |
909 | */ |
910 | if (err != H_SUCCESS) { |
911 | printk(KERN_ERR "%s: unable to determine system IO " \ |
912 | "entitlement. (%d)\n" , __func__, err); |
913 | vio_cmo.entitled = 0; |
914 | } else { |
915 | vio_cmo.entitled = mpp_data.entitled_mem; |
916 | } |
917 | |
918 | /* Set reservation and check against entitlement */ |
919 | vio_cmo.spare = VIO_CMO_MIN_ENT; |
920 | vio_cmo.reserve.size = vio_cmo.spare; |
921 | vio_cmo.reserve.size += (vio_cmo_num_OF_devs() * |
922 | VIO_CMO_MIN_ENT); |
923 | if (vio_cmo.reserve.size > vio_cmo.entitled) { |
924 | printk(KERN_ERR "%s: insufficient system entitlement\n" , |
925 | __func__); |
926 | panic("%s: Insufficient system entitlement" , __func__); |
927 | } |
928 | |
929 | /* Set the remaining accounting variables */ |
930 | vio_cmo.excess.size = vio_cmo.entitled - vio_cmo.reserve.size; |
931 | vio_cmo.excess.free = vio_cmo.excess.size; |
932 | vio_cmo.min = vio_cmo.reserve.size; |
933 | vio_cmo.desired = vio_cmo.reserve.size; |
934 | } |
935 | |
936 | /* sysfs device functions and data structures for CMO */ |
937 | |
938 | #define viodev_cmo_rd_attr(name) \ |
939 | static ssize_t cmo_##name##_show(struct device *dev, \ |
940 | struct device_attribute *attr, \ |
941 | char *buf) \ |
942 | { \ |
943 | return sprintf(buf, "%lu\n", to_vio_dev(dev)->cmo.name); \ |
944 | } |
945 | |
946 | static ssize_t cmo_allocs_failed_show(struct device *dev, |
947 | struct device_attribute *attr, char *buf) |
948 | { |
949 | struct vio_dev *viodev = to_vio_dev(dev); |
950 | return sprintf(buf, "%d\n" , atomic_read(&viodev->cmo.allocs_failed)); |
951 | } |
952 | |
953 | static ssize_t cmo_allocs_failed_store(struct device *dev, |
954 | struct device_attribute *attr, const char *buf, size_t count) |
955 | { |
956 | struct vio_dev *viodev = to_vio_dev(dev); |
957 | atomic_set(&viodev->cmo.allocs_failed, 0); |
958 | return count; |
959 | } |
960 | |
961 | static ssize_t cmo_desired_store(struct device *dev, |
962 | struct device_attribute *attr, const char *buf, size_t count) |
963 | { |
964 | struct vio_dev *viodev = to_vio_dev(dev); |
965 | size_t new_desired; |
966 | int ret; |
967 | |
968 | ret = kstrtoul(buf, 10, &new_desired); |
969 | if (ret) |
970 | return ret; |
971 | |
972 | vio_cmo_set_dev_desired(viodev, new_desired); |
973 | return count; |
974 | } |
975 | |
976 | viodev_cmo_rd_attr(desired); |
977 | viodev_cmo_rd_attr(entitled); |
978 | viodev_cmo_rd_attr(allocated); |
979 | |
980 | static ssize_t name_show(struct device *, struct device_attribute *, char *); |
981 | static ssize_t devspec_show(struct device *, struct device_attribute *, char *); |
982 | static ssize_t modalias_show(struct device *dev, struct device_attribute *attr, |
983 | char *buf); |
984 | |
985 | static struct device_attribute dev_attr_name; |
986 | static struct device_attribute dev_attr_devspec; |
987 | static struct device_attribute dev_attr_modalias; |
988 | |
989 | static DEVICE_ATTR_RO(cmo_entitled); |
990 | static DEVICE_ATTR_RO(cmo_allocated); |
991 | static DEVICE_ATTR_RW(cmo_desired); |
992 | static DEVICE_ATTR_RW(cmo_allocs_failed); |
993 | |
994 | /* sysfs bus functions and data structures for CMO */ |
995 | |
996 | #define viobus_cmo_rd_attr(name) \ |
997 | static ssize_t cmo_bus_##name##_show(const struct bus_type *bt, char *buf) \ |
998 | { \ |
999 | return sprintf(buf, "%lu\n", vio_cmo.name); \ |
1000 | } \ |
1001 | static struct bus_attribute bus_attr_cmo_bus_##name = \ |
1002 | __ATTR(cmo_##name, S_IRUGO, cmo_bus_##name##_show, NULL) |
1003 | |
1004 | #define viobus_cmo_pool_rd_attr(name, var) \ |
1005 | static ssize_t \ |
1006 | cmo_##name##_##var##_show(const struct bus_type *bt, char *buf) \ |
1007 | { \ |
1008 | return sprintf(buf, "%lu\n", vio_cmo.name.var); \ |
1009 | } \ |
1010 | static BUS_ATTR_RO(cmo_##name##_##var) |
1011 | |
1012 | viobus_cmo_rd_attr(entitled); |
1013 | viobus_cmo_rd_attr(spare); |
1014 | viobus_cmo_rd_attr(min); |
1015 | viobus_cmo_rd_attr(desired); |
1016 | viobus_cmo_rd_attr(curr); |
1017 | viobus_cmo_pool_rd_attr(reserve, size); |
1018 | viobus_cmo_pool_rd_attr(excess, size); |
1019 | viobus_cmo_pool_rd_attr(excess, free); |
1020 | |
1021 | static ssize_t cmo_high_show(const struct bus_type *bt, char *buf) |
1022 | { |
1023 | return sprintf(buf, "%lu\n" , vio_cmo.high); |
1024 | } |
1025 | |
1026 | static ssize_t cmo_high_store(const struct bus_type *bt, const char *buf, |
1027 | size_t count) |
1028 | { |
1029 | unsigned long flags; |
1030 | |
1031 | spin_lock_irqsave(&vio_cmo.lock, flags); |
1032 | vio_cmo.high = vio_cmo.curr; |
1033 | spin_unlock_irqrestore(&vio_cmo.lock, flags); |
1034 | |
1035 | return count; |
1036 | } |
1037 | static BUS_ATTR_RW(cmo_high); |
1038 | |
1039 | static struct attribute *vio_bus_attrs[] = { |
1040 | &bus_attr_cmo_bus_entitled.attr, |
1041 | &bus_attr_cmo_bus_spare.attr, |
1042 | &bus_attr_cmo_bus_min.attr, |
1043 | &bus_attr_cmo_bus_desired.attr, |
1044 | &bus_attr_cmo_bus_curr.attr, |
1045 | &bus_attr_cmo_high.attr, |
1046 | &bus_attr_cmo_reserve_size.attr, |
1047 | &bus_attr_cmo_excess_size.attr, |
1048 | &bus_attr_cmo_excess_free.attr, |
1049 | NULL, |
1050 | }; |
1051 | ATTRIBUTE_GROUPS(vio_bus); |
1052 | |
1053 | static void __init vio_cmo_sysfs_init(void) { } |
1054 | #else /* CONFIG_PPC_SMLPAR */ |
1055 | int vio_cmo_entitlement_update(size_t new_entitlement) { return 0; } |
1056 | void vio_cmo_set_dev_desired(struct vio_dev *viodev, size_t desired) {} |
1057 | static int vio_cmo_bus_probe(struct vio_dev *viodev) { return 0; } |
1058 | static void vio_cmo_bus_remove(struct vio_dev *viodev) {} |
1059 | static void vio_cmo_set_dma_ops(struct vio_dev *viodev) {} |
1060 | static void vio_cmo_bus_init(void) {} |
1061 | static void __init vio_cmo_sysfs_init(void) { } |
1062 | #endif /* CONFIG_PPC_SMLPAR */ |
1063 | EXPORT_SYMBOL(vio_cmo_entitlement_update); |
1064 | EXPORT_SYMBOL(vio_cmo_set_dev_desired); |
1065 | |
1066 | |
1067 | /* |
1068 | * Platform Facilities Option (PFO) support |
1069 | */ |
1070 | |
1071 | /** |
1072 | * vio_h_cop_sync - Perform a synchronous PFO co-processor operation |
1073 | * |
1074 | * @vdev - Pointer to a struct vio_dev for device |
1075 | * @op - Pointer to a struct vio_pfo_op for the operation parameters |
1076 | * |
1077 | * Calls the hypervisor to synchronously perform the PFO operation |
1078 | * described in @op. In the case of a busy response from the hypervisor, |
1079 | * the operation will be re-submitted indefinitely unless a non-zero timeout |
 * is specified or an error occurs. The timeout places a limit on when to
 * stop re-submitting an operation; the total time can be exceeded if an
 * operation is already in progress.
1083 | * |
 * On return, @op->hcall_err holds the result of the last H_COP hcall,
 * or 0 if an error not involving the hcall was encountered.
1087 | * |
1088 | * Returns: |
1089 | * 0 on success, |
1090 | * -EINVAL if the h_call fails due to an invalid parameter, |
1091 | * -E2BIG if the h_call can not be performed synchronously, |
1092 | * -EBUSY if a timeout is specified and has elapsed, |
1093 | * -EACCES if the memory area for data/status has been rescinded, or |
1094 | * -EPERM if a hardware fault has been indicated |
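 *
 * A minimal, illustrative call sequence (all field values hypothetical;
 * the buffer addresses must be suitable for passing to H_COP):
 *
 *	struct vio_pfo_op op = {
 *		.flags   = op_flags,
 *		.in      = in_paddr,
 *		.inlen   = in_len,
 *		.out     = out_paddr,
 *		.outlen  = out_len,
 *		.csbcpb  = csb_paddr,
 *		.timeout = 1000,	// give up after ~1s of busy replies
 *	};
 *	ret = vio_h_cop_sync(vdev, &op);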
1095 | */ |
1096 | int vio_h_cop_sync(struct vio_dev *vdev, struct vio_pfo_op *op) |
1097 | { |
1098 | struct device *dev = &vdev->dev; |
1099 | unsigned long deadline = 0; |
1100 | long hret = 0; |
1101 | int ret = 0; |
1102 | |
1103 | if (op->timeout) |
		deadline = jiffies + msecs_to_jiffies(op->timeout);
1105 | |
1106 | while (true) { |
1107 | hret = plpar_hcall_norets(H_COP, op->flags, |
1108 | vdev->resource_id, |
1109 | op->in, op->inlen, op->out, |
1110 | op->outlen, op->csbcpb); |
1111 | |
1112 | if (hret == H_SUCCESS || |
1113 | (hret != H_NOT_ENOUGH_RESOURCES && |
1114 | hret != H_BUSY && hret != H_RESOURCE) || |
1115 | (op->timeout && time_after(deadline, jiffies))) |
1116 | break; |
1117 | |
1118 | dev_dbg(dev, "%s: hcall ret(%ld), retrying.\n" , __func__, hret); |
1119 | } |
1120 | |
1121 | switch (hret) { |
1122 | case H_SUCCESS: |
1123 | ret = 0; |
1124 | break; |
1125 | case H_OP_MODE: |
1126 | case H_TOO_BIG: |
1127 | ret = -E2BIG; |
1128 | break; |
1129 | case H_RESCINDED: |
1130 | ret = -EACCES; |
1131 | break; |
1132 | case H_HARDWARE: |
1133 | ret = -EPERM; |
1134 | break; |
1135 | case H_NOT_ENOUGH_RESOURCES: |
1136 | case H_RESOURCE: |
1137 | case H_BUSY: |
1138 | ret = -EBUSY; |
1139 | break; |
1140 | default: |
1141 | ret = -EINVAL; |
1142 | break; |
1143 | } |
1144 | |
1145 | if (ret) |
1146 | dev_dbg(dev, "%s: Sync h_cop_op failure (ret:%d) (hret:%ld)\n" , |
1147 | __func__, ret, hret); |
1148 | |
1149 | op->hcall_err = hret; |
1150 | return ret; |
1151 | } |
1152 | EXPORT_SYMBOL(vio_h_cop_sync); |
1153 | |
1154 | static struct iommu_table *vio_build_iommu_table(struct vio_dev *dev) |
1155 | { |
1156 | const __be32 *dma_window; |
1157 | struct iommu_table *tbl; |
1158 | unsigned long offset, size; |
1159 | |
	dma_window = of_get_property(dev->dev.of_node,
				     "ibm,my-dma-window", NULL);
1162 | if (!dma_window) |
1163 | return NULL; |
1164 | |
1165 | tbl = kzalloc(sizeof(*tbl), GFP_KERNEL); |
1166 | if (tbl == NULL) |
1167 | return NULL; |
1168 | |
	kref_init(&tbl->it_kref);
1170 | |
1171 | of_parse_dma_window(dev->dev.of_node, dma_window, |
1172 | &tbl->it_index, &offset, &size); |
1173 | |
1174 | /* TCE table size - measured in tce entries */ |
1175 | tbl->it_page_shift = IOMMU_PAGE_SHIFT_4K; |
1176 | tbl->it_size = size >> tbl->it_page_shift; |
1177 | /* offset for VIO should always be 0 */ |
1178 | tbl->it_offset = offset >> tbl->it_page_shift; |
1179 | tbl->it_busno = 0; |
1180 | tbl->it_type = TCE_VB; |
1181 | tbl->it_blocksize = 16; |
1182 | |
1183 | if (firmware_has_feature(FW_FEATURE_LPAR)) |
1184 | tbl->it_ops = &iommu_table_lpar_multi_ops; |
1185 | else |
1186 | tbl->it_ops = &iommu_table_pseries_ops; |
1187 | |
1188 | return iommu_init_table(tbl, -1, 0, 0); |
1189 | } |
1190 | |
1191 | /** |
1192 | * vio_match_device: - Tell if a VIO device has a matching |
1193 | * VIO device id structure. |
1194 | * @ids: array of VIO device id structures to search in |
1195 | * @dev: the VIO device structure to match against |
1196 | * |
1197 | * Used by a driver to check whether a VIO device present in the |
1198 | * system is in its list of supported devices. Returns the matching |
1199 | * vio_device_id structure or NULL if there is no match. |
1200 | */ |
1201 | static const struct vio_device_id *vio_match_device( |
1202 | const struct vio_device_id *ids, const struct vio_dev *dev) |
1203 | { |
1204 | while (ids->type[0] != '\0') { |
1205 | if ((strncmp(dev->type, ids->type, strlen(ids->type)) == 0) && |
		    of_device_is_compatible(dev->dev.of_node,
1207 | ids->compat)) |
1208 | return ids; |
1209 | ids++; |
1210 | } |
1211 | return NULL; |
1212 | } |
1213 | |
1214 | /* |
1215 | * Convert from struct device to struct vio_dev and pass to driver. |
1216 | * dev->driver has already been set by generic code because vio_bus_match |
1217 | * succeeded. |
1218 | */ |
1219 | static int vio_bus_probe(struct device *dev) |
1220 | { |
1221 | struct vio_dev *viodev = to_vio_dev(dev); |
1222 | struct vio_driver *viodrv = to_vio_driver(dev->driver); |
1223 | const struct vio_device_id *id; |
1224 | int error = -ENODEV; |
1225 | |
1226 | if (!viodrv->probe) |
1227 | return error; |
1228 | |
	id = vio_match_device(viodrv->id_table, viodev);
1230 | if (id) { |
1231 | memset(&viodev->cmo, 0, sizeof(viodev->cmo)); |
1232 | if (firmware_has_feature(FW_FEATURE_CMO)) { |
1233 | error = vio_cmo_bus_probe(viodev); |
1234 | if (error) |
1235 | return error; |
1236 | } |
1237 | error = viodrv->probe(viodev, id); |
1238 | if (error && firmware_has_feature(FW_FEATURE_CMO)) |
1239 | vio_cmo_bus_remove(viodev); |
1240 | } |
1241 | |
1242 | return error; |
1243 | } |
1244 | |
1245 | /* convert from struct device to struct vio_dev and pass to driver. */ |
1246 | static void vio_bus_remove(struct device *dev) |
1247 | { |
1248 | struct vio_dev *viodev = to_vio_dev(dev); |
1249 | struct vio_driver *viodrv = to_vio_driver(dev->driver); |
1250 | struct device *devptr; |
1251 | |
1252 | /* |
1253 | * Hold a reference to the device after the remove function is called |
1254 | * to allow for CMO accounting cleanup for the device. |
1255 | */ |
1256 | devptr = get_device(dev); |
1257 | |
1258 | if (viodrv->remove) |
1259 | viodrv->remove(viodev); |
1260 | |
1261 | if (firmware_has_feature(FW_FEATURE_CMO)) |
1262 | vio_cmo_bus_remove(viodev); |
1263 | |
	put_device(devptr);
1265 | } |
1266 | |
1267 | static void vio_bus_shutdown(struct device *dev) |
1268 | { |
1269 | struct vio_dev *viodev = to_vio_dev(dev); |
1270 | struct vio_driver *viodrv; |
1271 | |
1272 | if (dev->driver) { |
1273 | viodrv = to_vio_driver(dev->driver); |
1274 | if (viodrv->shutdown) |
1275 | viodrv->shutdown(viodev); |
1276 | else if (kexec_in_progress) |
1277 | vio_bus_remove(dev); |
1278 | } |
1279 | } |
1280 | |
1281 | /** |
1282 | * vio_register_driver: - Register a new vio driver |
1283 | * @viodrv: The vio_driver structure to be registered. |
1284 | */ |
1285 | int __vio_register_driver(struct vio_driver *viodrv, struct module *owner, |
1286 | const char *mod_name) |
1287 | { |
1288 | // vio_bus_type is only initialised for pseries |
1289 | if (!machine_is(pseries)) |
1290 | return -ENODEV; |
1291 | |
1292 | pr_debug("%s: driver %s registering\n" , __func__, viodrv->name); |
1293 | |
1294 | /* fill in 'struct driver' fields */ |
1295 | viodrv->driver.name = viodrv->name; |
1296 | viodrv->driver.pm = viodrv->pm; |
1297 | viodrv->driver.bus = &vio_bus_type; |
1298 | viodrv->driver.owner = owner; |
1299 | viodrv->driver.mod_name = mod_name; |
1300 | |
	return driver_register(&viodrv->driver);
1302 | } |
1303 | EXPORT_SYMBOL(__vio_register_driver); |
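
/*
 * A minimal, illustrative driver skeleton (all foo_* names are
 * hypothetical). Drivers normally use the vio_register_driver()
 * wrapper rather than calling __vio_register_driver() directly:
 *
 *	static const struct vio_device_id foo_ids[] = {
 *		{ "network", "IBM,l-lan" },
 *		{ "", "" }
 *	};
 *	MODULE_DEVICE_TABLE(vio, foo_ids);
 *
 *	static struct vio_driver foo_driver = {
 *		.name     = "foo",
 *		.id_table = foo_ids,
 *		.probe    = foo_probe,
 *		.remove   = foo_remove,
 *	};
 *
 *	rc = vio_register_driver(&foo_driver);
 */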
1304 | |
1305 | /** |
1306 | * vio_unregister_driver - Remove registration of vio driver. |
 * @viodrv: The vio_driver struct to be removed from registration
1308 | */ |
1309 | void vio_unregister_driver(struct vio_driver *viodrv) |
1310 | { |
	driver_unregister(&viodrv->driver);
1312 | } |
1313 | EXPORT_SYMBOL(vio_unregister_driver); |
1314 | |
1315 | /* vio_dev refcount hit 0 */ |
1316 | static void vio_dev_release(struct device *dev) |
1317 | { |
1318 | struct iommu_table *tbl = get_iommu_table_base(dev); |
1319 | |
1320 | if (tbl) |
1321 | iommu_tce_table_put(tbl); |
	of_node_put(dev->of_node);
	kfree(to_vio_dev(dev));
1324 | } |
1325 | |
1326 | /** |
1327 | * vio_register_device_node: - Register a new vio device. |
1328 | * @of_node: The OF node for this device. |
1329 | * |
1330 | * Creates and initializes a vio_dev structure from the data in |
1331 | * of_node and adds it to the list of virtual devices. |
1332 | * Returns a pointer to the created vio_dev or NULL if node has |
1333 | * NULL device_type or compatible fields. |
1334 | */ |
1335 | struct vio_dev *vio_register_device_node(struct device_node *of_node) |
1336 | { |
1337 | struct vio_dev *viodev; |
1338 | struct device_node *parent_node; |
1339 | const __be32 *prop; |
1340 | enum vio_dev_family family; |
1341 | |
1342 | /* |
	 * Determine if this node is under the /vdevice node or under the
1344 | * /ibm,platform-facilities node. This decides the device's family. |
1345 | */ |
	parent_node = of_get_parent(of_node);
	if (parent_node) {
		if (of_node_is_type(parent_node, "ibm,platform-facilities"))
			family = PFO;
		else if (of_node_is_type(parent_node, "vdevice"))
			family = VDEVICE;
		else {
			pr_warn("%s: parent(%pOF) of %pOFn not recognized.\n",
				__func__,
				parent_node,
				of_node);
			of_node_put(parent_node);
			return NULL;
		}
		of_node_put(parent_node);
1361 | } else { |
1362 | pr_warn("%s: could not determine the parent of node %pOFn.\n" , |
1363 | __func__, of_node); |
1364 | return NULL; |
1365 | } |
1366 | |
1367 | if (family == PFO) { |
		if (of_property_read_bool(of_node, "interrupt-controller")) {
			pr_debug("%s: Skipping the interrupt controller %pOFn.\n",
1370 | __func__, of_node); |
1371 | return NULL; |
1372 | } |
1373 | } |
1374 | |
1375 | /* allocate a vio_dev for this node */ |
1376 | viodev = kzalloc(sizeof(struct vio_dev), GFP_KERNEL); |
1377 | if (viodev == NULL) { |
1378 | pr_warn("%s: allocation failure for VIO device.\n" , __func__); |
1379 | return NULL; |
1380 | } |
1381 | |
1382 | /* we need the 'device_type' property, in order to match with drivers */ |
1383 | viodev->family = family; |
1384 | if (viodev->family == VDEVICE) { |
1385 | unsigned int unit_address; |
1386 | |
		viodev->type = of_node_get_device_type(of_node);
1388 | if (!viodev->type) { |
1389 | pr_warn("%s: node %pOFn is missing the 'device_type' " |
1390 | "property.\n" , __func__, of_node); |
1391 | goto out; |
1392 | } |
1393 | |
1394 | prop = of_get_property(node: of_node, name: "reg" , NULL); |
1395 | if (prop == NULL) { |
1396 | pr_warn("%s: node %pOFn missing 'reg'\n" , |
1397 | __func__, of_node); |
1398 | goto out; |
1399 | } |
1400 | unit_address = of_read_number(cell: prop, size: 1); |
1401 | dev_set_name(dev: &viodev->dev, name: "%x" , unit_address); |
1402 | viodev->irq = irq_of_parse_and_map(node: of_node, index: 0); |
1403 | viodev->unit_address = unit_address; |
1404 | } else { |
1405 | /* PFO devices need their resource_id for submitting COP_OPs |
1406 | * This is an optional field for devices, but is required when |
1407 | * performing synchronous ops */ |
1408 | prop = of_get_property(node: of_node, name: "ibm,resource-id" , NULL); |
1409 | if (prop != NULL) |
1410 | viodev->resource_id = of_read_number(cell: prop, size: 1); |
1411 | |
1412 | dev_set_name(dev: &viodev->dev, name: "%pOFn" , of_node); |
1413 | viodev->type = dev_name(dev: &viodev->dev); |
1414 | viodev->irq = 0; |
1415 | } |
1416 | |
1417 | viodev->name = of_node->name; |
	viodev->dev.of_node = of_node_get(of_node);

	set_dev_node(&viodev->dev, of_node_to_nid(of_node));
1421 | |
1422 | /* init generic 'struct device' fields: */ |
1423 | viodev->dev.parent = &vio_bus_device.dev; |
1424 | viodev->dev.bus = &vio_bus_type; |
1425 | viodev->dev.release = vio_dev_release; |
1426 | |
	if (of_property_present(viodev->dev.of_node, "ibm,my-dma-window")) {
		if (firmware_has_feature(FW_FEATURE_CMO))
			vio_cmo_set_dma_ops(viodev);
		else
			set_dma_ops(&viodev->dev, &dma_iommu_ops);

		set_iommu_table_base(&viodev->dev,
				     vio_build_iommu_table(viodev));
1435 | |
1436 | /* needed to ensure proper operation of coherent allocations |
1437 | * later, in case driver doesn't set it explicitly */ |
1438 | viodev->dev.coherent_dma_mask = DMA_BIT_MASK(64); |
1439 | viodev->dev.dma_mask = &viodev->dev.coherent_dma_mask; |
1440 | } |
1441 | |
1442 | /* register with generic device framework */ |
	if (device_register(&viodev->dev)) {
		printk(KERN_ERR "%s: failed to register device %s\n",
		       __func__, dev_name(&viodev->dev));
		put_device(&viodev->dev);
1447 | return NULL; |
1448 | } |
1449 | |
1450 | return viodev; |
1451 | |
1452 | out: /* Use this exit point for any return prior to device_register */ |
	kfree(viodev);
1454 | |
1455 | return NULL; |
1456 | } |
1457 | EXPORT_SYMBOL(vio_register_device_node); |
1458 | |
1459 | /* |
 * vio_bus_scan_register_devices - Scan OF and register each child device
1461 | * @root_name - OF node name for the root of the subtree to search. |
1462 | * This must be non-NULL |
1463 | * |
 * Starting from the root node provided, register the device node for
1465 | * each child beneath the root. |
1466 | */ |
1467 | static void __init vio_bus_scan_register_devices(char *root_name) |
1468 | { |
1469 | struct device_node *node_root, *node_child; |
1470 | |
1471 | if (!root_name) |
1472 | return; |
1473 | |
	node_root = of_find_node_by_name(NULL, root_name);
1475 | if (node_root) { |
1476 | |
1477 | /* |
1478 | * Create struct vio_devices for each virtual device in |
1479 | * the device tree. Drivers will associate with them later. |
1480 | */ |
		node_child = of_get_next_child(node_root, NULL);
		while (node_child) {
			vio_register_device_node(node_child);
			node_child = of_get_next_child(node_root, node_child);
		}
		of_node_put(node_root);
1487 | } |
1488 | } |
1489 | |
1490 | /** |
1491 | * vio_bus_init: - Initialize the virtual IO bus |
1492 | */ |
1493 | static int __init vio_bus_init(void) |
1494 | { |
1495 | int err; |
1496 | |
1497 | if (firmware_has_feature(FW_FEATURE_CMO)) |
1498 | vio_cmo_sysfs_init(); |
1499 | |
	err = bus_register(&vio_bus_type);
	if (err) {
		printk(KERN_ERR "failed to register VIO bus\n");
1503 | return err; |
1504 | } |
1505 | |
1506 | /* |
1507 | * The fake parent of all vio devices, just to give us |
1508 | * a nice directory |
1509 | */ |
1510 | err = device_register(dev: &vio_bus_device.dev); |
1511 | if (err) { |
1512 | printk(KERN_WARNING "%s: device_register returned %i\n" , |
1513 | __func__, err); |
1514 | return err; |
1515 | } |
1516 | |
1517 | if (firmware_has_feature(FW_FEATURE_CMO)) |
1518 | vio_cmo_bus_init(); |
1519 | |
1520 | return 0; |
1521 | } |
1522 | machine_postcore_initcall(pseries, vio_bus_init); |
1523 | |
1524 | static int __init vio_device_init(void) |
1525 | { |
1526 | vio_bus_scan_register_devices(root_name: "vdevice" ); |
1527 | vio_bus_scan_register_devices(root_name: "ibm,platform-facilities" ); |
1528 | |
1529 | return 0; |
1530 | } |
1531 | machine_device_initcall(pseries, vio_device_init); |
1532 | |
1533 | static ssize_t name_show(struct device *dev, |
1534 | struct device_attribute *attr, char *buf) |
1535 | { |
1536 | return sprintf(buf, fmt: "%s\n" , to_vio_dev(dev)->name); |
1537 | } |
1538 | static DEVICE_ATTR_RO(name); |
1539 | |
1540 | static ssize_t devspec_show(struct device *dev, |
1541 | struct device_attribute *attr, char *buf) |
1542 | { |
1543 | struct device_node *of_node = dev->of_node; |
1544 | |
1545 | return sprintf(buf, fmt: "%pOF\n" , of_node); |
1546 | } |
1547 | static DEVICE_ATTR_RO(devspec); |
1548 | |
1549 | static ssize_t modalias_show(struct device *dev, struct device_attribute *attr, |
1550 | char *buf) |
1551 | { |
1552 | const struct vio_dev *vio_dev = to_vio_dev(dev); |
1553 | struct device_node *dn; |
1554 | const char *cp; |
1555 | |
1556 | dn = dev->of_node; |
1557 | if (!dn) { |
1558 | strcpy(p: buf, q: "\n" ); |
1559 | return strlen(buf); |
1560 | } |
1561 | cp = of_get_property(node: dn, name: "compatible" , NULL); |
1562 | if (!cp) { |
1563 | strcpy(p: buf, q: "\n" ); |
1564 | return strlen(buf); |
1565 | } |
1566 | |
1567 | return sprintf(buf, fmt: "vio:T%sS%s\n" , vio_dev->type, cp); |
1568 | } |
1569 | static DEVICE_ATTR_RO(modalias); |
1570 | |
1571 | void vio_unregister_device(struct vio_dev *viodev) |
1572 | { |
	device_unregister(&viodev->dev);
	if (viodev->family == VDEVICE)
		irq_dispose_mapping(viodev->irq);
1576 | } |
1577 | EXPORT_SYMBOL(vio_unregister_device); |
1578 | |
1579 | static int vio_bus_match(struct device *dev, struct device_driver *drv) |
1580 | { |
1581 | const struct vio_dev *vio_dev = to_vio_dev(dev); |
1582 | struct vio_driver *vio_drv = to_vio_driver(drv); |
1583 | const struct vio_device_id *ids = vio_drv->id_table; |
1584 | |
1585 | return (ids != NULL) && (vio_match_device(ids, dev: vio_dev) != NULL); |
1586 | } |
1587 | |
1588 | static int vio_hotplug(const struct device *dev, struct kobj_uevent_env *env) |
1589 | { |
1590 | const struct vio_dev *vio_dev = to_vio_dev(dev); |
1591 | const struct device_node *dn; |
1592 | const char *cp; |
1593 | |
1594 | dn = dev->of_node; |
1595 | if (!dn) |
1596 | return -ENODEV; |
	cp = of_get_property(dn, "compatible", NULL);
	if (!cp)
		return -ENODEV;

	add_uevent_var(env, "MODALIAS=vio:T%sS%s", vio_dev->type, cp);
1602 | return 0; |
1603 | } |
1604 | |
1605 | #ifdef CONFIG_PPC_SMLPAR |
1606 | static struct attribute *vio_cmo_dev_attrs[] = { |
1607 | &dev_attr_name.attr, |
1608 | &dev_attr_devspec.attr, |
1609 | &dev_attr_modalias.attr, |
1610 | &dev_attr_cmo_entitled.attr, |
1611 | &dev_attr_cmo_allocated.attr, |
1612 | &dev_attr_cmo_desired.attr, |
1613 | &dev_attr_cmo_allocs_failed.attr, |
1614 | NULL, |
1615 | }; |
1616 | ATTRIBUTE_GROUPS(vio_cmo_dev); |
1617 | |
1618 | const struct bus_type vio_bus_type = { |
1619 | .name = "vio" , |
1620 | .dev_groups = vio_cmo_dev_groups, |
1621 | .bus_groups = vio_bus_groups, |
1622 | .uevent = vio_hotplug, |
1623 | .match = vio_bus_match, |
1624 | .probe = vio_bus_probe, |
1625 | .remove = vio_bus_remove, |
1626 | .shutdown = vio_bus_shutdown, |
1627 | }; |
1628 | #else /* CONFIG_PPC_SMLPAR */ |
1629 | static struct attribute *vio_dev_attrs[] = { |
1630 | &dev_attr_name.attr, |
1631 | &dev_attr_devspec.attr, |
1632 | &dev_attr_modalias.attr, |
1633 | NULL, |
1634 | }; |
1635 | ATTRIBUTE_GROUPS(vio_dev); |
1636 | |
1637 | const struct bus_type vio_bus_type = { |
1638 | .name = "vio" , |
1639 | .dev_groups = vio_dev_groups, |
1640 | .uevent = vio_hotplug, |
1641 | .match = vio_bus_match, |
1642 | .probe = vio_bus_probe, |
1643 | .remove = vio_bus_remove, |
1644 | .shutdown = vio_bus_shutdown, |
1645 | }; |
1646 | #endif /* CONFIG_PPC_SMLPAR */ |
1647 | |
1648 | /** |
1649 | * vio_get_attribute: - get attribute for virtual device |
1650 | * @vdev: The vio device to get property. |
1651 | * @which: The property/attribute to be extracted. |
1652 | * @length: Pointer to length of returned data size (unused if NULL). |
1653 | * |
1654 | * Calls prom.c's of_get_property() to return the value of the |
1655 | * attribute specified by @which |
1656 | */ |
1657 | const void *vio_get_attribute(struct vio_dev *vdev, char *which, int *length) |
1658 | { |
	return of_get_property(vdev->dev.of_node, which, length);
1660 | } |
1661 | EXPORT_SYMBOL(vio_get_attribute); |
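
/*
 * Illustrative use (the property name is just an example): a driver can
 * fetch one of its OF properties, e.g. its DMA window, with
 *
 *	int len;
 *	const __be32 *window = vio_get_attribute(vdev, "ibm,my-dma-window",
 *						 &len);
 */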
1662 | |
1663 | /* vio_find_name() - internal because only vio.c knows how we formatted the |
1664 | * kobject name |
1665 | */ |
1666 | static struct vio_dev *vio_find_name(const char *name) |
1667 | { |
1668 | struct device *found; |
1669 | |
	found = bus_find_device_by_name(&vio_bus_type, NULL, name);
1671 | if (!found) |
1672 | return NULL; |
1673 | |
1674 | return to_vio_dev(found); |
1675 | } |
1676 | |
1677 | /** |
1678 | * vio_find_node - find an already-registered vio_dev |
1679 | * @vnode: device_node of the virtual device we're looking for |
1680 | * |
1681 | * Takes a reference to the embedded struct device which needs to be dropped |
1682 | * after use. |
1683 | */ |
1684 | struct vio_dev *vio_find_node(struct device_node *vnode) |
1685 | { |
1686 | char kobj_name[20]; |
1687 | struct device_node *vnode_parent; |
1688 | |
	vnode_parent = of_get_parent(vnode);
	if (!vnode_parent)
		return NULL;

	/* construct the kobject name from the device node */
	if (of_node_is_type(vnode_parent, "vdevice")) {
		const __be32 *prop;

		prop = of_get_property(vnode, "reg", NULL);
		if (!prop)
			goto out;
		snprintf(kobj_name, sizeof(kobj_name), "%x",
			 (uint32_t)of_read_number(prop, 1));
	} else if (of_node_is_type(vnode_parent, "ibm,platform-facilities"))
		snprintf(kobj_name, sizeof(kobj_name), "%pOFn", vnode);
	else
		goto out;

	of_node_put(vnode_parent);
	return vio_find_name(kobj_name);
out:
	of_node_put(vnode_parent);
	return NULL;
1712 | } |
1713 | EXPORT_SYMBOL(vio_find_node); |
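
/*
 * Sketch of typical use: as noted above, vio_find_node() takes a
 * reference on the embedded struct device, which the caller must drop:
 *
 *	struct vio_dev *vdev = vio_find_node(np);
 *	if (vdev) {
 *		// ... use vdev ...
 *		put_device(&vdev->dev);
 *	}
 */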
1714 | |
1715 | int vio_enable_interrupts(struct vio_dev *dev) |
1716 | { |
1717 | int rc = h_vio_signal(dev->unit_address, VIO_IRQ_ENABLE); |
1718 | if (rc != H_SUCCESS) |
1719 | printk(KERN_ERR "vio: Error 0x%x enabling interrupts\n" , rc); |
1720 | return rc; |
1721 | } |
1722 | EXPORT_SYMBOL(vio_enable_interrupts); |
1723 | |
1724 | int vio_disable_interrupts(struct vio_dev *dev) |
1725 | { |
1726 | int rc = h_vio_signal(dev->unit_address, VIO_IRQ_DISABLE); |
1727 | if (rc != H_SUCCESS) |
1728 | printk(KERN_ERR "vio: Error 0x%x disabling interrupts\n" , rc); |
1729 | return rc; |
1730 | } |
1731 | EXPORT_SYMBOL(vio_disable_interrupts); |
1732 | |
1733 | static int __init vio_init(void) |
1734 | { |
	dma_debug_add_bus(&vio_bus_type);
1736 | return 0; |
1737 | } |
1738 | machine_fs_initcall(pseries, vio_init); |
1739 | |