// SPDX-License-Identifier: GPL-2.0
/* pci_sun4v.c: SUN4V specific PCI controller support.
 *
 * Copyright (C) 2006, 2007, 2008 David S. Miller (davem@davemloft.net)
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/irq.h>
#include <linux/msi.h>
#include <linux/export.h>
#include <linux/log2.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/dma-map-ops.h>
#include <asm/iommu-common.h>

#include <asm/iommu.h>
#include <asm/irq.h>
#include <asm/hypervisor.h>
#include <asm/prom.h>

#include "pci_impl.h"
#include "iommu_common.h"
#include "kernel.h"

#include "pci_sun4v.h"

#define DRIVER_NAME	"pci_sun4v"
#define PFX		DRIVER_NAME ": "

static unsigned long vpci_major;
static unsigned long vpci_minor;

struct vpci_version {
	unsigned long major;
	unsigned long minor;
};
/* Ordered from largest major to smallest. */
static struct vpci_version vpci_versions[] = {
	{ .major = 2, .minor = 0 },
	{ .major = 1, .minor = 1 },
};
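
/* Note: iommu_batch_flush() below masks mapping attributes down to
 * READ|WRITE when the negotiated major is < 2; the extended attributes
 * (e.g. relaxed ordering) are assumed here to require vPCI 2.0.
 */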

static unsigned long vatu_major = 1;
static unsigned long vatu_minor = 1;

#define PGLIST_NENTS	(PAGE_SIZE / sizeof(u64))
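/* That is one page worth of TTE target addresses per CPU; with the 8K
 * pages used on sparc64, 1024 u64 physical addresses are queued before
 * iommu_batch_add() must flush to the hypervisor.
 */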

struct iommu_batch {
	struct device	*dev;		/* Device mapping is for.	*/
	unsigned long	prot;		/* IOMMU page protections	*/
	unsigned long	entry;		/* Index into IOTSB.		*/
	u64		*pglist;	/* List of physical pages	*/
	unsigned long	npages;		/* Number of pages in list.	*/
};

static DEFINE_PER_CPU(struct iommu_batch, iommu_batch);
static int iommu_batch_initialized;

/* Interrupts must be disabled. */
static inline void iommu_batch_start(struct device *dev, unsigned long prot, unsigned long entry)
{
	struct iommu_batch *p = this_cpu_ptr(&iommu_batch);

	p->dev = dev;
	p->prot = prot;
	p->entry = entry;
	p->npages = 0;
}

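/* The ATU (Address Translation Unit), negotiated separately via the
 * HV_GRP_ATU hypervisor API group, provides a second, 64-bit DVMA
 * window; it is only worth using when the device's DMA mask actually
 * exceeds 32 bits.
 */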
static inline bool iommu_use_atu(struct iommu *iommu, u64 mask)
{
	return iommu->atu && mask > DMA_BIT_MASK(32);
}

/* Interrupts must be disabled. */
static long iommu_batch_flush(struct iommu_batch *p, u64 mask)
{
	struct pci_pbm_info *pbm = p->dev->archdata.host_controller;
	u64 *pglist = p->pglist;
	u64 index_count;
	unsigned long devhandle = pbm->devhandle;
	unsigned long prot = p->prot;
	unsigned long entry = p->entry;
	unsigned long npages = p->npages;
	unsigned long iotsb_num;
	unsigned long ret;
	long num;

	/* VPCI maj=1, min=[0,1] only supports read and write */
	if (vpci_major < 2)
		prot &= (HV_PCI_MAP_ATTR_READ | HV_PCI_MAP_ATTR_WRITE);

	while (npages != 0) {
		if (!iommu_use_atu(pbm->iommu, mask)) {
			num = pci_sun4v_iommu_map(devhandle,
						  HV_PCI_TSBID(0, entry),
						  npages,
						  prot,
						  __pa(pglist));
			if (unlikely(num < 0)) {
				pr_err_ratelimited("%s: IOMMU map of [%08lx:%08llx:%lx:%lx:%lx] failed with status %ld\n",
						   __func__,
						   devhandle,
						   HV_PCI_TSBID(0, entry),
						   npages, prot, __pa(pglist),
						   num);
				return -1;
			}
		} else {
			index_count = HV_PCI_IOTSB_INDEX_COUNT(npages, entry);
			iotsb_num = pbm->iommu->atu->iotsb->iotsb_num;
			ret = pci_sun4v_iotsb_map(devhandle,
						  iotsb_num,
						  index_count,
						  prot,
						  __pa(pglist),
						  &num);
			if (unlikely(ret != HV_EOK)) {
				pr_err_ratelimited("%s: ATU map of [%08lx:%lx:%llx:%lx:%lx] failed with status %ld\n",
						   __func__,
						   devhandle, iotsb_num,
						   index_count, prot,
						   __pa(pglist), ret);
				return -1;
			}
		}
		entry += num;
		npages -= num;
		pglist += num;
	}

	p->entry = entry;
	p->npages = 0;

	return 0;
}

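/* dma_4v_map_sg() starts its batch with entry == ~0UL as a sentinel:
 * flush only when a real range is already pending and the newly
 * allocated range is not contiguous with it.
 */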
static inline void iommu_batch_new_entry(unsigned long entry, u64 mask)
{
	struct iommu_batch *p = this_cpu_ptr(&iommu_batch);

	if (p->entry + p->npages == entry)
		return;
	if (p->entry != ~0UL)
		iommu_batch_flush(p, mask);
	p->entry = entry;
}

/* Interrupts must be disabled. */
static inline long iommu_batch_add(u64 phys_page, u64 mask)
{
	struct iommu_batch *p = this_cpu_ptr(&iommu_batch);

	BUG_ON(p->npages >= PGLIST_NENTS);

	p->pglist[p->npages++] = phys_page;
	if (p->npages == PGLIST_NENTS)
		return iommu_batch_flush(p, mask);

	return 0;
}

/* Interrupts must be disabled. */
static inline long iommu_batch_end(u64 mask)
{
	struct iommu_batch *p = this_cpu_ptr(&iommu_batch);

	BUG_ON(p->npages >= PGLIST_NENTS);

	return iommu_batch_flush(p, mask);
}

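/* The batching API above is used, roughly, as follows, with interrupts
 * disabled for the whole sequence:
 *
 *	iommu_batch_start(dev, prot, entry);
 *	for each physical page:
 *		iommu_batch_add(phys_page, mask);
 *	iommu_batch_end(mask);
 *
 * iommu_batch_add() flushes on its own once PGLIST_NENTS pages have
 * been queued.
 */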
static void *dma_4v_alloc_coherent(struct device *dev, size_t size,
				   dma_addr_t *dma_addrp, gfp_t gfp,
				   unsigned long attrs)
{
	u64 mask;
	unsigned long flags, order, first_page, npages, n;
	unsigned long prot = 0;
	struct iommu *iommu;
	struct iommu_map_table *tbl;
	struct page *page;
	void *ret;
	long entry;
	int nid;

	size = IO_PAGE_ALIGN(size);
	order = get_order(size);
	if (unlikely(order > MAX_PAGE_ORDER))
		return NULL;

	npages = size >> IO_PAGE_SHIFT;

	if (attrs & DMA_ATTR_WEAK_ORDERING)
		prot = HV_PCI_MAP_ATTR_RELAXED_ORDER;

	nid = dev->archdata.numa_node;
	page = alloc_pages_node(nid, gfp, order);
	if (unlikely(!page))
		return NULL;

	first_page = (unsigned long) page_address(page);
	memset((char *)first_page, 0, PAGE_SIZE << order);

	iommu = dev->archdata.iommu;
	mask = dev->coherent_dma_mask;
	if (!iommu_use_atu(iommu, mask))
		tbl = &iommu->tbl;
	else
		tbl = &iommu->atu->tbl;

	entry = iommu_tbl_range_alloc(dev, tbl, npages, NULL,
				      (unsigned long)(-1), 0);

	if (unlikely(entry == IOMMU_ERROR_CODE))
		goto range_alloc_fail;

	*dma_addrp = (tbl->table_map_base + (entry << IO_PAGE_SHIFT));
	ret = (void *) first_page;
	first_page = __pa(first_page);

	local_irq_save(flags);

	iommu_batch_start(dev,
			  (HV_PCI_MAP_ATTR_READ | prot |
			   HV_PCI_MAP_ATTR_WRITE),
			  entry);

	for (n = 0; n < npages; n++) {
		long err = iommu_batch_add(first_page + (n * PAGE_SIZE), mask);
		if (unlikely(err < 0L))
			goto iommu_map_fail;
	}

	if (unlikely(iommu_batch_end(mask) < 0L))
		goto iommu_map_fail;

	local_irq_restore(flags);

	return ret;

iommu_map_fail:
	local_irq_restore(flags);
	iommu_tbl_range_free(tbl, *dma_addrp, npages, IOMMU_ERROR_CODE);

range_alloc_fail:
	free_pages(first_page, order);
	return NULL;
}

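/* Bind every device below @bus_dev to the shared IOTSB, recursing
 * through bridges so that only leaf devices are bound.
 */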
static unsigned long dma_4v_iotsb_bind(unsigned long devhandle,
				       unsigned long iotsb_num,
				       struct pci_bus *bus_dev)
{
	struct pci_dev *pdev;
	unsigned long err;
	unsigned int bus;
	unsigned int device;
	unsigned int fun;

	list_for_each_entry(pdev, &bus_dev->devices, bus_list) {
		if (pdev->subordinate) {
			/* No need to bind pci bridge */
			dma_4v_iotsb_bind(devhandle, iotsb_num,
					  pdev->subordinate);
		} else {
			bus = bus_dev->number;
			device = PCI_SLOT(pdev->devfn);
			fun = PCI_FUNC(pdev->devfn);
			err = pci_sun4v_iotsb_bind(devhandle, iotsb_num,
						   HV_PCI_DEVICE_BUILD(bus,
								       device,
								       fun));

			/* If bind fails for one device it is going to fail
			 * for the rest of the devices as well, because we
			 * are sharing the IOTSB. So in case of failure
			 * simply return the error.
			 */
			if (err)
				return err;
		}
	}

	return 0;
}

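/* Tear down a mapping: DVMA addresses at or below 4GB live in the
 * legacy IOMMU, anything higher in the ATU IOTSB. The hypervisor may
 * demap fewer TTEs than requested, hence the loop.
 */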
static void dma_4v_iommu_demap(struct device *dev, unsigned long devhandle,
			       dma_addr_t dvma, unsigned long iotsb_num,
			       unsigned long entry, unsigned long npages)
{
	unsigned long num, flags;
	unsigned long ret;

	local_irq_save(flags);
	do {
		if (dvma <= DMA_BIT_MASK(32)) {
			num = pci_sun4v_iommu_demap(devhandle,
						    HV_PCI_TSBID(0, entry),
						    npages);
		} else {
			ret = pci_sun4v_iotsb_demap(devhandle, iotsb_num,
						    entry, npages, &num);
			if (unlikely(ret != HV_EOK)) {
				pr_err_ratelimited("pci_iotsb_demap() failed with error: %ld\n",
						   ret);
			}
		}
		entry += num;
		npages -= num;
	} while (npages != 0);
	local_irq_restore(flags);
}

static void dma_4v_free_coherent(struct device *dev, size_t size, void *cpu,
				 dma_addr_t dvma, unsigned long attrs)
{
	struct pci_pbm_info *pbm;
	struct iommu *iommu;
	struct atu *atu;
	struct iommu_map_table *tbl;
	unsigned long order, npages, entry;
	unsigned long iotsb_num;
	u32 devhandle;

	npages = IO_PAGE_ALIGN(size) >> IO_PAGE_SHIFT;
	iommu = dev->archdata.iommu;
	pbm = dev->archdata.host_controller;
	atu = iommu->atu;
	devhandle = pbm->devhandle;

	if (!iommu_use_atu(iommu, dvma)) {
		tbl = &iommu->tbl;
		iotsb_num = 0; /* we don't care for legacy iommu */
	} else {
		tbl = &atu->tbl;
		iotsb_num = atu->iotsb->iotsb_num;
	}
	entry = ((dvma - tbl->table_map_base) >> IO_PAGE_SHIFT);
	dma_4v_iommu_demap(dev, devhandle, dvma, iotsb_num, entry, npages);
	iommu_tbl_range_free(tbl, dvma, npages, IOMMU_ERROR_CODE);
	order = get_order(size);
	if (order < 10)
		free_pages((unsigned long)cpu, order);
}

static dma_addr_t dma_4v_map_page(struct device *dev, struct page *page,
				  unsigned long offset, size_t sz,
				  enum dma_data_direction direction,
				  unsigned long attrs)
{
	struct iommu *iommu;
	struct atu *atu;
	struct iommu_map_table *tbl;
	u64 mask;
	unsigned long flags, npages, oaddr;
	unsigned long i, base_paddr;
	unsigned long prot;
	dma_addr_t bus_addr, ret;
	long entry;

	iommu = dev->archdata.iommu;
	atu = iommu->atu;

	if (unlikely(direction == DMA_NONE))
		goto bad;

	oaddr = (unsigned long)(page_address(page) + offset);
	npages = IO_PAGE_ALIGN(oaddr + sz) - (oaddr & IO_PAGE_MASK);
	npages >>= IO_PAGE_SHIFT;

	mask = *dev->dma_mask;
	if (!iommu_use_atu(iommu, mask))
		tbl = &iommu->tbl;
	else
		tbl = &atu->tbl;

	entry = iommu_tbl_range_alloc(dev, tbl, npages, NULL,
				      (unsigned long)(-1), 0);

	if (unlikely(entry == IOMMU_ERROR_CODE))
		goto bad;

	bus_addr = (tbl->table_map_base + (entry << IO_PAGE_SHIFT));
	ret = bus_addr | (oaddr & ~IO_PAGE_MASK);
	base_paddr = __pa(oaddr & IO_PAGE_MASK);
	prot = HV_PCI_MAP_ATTR_READ;
	if (direction != DMA_TO_DEVICE)
		prot |= HV_PCI_MAP_ATTR_WRITE;

	if (attrs & DMA_ATTR_WEAK_ORDERING)
		prot |= HV_PCI_MAP_ATTR_RELAXED_ORDER;

	local_irq_save(flags);

	iommu_batch_start(dev, prot, entry);

	for (i = 0; i < npages; i++, base_paddr += IO_PAGE_SIZE) {
		long err = iommu_batch_add(base_paddr, mask);
		if (unlikely(err < 0L))
			goto iommu_map_fail;
	}
	if (unlikely(iommu_batch_end(mask) < 0L))
		goto iommu_map_fail;

	local_irq_restore(flags);

	return ret;

bad:
	if (printk_ratelimit())
		WARN_ON(1);
	return DMA_MAPPING_ERROR;

iommu_map_fail:
	local_irq_restore(flags);
	iommu_tbl_range_free(tbl, bus_addr, npages, IOMMU_ERROR_CODE);
	return DMA_MAPPING_ERROR;
}

static void dma_4v_unmap_page(struct device *dev, dma_addr_t bus_addr,
			      size_t sz, enum dma_data_direction direction,
			      unsigned long attrs)
{
	struct pci_pbm_info *pbm;
	struct iommu *iommu;
	struct atu *atu;
	struct iommu_map_table *tbl;
	unsigned long npages;
	unsigned long iotsb_num;
	long entry;
	u32 devhandle;

	if (unlikely(direction == DMA_NONE)) {
		if (printk_ratelimit())
			WARN_ON(1);
		return;
	}

	iommu = dev->archdata.iommu;
	pbm = dev->archdata.host_controller;
	atu = iommu->atu;
	devhandle = pbm->devhandle;

	npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK);
	npages >>= IO_PAGE_SHIFT;
	bus_addr &= IO_PAGE_MASK;

	if (bus_addr <= DMA_BIT_MASK(32)) {
		iotsb_num = 0; /* we don't care for legacy iommu */
		tbl = &iommu->tbl;
	} else {
		iotsb_num = atu->iotsb->iotsb_num;
		tbl = &atu->tbl;
	}
	entry = (bus_addr - tbl->table_map_base) >> IO_PAGE_SHIFT;
	dma_4v_iommu_demap(dev, devhandle, bus_addr, iotsb_num, entry, npages);
	iommu_tbl_range_free(tbl, bus_addr, npages, IOMMU_ERROR_CODE);
}

static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist,
			 int nelems, enum dma_data_direction direction,
			 unsigned long attrs)
{
	struct scatterlist *s, *outs, *segstart;
	unsigned long flags, handle, prot;
	dma_addr_t dma_next = 0, dma_addr;
	unsigned int max_seg_size;
	unsigned long seg_boundary_size;
	int outcount, incount, i;
	struct iommu *iommu;
	struct atu *atu;
	struct iommu_map_table *tbl;
	u64 mask;
	unsigned long base_shift;
	long err;

	BUG_ON(direction == DMA_NONE);

	iommu = dev->archdata.iommu;
	if (nelems == 0 || !iommu)
		return -EINVAL;
	atu = iommu->atu;

	prot = HV_PCI_MAP_ATTR_READ;
	if (direction != DMA_TO_DEVICE)
		prot |= HV_PCI_MAP_ATTR_WRITE;

	if (attrs & DMA_ATTR_WEAK_ORDERING)
		prot |= HV_PCI_MAP_ATTR_RELAXED_ORDER;

	outs = s = segstart = &sglist[0];
	outcount = 1;
	incount = nelems;
	handle = 0;

	/* Init first segment length for backout at failure */
	outs->dma_length = 0;

	local_irq_save(flags);

	iommu_batch_start(dev, prot, ~0UL);

	max_seg_size = dma_get_max_seg_size(dev);
	seg_boundary_size = dma_get_seg_boundary_nr_pages(dev, IO_PAGE_SHIFT);

	mask = *dev->dma_mask;
	if (!iommu_use_atu(iommu, mask))
		tbl = &iommu->tbl;
	else
		tbl = &atu->tbl;

	base_shift = tbl->table_map_base >> IO_PAGE_SHIFT;

	for_each_sg(sglist, s, nelems, i) {
		unsigned long paddr, npages, entry, out_entry = 0, slen;

		slen = s->length;
		/* Sanity check */
		if (slen == 0) {
			dma_next = 0;
			continue;
		}
		/* Allocate iommu entries for that segment */
		paddr = (unsigned long) SG_ENT_PHYS_ADDRESS(s);
		npages = iommu_num_pages(paddr, slen, IO_PAGE_SIZE);
		entry = iommu_tbl_range_alloc(dev, tbl, npages,
					      &handle, (unsigned long)(-1), 0);

		/* Handle failure */
		if (unlikely(entry == IOMMU_ERROR_CODE)) {
			pr_err_ratelimited("iommu_alloc failed, iommu %p paddr %lx npages %lx\n",
					   tbl, paddr, npages);
			goto iommu_map_failed;
		}

		iommu_batch_new_entry(entry, mask);

		/* Convert entry to a dma_addr_t */
		dma_addr = tbl->table_map_base + (entry << IO_PAGE_SHIFT);
		dma_addr |= (s->offset & ~IO_PAGE_MASK);

		/* Insert into HW table */
		paddr &= IO_PAGE_MASK;
		while (npages--) {
			err = iommu_batch_add(paddr, mask);
			if (unlikely(err < 0L))
				goto iommu_map_failed;
			paddr += IO_PAGE_SIZE;
		}

		/* If we are in an open segment, try merging */
		if (segstart != s) {
			/* We cannot merge if:
			 * - allocated dma_addr isn't contiguous to previous allocation
			 */
			if ((dma_addr != dma_next) ||
			    (outs->dma_length + s->length > max_seg_size) ||
			    (is_span_boundary(out_entry, base_shift,
					      seg_boundary_size, outs, s))) {
				/* Can't merge: create a new segment */
				segstart = s;
				outcount++;
				outs = sg_next(outs);
			} else {
				outs->dma_length += s->length;
			}
		}

		if (segstart == s) {
			/* This is a new segment, fill entries */
			outs->dma_address = dma_addr;
			outs->dma_length = slen;
			out_entry = entry;
		}

		/* Calculate next page pointer for contiguous check */
		dma_next = dma_addr + slen;
	}

	err = iommu_batch_end(mask);

	if (unlikely(err < 0L))
		goto iommu_map_failed;

	local_irq_restore(flags);

	if (outcount < incount) {
		outs = sg_next(outs);
		outs->dma_length = 0;
	}

	return outcount;

iommu_map_failed:
	for_each_sg(sglist, s, nelems, i) {
		if (s->dma_length != 0) {
			unsigned long vaddr, npages;

			vaddr = s->dma_address & IO_PAGE_MASK;
			npages = iommu_num_pages(s->dma_address, s->dma_length,
						 IO_PAGE_SIZE);
			iommu_tbl_range_free(tbl, vaddr, npages,
					     IOMMU_ERROR_CODE);
			/* XXX demap? XXX */
			s->dma_length = 0;
		}
		if (s == outs)
			break;
	}
	local_irq_restore(flags);

	return -EINVAL;
}

static void dma_4v_unmap_sg(struct device *dev, struct scatterlist *sglist,
			    int nelems, enum dma_data_direction direction,
			    unsigned long attrs)
{
	struct pci_pbm_info *pbm;
	struct scatterlist *sg;
	struct iommu *iommu;
	struct atu *atu;
	unsigned long flags, entry;
	unsigned long iotsb_num;
	u32 devhandle;

	BUG_ON(direction == DMA_NONE);

	iommu = dev->archdata.iommu;
	pbm = dev->archdata.host_controller;
	atu = iommu->atu;
	devhandle = pbm->devhandle;

	local_irq_save(flags);

	sg = sglist;
	while (nelems--) {
		dma_addr_t dma_handle = sg->dma_address;
		unsigned int len = sg->dma_length;
		unsigned long npages;
		struct iommu_map_table *tbl;
		unsigned long shift = IO_PAGE_SHIFT;

		if (!len)
			break;
		npages = iommu_num_pages(dma_handle, len, IO_PAGE_SIZE);

		if (dma_handle <= DMA_BIT_MASK(32)) {
			iotsb_num = 0; /* we don't care for legacy iommu */
			tbl = &iommu->tbl;
		} else {
			iotsb_num = atu->iotsb->iotsb_num;
			tbl = &atu->tbl;
		}
		entry = ((dma_handle - tbl->table_map_base) >> shift);
		dma_4v_iommu_demap(dev, devhandle, dma_handle, iotsb_num,
				   entry, npages);
		iommu_tbl_range_free(tbl, dma_handle, npages,
				     IOMMU_ERROR_CODE);
		sg = sg_next(sg);
	}

	local_irq_restore(flags);
}

static int dma_4v_supported(struct device *dev, u64 device_mask)
{
	struct iommu *iommu = dev->archdata.iommu;

	if (ali_sound_dma_hack(dev, device_mask))
		return 1;
	if (device_mask < iommu->dma_addr_mask)
		return 0;
	return 1;
}

static const struct dma_map_ops sun4v_dma_ops = {
	.alloc			= dma_4v_alloc_coherent,
	.free			= dma_4v_free_coherent,
	.map_page		= dma_4v_map_page,
	.unmap_page		= dma_4v_unmap_page,
	.map_sg			= dma_4v_map_sg,
	.unmap_sg		= dma_4v_unmap_sg,
	.dma_supported		= dma_4v_supported,
};

static void pci_sun4v_scan_bus(struct pci_pbm_info *pbm, struct device *parent)
{
	struct property *prop;
	struct device_node *dp;

	dp = pbm->op->dev.of_node;
	prop = of_find_property(dp, "66mhz-capable", NULL);
	pbm->is_66mhz_capable = (prop != NULL);
	pbm->pci_bus = pci_scan_one_pbm(pbm, parent);

	/* XXX register error interrupt handlers XXX */
}

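/* OBP can leave IOMMU translations live across the hand-off to the
 * kernel (presumably for devices such as the console). Keep any entry
 * whose target page is not in the kernel's available physical memory
 * list, and demap the rest.
 */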
static unsigned long probe_existing_entries(struct pci_pbm_info *pbm,
					    struct iommu_map_table *iommu)
{
	struct iommu_pool *pool;
	unsigned long i, pool_nr, cnt = 0;
	u32 devhandle;

	devhandle = pbm->devhandle;
	for (pool_nr = 0; pool_nr < iommu->nr_pools; pool_nr++) {
		pool = &(iommu->pools[pool_nr]);
		for (i = pool->start; i <= pool->end; i++) {
			unsigned long ret, io_attrs, ra;

			ret = pci_sun4v_iommu_getmap(devhandle,
						     HV_PCI_TSBID(0, i),
						     &io_attrs, &ra);
			if (ret == HV_EOK) {
				if (page_in_phys_avail(ra)) {
					pci_sun4v_iommu_demap(devhandle,
							      HV_PCI_TSBID(0,
									   i),
							      1);
				} else {
					cnt++;
					__set_bit(i, iommu->map);
				}
			}
		}
	}
	return cnt;
}

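/* Allocate and register the IOTSB (I/O Translation Storage Buffer)
 * backing the ATU: one 8-byte IOTTE per IO_PAGE_SIZE page of DVMA
 * space, configured with the hypervisor and then bound to every device
 * that will share it.
 */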
static int pci_sun4v_atu_alloc_iotsb(struct pci_pbm_info *pbm)
{
	struct atu *atu = pbm->iommu->atu;
	struct atu_iotsb *iotsb;
	void *table;
	u64 table_size;
	u64 iotsb_num;
	unsigned long order;
	unsigned long err;

	iotsb = kzalloc(sizeof(*iotsb), GFP_KERNEL);
	if (!iotsb) {
		err = -ENOMEM;
		goto out_err;
	}
	atu->iotsb = iotsb;

	/* calculate size of IOTSB */
	table_size = (atu->size / IO_PAGE_SIZE) * 8;
	order = get_order(table_size);
	table = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
	if (!table) {
		err = -ENOMEM;
		goto table_failed;
	}
	iotsb->table = table;
	iotsb->ra = __pa(table);
	iotsb->dvma_size = atu->size;
	iotsb->dvma_base = atu->base;
	iotsb->table_size = table_size;
	iotsb->page_size = IO_PAGE_SIZE;

	/* configure and register IOTSB with HV */
	err = pci_sun4v_iotsb_conf(pbm->devhandle,
				   iotsb->ra,
				   iotsb->table_size,
				   iotsb->page_size,
				   iotsb->dvma_base,
				   &iotsb_num);
	if (err) {
		pr_err(PFX "pci_iotsb_conf failed error: %ld\n", err);
		goto iotsb_conf_failed;
	}
	iotsb->iotsb_num = iotsb_num;

	err = dma_4v_iotsb_bind(pbm->devhandle, iotsb_num, pbm->pci_bus);
	if (err) {
		pr_err(PFX "pci_iotsb_bind failed error: %ld\n", err);
		goto iotsb_conf_failed;
	}

	return 0;

iotsb_conf_failed:
	free_pages((unsigned long)table, order);
table_failed:
	kfree(iotsb);
out_err:
	return err;
}

static int pci_sun4v_atu_init(struct pci_pbm_info *pbm)
{
	struct atu *atu = pbm->iommu->atu;
	unsigned long err;
	const u64 *ranges;
	u64 map_size, num_iotte;
	u64 dma_mask;
	const u32 *page_size;
	int len;

	ranges = of_get_property(pbm->op->dev.of_node, "iommu-address-ranges",
				 &len);
	if (!ranges) {
		pr_err(PFX "No iommu-address-ranges\n");
		return -EINVAL;
	}

	page_size = of_get_property(pbm->op->dev.of_node, "iommu-pagesizes",
				    NULL);
	if (!page_size) {
		pr_err(PFX "No iommu-pagesizes\n");
		return -EINVAL;
	}

	/* There are 4 iommu-address-ranges supported. Each range is a pair
	 * of {base, size}. The ranges[0] and ranges[1] are 32bit address
	 * space while ranges[2] and ranges[3] are 64bit space. We want to
	 * use the 64bit address ranges to support 64bit addressing. Because
	 * the 'size' of ranges[2] and ranges[3] is the same, we can select
	 * either of them for mapping. However, since that 'size' is too
	 * large for the OS to allocate an IOTSB for, we use a fixed size of
	 * 32G (ATU_64_SPACE_SIZE), which is more than enough for all PCIe
	 * devices to share.
	 */
	atu->ranges = (struct atu_ranges *)ranges;
	atu->base = atu->ranges[3].base;
	atu->size = ATU_64_SPACE_SIZE;

	/* Create IOTSB */
	err = pci_sun4v_atu_alloc_iotsb(pbm);
	if (err) {
		pr_err(PFX "Error creating ATU IOTSB\n");
		return err;
	}

	/* Create ATU iommu map.
	 * One bit represents one iotte in the IOTSB table.
	 */
	dma_mask = (roundup_pow_of_two(atu->size) - 1UL);
	num_iotte = atu->size / IO_PAGE_SIZE;
	map_size = num_iotte / 8;
	atu->tbl.table_map_base = atu->base;
	atu->dma_addr_mask = dma_mask;
	atu->tbl.map = kzalloc(map_size, GFP_KERNEL);
	if (!atu->tbl.map)
		return -ENOMEM;

	iommu_tbl_pool_init(&atu->tbl, num_iotte, IO_PAGE_SHIFT,
			    NULL, false /* no large_pool */,
			    0 /* default npools */,
			    false /* want span boundary checking */);

	return 0;
}

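/* Without a "virtual-dma" property we fall back to a 2GB DVMA window
 * starting at offset 2GB; vdma_default below encodes {base, size} as
 * {0x80000000, 0x80000000}.
 */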
static int pci_sun4v_iommu_init(struct pci_pbm_info *pbm)
{
	static const u32 vdma_default[] = { 0x80000000, 0x80000000 };
	struct iommu *iommu = pbm->iommu;
	unsigned long num_tsb_entries, sz;
	u32 dma_mask, dma_offset;
	const u32 *vdma;

	vdma = of_get_property(pbm->op->dev.of_node, "virtual-dma", NULL);
	if (!vdma)
		vdma = vdma_default;

	if ((vdma[0] | vdma[1]) & ~IO_PAGE_MASK) {
		printk(KERN_ERR PFX "Strange virtual-dma[%08x:%08x].\n",
		       vdma[0], vdma[1]);
		return -EINVAL;
	}

	dma_mask = (roundup_pow_of_two(vdma[1]) - 1UL);
	num_tsb_entries = vdma[1] / IO_PAGE_SIZE;

	dma_offset = vdma[0];

	/* Setup initial software IOMMU state. */
	spin_lock_init(&iommu->lock);
	iommu->ctx_lowest_free = 1;
	iommu->tbl.table_map_base = dma_offset;
	iommu->dma_addr_mask = dma_mask;

	/* Allocate and initialize the free area map. */
	sz = (num_tsb_entries + 7) / 8;
	sz = (sz + 7UL) & ~7UL;
	iommu->tbl.map = kzalloc(sz, GFP_KERNEL);
	if (!iommu->tbl.map) {
		printk(KERN_ERR PFX "Error, kmalloc(arena.map) failed.\n");
		return -ENOMEM;
	}
	iommu_tbl_pool_init(&iommu->tbl, num_tsb_entries, IO_PAGE_SHIFT,
			    NULL, false /* no large_pool */,
			    0 /* default npools */,
			    false /* want span boundary checking */);
	sz = probe_existing_entries(pbm, &iommu->tbl);
	if (sz)
		printk("%s: Imported %lu TSB entries from OBP\n",
		       pbm->name, sz);

	return 0;
}

#ifdef CONFIG_PCI_MSI
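/* Layout of one 64-byte MSI event queue entry as written by the
 * hypervisor; queue head/tail pointers are byte offsets into the
 * queue (see pci_sun4v_get_head() below).
 */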
struct pci_sun4v_msiq_entry {
	u64		version_type;
#define MSIQ_VERSION_MASK		0xffffffff00000000UL
#define MSIQ_VERSION_SHIFT		32
#define MSIQ_TYPE_MASK			0x00000000000000ffUL
#define MSIQ_TYPE_SHIFT			0
#define MSIQ_TYPE_NONE			0x00
#define MSIQ_TYPE_MSG			0x01
#define MSIQ_TYPE_MSI32			0x02
#define MSIQ_TYPE_MSI64			0x03
#define MSIQ_TYPE_INTX			0x08
#define MSIQ_TYPE_NONE2			0xff

	u64		intx_sysino;
	u64		reserved1;
	u64		stick;
	u64		req_id;  /* bus/device/func */
#define MSIQ_REQID_BUS_MASK		0xff00UL
#define MSIQ_REQID_BUS_SHIFT		8
#define MSIQ_REQID_DEVICE_MASK		0x00f8UL
#define MSIQ_REQID_DEVICE_SHIFT		3
#define MSIQ_REQID_FUNC_MASK		0x0007UL
#define MSIQ_REQID_FUNC_SHIFT		0

	u64		msi_address;

	/* The format of this value is message type dependent.
	 * For MSI bits 15:0 are the data from the MSI packet.
	 * For MSI-X bits 31:0 are the data from the MSI packet.
	 * For MSG, the value holds the message code and message
	 * routing code, where:
	 *	bits 39:32 is the bus/device/fn of the msg target-id
	 *	bits 18:16 is the message routing code
	 *	bits 7:0 is the message code
	 * For INTx the low order 2-bits are:
	 *	00 - INTA
	 *	01 - INTB
	 *	10 - INTC
	 *	11 - INTD
	 */
	u64		msi_data;

	u64		reserved2;
};

static int pci_sun4v_get_head(struct pci_pbm_info *pbm, unsigned long msiqid,
			      unsigned long *head)
{
	unsigned long err, limit;

	err = pci_sun4v_msiq_gethead(pbm->devhandle, msiqid, head);
	if (unlikely(err))
		return -ENXIO;

	limit = pbm->msiq_ent_count * sizeof(struct pci_sun4v_msiq_entry);
	if (unlikely(*head >= limit))
		return -EFBIG;

	return 0;
}

static int pci_sun4v_dequeue_msi(struct pci_pbm_info *pbm,
				 unsigned long msiqid, unsigned long *head,
				 unsigned long *msi)
{
	struct pci_sun4v_msiq_entry *ep;
	unsigned long err, type;

	/* Note: void pointer arithmetic, 'head' is a byte offset */
	ep = (pbm->msi_queues + ((msiqid - pbm->msiq_first) *
				 (pbm->msiq_ent_count *
				  sizeof(struct pci_sun4v_msiq_entry))) +
	      *head);

	if ((ep->version_type & MSIQ_TYPE_MASK) == 0)
		return 0;

	type = (ep->version_type & MSIQ_TYPE_MASK) >> MSIQ_TYPE_SHIFT;
	if (unlikely(type != MSIQ_TYPE_MSI32 &&
		     type != MSIQ_TYPE_MSI64))
		return -EINVAL;

	*msi = ep->msi_data;

	err = pci_sun4v_msi_setstate(pbm->devhandle,
				     ep->msi_data /* msi_num */,
				     HV_MSISTATE_IDLE);
	if (unlikely(err))
		return -ENXIO;

	/* Clear the entry. */
	ep->version_type &= ~MSIQ_TYPE_MASK;

	(*head) += sizeof(struct pci_sun4v_msiq_entry);
	if (*head >=
	    (pbm->msiq_ent_count * sizeof(struct pci_sun4v_msiq_entry)))
		*head = 0;

	return 1;
}

static int pci_sun4v_set_head(struct pci_pbm_info *pbm, unsigned long msiqid,
			      unsigned long head)
{
	unsigned long err;

	err = pci_sun4v_msiq_sethead(pbm->devhandle, msiqid, head);
	if (unlikely(err))
		return -EINVAL;

	return 0;
}

static int pci_sun4v_msi_setup(struct pci_pbm_info *pbm, unsigned long msiqid,
			       unsigned long msi, int is_msi64)
{
	if (pci_sun4v_msi_setmsiq(pbm->devhandle, msi, msiqid,
				  (is_msi64 ?
				   HV_MSITYPE_MSI64 : HV_MSITYPE_MSI32)))
		return -ENXIO;
	if (pci_sun4v_msi_setstate(pbm->devhandle, msi, HV_MSISTATE_IDLE))
		return -ENXIO;
	if (pci_sun4v_msi_setvalid(pbm->devhandle, msi, HV_MSIVALID_VALID))
		return -ENXIO;
	return 0;
}

static int pci_sun4v_msi_teardown(struct pci_pbm_info *pbm, unsigned long msi)
{
	unsigned long err, msiqid;

	err = pci_sun4v_msi_getmsiq(pbm->devhandle, msi, &msiqid);
	if (err)
		return -ENXIO;

	pci_sun4v_msi_setvalid(pbm->devhandle, msi, HV_MSIVALID_INVALID);

	return 0;
}

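/* All event queues for the PBM are carved out of a single physically
 * contiguous allocation; each queue is registered with the hypervisor
 * and the configuration read back to verify it took effect.
 * Configuring a queue with zero entries (see pci_sun4v_msiq_free())
 * presumably unregisters it.
 */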
static int pci_sun4v_msiq_alloc(struct pci_pbm_info *pbm)
{
	unsigned long q_size, alloc_size, pages, order;
	int i;

	q_size = pbm->msiq_ent_count * sizeof(struct pci_sun4v_msiq_entry);
	alloc_size = (pbm->msiq_num * q_size);
	order = get_order(alloc_size);
	pages = __get_free_pages(GFP_KERNEL | __GFP_COMP, order);
	if (pages == 0UL) {
		printk(KERN_ERR "MSI: Cannot allocate MSI queues (o=%lu).\n",
		       order);
		return -ENOMEM;
	}
	memset((char *)pages, 0, PAGE_SIZE << order);
	pbm->msi_queues = (void *) pages;

	for (i = 0; i < pbm->msiq_num; i++) {
		unsigned long err, base = __pa(pages + (i * q_size));
		unsigned long ret1, ret2;

		err = pci_sun4v_msiq_conf(pbm->devhandle,
					  pbm->msiq_first + i,
					  base, pbm->msiq_ent_count);
		if (err) {
			printk(KERN_ERR "MSI: msiq register fails (err=%lu)\n",
			       err);
			goto h_error;
		}

		err = pci_sun4v_msiq_info(pbm->devhandle,
					  pbm->msiq_first + i,
					  &ret1, &ret2);
		if (err) {
			printk(KERN_ERR "MSI: Cannot read msiq (err=%lu)\n",
			       err);
			goto h_error;
		}
		if (ret1 != base || ret2 != pbm->msiq_ent_count) {
			printk(KERN_ERR "MSI: Bogus qconf "
			       "expected[%lx:%x] got[%lx:%lx]\n",
			       base, pbm->msiq_ent_count,
			       ret1, ret2);
			goto h_error;
		}
	}

	return 0;

h_error:
	free_pages(pages, order);
	return -EINVAL;
}

static void pci_sun4v_msiq_free(struct pci_pbm_info *pbm)
{
	unsigned long q_size, alloc_size, pages, order;
	int i;

	for (i = 0; i < pbm->msiq_num; i++) {
		unsigned long msiqid = pbm->msiq_first + i;

		(void) pci_sun4v_msiq_conf(pbm->devhandle, msiqid, 0UL, 0);
	}

	q_size = pbm->msiq_ent_count * sizeof(struct pci_sun4v_msiq_entry);
	alloc_size = (pbm->msiq_num * q_size);
	order = get_order(alloc_size);

	pages = (unsigned long) pbm->msi_queues;

	free_pages(pages, order);

	pbm->msi_queues = NULL;
}

static int pci_sun4v_msiq_build_irq(struct pci_pbm_info *pbm,
				    unsigned long msiqid,
				    unsigned long devino)
{
	unsigned int irq = sun4v_build_irq(pbm->devhandle, devino);

	if (!irq)
		return -ENOMEM;

	if (pci_sun4v_msiq_setvalid(pbm->devhandle, msiqid, HV_MSIQ_VALID))
		return -EINVAL;
	if (pci_sun4v_msiq_setstate(pbm->devhandle, msiqid, HV_MSIQSTATE_IDLE))
		return -EINVAL;

	return irq;
}

static const struct sparc64_msiq_ops pci_sun4v_msiq_ops = {
	.get_head	= pci_sun4v_get_head,
	.dequeue_msi	= pci_sun4v_dequeue_msi,
	.set_head	= pci_sun4v_set_head,
	.msi_setup	= pci_sun4v_msi_setup,
	.msi_teardown	= pci_sun4v_msi_teardown,
	.msiq_alloc	= pci_sun4v_msiq_alloc,
	.msiq_free	= pci_sun4v_msiq_free,
	.msiq_build_irq	= pci_sun4v_msiq_build_irq,
};

static void pci_sun4v_msi_init(struct pci_pbm_info *pbm)
{
	sparc64_pbm_msi_init(pbm, &pci_sun4v_msiq_ops);
}
#else /* CONFIG_PCI_MSI */
static void pci_sun4v_msi_init(struct pci_pbm_info *pbm)
{
}
#endif /* !(CONFIG_PCI_MSI) */

static int pci_sun4v_pbm_init(struct pci_pbm_info *pbm,
			      struct platform_device *op, u32 devhandle)
{
	struct device_node *dp = op->dev.of_node;
	int err;

	pbm->numa_node = of_node_to_nid(dp);

	pbm->pci_ops = &sun4v_pci_ops;
	pbm->config_space_reg_bits = 12;

	pbm->index = pci_num_pbms++;

	pbm->op = op;

	pbm->devhandle = devhandle;

	pbm->name = dp->full_name;

	printk("%s: SUN4V PCI Bus Module\n", pbm->name);
	printk("%s: On NUMA node %d\n", pbm->name, pbm->numa_node);

	pci_determine_mem_io_space(pbm);

	pci_get_pbm_props(pbm);

	err = pci_sun4v_iommu_init(pbm);
	if (err)
		return err;

	pci_sun4v_msi_init(pbm);

	pci_sun4v_scan_bus(pbm, &op->dev);

	/* If atu_init fails it is not a complete failure; we can
	 * still continue using the legacy iommu.
	 */
	if (pbm->iommu->atu) {
		err = pci_sun4v_atu_init(pbm);
		if (err) {
			kfree(pbm->iommu->atu);
			pbm->iommu->atu = NULL;
			pr_err(PFX "ATU init failed, err=%d\n", err);
		}
	}

	pbm->next = pci_pbm_root;
	pci_pbm_root = pbm;

	return 0;
}

static int pci_sun4v_probe(struct platform_device *op)
{
	const struct linux_prom64_registers *regs;
	static int hvapi_negotiated = 0;
	struct pci_pbm_info *pbm;
	struct device_node *dp;
	struct iommu *iommu;
	struct atu *atu;
	u32 devhandle;
	int i, err = -ENODEV;
	static bool hv_atu = true;

	dp = op->dev.of_node;

	if (!hvapi_negotiated++) {
		for (i = 0; i < ARRAY_SIZE(vpci_versions); i++) {
			vpci_major = vpci_versions[i].major;
			vpci_minor = vpci_versions[i].minor;

			err = sun4v_hvapi_register(HV_GRP_PCI, vpci_major,
						   &vpci_minor);
			if (!err)
				break;
		}

		if (err) {
			pr_err(PFX "Could not register hvapi, err=%d\n", err);
			return err;
		}
		pr_info(PFX "Registered hvapi major[%lu] minor[%lu]\n",
			vpci_major, vpci_minor);

		err = sun4v_hvapi_register(HV_GRP_ATU, vatu_major, &vatu_minor);
		if (err) {
			/* don't return an error if we fail to register the
			 * ATU group, but ATU hcalls won't be available.
			 */
			hv_atu = false;
		} else {
			pr_info(PFX "Registered hvapi ATU major[%lu] minor[%lu]\n",
				vatu_major, vatu_minor);
		}

		dma_ops = &sun4v_dma_ops;
	}

	regs = of_get_property(dp, "reg", NULL);
	err = -ENODEV;
	if (!regs) {
		printk(KERN_ERR PFX "Could not find config registers\n");
		goto out_err;
	}
	devhandle = (regs->phys_addr >> 32UL) & 0x0fffffff;

	err = -ENOMEM;
	if (!iommu_batch_initialized) {
		for_each_possible_cpu(i) {
			unsigned long page = get_zeroed_page(GFP_KERNEL);

			if (!page)
				goto out_err;

			per_cpu(iommu_batch, i).pglist = (u64 *) page;
		}
		iommu_batch_initialized = 1;
	}

	pbm = kzalloc(sizeof(*pbm), GFP_KERNEL);
	if (!pbm) {
		printk(KERN_ERR PFX "Could not allocate pci_pbm_info\n");
		goto out_err;
	}

	iommu = kzalloc(sizeof(struct iommu), GFP_KERNEL);
	if (!iommu) {
		printk(KERN_ERR PFX "Could not allocate pbm iommu\n");
		goto out_free_controller;
	}

	pbm->iommu = iommu;
	iommu->atu = NULL;
	if (hv_atu) {
		atu = kzalloc(sizeof(*atu), GFP_KERNEL);
		if (!atu)
			pr_err(PFX "Could not allocate atu\n");
		else
			iommu->atu = atu;
	}

	err = pci_sun4v_pbm_init(pbm, op, devhandle);
	if (err)
		goto out_free_iommu;

	dev_set_drvdata(&op->dev, pbm);

	return 0;

out_free_iommu:
	kfree(iommu->atu);
	kfree(pbm->iommu);

out_free_controller:
	kfree(pbm);

out_err:
	return err;
}

static const struct of_device_id pci_sun4v_match[] = {
	{
		.name = "pci",
		.compatible = "SUNW,sun4v-pci",
	},
	{},
};

static struct platform_driver pci_sun4v_driver = {
	.driver = {
		.name = DRIVER_NAME,
		.of_match_table = pci_sun4v_match,
	},
	.probe		= pci_sun4v_probe,
};

static int __init pci_sun4v_init(void)
{
	return platform_driver_register(&pci_sun4v_driver);
}

subsys_initcall(pci_sun4v_init);