// SPDX-License-Identifier: GPL-2.0
/* iommu.c: Generic sparc64 IOMMU support.
 *
 * Copyright (C) 1999, 2007, 2008 David S. Miller (davem@davemloft.net)
 * Copyright (C) 1999, 2000 Jakub Jelinek (jakub@redhat.com)
 */

#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dma-map-ops.h>
#include <linux/errno.h>
#include <linux/iommu-helper.h>
#include <linux/bitmap.h>
#include <asm/iommu-common.h>

#ifdef CONFIG_PCI
#include <linux/pci.h>
#endif

#include <asm/iommu.h>

#include "iommu_common.h"
#include "kernel.h"

#define STC_CTXMATCH_ADDR(STC, CTX) \
        ((STC)->strbuf_ctxmatch_base + ((CTX) << 3))
#define STC_FLUSHFLAG_INIT(STC) \
        (*((STC)->strbuf_flushflag) = 0UL)
#define STC_FLUSHFLAG_SET(STC) \
        (*((STC)->strbuf_flushflag) != 0UL)

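/* The IOMMU and streaming buffer control registers live in physical
 * address space, so all accesses go through ASI_PHYS_BYPASS_EC_E
 * loads and stores.
 */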
#define iommu_read(__reg) \
({      u64 __ret; \
        __asm__ __volatile__("ldxa [%1] %2, %0" \
                             : "=r" (__ret) \
                             : "r" (__reg), "i" (ASI_PHYS_BYPASS_EC_E) \
                             : "memory"); \
        __ret; \
})
#define iommu_write(__reg, __val) \
        __asm__ __volatile__("stxa %0, [%1] %2" \
                             : /* no outputs */ \
                             : "r" (__val), "r" (__reg), \
                               "i" (ASI_PHYS_BYPASS_EC_E))

/* Must be invoked under the IOMMU lock. */
static void iommu_flushall(struct iommu_map_table *iommu_map_table)
{
        struct iommu *iommu = container_of(iommu_map_table, struct iommu, tbl);
        if (iommu->iommu_flushinv) {
                iommu_write(iommu->iommu_flushinv, ~(u64)0);
        } else {
                unsigned long tag;
                int entry;

                tag = iommu->iommu_tags;
                for (entry = 0; entry < 16; entry++) {
                        iommu_write(tag, 0);
                        tag += 8;
                }

                /* Ensure completion of previous PIO writes. */
                (void) iommu_read(iommu->write_complete_reg);
        }
}

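/* IOPTE protection bits for consistent (cacheable, non-streaming)
 * mappings; streaming mappings additionally route data through the
 * streaming buffer.  The context number is encoded starting at bit 47.
 */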
#define IOPTE_CONSISTENT(CTX) \
        (IOPTE_VALID | IOPTE_CACHE | \
         (((CTX) << 47) & IOPTE_CONTEXT))

#define IOPTE_STREAMING(CTX) \
        (IOPTE_CONSISTENT(CTX) | IOPTE_STBUF)

/* Existing mappings are never marked invalid, instead they
 * are pointed to a dummy page.
 */
#define IOPTE_IS_DUMMY(iommu, iopte) \
        ((iopte_val(*iopte) & IOPTE_PAGE) == (iommu)->dummy_page_pa)

static inline void iopte_make_dummy(struct iommu *iommu, iopte_t *iopte)
{
        unsigned long val = iopte_val(*iopte);

        val &= ~IOPTE_PAGE;
        val |= iommu->dummy_page_pa;

        iopte_val(*iopte) = val;
}

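/* Set up the software state for one IOMMU: the allocation bitmap,
 * the dummy page that inactive IOPTEs point at, and the TSB (the
 * in-memory IOMMU page table itself).
 */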
int iommu_table_init(struct iommu *iommu, int tsbsize,
                     u32 dma_offset, u32 dma_addr_mask,
                     int numa_node)
{
        unsigned long i, order, sz, num_tsb_entries;
        struct page *page;

        num_tsb_entries = tsbsize / sizeof(iopte_t);

        /* Setup initial software IOMMU state. */
        spin_lock_init(&iommu->lock);
        iommu->ctx_lowest_free = 1;
        iommu->tbl.table_map_base = dma_offset;
        iommu->dma_addr_mask = dma_addr_mask;

        /* Allocate and initialize the free area map. */
        sz = num_tsb_entries / 8;
        sz = (sz + 7UL) & ~7UL;
        iommu->tbl.map = kzalloc_node(sz, GFP_KERNEL, numa_node);
        if (!iommu->tbl.map)
                return -ENOMEM;

        iommu_tbl_pool_init(&iommu->tbl, num_tsb_entries, IO_PAGE_SHIFT,
                            (tlb_type != hypervisor ? iommu_flushall : NULL),
                            false, 1, false);

        /* Allocate and initialize the dummy page which we
         * set inactive IO PTEs to point to.
         */
        page = alloc_pages_node(numa_node, GFP_KERNEL, 0);
        if (!page) {
                printk(KERN_ERR "IOMMU: Error, gfp(dummy_page) failed.\n");
                goto out_free_map;
        }
        iommu->dummy_page = (unsigned long) page_address(page);
        memset((void *)iommu->dummy_page, 0, PAGE_SIZE);
        iommu->dummy_page_pa = (unsigned long) __pa(iommu->dummy_page);

        /* Now allocate and setup the IOMMU page table itself. */
        order = get_order(tsbsize);
        page = alloc_pages_node(numa_node, GFP_KERNEL, order);
        if (!page) {
                printk(KERN_ERR "IOMMU: Error, gfp(tsb) failed.\n");
                goto out_free_dummy_page;
        }
        iommu->page_table = (iopte_t *)page_address(page);

        for (i = 0; i < num_tsb_entries; i++)
                iopte_make_dummy(iommu, &iommu->page_table[i]);

        return 0;

out_free_dummy_page:
        free_page(iommu->dummy_page);
        iommu->dummy_page = 0UL;

out_free_map:
        kfree(iommu->tbl.map);
        iommu->tbl.map = NULL;

        return -ENOMEM;
}

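/* Allocate a contiguous range of IOMMU table entries, returning a
 * pointer to the first IOPTE or NULL if the table is exhausted.
 */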
static inline iopte_t *alloc_npages(struct device *dev,
                                    struct iommu *iommu,
                                    unsigned long npages)
{
        unsigned long entry;

        entry = iommu_tbl_range_alloc(dev, &iommu->tbl, npages, NULL,
                                      (unsigned long)(-1), 0);
        if (unlikely(entry == IOMMU_ERROR_CODE))
                return NULL;

        return iommu->page_table + entry;
}

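/* Streaming buffer context allocation.  Context 0 is reserved to mean
 * "no context", so the search starts at ctx_lowest_free (initially 1)
 * and wraps around once before giving up and falling back to 0.
 */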
static int iommu_alloc_ctx(struct iommu *iommu)
{
        int lowest = iommu->ctx_lowest_free;
        int n = find_next_zero_bit(iommu->ctx_bitmap, IOMMU_NUM_CTXS, lowest);

        if (unlikely(n == IOMMU_NUM_CTXS)) {
                n = find_next_zero_bit(iommu->ctx_bitmap, lowest, 1);
                if (unlikely(n == lowest)) {
                        printk(KERN_WARNING "IOMMU: Ran out of contexts.\n");
                        n = 0;
                }
        }
        if (n)
                __set_bit(n, iommu->ctx_bitmap);

        return n;
}

static inline void iommu_free_ctx(struct iommu *iommu, int ctx)
{
        if (likely(ctx)) {
                __clear_bit(ctx, iommu->ctx_bitmap);
                if (ctx < iommu->ctx_lowest_free)
                        iommu->ctx_lowest_free = ctx;
        }
}

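/* Allocate a consistent (coherent) DMA buffer: grab zeroed pages and
 * back them with IOPTE_CONSISTENT mappings in the IOMMU table.
 */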
static void *dma_4u_alloc_coherent(struct device *dev, size_t size,
                                   dma_addr_t *dma_addrp, gfp_t gfp,
                                   unsigned long attrs)
{
        unsigned long order, first_page;
        struct iommu *iommu;
        struct page *page;
        int npages, nid;
        iopte_t *iopte;
        void *ret;

        size = IO_PAGE_ALIGN(size);
        order = get_order(size);
        if (order >= 10)
                return NULL;

        nid = dev->archdata.numa_node;
        page = alloc_pages_node(nid, gfp, order);
        if (unlikely(!page))
                return NULL;

        first_page = (unsigned long) page_address(page);
        memset((char *)first_page, 0, PAGE_SIZE << order);

        iommu = dev->archdata.iommu;

        iopte = alloc_npages(dev, iommu, size >> IO_PAGE_SHIFT);

        if (unlikely(iopte == NULL)) {
                free_pages(first_page, order);
                return NULL;
        }

        *dma_addrp = (iommu->tbl.table_map_base +
                      ((iopte - iommu->page_table) << IO_PAGE_SHIFT));
        ret = (void *) first_page;
        npages = size >> IO_PAGE_SHIFT;
        first_page = __pa(first_page);
        while (npages--) {
                iopte_val(*iopte) = (IOPTE_CONSISTENT(0UL) |
                                     IOPTE_WRITE |
                                     (first_page & IOPTE_PAGE));
                iopte++;
                first_page += IO_PAGE_SIZE;
        }

        return ret;
}

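/* Undo dma_4u_alloc_coherent(): release the IOMMU table range and
 * free the backing pages.
 */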
static void dma_4u_free_coherent(struct device *dev, size_t size,
                                 void *cpu, dma_addr_t dvma,
                                 unsigned long attrs)
{
        struct iommu *iommu;
        unsigned long order, npages;

        npages = IO_PAGE_ALIGN(size) >> IO_PAGE_SHIFT;
        iommu = dev->archdata.iommu;

        iommu_tbl_range_free(&iommu->tbl, dvma, npages, IOMMU_ERROR_CODE);

        order = get_order(size);
        if (order < 10)
                free_pages((unsigned long)cpu, order);
}

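/* Map a single page for streaming DMA.  A streaming buffer context is
 * allocated when the IOMMU supports context flushing, and the IOPTEs
 * are marked writable unless the transfer is DMA_TO_DEVICE.
 */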
static dma_addr_t dma_4u_map_page(struct device *dev, struct page *page,
                                  unsigned long offset, size_t sz,
                                  enum dma_data_direction direction,
                                  unsigned long attrs)
{
        struct iommu *iommu;
        struct strbuf *strbuf;
        iopte_t *base;
        unsigned long flags, npages, oaddr;
        unsigned long i, base_paddr, ctx;
        u32 bus_addr, ret;
        unsigned long iopte_protection;

        iommu = dev->archdata.iommu;
        strbuf = dev->archdata.stc;

        if (unlikely(direction == DMA_NONE))
                goto bad_no_ctx;

        oaddr = (unsigned long)(page_address(page) + offset);
        npages = IO_PAGE_ALIGN(oaddr + sz) - (oaddr & IO_PAGE_MASK);
        npages >>= IO_PAGE_SHIFT;

        base = alloc_npages(dev, iommu, npages);
        spin_lock_irqsave(&iommu->lock, flags);
        ctx = 0;
        if (iommu->iommu_ctxflush)
                ctx = iommu_alloc_ctx(iommu);
        spin_unlock_irqrestore(&iommu->lock, flags);

        if (unlikely(!base))
                goto bad;

        bus_addr = (iommu->tbl.table_map_base +
                    ((base - iommu->page_table) << IO_PAGE_SHIFT));
        ret = bus_addr | (oaddr & ~IO_PAGE_MASK);
        base_paddr = __pa(oaddr & IO_PAGE_MASK);
        if (strbuf->strbuf_enabled)
                iopte_protection = IOPTE_STREAMING(ctx);
        else
                iopte_protection = IOPTE_CONSISTENT(ctx);
        if (direction != DMA_TO_DEVICE)
                iopte_protection |= IOPTE_WRITE;

        for (i = 0; i < npages; i++, base++, base_paddr += IO_PAGE_SIZE)
                iopte_val(*base) = iopte_protection | base_paddr;

        return ret;

bad:
        iommu_free_ctx(iommu, ctx);
bad_no_ctx:
        if (printk_ratelimit())
                WARN_ON(1);
        return DMA_MAPPING_ERROR;
}

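/* Force dirty data out of the streaming buffer for a mapping, either
 * by context (when both the strbuf and the IOMMU support it) or one
 * IO page at a time, then wait on the flush flag for completion.
 */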
static void strbuf_flush(struct strbuf *strbuf, struct iommu *iommu,
                         u32 vaddr, unsigned long ctx, unsigned long npages,
                         enum dma_data_direction direction)
{
        int limit;

        if (strbuf->strbuf_ctxflush &&
            iommu->iommu_ctxflush) {
                unsigned long matchreg, flushreg;
                u64 val;

                flushreg = strbuf->strbuf_ctxflush;
                matchreg = STC_CTXMATCH_ADDR(strbuf, ctx);

                iommu_write(flushreg, ctx);
                val = iommu_read(matchreg);
                val &= 0xffff;
                if (!val)
                        goto do_flush_sync;

                while (val) {
                        if (val & 0x1)
                                iommu_write(flushreg, ctx);
                        val >>= 1;
                }
                val = iommu_read(matchreg);
                if (unlikely(val)) {
                        printk(KERN_WARNING "strbuf_flush: ctx flush "
                               "timeout matchreg[%llx] ctx[%lx]\n",
                               val, ctx);
                        goto do_page_flush;
                }
        } else {
                unsigned long i;

        do_page_flush:
                for (i = 0; i < npages; i++, vaddr += IO_PAGE_SIZE)
                        iommu_write(strbuf->strbuf_pflush, vaddr);
        }

do_flush_sync:
        /* If the device could not have possibly put dirty data into
         * the streaming cache, no flush-flag synchronization needs
         * to be performed.
         */
        if (direction == DMA_TO_DEVICE)
                return;

        STC_FLUSHFLAG_INIT(strbuf);
        iommu_write(strbuf->strbuf_fsync, strbuf->strbuf_flushflag_pa);
        (void) iommu_read(iommu->write_complete_reg);

        limit = 100000;
        while (!STC_FLUSHFLAG_SET(strbuf)) {
                limit--;
                if (!limit)
                        break;
                udelay(1);
                rmb();
        }
        if (!limit)
                printk(KERN_WARNING "strbuf_flush: flushflag timeout "
                       "vaddr[%08x] ctx[%lx] npages[%ld]\n",
                       vaddr, ctx, npages);
}

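/* Tear down a dma_4u_map_page() mapping: flush the streaming buffer
 * if needed, point the IOPTEs back at the dummy page, release the
 * context, and return the range to the allocator.
 */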
static void dma_4u_unmap_page(struct device *dev, dma_addr_t bus_addr,
                              size_t sz, enum dma_data_direction direction,
                              unsigned long attrs)
{
        struct iommu *iommu;
        struct strbuf *strbuf;
        iopte_t *base;
        unsigned long flags, npages, ctx, i;

        if (unlikely(direction == DMA_NONE)) {
                if (printk_ratelimit())
                        WARN_ON(1);
                return;
        }

        iommu = dev->archdata.iommu;
        strbuf = dev->archdata.stc;

        npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK);
        npages >>= IO_PAGE_SHIFT;
        base = iommu->page_table +
               ((bus_addr - iommu->tbl.table_map_base) >> IO_PAGE_SHIFT);
        bus_addr &= IO_PAGE_MASK;

        spin_lock_irqsave(&iommu->lock, flags);

        /* Record the context, if any. */
        ctx = 0;
        if (iommu->iommu_ctxflush)
                ctx = (iopte_val(*base) & IOPTE_CONTEXT) >> 47UL;

        /* Step 1: Kick data out of streaming buffers if necessary. */
        if (strbuf->strbuf_enabled && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
                strbuf_flush(strbuf, iommu, bus_addr, ctx,
                             npages, direction);

        /* Step 2: Clear out TSB entries. */
        for (i = 0; i < npages; i++)
                iopte_make_dummy(iommu, base + i);

        iommu_free_ctx(iommu, ctx);
        spin_unlock_irqrestore(&iommu->lock, flags);

        iommu_tbl_range_free(&iommu->tbl, bus_addr, npages, IOMMU_ERROR_CODE);
}

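/* Map a scatterlist, coalescing adjacent entries into larger DMA
 * segments when the allocated bus addresses are contiguous and the
 * merge respects the device's segment size and boundary limits.
 */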
static int dma_4u_map_sg(struct device *dev, struct scatterlist *sglist,
                         int nelems, enum dma_data_direction direction,
                         unsigned long attrs)
{
        struct scatterlist *s, *outs, *segstart;
        unsigned long flags, handle, prot, ctx;
        dma_addr_t dma_next = 0, dma_addr;
        unsigned int max_seg_size;
        unsigned long seg_boundary_size;
        int outcount, incount, i;
        struct strbuf *strbuf;
        struct iommu *iommu;
        unsigned long base_shift;

        BUG_ON(direction == DMA_NONE);

        iommu = dev->archdata.iommu;
        strbuf = dev->archdata.stc;
        if (nelems == 0 || !iommu)
                return -EINVAL;

        spin_lock_irqsave(&iommu->lock, flags);

        ctx = 0;
        if (iommu->iommu_ctxflush)
                ctx = iommu_alloc_ctx(iommu);

        if (strbuf->strbuf_enabled)
                prot = IOPTE_STREAMING(ctx);
        else
                prot = IOPTE_CONSISTENT(ctx);
        if (direction != DMA_TO_DEVICE)
                prot |= IOPTE_WRITE;

        outs = s = segstart = &sglist[0];
        outcount = 1;
        incount = nelems;
        handle = 0;

        /* Init first segment length for backout at failure */
        outs->dma_length = 0;

        max_seg_size = dma_get_max_seg_size(dev);
        seg_boundary_size = dma_get_seg_boundary_nr_pages(dev, IO_PAGE_SHIFT);
        base_shift = iommu->tbl.table_map_base >> IO_PAGE_SHIFT;
        for_each_sg(sglist, s, nelems, i) {
                unsigned long paddr, npages, entry, out_entry = 0, slen;
                iopte_t *base;

                slen = s->length;
                /* Sanity check */
                if (slen == 0) {
                        dma_next = 0;
                        continue;
                }
                /* Allocate iommu entries for that segment */
                paddr = (unsigned long) SG_ENT_PHYS_ADDRESS(s);
                npages = iommu_num_pages(paddr, slen, IO_PAGE_SIZE);
                entry = iommu_tbl_range_alloc(dev, &iommu->tbl, npages,
                                              &handle, (unsigned long)(-1), 0);

                /* Handle failure */
                if (unlikely(entry == IOMMU_ERROR_CODE)) {
                        if (printk_ratelimit())
                                printk(KERN_INFO "iommu_alloc failed, iommu %p paddr %lx"
                                       " npages %lx\n", iommu, paddr, npages);
                        goto iommu_map_failed;
                }

                base = iommu->page_table + entry;

                /* Convert entry to a dma_addr_t */
                dma_addr = iommu->tbl.table_map_base +
                           (entry << IO_PAGE_SHIFT);
                dma_addr |= (s->offset & ~IO_PAGE_MASK);

                /* Insert into HW table */
                paddr &= IO_PAGE_MASK;
                while (npages--) {
                        iopte_val(*base) = prot | paddr;
                        base++;
                        paddr += IO_PAGE_SIZE;
                }

                /* If we are in an open segment, try merging */
                if (segstart != s) {
                        /* We cannot merge if:
                         * - allocated dma_addr isn't contiguous to previous allocation
                         */
                        if ((dma_addr != dma_next) ||
                            (outs->dma_length + s->length > max_seg_size) ||
                            (is_span_boundary(out_entry, base_shift,
                                              seg_boundary_size, outs, s))) {
                                /* Can't merge: create a new segment */
                                segstart = s;
                                outcount++;
                                outs = sg_next(outs);
                        } else {
                                outs->dma_length += s->length;
                        }
                }

                if (segstart == s) {
                        /* This is a new segment, fill entries */
                        outs->dma_address = dma_addr;
                        outs->dma_length = slen;
                        out_entry = entry;
                }

                /* Calculate next page pointer for contiguous check */
                dma_next = dma_addr + slen;
        }

        spin_unlock_irqrestore(&iommu->lock, flags);

        if (outcount < incount) {
                outs = sg_next(outs);
                outs->dma_length = 0;
        }

        return outcount;

iommu_map_failed:
        for_each_sg(sglist, s, nelems, i) {
                if (s->dma_length != 0) {
                        unsigned long vaddr, npages, entry, j;
                        iopte_t *base;

                        vaddr = s->dma_address & IO_PAGE_MASK;
                        npages = iommu_num_pages(s->dma_address, s->dma_length,
                                                 IO_PAGE_SIZE);

                        entry = (vaddr - iommu->tbl.table_map_base)
                                >> IO_PAGE_SHIFT;
                        base = iommu->page_table + entry;

                        for (j = 0; j < npages; j++)
                                iopte_make_dummy(iommu, base + j);

                        iommu_tbl_range_free(&iommu->tbl, vaddr, npages,
                                             IOMMU_ERROR_CODE);

                        s->dma_length = 0;
                }
                if (s == outs)
                        break;
        }
        spin_unlock_irqrestore(&iommu->lock, flags);

        return -EINVAL;
}

/* If contexts are being used, they are the same in all of the mappings
 * we make for a particular SG.
 */
static unsigned long fetch_sg_ctx(struct iommu *iommu, struct scatterlist *sg)
{
        unsigned long ctx = 0;

        if (iommu->iommu_ctxflush) {
                iopte_t *base;
                u32 bus_addr;
                struct iommu_map_table *tbl = &iommu->tbl;

                bus_addr = sg->dma_address & IO_PAGE_MASK;
                base = iommu->page_table +
                       ((bus_addr - tbl->table_map_base) >> IO_PAGE_SHIFT);

                ctx = (iopte_val(*base) & IOPTE_CONTEXT) >> 47UL;
        }
        return ctx;
}

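/* Unmap a scatterlist previously mapped by dma_4u_map_sg(), stopping
 * at the first entry with a zero dma_length.
 */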
static void dma_4u_unmap_sg(struct device *dev, struct scatterlist *sglist,
                            int nelems, enum dma_data_direction direction,
                            unsigned long attrs)
{
        unsigned long flags, ctx;
        struct scatterlist *sg;
        struct strbuf *strbuf;
        struct iommu *iommu;

        BUG_ON(direction == DMA_NONE);

        iommu = dev->archdata.iommu;
        strbuf = dev->archdata.stc;

        ctx = fetch_sg_ctx(iommu, sglist);

        spin_lock_irqsave(&iommu->lock, flags);

        sg = sglist;
        while (nelems--) {
                dma_addr_t dma_handle = sg->dma_address;
                unsigned int len = sg->dma_length;
                unsigned long npages, entry;
                iopte_t *base;
                int i;

                if (!len)
                        break;
                npages = iommu_num_pages(dma_handle, len, IO_PAGE_SIZE);

                entry = ((dma_handle - iommu->tbl.table_map_base)
                         >> IO_PAGE_SHIFT);
                base = iommu->page_table + entry;

                dma_handle &= IO_PAGE_MASK;
                if (strbuf->strbuf_enabled && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
                        strbuf_flush(strbuf, iommu, dma_handle, ctx,
                                     npages, direction);

                for (i = 0; i < npages; i++)
                        iopte_make_dummy(iommu, base + i);

                iommu_tbl_range_free(&iommu->tbl, dma_handle, npages,
                                     IOMMU_ERROR_CODE);
                sg = sg_next(sg);
        }

        iommu_free_ctx(iommu, ctx);

        spin_unlock_irqrestore(&iommu->lock, flags);
}

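/* Make a streaming mapping visible to the CPU by flushing the
 * streaming buffer; a no-op when the streaming buffer is disabled.
 */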
static void dma_4u_sync_single_for_cpu(struct device *dev,
                                       dma_addr_t bus_addr, size_t sz,
                                       enum dma_data_direction direction)
{
        struct iommu *iommu;
        struct strbuf *strbuf;
        unsigned long flags, ctx, npages;

        iommu = dev->archdata.iommu;
        strbuf = dev->archdata.stc;

        if (!strbuf->strbuf_enabled)
                return;

        spin_lock_irqsave(&iommu->lock, flags);

        npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK);
        npages >>= IO_PAGE_SHIFT;
        bus_addr &= IO_PAGE_MASK;

        /* Step 1: Record the context, if any. */
        ctx = 0;
        if (iommu->iommu_ctxflush &&
            strbuf->strbuf_ctxflush) {
                iopte_t *iopte;
                struct iommu_map_table *tbl = &iommu->tbl;

                iopte = iommu->page_table +
                        ((bus_addr - tbl->table_map_base) >> IO_PAGE_SHIFT);
                ctx = (iopte_val(*iopte) & IOPTE_CONTEXT) >> 47UL;
        }

        /* Step 2: Kick data out of streaming buffers. */
        strbuf_flush(strbuf, iommu, bus_addr, ctx, npages, direction);

        spin_unlock_irqrestore(&iommu->lock, flags);
}

static void dma_4u_sync_sg_for_cpu(struct device *dev,
                                   struct scatterlist *sglist, int nelems,
                                   enum dma_data_direction direction)
{
        struct iommu *iommu;
        struct strbuf *strbuf;
        unsigned long flags, ctx, npages, i;
        struct scatterlist *sg, *sgprv;
        u32 bus_addr;

        iommu = dev->archdata.iommu;
        strbuf = dev->archdata.stc;

        if (!strbuf->strbuf_enabled)
                return;

        spin_lock_irqsave(&iommu->lock, flags);

        /* Step 1: Record the context, if any. */
        ctx = 0;
        if (iommu->iommu_ctxflush &&
            strbuf->strbuf_ctxflush) {
                iopte_t *iopte;
                struct iommu_map_table *tbl = &iommu->tbl;

                iopte = iommu->page_table + ((sglist[0].dma_address -
                        tbl->table_map_base) >> IO_PAGE_SHIFT);
                ctx = (iopte_val(*iopte) & IOPTE_CONTEXT) >> 47UL;
        }

        /* Step 2: Kick data out of streaming buffers. */
        bus_addr = sglist[0].dma_address & IO_PAGE_MASK;
        sgprv = NULL;
        for_each_sg(sglist, sg, nelems, i) {
                if (sg->dma_length == 0)
                        break;
                sgprv = sg;
        }

        npages = (IO_PAGE_ALIGN(sgprv->dma_address + sgprv->dma_length)
                  - bus_addr) >> IO_PAGE_SHIFT;
        strbuf_flush(strbuf, iommu, bus_addr, ctx, npages, direction);

        spin_unlock_irqrestore(&iommu->lock, flags);
}

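/* A device can DMA through this IOMMU if its addressing mask covers
 * the IOMMU's DMA address range (modulo the ALi sound chip quirk).
 */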
static int dma_4u_supported(struct device *dev, u64 device_mask)
{
        struct iommu *iommu = dev->archdata.iommu;

        if (ali_sound_dma_hack(dev, device_mask))
                return 1;

        if (device_mask < iommu->dma_addr_mask)
                return 0;
        return 1;
}

static const struct dma_map_ops sun4u_dma_ops = {
        .alloc                  = dma_4u_alloc_coherent,
        .free                   = dma_4u_free_coherent,
        .map_page               = dma_4u_map_page,
        .unmap_page             = dma_4u_unmap_page,
        .map_sg                 = dma_4u_map_sg,
        .unmap_sg               = dma_4u_unmap_sg,
        .sync_single_for_cpu    = dma_4u_sync_single_for_cpu,
        .sync_sg_for_cpu        = dma_4u_sync_sg_for_cpu,
        .dma_supported          = dma_4u_supported,
};

const struct dma_map_ops *dma_ops = &sun4u_dma_ops;
EXPORT_SYMBOL(dma_ops);