// SPDX-License-Identifier: GPL-2.0
/*
** PARISC 1.1 Dynamic DMA mapping support.
** This implementation is for PA-RISC platforms that do not support
** I/O TLBs (aka DMA address translation hardware).
** See Documentation/core-api/dma-api-howto.rst for interface definitions.
**
** (c) Copyright 1999,2000 Hewlett-Packard Company
** (c) Copyright 2000 Grant Grundler
** (c) Copyright 2000 Philipp Rumpf <prumpf@tux.org>
** (c) Copyright 2000 John Marvin
**
** "leveraged" from 2.3.47: arch/ia64/kernel/pci-dma.c.
** (I assume it's from David Mosberger-Tang but there was no Copyright)
**
** AFAIK, all PA7100LC and PA7300LC platforms can use this code.
**
** - ggg
*/

#include <linux/init.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/dma-direct.h>
#include <linux/dma-map-ops.h>

#include <asm/cacheflush.h>
#include <asm/dma.h>		/* for DMA_CHUNK_SIZE */
#include <asm/io.h>
#include <asm/page.h>		/* get_order */
#include <linux/uaccess.h>
#include <asm/tlbflush.h>	/* for purge_tlb_*() macros */

static struct proc_dir_entry * proc_gsc_root __read_mostly = NULL;
static unsigned long pcxl_used_bytes __read_mostly;
static unsigned long pcxl_used_pages __read_mostly;

unsigned long pcxl_dma_start __ro_after_init; /* pcxl dma mapping area start */
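
/*
** The pcxl resource map tracks which pages of the uncached DMA mapping
** area are in use: one bit per page, eight pages per map byte. All
** accesses to the map are serialized by pcxl_res_lock; pcxl_res_hint
** remembers where the last allocation ended so the first-fit search
** does not rescan the start of the map on every call.
*/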
static DEFINE_SPINLOCK(pcxl_res_lock);
static char    *pcxl_res_map;
static int     pcxl_res_hint;
static int     pcxl_res_size;

#ifdef DEBUG_PCXL_RESOURCE
#define DBG_RES(x...)	printk(x)
#else
#define DBG_RES(x...)
#endif


/*
** Dump a hex representation of the resource map.
*/

#ifdef DUMP_RESMAP
static
void dump_resmap(void)
{
	u_long *res_ptr = (unsigned long *)pcxl_res_map;
	u_long i = 0;

	printk("res_map: ");
	for (; i < (pcxl_res_size / sizeof(unsigned long)); ++i, ++res_ptr)
		printk("%08lx ", *res_ptr);

	printk("\n");
}
#else
static inline void dump_resmap(void) {;}
#endif

static inline int map_pte_uncached(pte_t * pte,
		unsigned long vaddr,
		unsigned long size, unsigned long *paddr_ptr)
{
	unsigned long end;
	unsigned long orig_vaddr = vaddr;

	vaddr &= ~PMD_MASK;
	end = vaddr + size;
	if (end > PMD_SIZE)
		end = PMD_SIZE;
	do {
		unsigned long flags;

		if (!pte_none(*pte))
			printk(KERN_ERR "map_pte_uncached: page already exists\n");
		purge_tlb_start(flags);
		set_pte(pte, __mk_pte(*paddr_ptr, PAGE_KERNEL_UNC));
		pdtlb(SR_KERNEL, orig_vaddr);
		purge_tlb_end(flags);
		vaddr += PAGE_SIZE;
		orig_vaddr += PAGE_SIZE;
		(*paddr_ptr) += PAGE_SIZE;
		pte++;
	} while (vaddr < end);
	return 0;
}

static inline int map_pmd_uncached(pmd_t * pmd, unsigned long vaddr,
		unsigned long size, unsigned long *paddr_ptr)
{
	unsigned long end;
	unsigned long orig_vaddr = vaddr;

	vaddr &= ~PGDIR_MASK;
	end = vaddr + size;
	if (end > PGDIR_SIZE)
		end = PGDIR_SIZE;
	do {
		pte_t * pte = pte_alloc_kernel(pmd, vaddr);
		if (!pte)
			return -ENOMEM;
		if (map_pte_uncached(pte, orig_vaddr, end - vaddr, paddr_ptr))
			return -ENOMEM;
		vaddr = (vaddr + PMD_SIZE) & PMD_MASK;
		orig_vaddr += PMD_SIZE;
		pmd++;
	} while (vaddr < end);
	return 0;
}
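
/*
** Install uncached kernel mappings for the virtual range
** [vaddr, vaddr + size) in the pcxl DMA area, walking (and where
** needed allocating) the pmd and pte levels. paddr is advanced by
** map_pte_uncached() as each page is mapped.
*/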
static inline int map_uncached_pages(unsigned long vaddr, unsigned long size,
		unsigned long paddr)
{
	pgd_t * dir;
	unsigned long end = vaddr + size;

	dir = pgd_offset_k(vaddr);
	do {
		p4d_t *p4d;
		pud_t *pud;
		pmd_t *pmd;

		p4d = p4d_offset(dir, vaddr);
		pud = pud_offset(p4d, vaddr);
		pmd = pmd_alloc(NULL, pud, vaddr);

		if (!pmd)
			return -ENOMEM;
		if (map_pmd_uncached(pmd, vaddr, end - vaddr, &paddr))
			return -ENOMEM;
		vaddr = vaddr + PGDIR_SIZE;
		dir++;
	} while (vaddr && (vaddr < end));
	return 0;
}
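
/*
** Tear down the uncached mappings created by map_uncached_pages(),
** clearing each pte and purging the corresponding TLB entry.
*/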
static inline void unmap_uncached_pte(pmd_t * pmd, unsigned long vaddr,
		unsigned long size)
{
	pte_t * pte;
	unsigned long end;
	unsigned long orig_vaddr = vaddr;

	if (pmd_none(*pmd))
		return;
	if (pmd_bad(*pmd)) {
		pmd_ERROR(*pmd);
		pmd_clear(pmd);
		return;
	}
	pte = pte_offset_kernel(pmd, vaddr);
	vaddr &= ~PMD_MASK;
	end = vaddr + size;
	if (end > PMD_SIZE)
		end = PMD_SIZE;
	do {
		unsigned long flags;
		pte_t page = *pte;

		pte_clear(&init_mm, vaddr, pte);
		purge_tlb_start(flags);
		pdtlb(SR_KERNEL, orig_vaddr);
		purge_tlb_end(flags);
		vaddr += PAGE_SIZE;
		orig_vaddr += PAGE_SIZE;
		pte++;
		if (pte_none(page) || pte_present(page))
			continue;
		printk(KERN_CRIT "Whee.. Swapped out page in kernel page table\n");
	} while (vaddr < end);
}

static inline void unmap_uncached_pmd(pgd_t * dir, unsigned long vaddr,
		unsigned long size)
{
	pmd_t * pmd;
	unsigned long end;
	unsigned long orig_vaddr = vaddr;

	if (pgd_none(*dir))
		return;
	if (pgd_bad(*dir)) {
		pgd_ERROR(*dir);
		pgd_clear(dir);
		return;
	}
	pmd = pmd_offset(pud_offset(p4d_offset(dir, vaddr), vaddr), vaddr);
	vaddr &= ~PGDIR_MASK;
	end = vaddr + size;
	if (end > PGDIR_SIZE)
		end = PGDIR_SIZE;
	do {
		unmap_uncached_pte(pmd, orig_vaddr, end - vaddr);
		vaddr = (vaddr + PMD_SIZE) & PMD_MASK;
		orig_vaddr += PMD_SIZE;
		pmd++;
	} while (vaddr < end);
}

static void unmap_uncached_pages(unsigned long vaddr, unsigned long size)
{
	pgd_t * dir;
	unsigned long end = vaddr + size;

	dir = pgd_offset_k(vaddr);
	do {
		unmap_uncached_pmd(dir, vaddr, end - vaddr);
		vaddr = vaddr + PGDIR_SIZE;
		dir++;
	} while (vaddr && (vaddr < end));
}
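
/*
** First-fit search of the resource map. Requests are grouped into 8,
** 16 or 32 page allocations, so a free block can be found by scanning
** the map one byte, halfword or word at a time. The search starts at
** pcxl_res_hint and wraps around to the beginning of the map once.
*/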
#define PCXL_SEARCH_LOOP(idx, mask, size)  \
	for (; res_ptr < res_end; ++res_ptr) \
	{ \
		if (0 == ((*res_ptr) & mask)) { \
			*res_ptr |= mask; \
			idx = (int)((u_long)res_ptr - (u_long)pcxl_res_map); \
			pcxl_res_hint = idx + (size >> 3); \
			goto resource_found; \
		} \
	}

#define PCXL_FIND_FREE_MAPPING(idx, mask, size)  { \
	u##size *res_ptr = (u##size *)&(pcxl_res_map[pcxl_res_hint & ~((size >> 3) - 1)]); \
	u##size *res_end = (u##size *)&pcxl_res_map[pcxl_res_size]; \
	PCXL_SEARCH_LOOP(idx, mask, size); \
	res_ptr = (u##size *)&pcxl_res_map[0]; \
	PCXL_SEARCH_LOOP(idx, mask, size); \
}

static unsigned long
pcxl_alloc_range(size_t size)
{
	int res_idx;
	u_long mask, flags;
	unsigned int pages_needed = size >> PAGE_SHIFT;

	mask = (u_long) -1L;
	mask >>= BITS_PER_LONG - pages_needed;

	DBG_RES("pcxl_alloc_range() size: %d pages_needed %d pages_mask 0x%08lx\n",
		size, pages_needed, mask);

	spin_lock_irqsave(&pcxl_res_lock, flags);

	if (pages_needed <= 8) {
		PCXL_FIND_FREE_MAPPING(res_idx, mask, 8);
	} else if (pages_needed <= 16) {
		PCXL_FIND_FREE_MAPPING(res_idx, mask, 16);
	} else if (pages_needed <= 32) {
		PCXL_FIND_FREE_MAPPING(res_idx, mask, 32);
	} else {
		panic("%s: pcxl_alloc_range() Too many pages to map.\n",
		      __FILE__);
	}

	dump_resmap();
	panic("%s: pcxl_alloc_range() out of dma mapping resources\n",
	      __FILE__);

resource_found:

	DBG_RES("pcxl_alloc_range() res_idx %d mask 0x%08lx res_hint: %d\n",
		res_idx, mask, pcxl_res_hint);

	pcxl_used_pages += pages_needed;
	pcxl_used_bytes += ((pages_needed >> 3) ? (pages_needed >> 3) : 1);

	spin_unlock_irqrestore(&pcxl_res_lock, flags);

	dump_resmap();
	/*
	** Return the vaddr in the pcxl dma map that corresponds to the
	** allocated bits: each map byte covers eight pages, so the byte
	** index res_idx is scaled by eight pages' worth of address space.
	*/
	return (pcxl_dma_start + (res_idx << (PAGE_SHIFT + 3)));
}
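
/*
** Clear the same group of bits that pcxl_alloc_range() set; idx is the
** byte offset of the allocation within the resource map.
*/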
#define PCXL_FREE_MAPPINGS(idx, m, size) \
		u##size *res_ptr = (u##size *)&(pcxl_res_map[(idx) + (((size >> 3) - 1) & (~((size >> 3) - 1)))]); \
		/* BUG_ON((*res_ptr & m) != m); */ \
		*res_ptr &= ~m;

/*
** clear bits in the pcxl resource map
*/
static void
pcxl_free_range(unsigned long vaddr, size_t size)
{
	u_long mask, flags;
	unsigned int res_idx = (vaddr - pcxl_dma_start) >> (PAGE_SHIFT + 3);
	unsigned int pages_mapped = size >> PAGE_SHIFT;

	mask = (u_long) -1L;
	mask >>= BITS_PER_LONG - pages_mapped;

	DBG_RES("pcxl_free_range() res_idx: %d size: %d pages_mapped %d mask 0x%08lx\n",
		res_idx, size, pages_mapped, mask);

	spin_lock_irqsave(&pcxl_res_lock, flags);

	if (pages_mapped <= 8) {
		PCXL_FREE_MAPPINGS(res_idx, mask, 8);
	} else if (pages_mapped <= 16) {
		PCXL_FREE_MAPPINGS(res_idx, mask, 16);
	} else if (pages_mapped <= 32) {
		PCXL_FREE_MAPPINGS(res_idx, mask, 32);
	} else {
		panic("%s: pcxl_free_range() Too many pages to unmap.\n",
		      __FILE__);
	}

	pcxl_used_pages -= (pages_mapped ? pages_mapped : 1);
	pcxl_used_bytes -= ((pages_mapped >> 3) ? (pages_mapped >> 3) : 1);

	spin_unlock_irqrestore(&pcxl_res_lock, flags);

	dump_resmap();
}

static int __maybe_unused proc_pcxl_dma_show(struct seq_file *m, void *v)
{
#if 0
	u_long i = 0;
	unsigned long *res_ptr = (u_long *)pcxl_res_map;
#endif
	unsigned long total_pages = pcxl_res_size << 3;	/* 8 bits per byte */

	seq_printf(m, "\nDMA Mapping Area size: %d bytes (%ld pages)\n",
		PCXL_DMA_MAP_SIZE, total_pages);

	seq_printf(m, "Resource bitmap : %d bytes\n", pcxl_res_size);

	seq_puts(m,  "            total:    free:    used:   % used:\n");
	seq_printf(m, "blocks  %8d %8ld %8ld %8ld%%\n", pcxl_res_size,
		pcxl_res_size - pcxl_used_bytes, pcxl_used_bytes,
		(pcxl_used_bytes * 100) / pcxl_res_size);

	seq_printf(m, "pages   %8ld %8ld %8ld %8ld%%\n", total_pages,
		total_pages - pcxl_used_pages, pcxl_used_pages,
		(pcxl_used_pages * 100 / total_pages));
#if 0
	seq_puts(m, "\nResource bitmap:");

	for (; i < (pcxl_res_size / sizeof(u_long)); ++i, ++res_ptr) {
		if ((i & 7) == 0)
			seq_puts(m, "\n   ");
		seq_printf(m, " %08lx", *res_ptr);
	}
#endif
	seq_putc(m, '\n');
	return 0;
}
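
/*
** pcxl_dma_start is only set up during boot (outside this file) on
** machines that need the uncached mapping area (PCX-L/PCX-L2), so on
** everything else this initcall is a no-op.
*/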
static int __init
pcxl_dma_init(void)
{
	if (pcxl_dma_start == 0)
		return 0;

	pcxl_res_size = PCXL_DMA_MAP_SIZE >> (PAGE_SHIFT + 3);
	pcxl_res_hint = 0;
	pcxl_res_map = (char *)__get_free_pages(GFP_KERNEL,
					    get_order(pcxl_res_size));
	memset(pcxl_res_map, 0, pcxl_res_size);
	proc_gsc_root = proc_mkdir("bus/gsc", NULL);
	if (!proc_gsc_root)
		printk(KERN_WARNING
			"pcxl_dma_init: Unable to create gsc /proc dir entry\n");
	else {
		struct proc_dir_entry* ent;
		ent = proc_create_single("pcxl_dma", 0, proc_gsc_root,
				proc_pcxl_dma_show);
		if (!ent)
			printk(KERN_WARNING
				"pci-dma.c: Unable to create pcxl_dma /proc entry.\n");
	}
	return 0;
}

__initcall(pcxl_dma_init);
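
/*
** DMA "coherent" allocation: grab ordinary cached pages, flush them
** out of the data cache, then hand back an uncached alias from the
** pcxl mapping area. The physical address becomes the dma handle;
** the uncached virtual address is what the driver uses.
*/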
void *arch_dma_alloc(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
{
	unsigned long vaddr;
	unsigned long paddr;
	int order;

	if (boot_cpu_data.cpu_type != pcxl2 && boot_cpu_data.cpu_type != pcxl)
		return NULL;

	order = get_order(size);
	size = 1 << (order + PAGE_SHIFT);
	vaddr = pcxl_alloc_range(size);
	paddr = __get_free_pages(gfp | __GFP_ZERO, order);
	flush_kernel_dcache_range(paddr, size);
	paddr = __pa(paddr);
	map_uncached_pages(vaddr, size, paddr);
	*dma_handle = (dma_addr_t) paddr;

	return (void *)vaddr;
}

void arch_dma_free(struct device *dev, size_t size, void *vaddr,
		dma_addr_t dma_handle, unsigned long attrs)
{
	int order = get_order(size);

	WARN_ON_ONCE(boot_cpu_data.cpu_type != pcxl2 &&
		     boot_cpu_data.cpu_type != pcxl);

	size = 1 << (order + PAGE_SHIFT);
	unmap_uncached_pages((unsigned long)vaddr, size);
	pcxl_free_range((unsigned long)vaddr, size);

	free_pages((unsigned long)__va(dma_handle), order);
}

void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
		enum dma_data_direction dir)
{
	/*
	 * fdc: The data cache line is written back to memory, if and only if
	 * it is dirty, and then invalidated from the data cache.
	 */
	flush_kernel_dcache_range((unsigned long)phys_to_virt(paddr), size);
}
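
/*
** On the way back from the device there is nothing to write back, so
** a DMA_FROM_DEVICE buffer only needs its stale cache lines purged;
** the other directions flush (write back + invalidate) as above.
*/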
void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
		enum dma_data_direction dir)
{
	unsigned long addr = (unsigned long) phys_to_virt(paddr);

	switch (dir) {
	case DMA_TO_DEVICE:
	case DMA_BIDIRECTIONAL:
		flush_kernel_dcache_range(addr, size);
		return;
	case DMA_FROM_DEVICE:
		purge_kernel_dcache_range_asm(addr, addr + size);
		return;
	default:
		BUG();
	}
}