1 | // SPDX-License-Identifier: GPL-2.0-only |
---|---|
2 | /* |
3 | * Dynamic DMA mapping support. |
4 | * |
5 | * This implementation is a fallback for platforms that do not support |
6 | * I/O TLBs (aka DMA address translation hardware). |
7 | * Copyright (C) 2000 Asit Mallick <Asit.K.Mallick@intel.com> |
8 | * Copyright (C) 2000 Goutham Rao <goutham.rao@intel.com> |
9 | * Copyright (C) 2000, 2003 Hewlett-Packard Co |
10 | * David Mosberger-Tang <davidm@hpl.hp.com> |
11 | * |
12 | * 03/05/07 davidm Switch from PCI-DMA to generic device DMA API. |
13 | * 00/12/13 davidm Rename to swiotlb.c and add mark_clean() to avoid |
14 | * unnecessary i-cache flushing. |
15 | * 04/07/.. ak Better overflow handling. Assorted fixes. |
16 | * 05/09/10 linville Add support for syncing ranges, support syncing for |
17 | * DMA_BIDIRECTIONAL mappings, miscellaneous cleanup. |
18 | * 08/12/11 beckyb Add highmem support |
19 | */ |
20 | |
21 | #define pr_fmt(fmt) "software IO TLB: " fmt |
22 | |
23 | #include <linux/cache.h> |
24 | #include <linux/cc_platform.h> |
25 | #include <linux/ctype.h> |
26 | #include <linux/debugfs.h> |
27 | #include <linux/dma-direct.h> |
28 | #include <linux/dma-map-ops.h> |
29 | #include <linux/export.h> |
30 | #include <linux/gfp.h> |
31 | #include <linux/highmem.h> |
32 | #include <linux/io.h> |
33 | #include <linux/iommu-helper.h> |
34 | #include <linux/init.h> |
35 | #include <linux/memblock.h> |
36 | #include <linux/mm.h> |
37 | #include <linux/pfn.h> |
38 | #include <linux/rculist.h> |
39 | #include <linux/scatterlist.h> |
40 | #include <linux/set_memory.h> |
41 | #include <linux/spinlock.h> |
42 | #include <linux/string.h> |
43 | #include <linux/swiotlb.h> |
44 | #include <linux/types.h> |
45 | #ifdef CONFIG_DMA_RESTRICTED_POOL |
46 | #include <linux/of.h> |
47 | #include <linux/of_fdt.h> |
48 | #include <linux/of_reserved_mem.h> |
49 | #include <linux/slab.h> |
50 | #endif |
51 | |
52 | #define CREATE_TRACE_POINTS |
53 | #include <trace/events/swiotlb.h> |
54 | |
55 | #define SLABS_PER_PAGE (1 << (PAGE_SHIFT - IO_TLB_SHIFT)) |
56 | |
57 | /* |
58 | * Minimum IO TLB size to bother booting with. Systems with mainly |
59 | * 64bit capable cards will only lightly use the swiotlb. If we can't |
60 | * allocate a contiguous 1MB, we're probably in trouble anyway. |
61 | */ |
62 | #define IO_TLB_MIN_SLABS ((1<<20) >> IO_TLB_SHIFT) |
63 | |
64 | #define INVALID_PHYS_ADDR (~(phys_addr_t)0) |
65 | |
66 | /** |
67 | * struct io_tlb_slot - IO TLB slot descriptor |
68 | * @orig_addr: The original address corresponding to a mapped entry. |
69 | * @alloc_size: Size of the allocated buffer. |
70 | * @list: The free list describing the number of free entries available |
71 | * from each index. |
72 | * @pad_slots: Number of preceding padding slots. Valid only in the first |
73 | * allocated non-padding slot. |
74 | */ |
75 | struct io_tlb_slot { |
76 | phys_addr_t orig_addr; |
77 | size_t alloc_size; |
78 | unsigned short list; |
79 | unsigned short pad_slots; |
80 | }; |
81 | |
82 | static bool swiotlb_force_bounce; |
83 | static bool swiotlb_force_disable; |
84 | |
85 | #ifdef CONFIG_SWIOTLB_DYNAMIC |
86 | |
87 | static void swiotlb_dyn_alloc(struct work_struct *work); |
88 | |
89 | static struct io_tlb_mem io_tlb_default_mem = { |
90 | .lock = __SPIN_LOCK_UNLOCKED(io_tlb_default_mem.lock), |
91 | .pools = LIST_HEAD_INIT(io_tlb_default_mem.pools), |
92 | .dyn_alloc = __WORK_INITIALIZER(io_tlb_default_mem.dyn_alloc, |
93 | swiotlb_dyn_alloc), |
94 | }; |
95 | |
96 | #else /* !CONFIG_SWIOTLB_DYNAMIC */ |
97 | |
98 | static struct io_tlb_mem io_tlb_default_mem; |
99 | |
100 | #endif /* CONFIG_SWIOTLB_DYNAMIC */ |
101 | |
102 | static unsigned long default_nslabs = IO_TLB_DEFAULT_SIZE >> IO_TLB_SHIFT; |
103 | static unsigned long default_nareas; |
104 | |
105 | /** |
106 | * struct io_tlb_area - IO TLB memory area descriptor |
107 | * |
108 | * This is a single area with a single lock. |
109 | * |
110 | * @used: The number of used IO TLB slots in this area. |
111 | * @index: The slot index to start searching in this area for next round. |
112 | * @lock: The lock to protect the above data structures in the map and |
113 | * unmap calls. |
114 | */ |
115 | struct io_tlb_area { |
116 | unsigned long used; |
117 | unsigned int index; |
118 | spinlock_t lock; |
119 | }; |
120 | |
121 | /* |
122 | * Round up the number of slabs to the next power of 2. The last area will |
123 | * be smaller than the rest if default_nslabs is not a power of two. |
124 | * The number of slots in an area should be a multiple of IO_TLB_SEGSIZE; |
125 | * otherwise a segment may span two or more areas, which conflicts with |
126 | * free contiguous slot tracking: free slots are treated as contiguous no |
127 | * matter whether they cross an area boundary. |
128 | * |
129 | * Return true if default_nslabs is rounded up. |
130 | */ |
131 | static bool round_up_default_nslabs(void) |
132 | { |
133 | if (!default_nareas) |
134 | return false; |
135 | |
136 | if (default_nslabs < IO_TLB_SEGSIZE * default_nareas) |
137 | default_nslabs = IO_TLB_SEGSIZE * default_nareas; |
138 | else if (is_power_of_2(default_nslabs)) |
139 | return false; |
140 | default_nslabs = roundup_pow_of_two(default_nslabs); |
141 | return true; |
142 | } |
143 | |
144 | /** |
145 | * swiotlb_adjust_nareas() - adjust the number of areas and slots |
146 | * @nareas: Desired number of areas. Zero is treated as 1. |
147 | * |
148 | * Adjust the default number of areas in a memory pool. |
149 | * The default size of the memory pool may also change to meet minimum area |
150 | * size requirements. |
151 | */ |
152 | static void swiotlb_adjust_nareas(unsigned int nareas) |
153 | { |
154 | if (!nareas) |
155 | nareas = 1; |
156 | else if (!is_power_of_2(nareas)) |
157 | nareas = roundup_pow_of_two(nareas); |
158 | |
159 | default_nareas = nareas; |
160 | |
161 | pr_info("area num %d.\n", nareas); |
162 | if (round_up_default_nslabs()) |
163 | pr_info("SWIOTLB bounce buffer size roundup to %luMB", |
164 | (default_nslabs << IO_TLB_SHIFT) >> 20); |
165 | } |
166 | |
167 | /** |
168 | * limit_nareas() - get the maximum number of areas for a given memory pool size |
169 | * @nareas: Desired number of areas. |
170 | * @nslots: Total number of slots in the memory pool. |
171 | * |
172 | * Limit the number of areas to the maximum possible number of areas in |
173 | * a memory pool of the given size. |
174 | * |
175 | * Return: Maximum possible number of areas. |
176 | */ |
177 | static unsigned int limit_nareas(unsigned int nareas, unsigned long nslots) |
178 | { |
179 | if (nslots < nareas * IO_TLB_SEGSIZE) |
180 | return nslots / IO_TLB_SEGSIZE; |
181 | return nareas; |
182 | } |
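| /* |
| * Illustrative example (assuming IO_TLB_SEGSIZE == 128): a pool of 1024 |
| * slots can hold at most 1024 / 128 == 8 areas, so a request for 16 |
| * areas is limited to 8 here. |
| */ |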
183 | |
184 | static int __init |
185 | setup_io_tlb_npages(char *str) |
186 | { |
187 | if (isdigit(*str)) { |
188 | /* avoid tail segment of size < IO_TLB_SEGSIZE */ |
189 | default_nslabs = |
190 | ALIGN(simple_strtoul(str, &str, 0), IO_TLB_SEGSIZE); |
191 | } |
192 | if (*str == ',') |
193 | ++str; |
194 | if (isdigit(*str)) |
195 | swiotlb_adjust_nareas(simple_strtoul(str, &str, 0)); |
196 | if (*str == ',') |
197 | ++str; |
198 | if (!strcmp(str, "force")) |
199 | swiotlb_force_bounce = true; |
200 | else if (!strcmp(str, "noforce")) |
201 | swiotlb_force_disable = true; |
202 | |
203 | return 0; |
204 | } |
205 | early_param("swiotlb", setup_io_tlb_npages); |
206 | |
207 | unsigned long swiotlb_size_or_default(void) |
208 | { |
209 | return default_nslabs << IO_TLB_SHIFT; |
210 | } |
211 | |
212 | void __init swiotlb_adjust_size(unsigned long size) |
213 | { |
214 | /* |
215 | * If swiotlb parameter has not been specified, give a chance to |
216 | * architectures such as those supporting memory encryption to |
217 | * adjust/expand SWIOTLB size for their use. |
218 | */ |
219 | if (default_nslabs != IO_TLB_DEFAULT_SIZE >> IO_TLB_SHIFT) |
220 | return; |
221 | |
222 | size = ALIGN(size, IO_TLB_SIZE); |
223 | default_nslabs = ALIGN(size >> IO_TLB_SHIFT, IO_TLB_SEGSIZE); |
224 | if (round_up_default_nslabs()) |
225 | size = default_nslabs << IO_TLB_SHIFT; |
226 | pr_info("SWIOTLB bounce buffer size adjusted to %luMB", size >> 20); |
227 | } |
228 | |
229 | void swiotlb_print_info(void) |
230 | { |
231 | struct io_tlb_pool *mem = &io_tlb_default_mem.defpool; |
232 | |
233 | if (!mem->nslabs) { |
234 | pr_warn("No low mem\n"); |
235 | return; |
236 | } |
237 | |
238 | pr_info("mapped [mem %pa-%pa] (%luMB)\n", &mem->start, &mem->end, |
239 | (mem->nslabs << IO_TLB_SHIFT) >> 20); |
240 | } |
241 | |
242 | static inline unsigned long io_tlb_offset(unsigned long val) |
243 | { |
244 | return val & (IO_TLB_SEGSIZE - 1); |
245 | } |
246 | |
247 | static inline unsigned long nr_slots(u64 val) |
248 | { |
249 | return DIV_ROUND_UP(val, IO_TLB_SIZE); |
250 | } |
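| /* |
| * For example, assuming the usual 2 KiB slots (IO_TLB_SIZE == 2048), a |
| * 5000-byte request needs DIV_ROUND_UP(5000, 2048) == 3 slots. |
| */ |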
251 | |
252 | /* |
253 | * Early SWIOTLB allocation may be too early to allow an architecture to |
254 | * perform the desired operations. This function allows the architecture to |
255 | * call SWIOTLB when the operations are possible. It needs to be called |
256 | * before the SWIOTLB memory is used. |
257 | */ |
258 | void __init swiotlb_update_mem_attributes(void) |
259 | { |
260 | struct io_tlb_pool *mem = &io_tlb_default_mem.defpool; |
261 | unsigned long bytes; |
262 | |
263 | if (!mem->nslabs || mem->late_alloc) |
264 | return; |
265 | bytes = PAGE_ALIGN(mem->nslabs << IO_TLB_SHIFT); |
266 | set_memory_decrypted((unsigned long)mem->vaddr, bytes >> PAGE_SHIFT); |
267 | } |
268 | |
269 | static void swiotlb_init_io_tlb_pool(struct io_tlb_pool *mem, phys_addr_t start, |
270 | unsigned long nslabs, bool late_alloc, unsigned int nareas) |
271 | { |
272 | void *vaddr = phys_to_virt(start); |
273 | unsigned long bytes = nslabs << IO_TLB_SHIFT, i; |
274 | |
275 | mem->nslabs = nslabs; |
276 | mem->start = start; |
277 | mem->end = mem->start + bytes; |
278 | mem->late_alloc = late_alloc; |
279 | mem->nareas = nareas; |
280 | mem->area_nslabs = nslabs / mem->nareas; |
281 | |
282 | for (i = 0; i < mem->nareas; i++) { |
283 | spin_lock_init(&mem->areas[i].lock); |
284 | mem->areas[i].index = 0; |
285 | mem->areas[i].used = 0; |
286 | } |
287 | |
288 | for (i = 0; i < mem->nslabs; i++) { |
289 | mem->slots[i].list = min(IO_TLB_SEGSIZE - io_tlb_offset(i), |
290 | mem->nslabs - i); |
291 | mem->slots[i].orig_addr = INVALID_PHYS_ADDR; |
292 | mem->slots[i].alloc_size = 0; |
293 | mem->slots[i].pad_slots = 0; |
294 | } |
295 | |
296 | memset(vaddr, 0, bytes); |
297 | mem->vaddr = vaddr; |
298 | return; |
299 | } |
300 | |
301 | /** |
302 | * add_mem_pool() - add a memory pool to the allocator |
303 | * @mem: Software IO TLB allocator. |
304 | * @pool: Memory pool to be added. |
305 | */ |
306 | static void add_mem_pool(struct io_tlb_mem *mem, struct io_tlb_pool *pool) |
307 | { |
308 | #ifdef CONFIG_SWIOTLB_DYNAMIC |
309 | spin_lock(&mem->lock); |
310 | list_add_rcu(&pool->node, &mem->pools); |
311 | mem->nslabs += pool->nslabs; |
312 | spin_unlock(&mem->lock); |
313 | #else |
314 | mem->nslabs = pool->nslabs; |
315 | #endif |
316 | } |
317 | |
318 | static void __init *swiotlb_memblock_alloc(unsigned long nslabs, |
319 | unsigned int flags, |
320 | int (*remap)(void *tlb, unsigned long nslabs)) |
321 | { |
322 | size_t bytes = PAGE_ALIGN(nslabs << IO_TLB_SHIFT); |
323 | void *tlb; |
324 | |
325 | /* |
326 | * By default allocate the bounce buffer memory from low memory, but |
327 | * allow to pick a location everywhere for hypervisors with guest |
328 | * memory encryption. |
329 | */ |
330 | if (flags & SWIOTLB_ANY) |
331 | tlb = memblock_alloc(bytes, PAGE_SIZE); |
332 | else |
333 | tlb = memblock_alloc_low(bytes, PAGE_SIZE); |
334 | |
335 | if (!tlb) { |
336 | pr_warn("%s: Failed to allocate %zu bytes tlb structure\n", |
337 | __func__, bytes); |
338 | return NULL; |
339 | } |
340 | |
341 | if (remap && remap(tlb, nslabs) < 0) { |
342 | memblock_free(tlb, PAGE_ALIGN(bytes)); |
343 | pr_warn("%s: Failed to remap %zu bytes\n", __func__, bytes); |
344 | return NULL; |
345 | } |
346 | |
347 | return tlb; |
348 | } |
349 | |
350 | /* |
351 | * Statically reserve bounce buffer space and initialize bounce buffer data |
352 | * structures for the software IO TLB used to implement the DMA API. |
353 | */ |
354 | void __init swiotlb_init_remap(bool addressing_limit, unsigned int flags, |
355 | int (*remap)(void *tlb, unsigned long nslabs)) |
356 | { |
357 | struct io_tlb_pool *mem = &io_tlb_default_mem.defpool; |
358 | unsigned long nslabs; |
359 | unsigned int nareas; |
360 | size_t alloc_size; |
361 | void *tlb; |
362 | |
363 | if (!addressing_limit && !swiotlb_force_bounce) |
364 | return; |
365 | if (swiotlb_force_disable) |
366 | return; |
367 | |
368 | io_tlb_default_mem.force_bounce = |
369 | swiotlb_force_bounce || (flags & SWIOTLB_FORCE); |
370 | |
371 | #ifdef CONFIG_SWIOTLB_DYNAMIC |
372 | if (!remap) |
373 | io_tlb_default_mem.can_grow = true; |
374 | if (flags & SWIOTLB_ANY) |
375 | io_tlb_default_mem.phys_limit = virt_to_phys(high_memory - 1); |
376 | else |
377 | io_tlb_default_mem.phys_limit = ARCH_LOW_ADDRESS_LIMIT; |
378 | #endif |
379 | |
380 | if (!default_nareas) |
381 | swiotlb_adjust_nareas(num_possible_cpus()); |
382 | |
383 | nslabs = default_nslabs; |
384 | nareas = limit_nareas(default_nareas, nslabs); |
385 | while ((tlb = swiotlb_memblock_alloc(nslabs, flags, remap)) == NULL) { |
386 | if (nslabs <= IO_TLB_MIN_SLABS) |
387 | return; |
388 | nslabs = ALIGN(nslabs >> 1, IO_TLB_SEGSIZE); |
389 | nareas = limit_nareas(nareas, nslabs); |
390 | } |
391 | |
392 | if (default_nslabs != nslabs) { |
393 | pr_info("SWIOTLB bounce buffer size adjusted %lu -> %lu slabs", |
394 | default_nslabs, nslabs); |
395 | default_nslabs = nslabs; |
396 | } |
397 | |
398 | alloc_size = PAGE_ALIGN(array_size(sizeof(*mem->slots), nslabs)); |
399 | mem->slots = memblock_alloc(alloc_size, PAGE_SIZE); |
400 | if (!mem->slots) { |
401 | pr_warn("%s: Failed to allocate %zu bytes align=0x%lx\n", |
402 | __func__, alloc_size, PAGE_SIZE); |
403 | return; |
404 | } |
405 | |
406 | mem->areas = memblock_alloc(array_size(sizeof(struct io_tlb_area), |
407 | nareas), SMP_CACHE_BYTES); |
408 | if (!mem->areas) { |
409 | pr_warn("%s: Failed to allocate mem->areas.\n", __func__); |
410 | return; |
411 | } |
412 | |
413 | swiotlb_init_io_tlb_pool(mem, __pa(tlb), nslabs, false, nareas); |
414 | add_mem_pool(&io_tlb_default_mem, mem); |
415 | |
416 | if (flags & SWIOTLB_VERBOSE) |
417 | swiotlb_print_info(); |
418 | } |
419 | |
420 | void __init swiotlb_init(bool addressing_limit, unsigned int flags) |
421 | { |
422 | swiotlb_init_remap(addressing_limit, flags, NULL); |
423 | } |
424 | |
425 | /* |
426 | * Systems with larger DMA zones (those that don't support ISA) can |
427 | * initialize the swiotlb later using the slab allocator if needed. |
428 | * This should be just like above, but with some error catching. |
429 | */ |
430 | int swiotlb_init_late(size_t size, gfp_t gfp_mask, |
431 | int (*remap)(void *tlb, unsigned long nslabs)) |
432 | { |
433 | struct io_tlb_pool *mem = &io_tlb_default_mem.defpool; |
434 | unsigned long nslabs = ALIGN(size >> IO_TLB_SHIFT, IO_TLB_SEGSIZE); |
435 | unsigned int nareas; |
436 | unsigned char *vstart = NULL; |
437 | unsigned int order, area_order; |
438 | bool retried = false; |
439 | int rc = 0; |
440 | |
441 | if (io_tlb_default_mem.nslabs) |
442 | return 0; |
443 | |
444 | if (swiotlb_force_disable) |
445 | return 0; |
446 | |
447 | io_tlb_default_mem.force_bounce = swiotlb_force_bounce; |
448 | |
449 | #ifdef CONFIG_SWIOTLB_DYNAMIC |
450 | if (!remap) |
451 | io_tlb_default_mem.can_grow = true; |
452 | if (IS_ENABLED(CONFIG_ZONE_DMA) && (gfp_mask & __GFP_DMA)) |
453 | io_tlb_default_mem.phys_limit = zone_dma_limit; |
454 | else if (IS_ENABLED(CONFIG_ZONE_DMA32) && (gfp_mask & __GFP_DMA32)) |
455 | io_tlb_default_mem.phys_limit = max(DMA_BIT_MASK(32), zone_dma_limit); |
456 | else |
457 | io_tlb_default_mem.phys_limit = virt_to_phys(high_memory - 1); |
458 | #endif |
459 | |
460 | if (!default_nareas) |
461 | swiotlb_adjust_nareas(num_possible_cpus()); |
462 | |
463 | retry: |
464 | order = get_order(nslabs << IO_TLB_SHIFT); |
465 | nslabs = SLABS_PER_PAGE << order; |
466 | |
467 | while ((SLABS_PER_PAGE << order) > IO_TLB_MIN_SLABS) { |
468 | vstart = (void *)__get_free_pages(gfp_mask | __GFP_NOWARN, |
469 | order); |
470 | if (vstart) |
471 | break; |
472 | order--; |
473 | nslabs = SLABS_PER_PAGE << order; |
474 | retried = true; |
475 | } |
476 | |
477 | if (!vstart) |
478 | return -ENOMEM; |
479 | |
480 | if (remap) |
481 | rc = remap(vstart, nslabs); |
482 | if (rc) { |
483 | free_pages((unsigned long)vstart, order); |
484 | |
485 | nslabs = ALIGN(nslabs >> 1, IO_TLB_SEGSIZE); |
486 | if (nslabs < IO_TLB_MIN_SLABS) |
487 | return rc; |
488 | retried = true; |
489 | goto retry; |
490 | } |
491 | |
492 | if (retried) { |
493 | pr_warn("only able to allocate %ld MB\n", |
494 | (PAGE_SIZE << order) >> 20); |
495 | } |
496 | |
497 | nareas = limit_nareas(default_nareas, nslabs); |
498 | area_order = get_order(array_size(sizeof(*mem->areas), nareas)); |
499 | mem->areas = (struct io_tlb_area *) |
500 | __get_free_pages(GFP_KERNEL | __GFP_ZERO, area_order); |
501 | if (!mem->areas) |
502 | goto error_area; |
503 | |
504 | mem->slots = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, |
505 | get_order(array_size(sizeof(*mem->slots), nslabs))); |
506 | if (!mem->slots) |
507 | goto error_slots; |
508 | |
509 | set_memory_decrypted((unsigned long)vstart, |
510 | (nslabs << IO_TLB_SHIFT) >> PAGE_SHIFT); |
511 | swiotlb_init_io_tlb_pool(mem, virt_to_phys(vstart), nslabs, true, |
512 | nareas); |
513 | add_mem_pool(&io_tlb_default_mem, mem); |
514 | |
515 | swiotlb_print_info(); |
516 | return 0; |
517 | |
518 | error_slots: |
519 | free_pages((unsigned long)mem->areas, area_order); |
520 | error_area: |
521 | free_pages((unsigned long)vstart, order); |
522 | return -ENOMEM; |
523 | } |
524 | |
525 | void __init swiotlb_exit(void) |
526 | { |
527 | struct io_tlb_pool *mem = &io_tlb_default_mem.defpool; |
528 | unsigned long tbl_vaddr; |
529 | size_t tbl_size, slots_size; |
530 | unsigned int area_order; |
531 | |
532 | if (swiotlb_force_bounce) |
533 | return; |
534 | |
535 | if (!mem->nslabs) |
536 | return; |
537 | |
538 | pr_info("tearing down default memory pool\n"); |
539 | tbl_vaddr = (unsigned long)phys_to_virt(mem->start); |
540 | tbl_size = PAGE_ALIGN(mem->end - mem->start); |
541 | slots_size = PAGE_ALIGN(array_size(sizeof(*mem->slots), mem->nslabs)); |
542 | |
543 | set_memory_encrypted(tbl_vaddr, tbl_size >> PAGE_SHIFT); |
544 | if (mem->late_alloc) { |
545 | area_order = get_order(array_size(sizeof(*mem->areas), |
546 | mem->nareas)); |
547 | free_pages((unsigned long)mem->areas, area_order); |
548 | free_pages(tbl_vaddr, get_order(tbl_size)); |
549 | free_pages((unsigned long)mem->slots, get_order(slots_size)); |
550 | } else { |
551 | memblock_free_late(__pa(mem->areas), |
552 | array_size(sizeof(*mem->areas), mem->nareas)); |
553 | memblock_free_late(mem->start, tbl_size); |
554 | memblock_free_late(__pa(mem->slots), slots_size); |
555 | } |
556 | |
557 | memset(mem, 0, sizeof(*mem)); |
558 | } |
559 | |
560 | #ifdef CONFIG_SWIOTLB_DYNAMIC |
561 | |
562 | /** |
563 | * alloc_dma_pages() - allocate pages to be used for DMA |
564 | * @gfp: GFP flags for the allocation. |
565 | * @bytes: Size of the buffer. |
566 | * @phys_limit: Maximum allowed physical address of the buffer. |
567 | * |
568 | * Allocate pages from the buddy allocator. If successful, make the allocated |
569 | * pages decrypted so that they can be used for DMA. |
570 | * |
571 | * Return: Decrypted pages, %NULL on allocation failure, or ERR_PTR(-EAGAIN) |
572 | * if the allocated physical address was above @phys_limit. |
573 | */ |
574 | static struct page *alloc_dma_pages(gfp_t gfp, size_t bytes, u64 phys_limit) |
575 | { |
576 | unsigned int order = get_order(bytes); |
577 | struct page *page; |
578 | phys_addr_t paddr; |
579 | void *vaddr; |
580 | |
581 | page = alloc_pages(gfp, order); |
582 | if (!page) |
583 | return NULL; |
584 | |
585 | paddr = page_to_phys(page); |
586 | if (paddr + bytes - 1 > phys_limit) { |
587 | __free_pages(page, order); |
588 | return ERR_PTR(-EAGAIN); |
589 | } |
590 | |
591 | vaddr = phys_to_virt(paddr); |
592 | if (set_memory_decrypted((unsigned long)vaddr, PFN_UP(bytes))) |
593 | goto error; |
594 | return page; |
595 | |
596 | error: |
597 | /* Intentional leak if pages cannot be encrypted again. */ |
598 | if (!set_memory_encrypted((unsigned long)vaddr, PFN_UP(bytes))) |
599 | __free_pages(page, order); |
600 | return NULL; |
601 | } |
602 | |
603 | /** |
604 | * swiotlb_alloc_tlb() - allocate a dynamic IO TLB buffer |
605 | * @dev: Device for which a memory pool is allocated. |
606 | * @bytes: Size of the buffer. |
607 | * @phys_limit: Maximum allowed physical address of the buffer. |
608 | * @gfp: GFP flags for the allocation. |
609 | * |
610 | * Return: Allocated pages, or %NULL on allocation failure. |
611 | */ |
612 | static struct page *swiotlb_alloc_tlb(struct device *dev, size_t bytes, |
613 | u64 phys_limit, gfp_t gfp) |
614 | { |
615 | struct page *page; |
616 | |
617 | /* |
618 | * Allocate from the atomic pools if memory is encrypted and |
619 | * the allocation is atomic, because decrypting may block. |
620 | */ |
621 | if (!gfpflags_allow_blocking(gfp) && dev && force_dma_unencrypted(dev)) { |
622 | void *vaddr; |
623 | |
624 | if (!IS_ENABLED(CONFIG_DMA_COHERENT_POOL)) |
625 | return NULL; |
626 | |
627 | return dma_alloc_from_pool(dev, bytes, &vaddr, gfp, |
628 | dma_coherent_ok); |
629 | } |
630 | |
631 | gfp &= ~GFP_ZONEMASK; |
632 | if (phys_limit <= zone_dma_limit) |
633 | gfp |= __GFP_DMA; |
634 | else if (phys_limit <= DMA_BIT_MASK(32)) |
635 | gfp |= __GFP_DMA32; |
636 | |
637 | while (IS_ERR(page = alloc_dma_pages(gfp, bytes, phys_limit))) { |
638 | if (IS_ENABLED(CONFIG_ZONE_DMA32) && |
639 | phys_limit < DMA_BIT_MASK(64) && |
640 | !(gfp & (__GFP_DMA32 | __GFP_DMA))) |
641 | gfp |= __GFP_DMA32; |
642 | else if (IS_ENABLED(CONFIG_ZONE_DMA) && |
643 | !(gfp & __GFP_DMA)) |
644 | gfp = (gfp & ~__GFP_DMA32) | __GFP_DMA; |
645 | else |
646 | return NULL; |
647 | } |
648 | |
649 | return page; |
650 | } |
651 | |
652 | /** |
653 | * swiotlb_free_tlb() - free a dynamically allocated IO TLB buffer |
654 | * @vaddr: Virtual address of the buffer. |
655 | * @bytes: Size of the buffer. |
656 | */ |
657 | static void swiotlb_free_tlb(void *vaddr, size_t bytes) |
658 | { |
659 | if (IS_ENABLED(CONFIG_DMA_COHERENT_POOL) && |
660 | dma_free_from_pool(NULL, vaddr, bytes)) |
661 | return; |
662 | |
663 | /* Intentional leak if pages cannot be encrypted again. */ |
664 | if (!set_memory_encrypted((unsigned long)vaddr, PFN_UP(bytes))) |
665 | __free_pages(virt_to_page(vaddr), get_order(bytes)); |
666 | } |
667 | |
668 | /** |
669 | * swiotlb_alloc_pool() - allocate a new IO TLB memory pool |
670 | * @dev: Device for which a memory pool is allocated. |
671 | * @minslabs: Minimum number of slabs. |
672 | * @nslabs: Desired (maximum) number of slabs. |
673 | * @nareas: Number of areas. |
674 | * @phys_limit: Maximum DMA buffer physical address. |
675 | * @gfp: GFP flags for the allocations. |
676 | * |
677 | * Allocate and initialize a new IO TLB memory pool. The actual number of |
678 | * slabs may be reduced if allocation of @nslabs fails. If even |
679 | * @minslabs cannot be allocated, this function fails. |
680 | * |
681 | * Return: New memory pool, or %NULL on allocation failure. |
682 | */ |
683 | static struct io_tlb_pool *swiotlb_alloc_pool(struct device *dev, |
684 | unsigned long minslabs, unsigned long nslabs, |
685 | unsigned int nareas, u64 phys_limit, gfp_t gfp) |
686 | { |
687 | struct io_tlb_pool *pool; |
688 | unsigned int slot_order; |
689 | struct page *tlb; |
690 | size_t pool_size; |
691 | size_t tlb_size; |
692 | |
693 | if (nslabs > SLABS_PER_PAGE << MAX_PAGE_ORDER) { |
694 | nslabs = SLABS_PER_PAGE << MAX_PAGE_ORDER; |
695 | nareas = limit_nareas(nareas, nslabs); |
696 | } |
697 | |
698 | pool_size = sizeof(*pool) + array_size(sizeof(*pool->areas), nareas); |
699 | pool = kzalloc(pool_size, gfp); |
700 | if (!pool) |
701 | goto error; |
702 | pool->areas = (void *)pool + sizeof(*pool); |
703 | |
704 | tlb_size = nslabs << IO_TLB_SHIFT; |
705 | while (!(tlb = swiotlb_alloc_tlb(dev, tlb_size, phys_limit, gfp))) { |
706 | if (nslabs <= minslabs) |
707 | goto error_tlb; |
708 | nslabs = ALIGN(nslabs >> 1, IO_TLB_SEGSIZE); |
709 | nareas = limit_nareas(nareas, nslabs); |
710 | tlb_size = nslabs << IO_TLB_SHIFT; |
711 | } |
712 | |
713 | slot_order = get_order(array_size(sizeof(*pool->slots), nslabs)); |
714 | pool->slots = (struct io_tlb_slot *) |
715 | __get_free_pages(gfp, slot_order); |
716 | if (!pool->slots) |
717 | goto error_slots; |
718 | |
719 | swiotlb_init_io_tlb_pool(pool, page_to_phys(tlb), nslabs, true, nareas); |
720 | return pool; |
721 | |
722 | error_slots: |
723 | swiotlb_free_tlb(page_address(tlb), tlb_size); |
724 | error_tlb: |
725 | kfree(pool); |
726 | error: |
727 | return NULL; |
728 | } |
729 | |
730 | /** |
731 | * swiotlb_dyn_alloc() - dynamic memory pool allocation worker |
732 | * @work: Pointer to dyn_alloc in struct io_tlb_mem. |
733 | */ |
734 | static void swiotlb_dyn_alloc(struct work_struct *work) |
735 | { |
736 | struct io_tlb_mem *mem = |
737 | container_of(work, struct io_tlb_mem, dyn_alloc); |
738 | struct io_tlb_pool *pool; |
739 | |
740 | pool = swiotlb_alloc_pool(NULL, IO_TLB_MIN_SLABS, default_nslabs, |
741 | default_nareas, mem->phys_limit, GFP_KERNEL); |
742 | if (!pool) { |
743 | pr_warn_ratelimited("Failed to allocate new pool"); |
744 | return; |
745 | } |
746 | |
747 | add_mem_pool(mem, pool); |
748 | } |
749 | |
750 | /** |
751 | * swiotlb_dyn_free() - RCU callback to free a memory pool |
752 | * @rcu: RCU head in the corresponding struct io_tlb_pool. |
753 | */ |
754 | static void swiotlb_dyn_free(struct rcu_head *rcu) |
755 | { |
756 | struct io_tlb_pool *pool = container_of(rcu, struct io_tlb_pool, rcu); |
757 | size_t slots_size = array_size(sizeof(*pool->slots), pool->nslabs); |
758 | size_t tlb_size = pool->end - pool->start; |
759 | |
760 | free_pages((unsigned long)pool->slots, get_order(slots_size)); |
761 | swiotlb_free_tlb(pool->vaddr, tlb_size); |
762 | kfree(pool); |
763 | } |
764 | |
765 | /** |
766 | * __swiotlb_find_pool() - find the IO TLB pool for a physical address |
767 | * @dev: Device which has mapped the DMA buffer. |
768 | * @paddr: Physical address within the DMA buffer. |
769 | * |
770 | * Find the IO TLB memory pool descriptor which contains the given physical |
771 | * address, if any. This function is for use only when the dev is known to |
772 | * be using swiotlb. Use swiotlb_find_pool() for the more general case |
773 | * when this condition is not met. |
774 | * |
775 | * Return: Memory pool which contains @paddr, or %NULL if none. |
776 | */ |
777 | struct io_tlb_pool *__swiotlb_find_pool(struct device *dev, phys_addr_t paddr) |
778 | { |
779 | struct io_tlb_mem *mem = dev->dma_io_tlb_mem; |
780 | struct io_tlb_pool *pool; |
781 | |
782 | rcu_read_lock(); |
783 | list_for_each_entry_rcu(pool, &mem->pools, node) { |
784 | if (paddr >= pool->start && paddr < pool->end) |
785 | goto out; |
786 | } |
787 | |
788 | list_for_each_entry_rcu(pool, &dev->dma_io_tlb_pools, node) { |
789 | if (paddr >= pool->start && paddr < pool->end) |
790 | goto out; |
791 | } |
792 | pool = NULL; |
793 | out: |
794 | rcu_read_unlock(); |
795 | return pool; |
796 | } |
797 | |
798 | /** |
799 | * swiotlb_del_pool() - remove an IO TLB pool from a device |
800 | * @dev: Owning device. |
801 | * @pool: Memory pool to be removed. |
802 | */ |
803 | static void swiotlb_del_pool(struct device *dev, struct io_tlb_pool *pool) |
804 | { |
805 | unsigned long flags; |
806 | |
807 | spin_lock_irqsave(&dev->dma_io_tlb_lock, flags); |
808 | list_del_rcu(&pool->node); |
809 | spin_unlock_irqrestore(&dev->dma_io_tlb_lock, flags); |
810 | |
811 | call_rcu(&pool->rcu, swiotlb_dyn_free); |
812 | } |
813 | |
814 | #endif /* CONFIG_SWIOTLB_DYNAMIC */ |
815 | |
816 | /** |
817 | * swiotlb_dev_init() - initialize swiotlb fields in &struct device |
818 | * @dev: Device to be initialized. |
819 | */ |
820 | void swiotlb_dev_init(struct device *dev) |
821 | { |
822 | dev->dma_io_tlb_mem = &io_tlb_default_mem; |
823 | #ifdef CONFIG_SWIOTLB_DYNAMIC |
824 | INIT_LIST_HEAD(&dev->dma_io_tlb_pools); |
825 | spin_lock_init(&dev->dma_io_tlb_lock); |
826 | dev->dma_uses_io_tlb = false; |
827 | #endif |
828 | } |
829 | |
830 | /** |
831 | * swiotlb_align_offset() - Get required offset into an IO TLB allocation. |
832 | * @dev: Owning device. |
833 | * @align_mask: Allocation alignment mask. |
834 | * @addr: DMA address. |
835 | * |
836 | * Return the minimum offset from the start of an IO TLB allocation which is |
837 | * required for a given buffer address and allocation alignment to keep the |
838 | * device happy. |
839 | * |
840 | * First, the address bits covered by min_align_mask must be identical in the |
841 | * original address and the bounce buffer address. High bits are preserved by |
842 | * choosing a suitable IO TLB slot, but bits below IO_TLB_SHIFT require extra |
843 | * padding bytes before the bounce buffer. |
844 | * |
845 | * Second, @align_mask specifies which bits of the first allocated slot must |
846 | * be zero. This may require allocating additional padding slots, and then the |
847 | * offset (in bytes) from the first such padding slot is returned. |
848 | */ |
849 | static unsigned int swiotlb_align_offset(struct device *dev, |
850 | unsigned int align_mask, u64 addr) |
851 | { |
852 | return addr & dma_get_min_align_mask(dev) & |
853 | (align_mask | (IO_TLB_SIZE - 1)); |
854 | } |
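| /* |
| * Worked example (illustrative; assumes 2 KiB slots and an NVMe-like |
| * min_align_mask of 0xfff): with align_mask == 0 and addr == 0x12345678, |
| * the result is 0x12345678 & 0xfff & 0x7ff == 0x678, i.e. the bounce |
| * buffer must start 0x678 bytes into its first slot so that the low |
| * address bits match the original buffer. |
| */ |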
855 | |
856 | /* |
857 | * Bounce: copy the swiotlb buffer from or back to the original dma location |
858 | */ |
859 | static void swiotlb_bounce(struct device *dev, phys_addr_t tlb_addr, size_t size, |
860 | enum dma_data_direction dir, struct io_tlb_pool *mem) |
861 | { |
862 | int index = (tlb_addr - mem->start) >> IO_TLB_SHIFT; |
863 | phys_addr_t orig_addr = mem->slots[index].orig_addr; |
864 | size_t alloc_size = mem->slots[index].alloc_size; |
865 | unsigned long pfn = PFN_DOWN(orig_addr); |
866 | unsigned char *vaddr = mem->vaddr + tlb_addr - mem->start; |
867 | int tlb_offset; |
868 | |
869 | if (orig_addr == INVALID_PHYS_ADDR) |
870 | return; |
871 | |
872 | /* |
873 | * It's valid for tlb_offset to be negative. This can happen when the |
874 | * "offset" returned by swiotlb_align_offset() is non-zero, and the |
875 | * tlb_addr is pointing within the first "offset" bytes of the second |
876 | * or subsequent slots of the allocated swiotlb area. While it's not |
877 | * valid for tlb_addr to be pointing within the first "offset" bytes |
878 | * of the first slot, there's no way to check for such an error since |
879 | * this function can't distinguish the first slot from the second and |
880 | * subsequent slots. |
881 | */ |
882 | tlb_offset = (tlb_addr & (IO_TLB_SIZE - 1)) - |
883 | swiotlb_align_offset(dev, 0, orig_addr); |
884 | |
885 | orig_addr += tlb_offset; |
886 | alloc_size -= tlb_offset; |
887 | |
888 | if (size > alloc_size) { |
889 | dev_WARN_ONCE(dev, 1, |
890 | "Buffer overflow detected. Allocation size: %zu. Mapping size: %zu.\n", |
891 | alloc_size, size); |
892 | size = alloc_size; |
893 | } |
894 | |
895 | if (PageHighMem(pfn_to_page(pfn))) { |
896 | unsigned int offset = orig_addr & ~PAGE_MASK; |
897 | struct page *page; |
898 | unsigned int sz = 0; |
899 | unsigned long flags; |
900 | |
901 | while (size) { |
902 | sz = min_t(size_t, PAGE_SIZE - offset, size); |
903 | |
904 | local_irq_save(flags); |
905 | page = pfn_to_page(pfn); |
906 | if (dir == DMA_TO_DEVICE) |
907 | memcpy_from_page(vaddr, page, offset, sz); |
908 | else |
909 | memcpy_to_page(page, offset, vaddr, sz); |
910 | local_irq_restore(flags); |
911 | |
912 | size -= sz; |
913 | pfn++; |
914 | vaddr += sz; |
915 | offset = 0; |
916 | } |
917 | } else if (dir == DMA_TO_DEVICE) { |
918 | memcpy(vaddr, phys_to_virt(orig_addr), size); |
919 | } else { |
920 | memcpy(phys_to_virt(orig_addr), vaddr, size); |
921 | } |
922 | } |
923 | |
924 | static inline phys_addr_t slot_addr(phys_addr_t start, phys_addr_t idx) |
925 | { |
926 | return start + (idx << IO_TLB_SHIFT); |
927 | } |
928 | |
929 | /* |
930 | * Carefully handle integer overflow which can occur when boundary_mask == ~0UL. |
931 | */ |
932 | static inline unsigned long get_max_slots(unsigned long boundary_mask) |
933 | { |
934 | return (boundary_mask >> IO_TLB_SHIFT) + 1; |
935 | } |
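| /* |
| * E.g. for boundary_mask == ~0UL on 64-bit (with IO_TLB_SHIFT == 11), |
| * shifting first yields (~0UL >> 11) + 1 == 1UL << 53 slots, whereas |
| * computing (boundary_mask + 1) >> IO_TLB_SHIFT would overflow to 0. |
| */ |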
936 | |
937 | static unsigned int wrap_area_index(struct io_tlb_pool *mem, unsigned int index) |
938 | { |
939 | if (index >= mem->area_nslabs) |
940 | return 0; |
941 | return index; |
942 | } |
943 | |
944 | /* |
945 | * Track the total used slots with a global atomic value in order to have |
946 | * correct information to determine the high water mark. The mem_used() |
947 | * function gives imprecise results because there's no locking across |
948 | * multiple areas. |
949 | */ |
950 | #ifdef CONFIG_DEBUG_FS |
951 | static void inc_used_and_hiwater(struct io_tlb_mem *mem, unsigned int nslots) |
952 | { |
953 | unsigned long old_hiwater, new_used; |
954 | |
955 | new_used = atomic_long_add_return(nslots, &mem->total_used); |
956 | old_hiwater = atomic_long_read(&mem->used_hiwater); |
957 | do { |
958 | if (new_used <= old_hiwater) |
959 | break; |
960 | } while (!atomic_long_try_cmpxchg(&mem->used_hiwater, |
961 | &old_hiwater, new_used)); |
962 | } |
963 | |
964 | static void dec_used(struct io_tlb_mem *mem, unsigned int nslots) |
965 | { |
966 | atomic_long_sub(nslots, &mem->total_used); |
967 | } |
968 | |
969 | #else /* !CONFIG_DEBUG_FS */ |
970 | static void inc_used_and_hiwater(struct io_tlb_mem *mem, unsigned int nslots) |
971 | { |
972 | } |
973 | static void dec_used(struct io_tlb_mem *mem, unsigned int nslots) |
974 | { |
975 | } |
976 | #endif /* CONFIG_DEBUG_FS */ |
977 | |
978 | #ifdef CONFIG_SWIOTLB_DYNAMIC |
979 | #ifdef CONFIG_DEBUG_FS |
980 | static void inc_transient_used(struct io_tlb_mem *mem, unsigned int nslots) |
981 | { |
982 | atomic_long_add(nslots, &mem->transient_nslabs); |
983 | } |
984 | |
985 | static void dec_transient_used(struct io_tlb_mem *mem, unsigned int nslots) |
986 | { |
987 | atomic_long_sub(nslots, &mem->transient_nslabs); |
988 | } |
989 | |
990 | #else /* !CONFIG_DEBUG_FS */ |
991 | static void inc_transient_used(struct io_tlb_mem *mem, unsigned int nslots) |
992 | { |
993 | } |
994 | static void dec_transient_used(struct io_tlb_mem *mem, unsigned int nslots) |
995 | { |
996 | } |
997 | #endif /* CONFIG_DEBUG_FS */ |
998 | #endif /* CONFIG_SWIOTLB_DYNAMIC */ |
999 | |
1000 | /** |
1001 | * swiotlb_search_pool_area() - search one memory area in one pool |
1002 | * @dev: Device which maps the buffer. |
1003 | * @pool: Memory pool to be searched. |
1004 | * @area_index: Index of the IO TLB memory area to be searched. |
1005 | * @orig_addr: Original (non-bounced) IO buffer address. |
1006 | * @alloc_size: Total requested size of the bounce buffer, |
1007 | * including initial alignment padding. |
1008 | * @alloc_align_mask: Required alignment of the allocated buffer. |
1009 | * |
1010 | * Find a suitable sequence of IO TLB entries for the request and allocate |
1011 | * a buffer from the given IO TLB memory area. |
1012 | * This function takes care of locking. |
1013 | * |
1014 | * Return: Index of the first allocated slot, or -1 on error. |
1015 | */ |
1016 | static int swiotlb_search_pool_area(struct device *dev, struct io_tlb_pool *pool, |
1017 | int area_index, phys_addr_t orig_addr, size_t alloc_size, |
1018 | unsigned int alloc_align_mask) |
1019 | { |
1020 | struct io_tlb_area *area = pool->areas + area_index; |
1021 | unsigned long boundary_mask = dma_get_seg_boundary(dev); |
1022 | dma_addr_t tbl_dma_addr = |
1023 | phys_to_dma_unencrypted(dev, pool->start) & boundary_mask; |
1024 | unsigned long max_slots = get_max_slots(boundary_mask); |
1025 | unsigned int iotlb_align_mask = dma_get_min_align_mask(dev); |
1026 | unsigned int nslots = nr_slots(alloc_size), stride; |
1027 | unsigned int offset = swiotlb_align_offset(dev, 0, orig_addr); |
1028 | unsigned int index, slots_checked, count = 0, i; |
1029 | unsigned long flags; |
1030 | unsigned int slot_base; |
1031 | unsigned int slot_index; |
1032 | |
1033 | BUG_ON(!nslots); |
1034 | BUG_ON(area_index >= pool->nareas); |
1035 | |
1036 | /* |
1037 | * Historically, swiotlb allocations >= PAGE_SIZE were guaranteed to be |
1038 | * page-aligned in the absence of any other alignment requirements. |
1039 | * 'alloc_align_mask' was later introduced to specify the alignment |
1040 | * explicitly, however this is passed as zero for streaming mappings |
1041 | * and so we preserve the old behaviour there in case any drivers are |
1042 | * relying on it. |
1043 | */ |
1044 | if (!alloc_align_mask && !iotlb_align_mask && alloc_size >= PAGE_SIZE) |
1045 | alloc_align_mask = PAGE_SIZE - 1; |
1046 | |
1047 | /* |
1048 | * Ensure that the allocation is at least slot-aligned and update |
1049 | * 'iotlb_align_mask' to ignore bits that will be preserved when |
1050 | * offsetting into the allocation. |
1051 | */ |
1052 | alloc_align_mask |= (IO_TLB_SIZE - 1); |
1053 | iotlb_align_mask &= ~alloc_align_mask; |
1054 | |
1055 | /* |
1056 | * For mappings with an alignment requirement don't bother looping to |
1057 | * unaligned slots once we found an aligned one. |
1058 | */ |
1059 | stride = get_max_slots(max(alloc_align_mask, iotlb_align_mask)); |
1060 | |
1061 | spin_lock_irqsave(&area->lock, flags); |
1062 | if (unlikely(nslots > pool->area_nslabs - area->used)) |
1063 | goto not_found; |
1064 | |
1065 | slot_base = area_index * pool->area_nslabs; |
1066 | index = area->index; |
1067 | |
1068 | for (slots_checked = 0; slots_checked < pool->area_nslabs; ) { |
1069 | phys_addr_t tlb_addr; |
1070 | |
1071 | slot_index = slot_base + index; |
1072 | tlb_addr = slot_addr(tbl_dma_addr, slot_index); |
1073 | |
1074 | if ((tlb_addr & alloc_align_mask) || |
1075 | (orig_addr && (tlb_addr & iotlb_align_mask) != |
1076 | (orig_addr & iotlb_align_mask))) { |
1077 | index = wrap_area_index(pool, index + 1); |
1078 | slots_checked++; |
1079 | continue; |
1080 | } |
1081 | |
1082 | if (!iommu_is_span_boundary(slot_index, nslots, |
1083 | nr_slots(tbl_dma_addr), |
1084 | max_slots)) { |
1085 | if (pool->slots[slot_index].list >= nslots) |
1086 | goto found; |
1087 | } |
1088 | index = wrap_area_index(pool, index + stride); |
1089 | slots_checked += stride; |
1090 | } |
1091 | |
1092 | not_found: |
1093 | spin_unlock_irqrestore(&area->lock, flags); |
1094 | return -1; |
1095 | |
1096 | found: |
1097 | /* |
1098 | * If we find a slot that indicates we have 'nslots' number of |
1099 | * contiguous buffers, we allocate the buffers from that slot onwards |
1100 | * and set the list of free entries to '0' indicating unavailable. |
1101 | */ |
1102 | for (i = slot_index; i < slot_index + nslots; i++) { |
1103 | pool->slots[i].list = 0; |
1104 | pool->slots[i].alloc_size = alloc_size - (offset + |
1105 | ((i - slot_index) << IO_TLB_SHIFT)); |
1106 | } |
1107 | for (i = slot_index - 1; |
1108 | io_tlb_offset(i) != IO_TLB_SEGSIZE - 1 && |
1109 | pool->slots[i].list; i--) |
1110 | pool->slots[i].list = ++count; |
1111 | |
1112 | /* |
1113 | * Update the indices to avoid searching in the next round. |
1114 | */ |
1115 | area->index = wrap_area_index(pool, index + nslots); |
1116 | area->used += nslots; |
1117 | spin_unlock_irqrestore(&area->lock, flags); |
1118 | |
1119 | inc_used_and_hiwater(dev->dma_io_tlb_mem, nslots); |
1120 | return slot_index; |
1121 | } |
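| /* |
| * Stride example (assuming 4 KiB pages and 2 KiB slots): a streaming |
| * mapping of at least PAGE_SIZE ends up with alloc_align_mask == 0xfff |
| * above, so stride == (0xfff >> IO_TLB_SHIFT) + 1 == 2 and only every |
| * other (page-aligned) slot is probed. |
| */ |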
1122 | |
1123 | #ifdef CONFIG_SWIOTLB_DYNAMIC |
1124 | |
1125 | /** |
1126 | * swiotlb_search_area() - search one memory area in all pools |
1127 | * @dev: Device which maps the buffer. |
1128 | * @start_cpu: Start CPU number. |
1129 | * @cpu_offset: Offset from @start_cpu. |
1130 | * @orig_addr: Original (non-bounced) IO buffer address. |
1131 | * @alloc_size: Total requested size of the bounce buffer, |
1132 | * including initial alignment padding. |
1133 | * @alloc_align_mask: Required alignment of the allocated buffer. |
1134 | * @retpool: Used memory pool, updated on return. |
1135 | * |
1136 | * Search one memory area in all pools for a sequence of slots that match the |
1137 | * allocation constraints. |
1138 | * |
1139 | * Return: Index of the first allocated slot, or -1 on error. |
1140 | */ |
1141 | static int swiotlb_search_area(struct device *dev, int start_cpu, |
1142 | int cpu_offset, phys_addr_t orig_addr, size_t alloc_size, |
1143 | unsigned int alloc_align_mask, struct io_tlb_pool **retpool) |
1144 | { |
1145 | struct io_tlb_mem *mem = dev->dma_io_tlb_mem; |
1146 | struct io_tlb_pool *pool; |
1147 | int area_index; |
1148 | int index = -1; |
1149 | |
1150 | rcu_read_lock(); |
1151 | list_for_each_entry_rcu(pool, &mem->pools, node) { |
1152 | if (cpu_offset >= pool->nareas) |
1153 | continue; |
1154 | area_index = (start_cpu + cpu_offset) & (pool->nareas - 1); |
1155 | index = swiotlb_search_pool_area(dev, pool, area_index, |
1156 | orig_addr, alloc_size, |
1157 | alloc_align_mask); |
1158 | if (index >= 0) { |
1159 | *retpool = pool; |
1160 | break; |
1161 | } |
1162 | } |
1163 | rcu_read_unlock(); |
1164 | return index; |
1165 | } |
1166 | |
1167 | /** |
1168 | * swiotlb_find_slots() - search for slots in the whole swiotlb |
1169 | * @dev: Device which maps the buffer. |
1170 | * @orig_addr: Original (non-bounced) IO buffer address. |
1171 | * @alloc_size: Total requested size of the bounce buffer, |
1172 | * including initial alignment padding. |
1173 | * @alloc_align_mask: Required alignment of the allocated buffer. |
1174 | * @retpool: Used memory pool, updated on return. |
1175 | * |
1176 | * Search through the whole software IO TLB to find a sequence of slots that |
1177 | * match the allocation constraints. |
1178 | * |
1179 | * Return: Index of the first allocated slot, or -1 on error. |
1180 | */ |
1181 | static int swiotlb_find_slots(struct device *dev, phys_addr_t orig_addr, |
1182 | size_t alloc_size, unsigned int alloc_align_mask, |
1183 | struct io_tlb_pool **retpool) |
1184 | { |
1185 | struct io_tlb_mem *mem = dev->dma_io_tlb_mem; |
1186 | struct io_tlb_pool *pool; |
1187 | unsigned long nslabs; |
1188 | unsigned long flags; |
1189 | u64 phys_limit; |
1190 | int cpu, i; |
1191 | int index; |
1192 | |
1193 | if (alloc_size > IO_TLB_SEGSIZE * IO_TLB_SIZE) |
1194 | return -1; |
1195 | |
1196 | cpu = raw_smp_processor_id(); |
1197 | for (i = 0; i < default_nareas; ++i) { |
1198 | index = swiotlb_search_area(dev, cpu, i, orig_addr, alloc_size, |
1199 | alloc_align_mask, &pool); |
1200 | if (index >= 0) |
1201 | goto found; |
1202 | } |
1203 | |
1204 | if (!mem->can_grow) |
1205 | return -1; |
1206 | |
1207 | schedule_work(&mem->dyn_alloc); |
1208 | |
1209 | nslabs = nr_slots(alloc_size); |
1210 | phys_limit = min_not_zero(*dev->dma_mask, dev->bus_dma_limit); |
1211 | pool = swiotlb_alloc_pool(dev, nslabs, nslabs, 1, phys_limit, |
1212 | GFP_NOWAIT | __GFP_NOWARN); |
1213 | if (!pool) |
1214 | return -1; |
1215 | |
1216 | index = swiotlb_search_pool_area(dev, pool, 0, orig_addr, |
1217 | alloc_size, alloc_align_mask); |
1218 | if (index < 0) { |
1219 | swiotlb_dyn_free(&pool->rcu); |
1220 | return -1; |
1221 | } |
1222 | |
1223 | pool->transient = true; |
1224 | spin_lock_irqsave(&dev->dma_io_tlb_lock, flags); |
1225 | list_add_rcu(&pool->node, &dev->dma_io_tlb_pools); |
1226 | spin_unlock_irqrestore(&dev->dma_io_tlb_lock, flags); |
1227 | inc_transient_used(mem, pool->nslabs); |
1228 | |
1229 | found: |
1230 | WRITE_ONCE(dev->dma_uses_io_tlb, true); |
1231 | |
1232 | /* |
1233 | * The general barrier orders reads and writes against a presumed store |
1234 | * of the SWIOTLB buffer address by a device driver (to a driver private |
1235 | * data structure). It serves two purposes. |
1236 | * |
1237 | * First, the store to dev->dma_uses_io_tlb must be ordered before the |
1238 | * presumed store. This guarantees that the returned buffer address |
1239 | * cannot be passed to another CPU before updating dev->dma_uses_io_tlb. |
1240 | * |
1241 | * Second, the load from mem->pools must be ordered before the same |
1242 | * presumed store. This guarantees that the returned buffer address |
1243 | * cannot be observed by another CPU before an update of the RCU list |
1244 | * that was made by swiotlb_dyn_alloc() on a third CPU (cf. multicopy |
1245 | * atomicity). |
1246 | * |
1247 | * See also the comment in swiotlb_find_pool(). |
1248 | */ |
1249 | smp_mb(); |
1250 | |
1251 | *retpool = pool; |
1252 | return index; |
1253 | } |
1254 | |
1255 | #else /* !CONFIG_SWIOTLB_DYNAMIC */ |
1256 | |
1257 | static int swiotlb_find_slots(struct device *dev, phys_addr_t orig_addr, |
1258 | size_t alloc_size, unsigned int alloc_align_mask, |
1259 | struct io_tlb_pool **retpool) |
1260 | { |
1261 | struct io_tlb_pool *pool; |
1262 | int start, i; |
1263 | int index; |
1264 | |
1265 | *retpool = pool = &dev->dma_io_tlb_mem->defpool; |
1266 | i = start = raw_smp_processor_id() & (pool->nareas - 1); |
1267 | do { |
1268 | index = swiotlb_search_pool_area(dev, pool, i, orig_addr, |
1269 | alloc_size, alloc_align_mask); |
1270 | if (index >= 0) |
1271 | return index; |
1272 | if (++i >= pool->nareas) |
1273 | i = 0; |
1274 | } while (i != start); |
1275 | return -1; |
1276 | } |
1277 | |
1278 | #endif /* CONFIG_SWIOTLB_DYNAMIC */ |
1279 | |
1280 | #ifdef CONFIG_DEBUG_FS |
1281 | |
1282 | /** |
1283 | * mem_used() - get number of used slots in an allocator |
1284 | * @mem: Software IO TLB allocator. |
1285 | * |
1286 | * The result is accurate in this version of the function, because an atomic |
1287 | * counter is available if CONFIG_DEBUG_FS is set. |
1288 | * |
1289 | * Return: Number of used slots. |
1290 | */ |
1291 | static unsigned long mem_used(struct io_tlb_mem *mem) |
1292 | { |
1293 | return atomic_long_read(&mem->total_used); |
1294 | } |
1295 | |
1296 | #else /* !CONFIG_DEBUG_FS */ |
1297 | |
1298 | /** |
1299 | * mem_pool_used() - get number of used slots in a memory pool |
1300 | * @pool: Software IO TLB memory pool. |
1301 | * |
1302 | * The result is not accurate, see mem_used(). |
1303 | * |
1304 | * Return: Approximate number of used slots. |
1305 | */ |
1306 | static unsigned long mem_pool_used(struct io_tlb_pool *pool) |
1307 | { |
1308 | int i; |
1309 | unsigned long used = 0; |
1310 | |
1311 | for (i = 0; i < pool->nareas; i++) |
1312 | used += pool->areas[i].used; |
1313 | return used; |
1314 | } |
1315 | |
1316 | /** |
1317 | * mem_used() - get number of used slots in an allocator |
1318 | * @mem: Software IO TLB allocator. |
1319 | * |
1320 | * The result is not accurate, because there is no locking of individual |
1321 | * areas. |
1322 | * |
1323 | * Return: Approximate number of used slots. |
1324 | */ |
1325 | static unsigned long mem_used(struct io_tlb_mem *mem) |
1326 | { |
1327 | #ifdef CONFIG_SWIOTLB_DYNAMIC |
1328 | struct io_tlb_pool *pool; |
1329 | unsigned long used = 0; |
1330 | |
1331 | rcu_read_lock(); |
1332 | list_for_each_entry_rcu(pool, &mem->pools, node) |
1333 | used += mem_pool_used(pool); |
1334 | rcu_read_unlock(); |
1335 | |
1336 | return used; |
1337 | #else |
1338 | return mem_pool_used(&mem->defpool); |
1339 | #endif |
1340 | } |
1341 | |
1342 | #endif /* CONFIG_DEBUG_FS */ |
1343 | |
1344 | /** |
1345 | * swiotlb_tbl_map_single() - bounce buffer map a single contiguous physical area |
1346 | * @dev: Device which maps the buffer. |
1347 | * @orig_addr: Original (non-bounced) physical IO buffer address |
1348 | * @mapping_size: Requested size of the actual bounce buffer, excluding |
1349 | * any pre- or post-padding for alignment |
1350 | * @alloc_align_mask: Required start and end alignment of the allocated buffer |
1351 | * @dir: DMA direction |
1352 | * @attrs: Optional DMA attributes for the map operation |
1353 | * |
1354 | * Find and allocate a suitable sequence of IO TLB slots for the request. |
1355 | * The allocated space starts at an alignment specified by alloc_align_mask, |
1356 | * and the size of the allocated space is rounded up so that the total amount |
1357 | * of allocated space is a multiple of (alloc_align_mask + 1). If |
1358 | * alloc_align_mask is zero, the allocated space may be at any alignment and |
1359 | * the size is not rounded up. |
1360 | * |
1361 | * The returned address is within the allocated space and matches the bits |
1362 | * of orig_addr that are specified in the DMA min_align_mask for the device. As |
1363 | * such, this returned address may be offset from the beginning of the allocated |
1364 | * space. The bounce buffer space starting at the returned address for |
1365 | * mapping_size bytes is initialized to the contents of the original IO buffer |
1366 | * area. Any pre-padding (due to an offset) and any post-padding (due to |
1367 | * rounding-up the size) is not initialized. |
1368 | */ |
1369 | phys_addr_t swiotlb_tbl_map_single(struct device *dev, phys_addr_t orig_addr, |
1370 | size_t mapping_size, unsigned int alloc_align_mask, |
1371 | enum dma_data_direction dir, unsigned long attrs) |
1372 | { |
1373 | struct io_tlb_mem *mem = dev->dma_io_tlb_mem; |
1374 | unsigned int offset; |
1375 | struct io_tlb_pool *pool; |
1376 | unsigned int i; |
1377 | size_t size; |
1378 | int index; |
1379 | phys_addr_t tlb_addr; |
1380 | unsigned short pad_slots; |
1381 | |
1382 | if (!mem || !mem->nslabs) { |
1383 | dev_warn_ratelimited(dev, |
1384 | "Can not allocate SWIOTLB buffer earlier and can't now provide you with the DMA bounce buffer"); |
1385 | return (phys_addr_t)DMA_MAPPING_ERROR; |
1386 | } |
1387 | |
1388 | if (cc_platform_has(CC_ATTR_MEM_ENCRYPT)) |
1389 | pr_warn_once("Memory encryption is active and system is using DMA bounce buffers\n"); |
1390 | |
1391 | /* |
1392 | * The default swiotlb memory pool is allocated with PAGE_SIZE |
1393 | * alignment. If a mapping is requested with larger alignment, |
1394 | * the mapping may be unable to use the initial slot(s) in all |
1395 | * sets of IO_TLB_SEGSIZE slots. In such a case, a mapping request |
1396 | * of or near the maximum mapping size would always fail. |
1397 | */ |
1398 | dev_WARN_ONCE(dev, alloc_align_mask > ~PAGE_MASK, |
1399 | "Alloc alignment may prevent fulfilling requests with max mapping_size\n"); |
1400 | |
1401 | offset = swiotlb_align_offset(dev, alloc_align_mask, orig_addr); |
1402 | size = ALIGN(mapping_size + offset, alloc_align_mask + 1); |
1403 | index = swiotlb_find_slots(dev, orig_addr, size, alloc_align_mask, &pool); |
1404 | if (index == -1) { |
1405 | if (!(attrs & DMA_ATTR_NO_WARN)) |
1406 | dev_warn_ratelimited(dev, |
1407 | "swiotlb buffer is full (sz: %zd bytes), total %lu (slots), used %lu (slots)\n", |
1408 | size, mem->nslabs, mem_used(mem)); |
1409 | return (phys_addr_t)DMA_MAPPING_ERROR; |
1410 | } |
1411 | |
1412 | /* |
1413 | * If dma_skip_sync was set, reset it on first SWIOTLB buffer |
1414 | * mapping to always sync SWIOTLB buffers. |
1415 | */ |
1416 | dma_reset_need_sync(dev); |
1417 | |
1418 | /* |
1419 | * Save away the mapping from the original address to the DMA address. |
1420 | * This is needed when we sync the memory. Then we sync the buffer if |
1421 | * needed. |
1422 | */ |
1423 | pad_slots = offset >> IO_TLB_SHIFT; |
1424 | offset &= (IO_TLB_SIZE - 1); |
1425 | index += pad_slots; |
1426 | pool->slots[index].pad_slots = pad_slots; |
1427 | for (i = 0; i < (nr_slots(size) - pad_slots); i++) |
1428 | pool->slots[index + i].orig_addr = slot_addr(orig_addr, i); |
1429 | tlb_addr = slot_addr(pool->start, index) + offset; |
1430 | /* |
1431 | * When the device is writing memory, i.e. dir == DMA_FROM_DEVICE, copy |
1432 | * the original buffer to the TLB buffer before initiating DMA in order |
1433 | * to preserve the original's data if the device does a partial write, |
1434 | * i.e. if the device doesn't overwrite the entire buffer. Preserving |
1435 | * the original data, even if it's garbage, is necessary to match |
1436 | * hardware behavior. Use of swiotlb is supposed to be transparent, |
1437 | * i.e. swiotlb must not corrupt memory by clobbering unwritten bytes. |
1438 | */ |
1439 | swiotlb_bounce(dev, tlb_addr, mapping_size, DMA_TO_DEVICE, pool); |
1440 | return tlb_addr; |
1441 | } |
1442 | |
1443 | static void swiotlb_release_slots(struct device *dev, phys_addr_t tlb_addr, |
1444 | struct io_tlb_pool *mem) |
1445 | { |
1446 | unsigned long flags; |
1447 | unsigned int offset = swiotlb_align_offset(dev, 0, tlb_addr); |
1448 | int index, nslots, aindex; |
1449 | struct io_tlb_area *area; |
1450 | int count, i; |
1451 | |
1452 | index = (tlb_addr - offset - mem->start) >> IO_TLB_SHIFT; |
1453 | index -= mem->slots[index].pad_slots; |
1454 | nslots = nr_slots(mem->slots[index].alloc_size + offset); |
1455 | aindex = index / mem->area_nslabs; |
1456 | area = &mem->areas[aindex]; |
1457 | |
1458 | /* |
1459 | * Return the buffer to the free list by setting the corresponding |
1460 | * entries to indicate the number of contiguous entries available. |
1461 | * While returning the entries to the free list, we merge the entries |
1462 | * with slots below and above the pool being returned. |
1463 | */ |
1464 | BUG_ON(aindex >= mem->nareas); |
1465 | |
1466 | spin_lock_irqsave(&area->lock, flags); |
1467 | if (index + nslots < ALIGN(index + 1, IO_TLB_SEGSIZE)) |
1468 | count = mem->slots[index + nslots].list; |
1469 | else |
1470 | count = 0; |
1471 | |
1472 | /* |
1473 | * Step 1: return the slots to the free list, merging the slots with |
1474 | * succeeding slots |
1475 | */ |
1476 | for (i = index + nslots - 1; i >= index; i--) { |
1477 | mem->slots[i].list = ++count; |
1478 | mem->slots[i].orig_addr = INVALID_PHYS_ADDR; |
1479 | mem->slots[i].alloc_size = 0; |
1480 | mem->slots[i].pad_slots = 0; |
1481 | } |
1482 | |
1483 | /* |
1484 | * Step 2: merge the returned slots with the preceding slots, if |
1485 | * available (non-zero) |
1486 | */ |
1487 | for (i = index - 1; |
1488 | io_tlb_offset(i) != IO_TLB_SEGSIZE - 1 && mem->slots[i].list; |
1489 | i--) |
1490 | mem->slots[i].list = ++count; |
1491 | area->used -= nslots; |
1492 | spin_unlock_irqrestore(&area->lock, flags); |
1493 | |
1494 | dec_used(dev->dma_io_tlb_mem, nslots); |
1495 | } |
1496 | |
1497 | #ifdef CONFIG_SWIOTLB_DYNAMIC |
1498 | |
1499 | /** |
1500 | * swiotlb_del_transient() - delete a transient memory pool |
1501 | * @dev: Device which mapped the buffer. |
1502 | * @tlb_addr: Physical address within a bounce buffer. |
1503 | * @pool: Pointer to the transient memory pool to be checked and deleted. |
1504 | * |
1505 | * Check whether the address belongs to a transient SWIOTLB memory pool. |
1506 | * If yes, then delete the pool. |
1507 | * |
1508 | * Return: %true if @tlb_addr belonged to a transient pool that was released. |
1509 | */ |
1510 | static bool swiotlb_del_transient(struct device *dev, phys_addr_t tlb_addr, |
1511 | struct io_tlb_pool *pool) |
1512 | { |
1513 | if (!pool->transient) |
1514 | return false; |
1515 | |
1516 | dec_used(dev->dma_io_tlb_mem, pool->nslabs); |
1517 | swiotlb_del_pool(dev, pool); |
1518 | dec_transient_used(dev->dma_io_tlb_mem, pool->nslabs); |
1519 | return true; |
1520 | } |
1521 | |
1522 | #else /* !CONFIG_SWIOTLB_DYNAMIC */ |
1523 | |
1524 | static inline bool swiotlb_del_transient(struct device *dev, |
1525 | phys_addr_t tlb_addr, struct io_tlb_pool *pool) |
1526 | { |
1527 | return false; |
1528 | } |
1529 | |
1530 | #endif /* CONFIG_SWIOTLB_DYNAMIC */ |
1531 | |
1532 | /* |
1533 | * tlb_addr is the physical address of the bounce buffer to unmap. |
1534 | */ |
1535 | void __swiotlb_tbl_unmap_single(struct device *dev, phys_addr_t tlb_addr, |
1536 | size_t mapping_size, enum dma_data_direction dir, |
1537 | unsigned long attrs, struct io_tlb_pool *pool) |
1538 | { |
1539 | /* |
1540 | * First, sync the memory before unmapping the entry |
1541 | */ |
1542 | if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC) && |
1543 | (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)) |
1544 | swiotlb_bounce(dev, tlb_addr, size: mapping_size, |
1545 | dir: DMA_FROM_DEVICE, mem: pool); |
1546 | |
1547 | if (swiotlb_del_transient(dev, tlb_addr, pool)) |
1548 | return; |
1549 | swiotlb_release_slots(dev, tlb_addr, mem: pool); |
1550 | } |
1551 | |
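/**
 * __swiotlb_sync_single_for_device() - flush CPU writes to the bounce buffer
 * @dev: Device which mapped the buffer.
 * @tlb_addr: Physical address within the bounce buffer.
 * @size: Number of bytes to sync.
 * @dir: DMA direction of the original mapping.
 * @pool: Pool that owns the bounce buffer.
 *
 * Copy the CPU's data at the original address into the bounce buffer so the
 * device sees the latest writes before its next access.
 */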
void __swiotlb_sync_single_for_device(struct device *dev, phys_addr_t tlb_addr,
		size_t size, enum dma_data_direction dir,
		struct io_tlb_pool *pool)
{
	if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL)
		swiotlb_bounce(dev, tlb_addr, size, DMA_TO_DEVICE, pool);
	else
		BUG_ON(dir != DMA_FROM_DEVICE);
}

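/**
 * __swiotlb_sync_single_for_cpu() - make device writes visible to the CPU
 * @dev: Device which mapped the buffer.
 * @tlb_addr: Physical address within the bounce buffer.
 * @size: Number of bytes to sync.
 * @dir: DMA direction of the original mapping.
 * @pool: Pool that owns the bounce buffer.
 *
 * Copy data written by the device into the bounce buffer back to the
 * original address, so the CPU observes the device's updates.
 */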
void __swiotlb_sync_single_for_cpu(struct device *dev, phys_addr_t tlb_addr,
		size_t size, enum dma_data_direction dir,
		struct io_tlb_pool *pool)
{
	if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
		swiotlb_bounce(dev, tlb_addr, size, DMA_FROM_DEVICE, pool);
	else
		BUG_ON(dir != DMA_TO_DEVICE);
}

/*
 * Create a swiotlb mapping for the buffer at @paddr, and in case of DMAing
 * to the device copy the data into it as well.
 */
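/*
 * Illustrative caller sketch (hypothetical driver-side code, not part of
 * this file): a mapping path that cannot address @paddr directly would do
 * something like
 *
 *	dma_addr_t dma = swiotlb_map(dev, paddr, len, DMA_TO_DEVICE, attrs);
 *
 *	if (dma == DMA_MAPPING_ERROR)
 *		return DMA_MAPPING_ERROR;
 *
 * and unmap the bounce buffer again through the swiotlb unmap path once the
 * transfer completes.
 */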
dma_addr_t swiotlb_map(struct device *dev, phys_addr_t paddr, size_t size,
		enum dma_data_direction dir, unsigned long attrs)
{
	phys_addr_t swiotlb_addr;
	dma_addr_t dma_addr;

	trace_swiotlb_bounced(dev, phys_to_dma(dev, paddr), size);

	swiotlb_addr = swiotlb_tbl_map_single(dev, paddr, size, 0, dir, attrs);
	if (swiotlb_addr == (phys_addr_t)DMA_MAPPING_ERROR)
		return DMA_MAPPING_ERROR;

	/* Ensure that the address returned is DMA'ble */
	dma_addr = phys_to_dma_unencrypted(dev, swiotlb_addr);
	if (unlikely(!dma_capable(dev, dma_addr, size, true))) {
		__swiotlb_tbl_unmap_single(dev, swiotlb_addr, size, dir,
			attrs | DMA_ATTR_SKIP_CPU_SYNC,
			swiotlb_find_pool(dev, swiotlb_addr));
		dev_WARN_ONCE(dev, 1,
			"swiotlb addr %pad+%zu overflow (mask %llx, bus limit %llx).\n",
			&dma_addr, size, *dev->dma_mask, dev->bus_dma_limit);
		return DMA_MAPPING_ERROR;
	}

	if (!dev_is_dma_coherent(dev) && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
		arch_sync_dma_for_device(swiotlb_addr, size, dir);
	return dma_addr;
}

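/**
 * swiotlb_max_mapping_size() - largest mapping the software IO TLB can honor
 * @dev: Device for which the limit is queried.
 *
 * The limit is one IO_TLB_SEGSIZE run of IO_TLB_SIZE slots, minus any slack
 * lost to the device's DMA min-align mask.  For illustration only (assuming
 * the usual IO_TLB_SHIFT of 11, i.e. 2 KiB slots, and IO_TLB_SEGSIZE of
 * 128): a device with no min-align mask can map up to 256 KiB, while a
 * 4 KiB min-align mask (mask value 0xfff) reduces that to 252 KiB.
 */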
size_t swiotlb_max_mapping_size(struct device *dev)
{
	int min_align_mask = dma_get_min_align_mask(dev);
	int min_align = 0;

	/*
	 * swiotlb_find_slots() skips slots according to
	 * min align mask. This affects max mapping size.
	 * Take it into account here.
	 */
	if (min_align_mask)
		min_align = roundup(min_align_mask, IO_TLB_SIZE);

	return ((size_t)IO_TLB_SIZE) * IO_TLB_SEGSIZE - min_align;
}

/**
 * is_swiotlb_allocated() - check if the default software IO TLB is initialized
 */
bool is_swiotlb_allocated(void)
{
	return io_tlb_default_mem.nslabs;
}

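/**
 * is_swiotlb_active() - check whether @dev has a usable software IO TLB
 * @dev: Device to check.
 *
 * Return: %true if the device is attached to an io_tlb_mem with at least
 * one allocated slab.
 */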
bool is_swiotlb_active(struct device *dev)
{
	struct io_tlb_mem *mem = dev->dma_io_tlb_mem;

	return mem && mem->nslabs;
}

/**
 * default_swiotlb_base() - get the base address of the default SWIOTLB
 *
 * Get the lowest physical address used by the default software IO TLB pool.
 */
phys_addr_t default_swiotlb_base(void)
{
#ifdef CONFIG_SWIOTLB_DYNAMIC
	io_tlb_default_mem.can_grow = false;
#endif
	return io_tlb_default_mem.defpool.start;
}

/**
 * default_swiotlb_limit() - get the address limit of the default SWIOTLB
 *
 * Get the highest physical address used by the default software IO TLB pool.
 */
phys_addr_t default_swiotlb_limit(void)
{
#ifdef CONFIG_SWIOTLB_DYNAMIC
	return io_tlb_default_mem.phys_limit;
#else
	return io_tlb_default_mem.defpool.end - 1;
#endif
}

#ifdef CONFIG_DEBUG_FS
#ifdef CONFIG_SWIOTLB_DYNAMIC
static unsigned long mem_transient_used(struct io_tlb_mem *mem)
{
	return atomic_long_read(&mem->transient_nslabs);
}

static int io_tlb_transient_used_get(void *data, u64 *val)
{
	struct io_tlb_mem *mem = data;

	*val = mem_transient_used(mem);
	return 0;
}

DEFINE_DEBUGFS_ATTRIBUTE(fops_io_tlb_transient_used, io_tlb_transient_used_get,
			 NULL, "%llu\n");
#endif /* CONFIG_SWIOTLB_DYNAMIC */

static int io_tlb_used_get(void *data, u64 *val)
{
	struct io_tlb_mem *mem = data;

	*val = mem_used(mem);
	return 0;
}

static int io_tlb_hiwater_get(void *data, u64 *val)
{
	struct io_tlb_mem *mem = data;

	*val = atomic_long_read(&mem->used_hiwater);
	return 0;
}

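/*
 * Writing zero resets the recorded high-water mark so a new measurement
 * window can start.  For example (assuming debugfs is mounted at
 * /sys/kernel/debug and the default "swiotlb" directory is in use):
 *
 *	echo 0 > /sys/kernel/debug/swiotlb/io_tlb_used_hiwater
 */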
static int io_tlb_hiwater_set(void *data, u64 val)
{
	struct io_tlb_mem *mem = data;

	/* Only allow setting to zero */
	if (val != 0)
		return -EINVAL;

	atomic_long_set(&mem->used_hiwater, val);
	return 0;
}

DEFINE_DEBUGFS_ATTRIBUTE(fops_io_tlb_used, io_tlb_used_get, NULL, "%llu\n");
DEFINE_DEBUGFS_ATTRIBUTE(fops_io_tlb_hiwater, io_tlb_hiwater_get,
			 io_tlb_hiwater_set, "%llu\n");

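/*
 * Create the per-io_tlb_mem debugfs directory and its counters.  For the
 * default pool @dirname is "swiotlb"; restricted DMA pools reuse the name
 * of their reserved-memory region and nest under the default directory.
 */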
static void swiotlb_create_debugfs_files(struct io_tlb_mem *mem,
					 const char *dirname)
{
	mem->debugfs = debugfs_create_dir(dirname, io_tlb_default_mem.debugfs);
	if (!mem->nslabs)
		return;

	debugfs_create_ulong("io_tlb_nslabs", 0400, mem->debugfs, &mem->nslabs);
	debugfs_create_file("io_tlb_used", 0400, mem->debugfs, mem,
			&fops_io_tlb_used);
	debugfs_create_file("io_tlb_used_hiwater", 0600, mem->debugfs, mem,
			&fops_io_tlb_hiwater);
#ifdef CONFIG_SWIOTLB_DYNAMIC
	debugfs_create_file("io_tlb_transient_nslabs", 0400, mem->debugfs,
			    mem, &fops_io_tlb_transient_used);
#endif
}

static int __init swiotlb_create_default_debugfs(void)
{
	swiotlb_create_debugfs_files(&io_tlb_default_mem, "swiotlb");
	return 0;
}

late_initcall(swiotlb_create_default_debugfs);

#else /* !CONFIG_DEBUG_FS */

static inline void swiotlb_create_debugfs_files(struct io_tlb_mem *mem,
						const char *dirname)
{
}

#endif /* CONFIG_DEBUG_FS */

#ifdef CONFIG_DMA_RESTRICTED_POOL

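/**
 * swiotlb_alloc() - allocate pages from the restricted DMA pool of @dev
 * @dev: Device the allocation is made for.
 * @size: Requested allocation size in bytes.
 *
 * Used when the device must satisfy buffer allocations from its restricted
 * DMA pool.  Returns the first page of a page-aligned bounce-buffer region,
 * or %NULL if no suitable slots are available.
 */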
struct page *swiotlb_alloc(struct device *dev, size_t size)
{
	struct io_tlb_mem *mem = dev->dma_io_tlb_mem;
	struct io_tlb_pool *pool;
	phys_addr_t tlb_addr;
	unsigned int align;
	int index;

	if (!mem)
		return NULL;

	align = (1 << (get_order(size) + PAGE_SHIFT)) - 1;
	index = swiotlb_find_slots(dev, 0, size, align, &pool);
	if (index == -1)
		return NULL;

	tlb_addr = slot_addr(pool->start, index);
	if (unlikely(!PAGE_ALIGNED(tlb_addr))) {
		dev_WARN_ONCE(dev, 1, "Cannot allocate pages from non page-aligned swiotlb addr 0x%pa.\n",
			      &tlb_addr);
		swiotlb_release_slots(dev, tlb_addr, pool);
		return NULL;
	}

	return pfn_to_page(PFN_DOWN(tlb_addr));
}

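/**
 * swiotlb_free() - free pages previously obtained from swiotlb_alloc()
 * @dev: Device that owns the allocation.
 * @page: First page of the allocation.
 * @size: Size of the original allocation in bytes.
 *
 * Return: %true if the pages belonged to a software IO TLB pool and were
 * released, %false if the allocation did not come from swiotlb.
 */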
bool swiotlb_free(struct device *dev, struct page *page, size_t size)
{
	phys_addr_t tlb_addr = page_to_phys(page);
	struct io_tlb_pool *pool;

	pool = swiotlb_find_pool(dev, tlb_addr);
	if (!pool)
		return false;

	swiotlb_release_slots(dev, tlb_addr, pool);

	return true;
}

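/**
 * rmem_swiotlb_device_init() - attach a device to a restricted DMA pool
 * @rmem: Reserved-memory region declared as a restricted DMA pool.
 * @dev: Device being attached.
 *
 * Set up the shared io_tlb_mem backing @rmem on first use and point
 * @dev->dma_io_tlb_mem at it, so the device's streaming DMA is bounced
 * through the restricted pool.
 */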
static int rmem_swiotlb_device_init(struct reserved_mem *rmem,
				    struct device *dev)
{
	struct io_tlb_mem *mem = rmem->priv;
	unsigned long nslabs = rmem->size >> IO_TLB_SHIFT;

	/* Set the per-device IO TLB area count to one */
	unsigned int nareas = 1;

	if (PageHighMem(pfn_to_page(PHYS_PFN(rmem->base)))) {
		dev_err(dev, "Restricted DMA pool must be accessible within the linear mapping.");
		return -EINVAL;
	}

	/*
	 * Since multiple devices can share the same pool, the private data,
	 * io_tlb_mem struct, will be initialized by the first device attached
	 * to it.
	 */
	if (!mem) {
		struct io_tlb_pool *pool;

		mem = kzalloc(sizeof(*mem), GFP_KERNEL);
		if (!mem)
			return -ENOMEM;
		pool = &mem->defpool;

		pool->slots = kcalloc(nslabs, sizeof(*pool->slots), GFP_KERNEL);
		if (!pool->slots) {
			kfree(mem);
			return -ENOMEM;
		}

		pool->areas = kcalloc(nareas, sizeof(*pool->areas),
				GFP_KERNEL);
		if (!pool->areas) {
			kfree(pool->slots);
			kfree(mem);
			return -ENOMEM;
		}

		set_memory_decrypted((unsigned long)phys_to_virt(rmem->base),
				     rmem->size >> PAGE_SHIFT);
		swiotlb_init_io_tlb_pool(pool, rmem->base, nslabs,
					 false, nareas);
		mem->force_bounce = true;
		mem->for_alloc = true;
#ifdef CONFIG_SWIOTLB_DYNAMIC
		spin_lock_init(&mem->lock);
		INIT_LIST_HEAD_RCU(&mem->pools);
#endif
		add_mem_pool(mem, pool);

		rmem->priv = mem;

		swiotlb_create_debugfs_files(mem, rmem->name);
	}

	dev->dma_io_tlb_mem = mem;

	return 0;
}

static void rmem_swiotlb_device_release(struct reserved_mem *rmem,
					struct device *dev)
{
	dev->dma_io_tlb_mem = &io_tlb_default_mem;
}

static const struct reserved_mem_ops rmem_swiotlb_ops = {
	.device_init = rmem_swiotlb_device_init,
	.device_release = rmem_swiotlb_device_release,
};

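/*
 * Illustrative device tree usage (node names, label and addresses are made
 * up for this sketch):
 *
 *	reserved-memory {
 *		#address-cells = <1>;
 *		#size-cells = <1>;
 *		ranges;
 *
 *		restricted_dma: restricted-dma@50000000 {
 *			compatible = "restricted-dma-pool";
 *			reg = <0x50000000 0x400000>;
 *		};
 *	};
 *
 *	some_device: device@10000000 {
 *		memory-region = <&restricted_dma>;
 *	};
 *
 * A device referencing such a region has its dma_io_tlb_mem pointed at the
 * pool by rmem_swiotlb_device_init() above.
 */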
static int __init rmem_swiotlb_setup(struct reserved_mem *rmem)
{
	unsigned long node = rmem->fdt_node;

	if (of_get_flat_dt_prop(node, "reusable", NULL) ||
	    of_get_flat_dt_prop(node, "linux,cma-default", NULL) ||
	    of_get_flat_dt_prop(node, "linux,dma-default", NULL) ||
	    of_get_flat_dt_prop(node, "no-map", NULL))
		return -EINVAL;

	rmem->ops = &rmem_swiotlb_ops;
	pr_info("Reserved memory: created restricted DMA pool at %pa, size %ld MiB\n",
		&rmem->base, (unsigned long)rmem->size / SZ_1M);
	return 0;
}

RESERVEDMEM_OF_DECLARE(dma, "restricted-dma-pool", rmem_swiotlb_setup);
#endif /* CONFIG_DMA_RESTRICTED_POOL */