// SPDX-License-Identifier: GPL-2.0
/*
 * arch-independent dma-mapping routines
 *
 * Copyright (c) 2006 SUSE Linux Products GmbH
 * Copyright (c) 2006 Tejun Heo <teheo@suse.de>
 */
#include <linux/memblock.h> /* for max_pfn */
#include <linux/acpi.h>
#include <linux/dma-map-ops.h>
#include <linux/export.h>
#include <linux/gfp.h>
#include <linux/kmsan.h>
#include <linux/of_device.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include "debug.h"
#include "direct.h"

#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE) || \
	defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU) || \
	defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL)
bool dma_default_coherent = IS_ENABLED(CONFIG_ARCH_DMA_DEFAULT_COHERENT);
#endif

/*
 * Managed DMA API
 */
struct dma_devres {
	size_t		size;
	void		*vaddr;
	dma_addr_t	dma_handle;
	unsigned long	attrs;
};

static void dmam_release(struct device *dev, void *res)
{
	struct dma_devres *this = res;

	dma_free_attrs(dev, this->size, this->vaddr, this->dma_handle,
			this->attrs);
}

static int dmam_match(struct device *dev, void *res, void *match_data)
{
	struct dma_devres *this = res, *match = match_data;

	if (this->vaddr == match->vaddr) {
		WARN_ON(this->size != match->size ||
			this->dma_handle != match->dma_handle);
		return 1;
	}
	return 0;
}

/**
 * dmam_free_coherent - Managed dma_free_coherent()
 * @dev: Device to free coherent memory for
 * @size: Size of allocation
 * @vaddr: Virtual address of the memory to free
 * @dma_handle: DMA handle of the memory to free
 *
 * Managed dma_free_coherent().
 */
void dmam_free_coherent(struct device *dev, size_t size, void *vaddr,
			dma_addr_t dma_handle)
{
	struct dma_devres match_data = { size, vaddr, dma_handle };

	dma_free_coherent(dev, size, vaddr, dma_handle);
	WARN_ON(devres_destroy(dev, dmam_release, dmam_match, &match_data));
}
EXPORT_SYMBOL(dmam_free_coherent);

/**
 * dmam_alloc_attrs - Managed dma_alloc_attrs()
 * @dev: Device to allocate non_coherent memory for
 * @size: Size of allocation
 * @dma_handle: Out argument for allocated DMA handle
 * @gfp: Allocation flags
 * @attrs: Flags in the DMA_ATTR_* namespace.
 *
 * Managed dma_alloc_attrs().  Memory allocated using this function will be
 * automatically released on driver detach.
 *
 * RETURNS:
 * Pointer to allocated memory on success, NULL on failure.
 */
void *dmam_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle,
		gfp_t gfp, unsigned long attrs)
{
	struct dma_devres *dr;
	void *vaddr;

	dr = devres_alloc(dmam_release, sizeof(*dr), gfp);
	if (!dr)
		return NULL;

	vaddr = dma_alloc_attrs(dev, size, dma_handle, gfp, attrs);
	if (!vaddr) {
		devres_free(dr);
		return NULL;
	}

	dr->vaddr = vaddr;
	dr->dma_handle = *dma_handle;
	dr->size = size;
	dr->attrs = attrs;

	devres_add(dev, dr);

	return vaddr;
}
EXPORT_SYMBOL(dmam_alloc_attrs);
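
/*
 * Usage sketch (illustrative only, not compiled here): a driver that wants
 * its coherent buffer torn down automatically on detach can allocate it in
 * probe and never free it explicitly.  "foo" and FOO_BUF_SIZE below are
 * made-up placeholders, not symbols defined anywhere in the tree.
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		struct foo *foo = platform_get_drvdata(pdev);
 *
 *		foo->buf = dmam_alloc_attrs(&pdev->dev, FOO_BUF_SIZE,
 *					    &foo->buf_dma, GFP_KERNEL, 0);
 *		if (!foo->buf)
 *			return -ENOMEM;
 *		return 0;
 *	}
 *
 * The buffer is released by devres when the driver unbinds.
 */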

static bool dma_go_direct(struct device *dev, dma_addr_t mask,
		const struct dma_map_ops *ops)
{
	if (likely(!ops))
		return true;
#ifdef CONFIG_DMA_OPS_BYPASS
	if (dev->dma_ops_bypass)
		return min_not_zero(mask, dev->bus_dma_limit) >=
			dma_direct_get_required_mask(dev);
#endif
	return false;
}


/*
 * Check if the device uses a direct mapping for streaming DMA operations.
 * This allows IOMMU drivers to set a bypass mode if the DMA mask is large
 * enough.
 */
static inline bool dma_alloc_direct(struct device *dev,
		const struct dma_map_ops *ops)
{
	return dma_go_direct(dev, dev->coherent_dma_mask, ops);
}

static inline bool dma_map_direct(struct device *dev,
		const struct dma_map_ops *ops)
{
	return dma_go_direct(dev, *dev->dma_mask, ops);
}

dma_addr_t dma_map_page_attrs(struct device *dev, struct page *page,
		size_t offset, size_t size, enum dma_data_direction dir,
		unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	dma_addr_t addr;

	BUG_ON(!valid_dma_direction(dir));

	if (WARN_ON_ONCE(!dev->dma_mask))
		return DMA_MAPPING_ERROR;

	if (dma_map_direct(dev, ops) ||
	    arch_dma_map_page_direct(dev, page_to_phys(page) + offset + size))
		addr = dma_direct_map_page(dev, page, offset, size, dir, attrs);
	else
		addr = ops->map_page(dev, page, offset, size, dir, attrs);
	kmsan_handle_dma(page, offset, size, dir);
	debug_dma_map_page(dev, page, offset, size, dir, addr, attrs);

	return addr;
}
EXPORT_SYMBOL(dma_map_page_attrs);

void dma_unmap_page_attrs(struct device *dev, dma_addr_t addr, size_t size,
		enum dma_data_direction dir, unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (dma_map_direct(dev, ops) ||
	    arch_dma_unmap_page_direct(dev, addr + size))
		dma_direct_unmap_page(dev, addr, size, dir, attrs);
	else if (ops->unmap_page)
		ops->unmap_page(dev, addr, size, dir, attrs);
	debug_dma_unmap_page(dev, addr, size, dir);
}
EXPORT_SYMBOL(dma_unmap_page_attrs);

static int __dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
	 int nents, enum dma_data_direction dir, unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	int ents;

	BUG_ON(!valid_dma_direction(dir));

	if (WARN_ON_ONCE(!dev->dma_mask))
		return 0;

	if (dma_map_direct(dev, ops) ||
	    arch_dma_map_sg_direct(dev, sg, nents))
		ents = dma_direct_map_sg(dev, sg, nents, dir, attrs);
	else
		ents = ops->map_sg(dev, sg, nents, dir, attrs);

	if (ents > 0) {
		kmsan_handle_dma_sg(sg, nents, dir);
		debug_dma_map_sg(dev, sg, nents, ents, dir, attrs);
	} else if (WARN_ON_ONCE(ents != -EINVAL && ents != -ENOMEM &&
				ents != -EIO && ents != -EREMOTEIO)) {
		return -EIO;
	}

	return ents;
}

/**
 * dma_map_sg_attrs - Map the given buffer for DMA
 * @dev: The device for which to perform the DMA operation
 * @sg: The scatterlist describing the buffer
 * @nents: Number of entries to map
 * @dir: DMA direction
 * @attrs: Optional DMA attributes for the map operation
 *
 * Maps a buffer described by a scatterlist passed in the sg argument with
 * nents segments for the @dir DMA operation by the @dev device.
 *
 * Returns the number of mapped entries (which can be less than nents)
 * on success. Zero is returned for any error.
 *
 * dma_unmap_sg_attrs() should be used to unmap the buffer with the
 * original sg and original nents (not the value returned by this function).
 */
unsigned int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
		    int nents, enum dma_data_direction dir, unsigned long attrs)
{
	int ret;

	ret = __dma_map_sg_attrs(dev, sg, nents, dir, attrs);
	if (ret < 0)
		return 0;
	return ret;
}
EXPORT_SYMBOL(dma_map_sg_attrs);
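
/*
 * Usage sketch (illustrative only): the value returned by dma_map_sg_attrs()
 * tells the caller how many DMA segments to program, but the unmap call must
 * still be passed the original nents.  "dev", "sgl" and "nents" stand for
 * whatever the caller used when building the scatterlist.
 *
 *	int mapped = dma_map_sg_attrs(dev, sgl, nents, DMA_TO_DEVICE, 0);
 *
 *	if (!mapped)
 *		return -EIO;
 *	... program the hardware with the first "mapped" segments ...
 *	dma_unmap_sg_attrs(dev, sgl, nents, DMA_TO_DEVICE, 0);
 */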

/**
 * dma_map_sgtable - Map the given buffer for DMA
 * @dev: The device for which to perform the DMA operation
 * @sgt: The sg_table object describing the buffer
 * @dir: DMA direction
 * @attrs: Optional DMA attributes for the map operation
 *
 * Maps a buffer described by a scatterlist stored in the given sg_table
 * object for the @dir DMA operation by the @dev device. After success, the
 * ownership for the buffer is transferred to the DMA domain.  One has to
 * call dma_sync_sgtable_for_cpu() or dma_unmap_sgtable() to move the
 * ownership of the buffer back to the CPU domain before touching the
 * buffer by the CPU.
 *
 * Returns 0 on success or a negative error code on error. The following
 * error codes are supported with the given meaning:
 *
 *   -EINVAL		An invalid argument, unaligned access or other error
 *			in usage. Will not succeed if retried.
 *   -ENOMEM		Insufficient resources (like memory or IOVA space) to
 *			complete the mapping. Should succeed if retried later.
 *   -EIO		Legacy error code with an unknown meaning. E.g. this is
 *			returned if a lower level call returned
 *			DMA_MAPPING_ERROR.
 *   -EREMOTEIO	The DMA device cannot access P2PDMA memory specified
 *			in the sg_table. This will not succeed if retried.
 */
int dma_map_sgtable(struct device *dev, struct sg_table *sgt,
		    enum dma_data_direction dir, unsigned long attrs)
{
	int nents;

	nents = __dma_map_sg_attrs(dev, sgt->sgl, sgt->orig_nents, dir, attrs);
	if (nents < 0)
		return nents;
	sgt->nents = nents;
	return 0;
}
EXPORT_SYMBOL_GPL(dma_map_sgtable);
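
/*
 * Usage sketch (illustrative only): callers of dma_map_sgtable() can key
 * their recovery off the error codes documented above, e.g. treat -ENOMEM
 * as "back off and retry later" and everything else as a permanent failure:
 *
 *	ret = dma_map_sgtable(dev, sgt, DMA_BIDIRECTIONAL, 0);
 *	if (ret == -ENOMEM)
 *		goto defer_and_retry;
 *	if (ret)
 *		goto fail;
 *	...
 *	dma_unmap_sgtable(dev, sgt, DMA_BIDIRECTIONAL, 0);
 */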

void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg,
				      int nents, enum dma_data_direction dir,
				      unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	debug_dma_unmap_sg(dev, sg, nents, dir);
	if (dma_map_direct(dev, ops) ||
	    arch_dma_unmap_sg_direct(dev, sg, nents))
		dma_direct_unmap_sg(dev, sg, nents, dir, attrs);
	else if (ops->unmap_sg)
		ops->unmap_sg(dev, sg, nents, dir, attrs);
}
EXPORT_SYMBOL(dma_unmap_sg_attrs);

dma_addr_t dma_map_resource(struct device *dev, phys_addr_t phys_addr,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	dma_addr_t addr = DMA_MAPPING_ERROR;

	BUG_ON(!valid_dma_direction(dir));

	if (WARN_ON_ONCE(!dev->dma_mask))
		return DMA_MAPPING_ERROR;

	if (dma_map_direct(dev, ops))
		addr = dma_direct_map_resource(dev, phys_addr, size, dir, attrs);
	else if (ops->map_resource)
		addr = ops->map_resource(dev, phys_addr, size, dir, attrs);

	debug_dma_map_resource(dev, phys_addr, size, dir, addr, attrs);
	return addr;
}
EXPORT_SYMBOL(dma_map_resource);

void dma_unmap_resource(struct device *dev, dma_addr_t addr, size_t size,
		enum dma_data_direction dir, unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (!dma_map_direct(dev, ops) && ops->unmap_resource)
		ops->unmap_resource(dev, addr, size, dir, attrs);
	debug_dma_unmap_resource(dev, addr, size, dir);
}
EXPORT_SYMBOL(dma_unmap_resource);

void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr, size_t size,
		enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (dma_map_direct(dev, ops))
		dma_direct_sync_single_for_cpu(dev, addr, size, dir);
	else if (ops->sync_single_for_cpu)
		ops->sync_single_for_cpu(dev, addr, size, dir);
	debug_dma_sync_single_for_cpu(dev, addr, size, dir);
}
EXPORT_SYMBOL(dma_sync_single_for_cpu);

void dma_sync_single_for_device(struct device *dev, dma_addr_t addr,
		size_t size, enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (dma_map_direct(dev, ops))
		dma_direct_sync_single_for_device(dev, addr, size, dir);
	else if (ops->sync_single_for_device)
		ops->sync_single_for_device(dev, addr, size, dir);
	debug_dma_sync_single_for_device(dev, addr, size, dir);
}
EXPORT_SYMBOL(dma_sync_single_for_device);

void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
		    int nelems, enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (dma_map_direct(dev, ops))
		dma_direct_sync_sg_for_cpu(dev, sg, nelems, dir);
	else if (ops->sync_sg_for_cpu)
		ops->sync_sg_for_cpu(dev, sg, nelems, dir);
	debug_dma_sync_sg_for_cpu(dev, sg, nelems, dir);
}
EXPORT_SYMBOL(dma_sync_sg_for_cpu);

void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
		       int nelems, enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (dma_map_direct(dev, ops))
		dma_direct_sync_sg_for_device(dev, sg, nelems, dir);
	else if (ops->sync_sg_for_device)
		ops->sync_sg_for_device(dev, sg, nelems, dir);
	debug_dma_sync_sg_for_device(dev, sg, nelems, dir);
}
EXPORT_SYMBOL(dma_sync_sg_for_device);

/*
 * The whole dma_get_sgtable() idea is fundamentally unsafe - it seems
 * that the intention is to allow exporting memory allocated via the
 * coherent DMA APIs through the dma_buf API, which only accepts a
 * scattertable.  This presents a couple of problems:
 * 1. Not all memory allocated via the coherent DMA APIs is backed by
 *    a struct page
 * 2. Passing coherent DMA memory into the streaming APIs is not allowed
 *    as we will try to flush the memory through a different alias to that
 *    actually being used (and the flushes are redundant.)
 */
int dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (dma_alloc_direct(dev, ops))
		return dma_direct_get_sgtable(dev, sgt, cpu_addr, dma_addr,
				size, attrs);
	if (!ops->get_sgtable)
		return -ENXIO;
	return ops->get_sgtable(dev, sgt, cpu_addr, dma_addr, size, attrs);
}
EXPORT_SYMBOL(dma_get_sgtable_attrs);

#ifdef CONFIG_MMU
/*
 * Return the page attributes used for mapping dma_alloc_* memory, either in
 * kernel space if remapping is needed, or to userspace through dma_mmap_*.
 */
pgprot_t dma_pgprot(struct device *dev, pgprot_t prot, unsigned long attrs)
{
	if (dev_is_dma_coherent(dev))
		return prot;
#ifdef CONFIG_ARCH_HAS_DMA_WRITE_COMBINE
	if (attrs & DMA_ATTR_WRITE_COMBINE)
		return pgprot_writecombine(prot);
#endif
	return pgprot_dmacoherent(prot);
}
#endif /* CONFIG_MMU */

/**
 * dma_can_mmap - check if a given device supports dma_mmap_*
 * @dev: device to check
 *
 * Returns %true if @dev supports dma_mmap_coherent() and dma_mmap_attrs() to
 * map DMA allocations to userspace.
 */
bool dma_can_mmap(struct device *dev)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (dma_alloc_direct(dev, ops))
		return dma_direct_can_mmap(dev);
	return ops->mmap != NULL;
}
EXPORT_SYMBOL_GPL(dma_can_mmap);

/**
 * dma_mmap_attrs - map a coherent DMA allocation into user space
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @vma: vm_area_struct describing requested user mapping
 * @cpu_addr: kernel CPU-view address returned from dma_alloc_attrs
 * @dma_addr: device-view address returned from dma_alloc_attrs
 * @size: size of memory originally requested in dma_alloc_attrs
 * @attrs: attributes of mapping properties requested in dma_alloc_attrs
 *
 * Map a coherent DMA buffer previously allocated by dma_alloc_attrs into user
 * space.  The coherent DMA buffer must not be freed by the driver until the
 * user space mapping has been released.
 */
int dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (dma_alloc_direct(dev, ops))
		return dma_direct_mmap(dev, vma, cpu_addr, dma_addr, size,
				attrs);
	if (!ops->mmap)
		return -ENXIO;
	return ops->mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
}
EXPORT_SYMBOL(dma_mmap_attrs);
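
/*
 * Usage sketch (illustrative only): a driver exposing its coherent buffer
 * through an mmap() file operation would typically gate it on dma_can_mmap()
 * and forward to dma_mmap_attrs() (or the dma_mmap_coherent() wrapper).
 * "foo" is a made-up driver context, not a real symbol.
 *
 *	static int foo_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		struct foo *foo = file->private_data;
 *
 *		if (!dma_can_mmap(foo->dev))
 *			return -ENXIO;
 *		return dma_mmap_attrs(foo->dev, vma, foo->cpu_addr,
 *				      foo->dma_addr, foo->size, 0);
 *	}
 */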

u64 dma_get_required_mask(struct device *dev)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (dma_alloc_direct(dev, ops))
		return dma_direct_get_required_mask(dev);
	if (ops->get_required_mask)
		return ops->get_required_mask(dev);

	/*
	 * We require every DMA ops implementation to at least support a 32-bit
	 * DMA mask (and use bounce buffering if that isn't supported in
	 * hardware).  As the direct mapping code has its own routine to
	 * actually report an optimal mask we default to 32-bit here as that
	 * is the right thing for most IOMMUs, and at least not actively
	 * harmful in general.
	 */
	return DMA_BIT_MASK(32);
}
EXPORT_SYMBOL_GPL(dma_get_required_mask);

void *dma_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle,
		gfp_t flag, unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	void *cpu_addr;

	WARN_ON_ONCE(!dev->coherent_dma_mask);

	/*
	 * DMA allocations can never be turned back into a page pointer, so
	 * requesting compound pages doesn't make sense (and can't even be
	 * supported at all by various backends).
	 */
	if (WARN_ON_ONCE(flag & __GFP_COMP))
		return NULL;

	if (dma_alloc_from_dev_coherent(dev, size, dma_handle, &cpu_addr))
		return cpu_addr;

	/* let the implementation decide on the zone to allocate from: */
	flag &= ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM);

	if (dma_alloc_direct(dev, ops))
		cpu_addr = dma_direct_alloc(dev, size, dma_handle, flag, attrs);
	else if (ops->alloc)
		cpu_addr = ops->alloc(dev, size, dma_handle, flag, attrs);
	else
		return NULL;

	debug_dma_alloc_coherent(dev, size, *dma_handle, cpu_addr, attrs);
	return cpu_addr;
}
EXPORT_SYMBOL(dma_alloc_attrs);

void dma_free_attrs(struct device *dev, size_t size, void *cpu_addr,
		dma_addr_t dma_handle, unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (dma_release_from_dev_coherent(dev, get_order(size), cpu_addr))
		return;
	/*
	 * On non-coherent platforms which implement DMA-coherent buffers via
	 * non-cacheable remaps, ops->free() may call vunmap(). Thus getting
	 * this far in IRQ context is a) at risk of a BUG_ON() or trying to
	 * sleep on some machines, and b) an indication that the driver is
	 * probably misusing the coherent API anyway.
	 */
	WARN_ON(irqs_disabled());

	if (!cpu_addr)
		return;

	debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
	if (dma_alloc_direct(dev, ops))
		dma_direct_free(dev, size, cpu_addr, dma_handle, attrs);
	else if (ops->free)
		ops->free(dev, size, cpu_addr, dma_handle, attrs);
}
EXPORT_SYMBOL(dma_free_attrs);
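
/*
 * Usage sketch (illustrative only): dma_alloc_attrs() and dma_free_attrs()
 * are most often used through the dma_alloc_coherent()/dma_free_coherent()
 * wrappers, and every allocation must be paired with a free that passes the
 * same size and handles back.  "dev", "size", "buf" and "dma_handle" are
 * the caller's own variables.
 *
 *	buf = dma_alloc_coherent(dev, size, &dma_handle, GFP_KERNEL);
 *	if (!buf)
 *		return -ENOMEM;
 *	...
 *	dma_free_coherent(dev, size, buf, dma_handle);
 */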

static struct page *__dma_alloc_pages(struct device *dev, size_t size,
		dma_addr_t *dma_handle, enum dma_data_direction dir, gfp_t gfp)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (WARN_ON_ONCE(!dev->coherent_dma_mask))
		return NULL;
	if (WARN_ON_ONCE(gfp & (__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM)))
		return NULL;
	if (WARN_ON_ONCE(gfp & __GFP_COMP))
		return NULL;

	size = PAGE_ALIGN(size);
	if (dma_alloc_direct(dev, ops))
		return dma_direct_alloc_pages(dev, size, dma_handle, dir, gfp);
	if (!ops->alloc_pages)
		return NULL;
	return ops->alloc_pages(dev, size, dma_handle, dir, gfp);
}

struct page *dma_alloc_pages(struct device *dev, size_t size,
		dma_addr_t *dma_handle, enum dma_data_direction dir, gfp_t gfp)
{
	struct page *page = __dma_alloc_pages(dev, size, dma_handle, dir, gfp);

	if (page)
		debug_dma_map_page(dev, page, 0, size, dir, *dma_handle, 0);
	return page;
}
EXPORT_SYMBOL_GPL(dma_alloc_pages);

static void __dma_free_pages(struct device *dev, size_t size, struct page *page,
		dma_addr_t dma_handle, enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	size = PAGE_ALIGN(size);
	if (dma_alloc_direct(dev, ops))
		dma_direct_free_pages(dev, size, page, dma_handle, dir);
	else if (ops->free_pages)
		ops->free_pages(dev, size, page, dma_handle, dir);
}

void dma_free_pages(struct device *dev, size_t size, struct page *page,
		dma_addr_t dma_handle, enum dma_data_direction dir)
{
	debug_dma_unmap_page(dev, dma_handle, size, dir);
	__dma_free_pages(dev, size, page, dma_handle, dir);
}
EXPORT_SYMBOL_GPL(dma_free_pages);

int dma_mmap_pages(struct device *dev, struct vm_area_struct *vma,
		size_t size, struct page *page)
{
	unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;

	if (vma->vm_pgoff >= count || vma_pages(vma) > count - vma->vm_pgoff)
		return -ENXIO;
	return remap_pfn_range(vma, vma->vm_start,
			       page_to_pfn(page) + vma->vm_pgoff,
			       vma_pages(vma) << PAGE_SHIFT, vma->vm_page_prot);
}
EXPORT_SYMBOL_GPL(dma_mmap_pages);

static struct sg_table *alloc_single_sgt(struct device *dev, size_t size,
		enum dma_data_direction dir, gfp_t gfp)
{
	struct sg_table *sgt;
	struct page *page;

	sgt = kmalloc(sizeof(*sgt), gfp);
	if (!sgt)
		return NULL;
	if (sg_alloc_table(sgt, 1, gfp))
		goto out_free_sgt;
	page = __dma_alloc_pages(dev, size, &sgt->sgl->dma_address, dir, gfp);
	if (!page)
		goto out_free_table;
	sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
	sg_dma_len(sgt->sgl) = sgt->sgl->length;
	return sgt;
out_free_table:
	sg_free_table(sgt);
out_free_sgt:
	kfree(sgt);
	return NULL;
}

struct sg_table *dma_alloc_noncontiguous(struct device *dev, size_t size,
		enum dma_data_direction dir, gfp_t gfp, unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	struct sg_table *sgt;

	if (WARN_ON_ONCE(attrs & ~DMA_ATTR_ALLOC_SINGLE_PAGES))
		return NULL;
	if (WARN_ON_ONCE(gfp & __GFP_COMP))
		return NULL;

	if (ops && ops->alloc_noncontiguous)
		sgt = ops->alloc_noncontiguous(dev, size, dir, gfp, attrs);
	else
		sgt = alloc_single_sgt(dev, size, dir, gfp);

	if (sgt) {
		sgt->nents = 1;
		debug_dma_map_sg(dev, sgt->sgl, sgt->orig_nents, 1, dir, attrs);
	}
	return sgt;
}
EXPORT_SYMBOL_GPL(dma_alloc_noncontiguous);

static void free_single_sgt(struct device *dev, size_t size,
		struct sg_table *sgt, enum dma_data_direction dir)
{
	__dma_free_pages(dev, size, sg_page(sgt->sgl), sgt->sgl->dma_address,
			 dir);
	sg_free_table(sgt);
	kfree(sgt);
}

void dma_free_noncontiguous(struct device *dev, size_t size,
		struct sg_table *sgt, enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	debug_dma_unmap_sg(dev, sgt->sgl, sgt->orig_nents, dir);
	if (ops && ops->free_noncontiguous)
		ops->free_noncontiguous(dev, size, sgt, dir);
	else
		free_single_sgt(dev, size, sgt, dir);
}
EXPORT_SYMBOL_GPL(dma_free_noncontiguous);

void *dma_vmap_noncontiguous(struct device *dev, size_t size,
		struct sg_table *sgt)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;

	if (ops && ops->alloc_noncontiguous)
		return vmap(sgt_handle(sgt)->pages, count, VM_MAP, PAGE_KERNEL);
	return page_address(sg_page(sgt->sgl));
}
EXPORT_SYMBOL_GPL(dma_vmap_noncontiguous);

void dma_vunmap_noncontiguous(struct device *dev, void *vaddr)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (ops && ops->alloc_noncontiguous)
		vunmap(vaddr);
}
EXPORT_SYMBOL_GPL(dma_vunmap_noncontiguous);

int dma_mmap_noncontiguous(struct device *dev, struct vm_area_struct *vma,
		size_t size, struct sg_table *sgt)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (ops && ops->alloc_noncontiguous) {
		unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;

		if (vma->vm_pgoff >= count ||
		    vma_pages(vma) > count - vma->vm_pgoff)
			return -ENXIO;
		return vm_map_pages(vma, sgt_handle(sgt)->pages, count);
	}
	return dma_mmap_pages(dev, vma, size, sg_page(sgt->sgl));
}
EXPORT_SYMBOL_GPL(dma_mmap_noncontiguous);
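
/*
 * Usage sketch (illustrative only): the noncontiguous API is used as an
 * allocate / vmap / ... / vunmap / free sequence, with the sgtable sync
 * helpers handling ownership transfers on non-coherent systems.  "dev",
 * "size", "sgt" and "vaddr" are the caller's own variables.
 *
 *	sgt = dma_alloc_noncontiguous(dev, size, DMA_BIDIRECTIONAL,
 *				      GFP_KERNEL, 0);
 *	if (!sgt)
 *		return -ENOMEM;
 *	vaddr = dma_vmap_noncontiguous(dev, size, sgt);
 *	...
 *	dma_vunmap_noncontiguous(dev, vaddr);
 *	dma_free_noncontiguous(dev, size, sgt, DMA_BIDIRECTIONAL);
 */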

static int dma_supported(struct device *dev, u64 mask)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	/*
	 * ->dma_supported sets the bypass flag, so we must always call
	 * into the method here unless the device is truly direct mapped.
	 */
	if (!ops)
		return dma_direct_supported(dev, mask);
	if (!ops->dma_supported)
		return 1;
	return ops->dma_supported(dev, mask);
}

bool dma_pci_p2pdma_supported(struct device *dev)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	/* if ops is not set, dma direct will be used which supports P2PDMA */
	if (!ops)
		return true;

	/*
	 * Note: dma_ops_bypass is not checked here because P2PDMA should
	 * not be used with dma mapping ops that do not have support even
	 * if the specific device is bypassing them.
	 */

	return ops->flags & DMA_F_PCI_P2PDMA_SUPPORTED;
}
EXPORT_SYMBOL_GPL(dma_pci_p2pdma_supported);

int dma_set_mask(struct device *dev, u64 mask)
{
	/*
	 * Truncate the mask to the actually supported dma_addr_t width to
	 * avoid generating unsupportable addresses.
	 */
	mask = (dma_addr_t)mask;

	if (!dev->dma_mask || !dma_supported(dev, mask))
		return -EIO;

	arch_dma_set_mask(dev, mask);
	*dev->dma_mask = mask;
	return 0;
}
EXPORT_SYMBOL(dma_set_mask);

int dma_set_coherent_mask(struct device *dev, u64 mask)
{
	/*
	 * Truncate the mask to the actually supported dma_addr_t width to
	 * avoid generating unsupportable addresses.
	 */
	mask = (dma_addr_t)mask;

	if (!dma_supported(dev, mask))
		return -EIO;

	dev->coherent_dma_mask = mask;
	return 0;
}
EXPORT_SYMBOL(dma_set_coherent_mask);
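
/*
 * Usage sketch (illustrative only): drivers usually set the streaming and
 * coherent masks together via the dma_set_mask_and_coherent() wrapper from
 * <linux/dma-mapping.h>, optionally falling back to 32 bits if the wider
 * mask is rejected:
 *
 *	ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
 *	if (ret)
 *		ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
 *	if (ret)
 *		return ret;
 */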

/**
 * dma_addressing_limited - return if the device is addressing limited
 * @dev: device to check
 *
 * Return %true if the device's DMA mask is too small to address all memory in
 * the system, else %false.  Lack of addressing bits is the prime reason for
 * bounce buffering, but might not be the only one.
 */
bool dma_addressing_limited(struct device *dev)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (min_not_zero(dma_get_mask(dev), dev->bus_dma_limit) <
			 dma_get_required_mask(dev))
		return true;

	if (unlikely(ops))
		return false;
	return !dma_direct_all_ram_mapped(dev);
}
EXPORT_SYMBOL_GPL(dma_addressing_limited);
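
/*
 * Usage sketch (illustrative only): a driver might use
 * dma_addressing_limited() to scale back resource usage when bounce
 * buffering is likely, for example by capping its queue count.
 * "pdev" and "max_queues" are made-up placeholders.
 *
 *	if (dma_addressing_limited(&pdev->dev))
 *		max_queues = 1;
 */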

size_t dma_max_mapping_size(struct device *dev)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	size_t size = SIZE_MAX;

	if (dma_map_direct(dev, ops))
		size = dma_direct_max_mapping_size(dev);
	else if (ops && ops->max_mapping_size)
		size = ops->max_mapping_size(dev);

	return size;
}
EXPORT_SYMBOL_GPL(dma_max_mapping_size);

size_t dma_opt_mapping_size(struct device *dev)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	size_t size = SIZE_MAX;

	if (ops && ops->opt_mapping_size)
		size = ops->opt_mapping_size();

	return min(dma_max_mapping_size(dev), size);
}
EXPORT_SYMBOL_GPL(dma_opt_mapping_size);

bool dma_need_sync(struct device *dev, dma_addr_t dma_addr)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (dma_map_direct(dev, ops))
		return dma_direct_need_sync(dev, dma_addr);
	return ops->sync_single_for_cpu || ops->sync_single_for_device;
}
EXPORT_SYMBOL_GPL(dma_need_sync);

unsigned long dma_get_merge_boundary(struct device *dev)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (!ops || !ops->get_merge_boundary)
		return 0;	/* can't merge */

	return ops->get_merge_boundary(dev);
}
EXPORT_SYMBOL_GPL(dma_get_merge_boundary);