/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_DMA_MAPPING_H
#define _LINUX_DMA_MAPPING_H

#include <linux/cache.h>
#include <linux/sizes.h>
#include <linux/string.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/dma-direction.h>
#include <linux/scatterlist.h>
#include <linux/bug.h>
#include <linux/mem_encrypt.h>

/**
 * List of possible attributes associated with a DMA mapping. The semantics
 * of each attribute should be defined in Documentation/core-api/dma-attributes.rst.
 */

/*
 * DMA_ATTR_WEAK_ORDERING: Specifies that reads and writes to the mapping
 * may be weakly ordered, that is that reads and writes may pass each other.
 */
#define DMA_ATTR_WEAK_ORDERING (1UL << 1)
/*
 * DMA_ATTR_WRITE_COMBINE: Specifies that writes to the mapping may be
 * buffered to improve performance.
 */
#define DMA_ATTR_WRITE_COMBINE (1UL << 2)
/*
 * DMA_ATTR_NO_KERNEL_MAPPING: Lets the platform avoid creating a kernel
 * virtual mapping for the allocated buffer.
 */
#define DMA_ATTR_NO_KERNEL_MAPPING (1UL << 4)
/*
 * DMA_ATTR_SKIP_CPU_SYNC: Allows platform code to skip synchronization of
 * the CPU cache for the given buffer assuming that it has been already
 * transferred to 'device' domain.
 */
#define DMA_ATTR_SKIP_CPU_SYNC (1UL << 5)
/*
 * DMA_ATTR_FORCE_CONTIGUOUS: Forces contiguous allocation of the buffer
 * in physical memory.
 */
#define DMA_ATTR_FORCE_CONTIGUOUS (1UL << 6)
/*
 * DMA_ATTR_ALLOC_SINGLE_PAGES: This is a hint to the DMA-mapping subsystem
 * that it's probably not worth the time to try to allocate memory in a way
 * that gives better TLB efficiency.
 */
#define DMA_ATTR_ALLOC_SINGLE_PAGES (1UL << 7)
/*
 * DMA_ATTR_NO_WARN: This tells the DMA-mapping subsystem to suppress
 * allocation failure reports (similarly to __GFP_NOWARN).
 */
#define DMA_ATTR_NO_WARN (1UL << 8)

/*
 * DMA_ATTR_PRIVILEGED: used to indicate that the buffer is fully
 * accessible at an elevated privilege level (and ideally inaccessible or
 * at least read-only at lesser-privileged levels).
 */
#define DMA_ATTR_PRIVILEGED (1UL << 9)
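
/*
 * The DMA_ATTR_* values form a bitmask and may be ORed together. A minimal
 * sketch, assuming a hypothetical device pointer "dev" that is not defined
 * in this header:
 *
 *	dma_addr_t dma_handle;
 *	void *cpu_addr = dma_alloc_attrs(dev, SZ_64K, &dma_handle,
 *			GFP_KERNEL,
 *			DMA_ATTR_WEAK_ORDERING | DMA_ATTR_NO_WARN);
 */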

/*
 * A dma_addr_t can hold any valid DMA or bus address for the platform. It can
 * be given to a device to use as a DMA source or target. It is specific to a
 * given device and there may be a translation between the CPU physical address
 * space and the bus address space.
 *
 * DMA_MAPPING_ERROR is the magic error code if a mapping failed. It should not
 * be used directly in drivers, but checked for using dma_mapping_error()
 * instead.
 */
#define DMA_MAPPING_ERROR (~(dma_addr_t)0)

#define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
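
/*
 * For example, DMA_BIT_MASK(32) evaluates to 0xffffffffULL, while the
 * n == 64 special case yields ~0ULL directly and so avoids the undefined
 * behaviour of shifting a 64-bit value by 64 bits.
 */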

#ifdef CONFIG_DMA_API_DEBUG
void debug_dma_mapping_error(struct device *dev, dma_addr_t dma_addr);
void debug_dma_map_single(struct device *dev, const void *addr,
		unsigned long len);
#else
static inline void debug_dma_mapping_error(struct device *dev,
		dma_addr_t dma_addr)
{
}
static inline void debug_dma_map_single(struct device *dev, const void *addr,
		unsigned long len)
{
}
#endif /* CONFIG_DMA_API_DEBUG */

#ifdef CONFIG_HAS_DMA
static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	debug_dma_mapping_error(dev, dma_addr);

	if (unlikely(dma_addr == DMA_MAPPING_ERROR))
		return -ENOMEM;
	return 0;
}

dma_addr_t dma_map_page_attrs(struct device *dev, struct page *page,
		size_t offset, size_t size, enum dma_data_direction dir,
		unsigned long attrs);
void dma_unmap_page_attrs(struct device *dev, dma_addr_t addr, size_t size,
		enum dma_data_direction dir, unsigned long attrs);
unsigned int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
		int nents, enum dma_data_direction dir, unsigned long attrs);
void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg,
		int nents, enum dma_data_direction dir,
		unsigned long attrs);
int dma_map_sgtable(struct device *dev, struct sg_table *sgt,
		enum dma_data_direction dir, unsigned long attrs);
dma_addr_t dma_map_resource(struct device *dev, phys_addr_t phys_addr,
		size_t size, enum dma_data_direction dir, unsigned long attrs);
void dma_unmap_resource(struct device *dev, dma_addr_t addr, size_t size,
		enum dma_data_direction dir, unsigned long attrs);
void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr, size_t size,
		enum dma_data_direction dir);
void dma_sync_single_for_device(struct device *dev, dma_addr_t addr,
		size_t size, enum dma_data_direction dir);
void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
		int nelems, enum dma_data_direction dir);
void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
		int nelems, enum dma_data_direction dir);
void *dma_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle,
		gfp_t flag, unsigned long attrs);
void dma_free_attrs(struct device *dev, size_t size, void *cpu_addr,
		dma_addr_t dma_handle, unsigned long attrs);
void *dmam_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle,
		gfp_t gfp, unsigned long attrs);
void dmam_free_coherent(struct device *dev, size_t size, void *vaddr,
		dma_addr_t dma_handle);
int dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs);
int dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs);
bool dma_can_mmap(struct device *dev);
bool dma_pci_p2pdma_supported(struct device *dev);
int dma_set_mask(struct device *dev, u64 mask);
int dma_set_coherent_mask(struct device *dev, u64 mask);
u64 dma_get_required_mask(struct device *dev);
bool dma_addressing_limited(struct device *dev);
size_t dma_max_mapping_size(struct device *dev);
size_t dma_opt_mapping_size(struct device *dev);
bool dma_need_sync(struct device *dev, dma_addr_t dma_addr);
unsigned long dma_get_merge_boundary(struct device *dev);
struct sg_table *dma_alloc_noncontiguous(struct device *dev, size_t size,
		enum dma_data_direction dir, gfp_t gfp, unsigned long attrs);
void dma_free_noncontiguous(struct device *dev, size_t size,
		struct sg_table *sgt, enum dma_data_direction dir);
void *dma_vmap_noncontiguous(struct device *dev, size_t size,
		struct sg_table *sgt);
void dma_vunmap_noncontiguous(struct device *dev, void *vaddr);
int dma_mmap_noncontiguous(struct device *dev, struct vm_area_struct *vma,
		size_t size, struct sg_table *sgt);
#else /* CONFIG_HAS_DMA */
static inline dma_addr_t dma_map_page_attrs(struct device *dev,
		struct page *page, size_t offset, size_t size,
		enum dma_data_direction dir, unsigned long attrs)
{
	return DMA_MAPPING_ERROR;
}
static inline void dma_unmap_page_attrs(struct device *dev, dma_addr_t addr,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
}
static inline unsigned int dma_map_sg_attrs(struct device *dev,
		struct scatterlist *sg, int nents, enum dma_data_direction dir,
		unsigned long attrs)
{
	return 0;
}
static inline void dma_unmap_sg_attrs(struct device *dev,
		struct scatterlist *sg, int nents, enum dma_data_direction dir,
		unsigned long attrs)
{
}
static inline int dma_map_sgtable(struct device *dev, struct sg_table *sgt,
		enum dma_data_direction dir, unsigned long attrs)
{
	return -EOPNOTSUPP;
}
static inline dma_addr_t dma_map_resource(struct device *dev,
		phys_addr_t phys_addr, size_t size, enum dma_data_direction dir,
		unsigned long attrs)
{
	return DMA_MAPPING_ERROR;
}
static inline void dma_unmap_resource(struct device *dev, dma_addr_t addr,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
}
static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
		size_t size, enum dma_data_direction dir)
{
}
static inline void dma_sync_single_for_device(struct device *dev,
		dma_addr_t addr, size_t size, enum dma_data_direction dir)
{
}
static inline void dma_sync_sg_for_cpu(struct device *dev,
		struct scatterlist *sg, int nelems, enum dma_data_direction dir)
{
}
static inline void dma_sync_sg_for_device(struct device *dev,
		struct scatterlist *sg, int nelems, enum dma_data_direction dir)
{
}
static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return -ENOMEM;
}
static inline void *dma_alloc_attrs(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t flag, unsigned long attrs)
{
	return NULL;
}
static inline void dma_free_attrs(struct device *dev, size_t size,
		void *cpu_addr, dma_addr_t dma_handle, unsigned long attrs)
{
}
static inline void *dmam_alloc_attrs(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
{
	return NULL;
}
static inline void dmam_free_coherent(struct device *dev, size_t size,
		void *vaddr, dma_addr_t dma_handle)
{
}
static inline int dma_get_sgtable_attrs(struct device *dev,
		struct sg_table *sgt, void *cpu_addr, dma_addr_t dma_addr,
		size_t size, unsigned long attrs)
{
	return -ENXIO;
}
static inline int dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs)
{
	return -ENXIO;
}
static inline bool dma_can_mmap(struct device *dev)
{
	return false;
}
static inline bool dma_pci_p2pdma_supported(struct device *dev)
{
	return false;
}
static inline int dma_set_mask(struct device *dev, u64 mask)
{
	return -EIO;
}
static inline int dma_set_coherent_mask(struct device *dev, u64 mask)
{
	return -EIO;
}
static inline u64 dma_get_required_mask(struct device *dev)
{
	return 0;
}
static inline bool dma_addressing_limited(struct device *dev)
{
	return false;
}
static inline size_t dma_max_mapping_size(struct device *dev)
{
	return 0;
}
static inline size_t dma_opt_mapping_size(struct device *dev)
{
	return 0;
}
static inline bool dma_need_sync(struct device *dev, dma_addr_t dma_addr)
{
	return false;
}
static inline unsigned long dma_get_merge_boundary(struct device *dev)
{
	return 0;
}
static inline struct sg_table *dma_alloc_noncontiguous(struct device *dev,
		size_t size, enum dma_data_direction dir, gfp_t gfp,
		unsigned long attrs)
{
	return NULL;
}
static inline void dma_free_noncontiguous(struct device *dev, size_t size,
		struct sg_table *sgt, enum dma_data_direction dir)
{
}
static inline void *dma_vmap_noncontiguous(struct device *dev, size_t size,
		struct sg_table *sgt)
{
	return NULL;
}
static inline void dma_vunmap_noncontiguous(struct device *dev, void *vaddr)
{
}
static inline int dma_mmap_noncontiguous(struct device *dev,
		struct vm_area_struct *vma, size_t size, struct sg_table *sgt)
{
	return -EINVAL;
}
#endif /* CONFIG_HAS_DMA */

struct page *dma_alloc_pages(struct device *dev, size_t size,
		dma_addr_t *dma_handle, enum dma_data_direction dir, gfp_t gfp);
void dma_free_pages(struct device *dev, size_t size, struct page *page,
		dma_addr_t dma_handle, enum dma_data_direction dir);
int dma_mmap_pages(struct device *dev, struct vm_area_struct *vma,
		size_t size, struct page *page);

static inline void *dma_alloc_noncoherent(struct device *dev, size_t size,
		dma_addr_t *dma_handle, enum dma_data_direction dir, gfp_t gfp)
{
	struct page *page = dma_alloc_pages(dev, size, dma_handle, dir, gfp);
	return page ? page_address(page) : NULL;
}

static inline void dma_free_noncoherent(struct device *dev, size_t size,
		void *vaddr, dma_addr_t dma_handle, enum dma_data_direction dir)
{
	dma_free_pages(dev, size, virt_to_page(vaddr), dma_handle, dir);
}
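
/*
 * Non-coherent allocations require explicit ownership transfers. A minimal
 * sketch, assuming a hypothetical device "dev", handle "dma" and a
 * driver-specific fill_buffer() helper:
 *
 *	void *buf = dma_alloc_noncoherent(dev, SZ_4K, &dma,
 *			DMA_BIDIRECTIONAL, GFP_KERNEL);
 *	if (!buf)
 *		return -ENOMEM;
 *	fill_buffer(buf);					// CPU owns buf
 *	dma_sync_single_for_device(dev, dma, SZ_4K, DMA_BIDIRECTIONAL);
 *	// ... device performs DMA ...
 *	dma_sync_single_for_cpu(dev, dma, SZ_4K, DMA_BIDIRECTIONAL);
 *	dma_free_noncoherent(dev, SZ_4K, buf, dma, DMA_BIDIRECTIONAL);
 */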

static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	/* DMA must never operate on areas that might be remapped. */
	if (dev_WARN_ONCE(dev, is_vmalloc_addr(ptr),
			  "rejecting DMA map of vmalloc memory\n"))
		return DMA_MAPPING_ERROR;
	debug_dma_map_single(dev, ptr, size);
	return dma_map_page_attrs(dev, virt_to_page(ptr), offset_in_page(ptr),
			size, dir, attrs);
}

static inline void dma_unmap_single_attrs(struct device *dev, dma_addr_t addr,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	return dma_unmap_page_attrs(dev, addr, size, dir, attrs);
}
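
/*
 * Typical streaming-DMA lifecycle (sketch only; "dev", "skb_data" and "len"
 * are hypothetical driver names). The buffer must come from the linear map
 * (e.g. kmalloc()), not vmalloc(), or the mapping is rejected above:
 *
 *	dma_addr_t dma = dma_map_single(dev, skb_data, len, DMA_TO_DEVICE);
 *
 *	if (dma_mapping_error(dev, dma))
 *		return -ENOMEM;
 *	// ... point the device at "dma" and let it run ...
 *	dma_unmap_single(dev, dma, len, DMA_TO_DEVICE);
 */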

static inline void dma_sync_single_range_for_cpu(struct device *dev,
		dma_addr_t addr, unsigned long offset, size_t size,
		enum dma_data_direction dir)
{
	return dma_sync_single_for_cpu(dev, addr + offset, size, dir);
}

static inline void dma_sync_single_range_for_device(struct device *dev,
		dma_addr_t addr, unsigned long offset, size_t size,
		enum dma_data_direction dir)
{
	return dma_sync_single_for_device(dev, addr + offset, size, dir);
}
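
/*
 * The *_range_* variants allow syncing only part of an existing mapping.
 * Sketch with hypothetical names: "dma" maps a ring of descriptors of
 * "desc_sz" bytes each and only slot "i" is handed back to the CPU:
 *
 *	dma_sync_single_range_for_cpu(dev, dma, i * desc_sz, desc_sz,
 *			DMA_FROM_DEVICE);
 *	process_descriptor(i);
 *	dma_sync_single_range_for_device(dev, dma, i * desc_sz, desc_sz,
 *			DMA_FROM_DEVICE);
 */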

/**
 * dma_unmap_sgtable - Unmap the given buffer for DMA
 * @dev:	The device for which to perform the DMA operation
 * @sgt:	The sg_table object describing the buffer
 * @dir:	DMA direction
 * @attrs:	Optional DMA attributes for the unmap operation
 *
 * Unmaps a buffer described by a scatterlist stored in the given sg_table
 * object for the @dir DMA operation by the @dev device. After this function
 * the ownership of the buffer is transferred back to the CPU domain.
 */
static inline void dma_unmap_sgtable(struct device *dev, struct sg_table *sgt,
		enum dma_data_direction dir, unsigned long attrs)
{
	dma_unmap_sg_attrs(dev, sgt->sgl, sgt->orig_nents, dir, attrs);
}

/**
 * dma_sync_sgtable_for_cpu - Synchronize the given buffer for CPU access
 * @dev:	The device for which to perform the DMA operation
 * @sgt:	The sg_table object describing the buffer
 * @dir:	DMA direction
 *
 * Performs the needed cache synchronization and moves the ownership of the
 * buffer back to the CPU domain, so it is safe to perform any access to it
 * by the CPU. Before doing any further DMA operations, one has to transfer
 * the ownership of the buffer back to the DMA domain by calling
 * dma_sync_sgtable_for_device().
 */
static inline void dma_sync_sgtable_for_cpu(struct device *dev,
		struct sg_table *sgt, enum dma_data_direction dir)
{
	dma_sync_sg_for_cpu(dev, sgt->sgl, sgt->orig_nents, dir);
}

/**
 * dma_sync_sgtable_for_device - Synchronize the given buffer for DMA
 * @dev:	The device for which to perform the DMA operation
 * @sgt:	The sg_table object describing the buffer
 * @dir:	DMA direction
 *
 * Performs the needed cache synchronization and moves the ownership of the
 * buffer back to the DMA domain, so it is safe to perform the DMA operation.
 * Once finished, one has to call dma_sync_sgtable_for_cpu() or
 * dma_unmap_sgtable().
 */
static inline void dma_sync_sgtable_for_device(struct device *dev,
		struct sg_table *sgt, enum dma_data_direction dir)
{
	dma_sync_sg_for_device(dev, sgt->sgl, sgt->orig_nents, dir);
}
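
/*
 * Sketch of a full sg_table cycle (assumes a hypothetical "dev" and an
 * already populated "sgt", e.g. from sg_alloc_table_from_pages()):
 *
 *	ret = dma_map_sgtable(dev, sgt, DMA_BIDIRECTIONAL, 0);
 *	if (ret)
 *		return ret;
 *	// ... device DMA using the mapped scatterlist ...
 *	dma_sync_sgtable_for_cpu(dev, sgt, DMA_BIDIRECTIONAL);
 *	// ... CPU access ...
 *	dma_sync_sgtable_for_device(dev, sgt, DMA_BIDIRECTIONAL);
 *	dma_unmap_sgtable(dev, sgt, DMA_BIDIRECTIONAL, 0);
 */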

#define dma_map_single(d, a, s, r) dma_map_single_attrs(d, a, s, r, 0)
#define dma_unmap_single(d, a, s, r) dma_unmap_single_attrs(d, a, s, r, 0)
#define dma_map_sg(d, s, n, r) dma_map_sg_attrs(d, s, n, r, 0)
#define dma_unmap_sg(d, s, n, r) dma_unmap_sg_attrs(d, s, n, r, 0)
#define dma_map_page(d, p, o, s, r) dma_map_page_attrs(d, p, o, s, r, 0)
#define dma_unmap_page(d, a, s, r) dma_unmap_page_attrs(d, a, s, r, 0)
#define dma_get_sgtable(d, t, v, h, s) dma_get_sgtable_attrs(d, t, v, h, s, 0)
#define dma_mmap_coherent(d, v, c, h, s) dma_mmap_attrs(d, v, c, h, s, 0)

bool dma_coherent_ok(struct device *dev, phys_addr_t phys, size_t size);

static inline void *dma_alloc_coherent(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp)
{
	return dma_alloc_attrs(dev, size, dma_handle, gfp,
			(gfp & __GFP_NOWARN) ? DMA_ATTR_NO_WARN : 0);
}

static inline void dma_free_coherent(struct device *dev, size_t size,
		void *cpu_addr, dma_addr_t dma_handle)
{
	return dma_free_attrs(dev, size, cpu_addr, dma_handle, 0);
}
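
/*
 * Coherent memory needs no explicit syncing; both the CPU address and the
 * device-visible handle stay valid until freed. Sketch with a hypothetical
 * "dev":
 *
 *	dma_addr_t ring_dma;
 *	void *ring = dma_alloc_coherent(dev, SZ_4K, &ring_dma, GFP_KERNEL);
 *
 *	if (!ring)
 *		return -ENOMEM;
 *	// ... program "ring_dma" into the device, use "ring" from the CPU ...
 *	dma_free_coherent(dev, SZ_4K, ring, ring_dma);
 */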

static inline u64 dma_get_mask(struct device *dev)
{
	if (dev->dma_mask && *dev->dma_mask)
		return *dev->dma_mask;
	return DMA_BIT_MASK(32);
}

/*
 * Set both the DMA mask and the coherent DMA mask to the same thing.
 * Note that we don't check the return value from dma_set_coherent_mask()
 * as the DMA API guarantees that the coherent DMA mask can be set to
 * the same or smaller than the streaming DMA mask.
 */
static inline int dma_set_mask_and_coherent(struct device *dev, u64 mask)
{
	int rc = dma_set_mask(dev, mask);
	if (rc == 0)
		dma_set_coherent_mask(dev, mask);
	return rc;
}
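
/*
 * Sketch of the usual probe-time call (the "dev" pointer and the warning
 * text are driver-specific): declare the widest mask the hardware supports
 * and bail out or degrade gracefully if it cannot be satisfied.
 *
 *	ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
 *	if (ret)
 *		dev_warn(dev, "no suitable DMA available\n");
 */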

/*
 * Similar to the above, except it deals with the case where the device
 * does not have dev->dma_mask appropriately set up.
 */
static inline int dma_coerce_mask_and_coherent(struct device *dev, u64 mask)
{
	dev->dma_mask = &dev->coherent_dma_mask;
	return dma_set_mask_and_coherent(dev, mask);
}

static inline unsigned int dma_get_max_seg_size(struct device *dev)
{
	if (dev->dma_parms && dev->dma_parms->max_segment_size)
		return dev->dma_parms->max_segment_size;
	return SZ_64K;
}

static inline int dma_set_max_seg_size(struct device *dev, unsigned int size)
{
	if (dev->dma_parms) {
		dev->dma_parms->max_segment_size = size;
		return 0;
	}
	return -EIO;
}
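
/*
 * Sketch (hypothetical "dev"): a driver whose hardware cannot handle
 * scatterlist elements larger than 4 KiB could advertise that before
 * mapping. Note that this only succeeds if dev->dma_parms has been set up,
 * which is typically done by the bus code.
 *
 *	if (dma_set_max_seg_size(dev, SZ_4K))
 *		return -EIO;
 */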

static inline unsigned long dma_get_seg_boundary(struct device *dev)
{
	if (dev->dma_parms && dev->dma_parms->segment_boundary_mask)
		return dev->dma_parms->segment_boundary_mask;
	return ULONG_MAX;
}

/**
 * dma_get_seg_boundary_nr_pages - return the segment boundary in "page" units
 * @dev: device to query the boundary for
 * @page_shift: ilog2() of the IOMMU page size
 *
 * Return the segment boundary in IOMMU page units (which may be different from
 * the CPU page size) for the passed in device.
 *
 * If @dev is NULL a boundary of U32_MAX is assumed; this case exists only for
 * non-DMA API callers.
 */
static inline unsigned long dma_get_seg_boundary_nr_pages(struct device *dev,
		unsigned int page_shift)
{
	if (!dev)
		return (U32_MAX >> page_shift) + 1;
	return (dma_get_seg_boundary(dev) >> page_shift) + 1;
}
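
/*
 * Worked example: with a 4 KiB IOMMU page size (page_shift == 12) and a
 * segment boundary mask of 0xffffffff, this evaluates to
 * (0xffffffff >> 12) + 1 == 0x100000, i.e. a 4 GiB boundary expressed in
 * 4 KiB pages.
 */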

static inline int dma_set_seg_boundary(struct device *dev, unsigned long mask)
{
	if (dev->dma_parms) {
		dev->dma_parms->segment_boundary_mask = mask;
		return 0;
	}
	return -EIO;
}

static inline unsigned int dma_get_min_align_mask(struct device *dev)
{
	if (dev->dma_parms)
		return dev->dma_parms->min_align_mask;
	return 0;
}

static inline int dma_set_min_align_mask(struct device *dev,
		unsigned int min_align_mask)
{
	if (WARN_ON_ONCE(!dev->dma_parms))
		return -EIO;
	dev->dma_parms->min_align_mask = min_align_mask;
	return 0;
}

#ifndef dma_get_cache_alignment
static inline int dma_get_cache_alignment(void)
{
#ifdef ARCH_HAS_DMA_MINALIGN
	return ARCH_DMA_MINALIGN;
#endif
	return 1;
}
#endif
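
/*
 * Sketch: drivers that carve DMA buffers out of larger allocations can pad
 * them to dma_get_cache_alignment() so that unrelated data never shares a
 * cache line with a streaming mapping ("len" is a hypothetical size):
 *
 *	size_t padded = ALIGN(len, dma_get_cache_alignment());
 *	void *buf = kmalloc(padded, GFP_KERNEL);
 */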

static inline void *dmam_alloc_coherent(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp)
{
	return dmam_alloc_attrs(dev, size, dma_handle, gfp,
			(gfp & __GFP_NOWARN) ? DMA_ATTR_NO_WARN : 0);
}
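
/*
 * The dmam_* variants are device-managed (devres): the allocation is
 * released automatically when the device is unbound, so probe() error paths
 * need no explicit free. Sketch with hypothetical "dev" and "desc_dma":
 *
 *	void *desc = dmam_alloc_coherent(dev, SZ_4K, &desc_dma, GFP_KERNEL);
 *	if (!desc)
 *		return -ENOMEM;		// nothing to unwind
 */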

static inline void *dma_alloc_wc(struct device *dev, size_t size,
		dma_addr_t *dma_addr, gfp_t gfp)
{
	unsigned long attrs = DMA_ATTR_WRITE_COMBINE;

	if (gfp & __GFP_NOWARN)
		attrs |= DMA_ATTR_NO_WARN;

	return dma_alloc_attrs(dev, size, dma_addr, gfp, attrs);
}

static inline void dma_free_wc(struct device *dev, size_t size,
		void *cpu_addr, dma_addr_t dma_addr)
{
	return dma_free_attrs(dev, size, cpu_addr, dma_addr,
			DMA_ATTR_WRITE_COMBINE);
}

static inline int dma_mmap_wc(struct device *dev,
		struct vm_area_struct *vma,
		void *cpu_addr, dma_addr_t dma_addr,
		size_t size)
{
	return dma_mmap_attrs(dev, vma, cpu_addr, dma_addr, size,
			DMA_ATTR_WRITE_COMBINE);
}
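
/*
 * Write-combined buffers are typically used for frame buffers or other
 * CPU-write/device-read streams, and are often handed to user space with
 * dma_mmap_wc() from a driver's mmap() hook. Sketch (hypothetical "dev",
 * "vma" and "fb_dma"):
 *
 *	void *fb = dma_alloc_wc(dev, vma->vm_end - vma->vm_start,
 *			&fb_dma, GFP_KERNEL);
 *	if (!fb)
 *		return -ENOMEM;
 *	return dma_mmap_wc(dev, vma, fb, fb_dma,
 *			vma->vm_end - vma->vm_start);
 */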

#ifdef CONFIG_NEED_DMA_MAP_STATE
#define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME) dma_addr_t ADDR_NAME
#define DEFINE_DMA_UNMAP_LEN(LEN_NAME) __u32 LEN_NAME
#define dma_unmap_addr(PTR, ADDR_NAME) ((PTR)->ADDR_NAME)
#define dma_unmap_addr_set(PTR, ADDR_NAME, VAL) (((PTR)->ADDR_NAME) = (VAL))
#define dma_unmap_len(PTR, LEN_NAME) ((PTR)->LEN_NAME)
#define dma_unmap_len_set(PTR, LEN_NAME, VAL) (((PTR)->LEN_NAME) = (VAL))
#else
#define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME)
#define DEFINE_DMA_UNMAP_LEN(LEN_NAME)
#define dma_unmap_addr(PTR, ADDR_NAME) (0)
#define dma_unmap_addr_set(PTR, ADDR_NAME, VAL) do { } while (0)
#define dma_unmap_len(PTR, LEN_NAME) (0)
#define dma_unmap_len_set(PTR, LEN_NAME, VAL) do { } while (0)
#endif
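
/*
 * These macros let a driver keep the state needed for a later unmap without
 * paying for it when CONFIG_NEED_DMA_MAP_STATE is disabled. Sketch of a
 * hypothetical per-buffer structure and its use:
 *
 *	struct foo_buf {
 *		DEFINE_DMA_UNMAP_ADDR(addr);
 *		DEFINE_DMA_UNMAP_LEN(len);
 *	};
 *
 *	dma_unmap_addr_set(buf, addr, dma);
 *	dma_unmap_len_set(buf, len, size);
 *	...
 *	dma_unmap_single(dev, dma_unmap_addr(buf, addr),
 *			 dma_unmap_len(buf, len), DMA_TO_DEVICE);
 */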

#endif /* _LINUX_DMA_MAPPING_H */