/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_DMA_MAPPING_H
#define _LINUX_DMA_MAPPING_H

#include <linux/sizes.h>
#include <linux/string.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/dma-direction.h>
#include <linux/scatterlist.h>
#include <linux/bug.h>
#include <linux/mem_encrypt.h>

/**
 * List of possible attributes associated with a DMA mapping. The semantics
 * of each attribute should be defined in Documentation/core-api/dma-attributes.rst.
 */

/*
 * DMA_ATTR_WEAK_ORDERING: Specifies that reads and writes to the mapping
 * may be weakly ordered, that is, reads and writes may pass each other.
 */
#define DMA_ATTR_WEAK_ORDERING (1UL << 1)
/*
 * DMA_ATTR_WRITE_COMBINE: Specifies that writes to the mapping may be
 * buffered to improve performance.
 */
#define DMA_ATTR_WRITE_COMBINE (1UL << 2)
/*
 * DMA_ATTR_NO_KERNEL_MAPPING: Lets the platform avoid creating a kernel
 * virtual mapping for the allocated buffer.
 */
#define DMA_ATTR_NO_KERNEL_MAPPING (1UL << 4)
/*
 * DMA_ATTR_SKIP_CPU_SYNC: Allows platform code to skip synchronization of
 * the CPU cache for the given buffer, assuming that it has already been
 * transferred to the 'device' domain.
 */
#define DMA_ATTR_SKIP_CPU_SYNC (1UL << 5)
/*
 * DMA_ATTR_FORCE_CONTIGUOUS: Forces contiguous allocation of the buffer
 * in physical memory.
 */
#define DMA_ATTR_FORCE_CONTIGUOUS (1UL << 6)
/*
 * DMA_ATTR_ALLOC_SINGLE_PAGES: This is a hint to the DMA-mapping subsystem
 * that it's probably not worth the time to try to allocate memory in a way
 * that gives better TLB efficiency.
 */
#define DMA_ATTR_ALLOC_SINGLE_PAGES (1UL << 7)
/*
 * DMA_ATTR_NO_WARN: This tells the DMA-mapping subsystem to suppress
 * allocation failure reports (similarly to __GFP_NOWARN).
 */
#define DMA_ATTR_NO_WARN (1UL << 8)

/*
 * DMA_ATTR_PRIVILEGED: used to indicate that the buffer is fully
 * accessible at an elevated privilege level (and ideally inaccessible or
 * at least read-only at lesser-privileged levels).
 */
#define DMA_ATTR_PRIVILEGED (1UL << 9)
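
/*
 * Usage sketch (illustrative, not part of the original header; my_dev, size
 * and handle are hypothetical names): attributes are OR-ed together and
 * passed to the *_attrs() variants of the allocation and mapping calls.
 *
 *	dma_addr_t handle;
 *	void *vaddr = dma_alloc_attrs(my_dev, size, &handle, GFP_KERNEL,
 *			DMA_ATTR_WRITE_COMBINE | DMA_ATTR_NO_WARN);
 *	if (!vaddr)
 *		return -ENOMEM;
 */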

/*
 * A dma_addr_t can hold any valid DMA or bus address for the platform. It can
 * be given to a device to use as a DMA source or target. It is specific to a
 * given device and there may be a translation between the CPU physical address
 * space and the bus address space.
 *
 * DMA_MAPPING_ERROR is the magic error code if a mapping failed. It should not
 * be used directly in drivers, but checked for using dma_mapping_error()
 * instead.
 */
#define DMA_MAPPING_ERROR (~(dma_addr_t)0)

#define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))

#ifdef CONFIG_DMA_API_DEBUG
void debug_dma_mapping_error(struct device *dev, dma_addr_t dma_addr);
void debug_dma_map_single(struct device *dev, const void *addr,
		unsigned long len);
#else
static inline void debug_dma_mapping_error(struct device *dev,
		dma_addr_t dma_addr)
{
}
static inline void debug_dma_map_single(struct device *dev, const void *addr,
		unsigned long len)
{
}
#endif /* CONFIG_DMA_API_DEBUG */

#ifdef CONFIG_HAS_DMA
static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	debug_dma_mapping_error(dev, dma_addr);

	if (unlikely(dma_addr == DMA_MAPPING_ERROR))
		return -ENOMEM;
	return 0;
}
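
/*
 * Typical error-handling pattern (illustrative sketch; my_dev, buf and len
 * are hypothetical): every streaming mapping must be checked with
 * dma_mapping_error() before the returned address is handed to hardware.
 *
 *	dma_addr_t dma = dma_map_single(my_dev, buf, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(my_dev, dma))
 *		return -ENOMEM;
 *	... program the device with "dma" ...
 *	dma_unmap_single(my_dev, dma, len, DMA_TO_DEVICE);
 */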

dma_addr_t dma_map_page_attrs(struct device *dev, struct page *page,
		size_t offset, size_t size, enum dma_data_direction dir,
		unsigned long attrs);
void dma_unmap_page_attrs(struct device *dev, dma_addr_t addr, size_t size,
		enum dma_data_direction dir, unsigned long attrs);
unsigned int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
		int nents, enum dma_data_direction dir, unsigned long attrs);
void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg,
		int nents, enum dma_data_direction dir,
		unsigned long attrs);
int dma_map_sgtable(struct device *dev, struct sg_table *sgt,
		enum dma_data_direction dir, unsigned long attrs);
dma_addr_t dma_map_resource(struct device *dev, phys_addr_t phys_addr,
		size_t size, enum dma_data_direction dir, unsigned long attrs);
void dma_unmap_resource(struct device *dev, dma_addr_t addr, size_t size,
		enum dma_data_direction dir, unsigned long attrs);
void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr, size_t size,
		enum dma_data_direction dir);
void dma_sync_single_for_device(struct device *dev, dma_addr_t addr,
		size_t size, enum dma_data_direction dir);
void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
		int nelems, enum dma_data_direction dir);
void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
		int nelems, enum dma_data_direction dir);
void *dma_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle,
		gfp_t flag, unsigned long attrs);
void dma_free_attrs(struct device *dev, size_t size, void *cpu_addr,
		dma_addr_t dma_handle, unsigned long attrs);
void *dmam_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle,
		gfp_t gfp, unsigned long attrs);
void dmam_free_coherent(struct device *dev, size_t size, void *vaddr,
		dma_addr_t dma_handle);
int dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs);
int dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs);
bool dma_can_mmap(struct device *dev);
int dma_supported(struct device *dev, u64 mask);
bool dma_pci_p2pdma_supported(struct device *dev);
int dma_set_mask(struct device *dev, u64 mask);
int dma_set_coherent_mask(struct device *dev, u64 mask);
u64 dma_get_required_mask(struct device *dev);
size_t dma_max_mapping_size(struct device *dev);
size_t dma_opt_mapping_size(struct device *dev);
bool dma_need_sync(struct device *dev, dma_addr_t dma_addr);
unsigned long dma_get_merge_boundary(struct device *dev);
struct sg_table *dma_alloc_noncontiguous(struct device *dev, size_t size,
		enum dma_data_direction dir, gfp_t gfp, unsigned long attrs);
void dma_free_noncontiguous(struct device *dev, size_t size,
		struct sg_table *sgt, enum dma_data_direction dir);
void *dma_vmap_noncontiguous(struct device *dev, size_t size,
		struct sg_table *sgt);
void dma_vunmap_noncontiguous(struct device *dev, void *vaddr);
int dma_mmap_noncontiguous(struct device *dev, struct vm_area_struct *vma,
		size_t size, struct sg_table *sgt);
#else /* CONFIG_HAS_DMA */
static inline dma_addr_t dma_map_page_attrs(struct device *dev,
		struct page *page, size_t offset, size_t size,
		enum dma_data_direction dir, unsigned long attrs)
{
	return DMA_MAPPING_ERROR;
}
static inline void dma_unmap_page_attrs(struct device *dev, dma_addr_t addr,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
}
static inline unsigned int dma_map_sg_attrs(struct device *dev,
		struct scatterlist *sg, int nents, enum dma_data_direction dir,
		unsigned long attrs)
{
	return 0;
}
static inline void dma_unmap_sg_attrs(struct device *dev,
		struct scatterlist *sg, int nents, enum dma_data_direction dir,
		unsigned long attrs)
{
}
static inline int dma_map_sgtable(struct device *dev, struct sg_table *sgt,
		enum dma_data_direction dir, unsigned long attrs)
{
	return -EOPNOTSUPP;
}
static inline dma_addr_t dma_map_resource(struct device *dev,
		phys_addr_t phys_addr, size_t size, enum dma_data_direction dir,
		unsigned long attrs)
{
	return DMA_MAPPING_ERROR;
}
static inline void dma_unmap_resource(struct device *dev, dma_addr_t addr,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
}
static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
		size_t size, enum dma_data_direction dir)
{
}
static inline void dma_sync_single_for_device(struct device *dev,
		dma_addr_t addr, size_t size, enum dma_data_direction dir)
{
}
static inline void dma_sync_sg_for_cpu(struct device *dev,
		struct scatterlist *sg, int nelems, enum dma_data_direction dir)
{
}
static inline void dma_sync_sg_for_device(struct device *dev,
		struct scatterlist *sg, int nelems, enum dma_data_direction dir)
{
}
static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return -ENOMEM;
}
static inline void *dma_alloc_attrs(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t flag, unsigned long attrs)
{
	return NULL;
}
static inline void dma_free_attrs(struct device *dev, size_t size,
		void *cpu_addr, dma_addr_t dma_handle, unsigned long attrs)
{
}
static inline void *dmam_alloc_attrs(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
{
	return NULL;
}
static inline void dmam_free_coherent(struct device *dev, size_t size,
		void *vaddr, dma_addr_t dma_handle)
{
}
static inline int dma_get_sgtable_attrs(struct device *dev,
		struct sg_table *sgt, void *cpu_addr, dma_addr_t dma_addr,
		size_t size, unsigned long attrs)
{
	return -ENXIO;
}
static inline int dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs)
{
	return -ENXIO;
}
static inline bool dma_can_mmap(struct device *dev)
{
	return false;
}
static inline int dma_supported(struct device *dev, u64 mask)
{
	return 0;
}
static inline bool dma_pci_p2pdma_supported(struct device *dev)
{
	return false;
}
static inline int dma_set_mask(struct device *dev, u64 mask)
{
	return -EIO;
}
static inline int dma_set_coherent_mask(struct device *dev, u64 mask)
{
	return -EIO;
}
static inline u64 dma_get_required_mask(struct device *dev)
{
	return 0;
}
static inline size_t dma_max_mapping_size(struct device *dev)
{
	return 0;
}
static inline size_t dma_opt_mapping_size(struct device *dev)
{
	return 0;
}
static inline bool dma_need_sync(struct device *dev, dma_addr_t dma_addr)
{
	return false;
}
static inline unsigned long dma_get_merge_boundary(struct device *dev)
{
	return 0;
}
static inline struct sg_table *dma_alloc_noncontiguous(struct device *dev,
		size_t size, enum dma_data_direction dir, gfp_t gfp,
		unsigned long attrs)
{
	return NULL;
}
static inline void dma_free_noncontiguous(struct device *dev, size_t size,
		struct sg_table *sgt, enum dma_data_direction dir)
{
}
static inline void *dma_vmap_noncontiguous(struct device *dev, size_t size,
		struct sg_table *sgt)
{
	return NULL;
}
static inline void dma_vunmap_noncontiguous(struct device *dev, void *vaddr)
{
}
static inline int dma_mmap_noncontiguous(struct device *dev,
		struct vm_area_struct *vma, size_t size, struct sg_table *sgt)
{
	return -EINVAL;
}
#endif /* CONFIG_HAS_DMA */

struct page *dma_alloc_pages(struct device *dev, size_t size,
		dma_addr_t *dma_handle, enum dma_data_direction dir, gfp_t gfp);
void dma_free_pages(struct device *dev, size_t size, struct page *page,
		dma_addr_t dma_handle, enum dma_data_direction dir);
int dma_mmap_pages(struct device *dev, struct vm_area_struct *vma,
		size_t size, struct page *page);

static inline void *dma_alloc_noncoherent(struct device *dev, size_t size,
		dma_addr_t *dma_handle, enum dma_data_direction dir, gfp_t gfp)
{
	struct page *page = dma_alloc_pages(dev, size, dma_handle, dir, gfp);
	return page ? page_address(page) : NULL;
}

static inline void dma_free_noncoherent(struct device *dev, size_t size,
		void *vaddr, dma_addr_t dma_handle, enum dma_data_direction dir)
{
	dma_free_pages(dev, size, virt_to_page(vaddr), dma_handle, dir);
}
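
/*
 * Non-coherent allocation sketch (illustrative; my_dev, size and handle are
 * hypothetical). Memory returned by dma_alloc_noncoherent() is owned by the
 * device for transfers in the given direction, so the CPU must bracket its
 * accesses with the sync calls:
 *
 *	dma_addr_t handle;
 *	void *vaddr = dma_alloc_noncoherent(my_dev, size, &handle,
 *			DMA_FROM_DEVICE, GFP_KERNEL);
 *	if (!vaddr)
 *		return -ENOMEM;
 *	... let the device fill the buffer ...
 *	dma_sync_single_for_cpu(my_dev, handle, size, DMA_FROM_DEVICE);
 *	... CPU reads vaddr ...
 *	dma_free_noncoherent(my_dev, size, vaddr, handle, DMA_FROM_DEVICE);
 */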

static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	/* DMA must never operate on areas that might be remapped. */
	if (dev_WARN_ONCE(dev, is_vmalloc_addr(ptr),
			  "rejecting DMA map of vmalloc memory\n"))
		return DMA_MAPPING_ERROR;
	debug_dma_map_single(dev, ptr, size);
	return dma_map_page_attrs(dev, virt_to_page(ptr), offset_in_page(ptr),
			size, dir, attrs);
}

static inline void dma_unmap_single_attrs(struct device *dev, dma_addr_t addr,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	return dma_unmap_page_attrs(dev, addr, size, dir, attrs);
}

static inline void dma_sync_single_range_for_cpu(struct device *dev,
		dma_addr_t addr, unsigned long offset, size_t size,
		enum dma_data_direction dir)
{
	return dma_sync_single_for_cpu(dev, addr + offset, size, dir);
}

static inline void dma_sync_single_range_for_device(struct device *dev,
		dma_addr_t addr, unsigned long offset, size_t size,
		enum dma_data_direction dir)
{
	return dma_sync_single_for_device(dev, addr + offset, size, dir);
}
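
/*
 * Partial-sync sketch (illustrative; my_dev, dma, offset and used are
 * hypothetical): when the device touched only part of a mapping, syncing
 * just that range is cheaper than syncing the whole buffer.
 *
 *	dma_sync_single_range_for_cpu(my_dev, dma, offset, used,
 *			DMA_FROM_DEVICE);
 *	... CPU inspects the bytes at "offset" ...
 *	dma_sync_single_range_for_device(my_dev, dma, offset, used,
 *			DMA_FROM_DEVICE);
 */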

/**
 * dma_unmap_sgtable - Unmap the given buffer for DMA
 * @dev: The device for which to perform the DMA operation
 * @sgt: The sg_table object describing the buffer
 * @dir: DMA direction
 * @attrs: Optional DMA attributes for the unmap operation
 *
 * Unmaps a buffer described by a scatterlist stored in the given sg_table
 * object for the @dir DMA operation by the @dev device. After this function
 * the ownership of the buffer is transferred back to the CPU domain.
 */
static inline void dma_unmap_sgtable(struct device *dev, struct sg_table *sgt,
		enum dma_data_direction dir, unsigned long attrs)
{
	dma_unmap_sg_attrs(dev, sgt->sgl, sgt->orig_nents, dir, attrs);
}
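
/*
 * sg_table round-trip sketch (illustrative; my_dev and sgt are hypothetical).
 * dma_map_sgtable() returns 0 on success or a negative errno and records the
 * number of mapped entries in sgt->nents:
 *
 *	if (dma_map_sgtable(my_dev, sgt, DMA_TO_DEVICE, 0))
 *		return -EIO;
 *	... hand sgt->sgl / sgt->nents to the hardware ...
 *	dma_unmap_sgtable(my_dev, sgt, DMA_TO_DEVICE, 0);
 */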

/**
 * dma_sync_sgtable_for_cpu - Synchronize the given buffer for CPU access
 * @dev: The device for which to perform the DMA operation
 * @sgt: The sg_table object describing the buffer
 * @dir: DMA direction
 *
 * Performs the needed cache synchronization and moves the ownership of the
 * buffer back to the CPU domain, so it is safe to perform any access to it
 * by the CPU. Before doing any further DMA operations, one has to transfer
 * the ownership of the buffer back to the DMA domain by calling
 * dma_sync_sgtable_for_device().
 */
static inline void dma_sync_sgtable_for_cpu(struct device *dev,
		struct sg_table *sgt, enum dma_data_direction dir)
{
	dma_sync_sg_for_cpu(dev, sgt->sgl, sgt->orig_nents, dir);
}

/**
 * dma_sync_sgtable_for_device - Synchronize the given buffer for DMA
 * @dev: The device for which to perform the DMA operation
 * @sgt: The sg_table object describing the buffer
 * @dir: DMA direction
 *
 * Performs the needed cache synchronization and moves the ownership of the
 * buffer back to the DMA domain, so it is safe to perform the DMA operation.
 * Once finished, one has to call dma_sync_sgtable_for_cpu() or
 * dma_unmap_sgtable().
 */
static inline void dma_sync_sgtable_for_device(struct device *dev,
		struct sg_table *sgt, enum dma_data_direction dir)
{
	dma_sync_sg_for_device(dev, sgt->sgl, sgt->orig_nents, dir);
}
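
/*
 * Ownership hand-over sketch (illustrative; my_dev and sgt are hypothetical)
 * for a mapped sg_table that is reused across several device operations:
 *
 *	dma_sync_sgtable_for_cpu(my_dev, sgt, DMA_BIDIRECTIONAL);
 *	... CPU reads and updates the buffer ...
 *	dma_sync_sgtable_for_device(my_dev, sgt, DMA_BIDIRECTIONAL);
 *	... device may access the buffer again ...
 */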

#define dma_map_single(d, a, s, r) dma_map_single_attrs(d, a, s, r, 0)
#define dma_unmap_single(d, a, s, r) dma_unmap_single_attrs(d, a, s, r, 0)
#define dma_map_sg(d, s, n, r) dma_map_sg_attrs(d, s, n, r, 0)
#define dma_unmap_sg(d, s, n, r) dma_unmap_sg_attrs(d, s, n, r, 0)
#define dma_map_page(d, p, o, s, r) dma_map_page_attrs(d, p, o, s, r, 0)
#define dma_unmap_page(d, a, s, r) dma_unmap_page_attrs(d, a, s, r, 0)
#define dma_get_sgtable(d, t, v, h, s) dma_get_sgtable_attrs(d, t, v, h, s, 0)
#define dma_mmap_coherent(d, v, c, h, s) dma_mmap_attrs(d, v, c, h, s, 0)

static inline void *dma_alloc_coherent(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp)
{
	return dma_alloc_attrs(dev, size, dma_handle, gfp,
			(gfp & __GFP_NOWARN) ? DMA_ATTR_NO_WARN : 0);
}
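
/*
 * Coherent allocation sketch (illustrative; my_dev, struct my_desc, RING_SIZE
 * and ring_dma are hypothetical): coherent memory suits long-lived structures
 * shared between CPU and device, such as descriptor rings, where per-access
 * sync calls would be impractical.
 *
 *	struct my_desc *ring;
 *	dma_addr_t ring_dma;
 *
 *	ring = dma_alloc_coherent(my_dev, RING_SIZE, &ring_dma, GFP_KERNEL);
 *	if (!ring)
 *		return -ENOMEM;
 *	... both CPU and device use the ring without explicit syncs ...
 *	dma_free_coherent(my_dev, RING_SIZE, ring, ring_dma);
 */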

static inline void dma_free_coherent(struct device *dev, size_t size,
		void *cpu_addr, dma_addr_t dma_handle)
{
	return dma_free_attrs(dev, size, cpu_addr, dma_handle, 0);
}

static inline u64 dma_get_mask(struct device *dev)
{
	if (dev->dma_mask && *dev->dma_mask)
		return *dev->dma_mask;
	return DMA_BIT_MASK(32);
}

/*
 * Set both the DMA mask and the coherent DMA mask to the same thing.
 * Note that we don't check the return value from dma_set_coherent_mask()
 * as the DMA API guarantees that the coherent DMA mask can be set to
 * the same or smaller than the streaming DMA mask.
 */
static inline int dma_set_mask_and_coherent(struct device *dev, u64 mask)
{
	int rc = dma_set_mask(dev, mask);
	if (rc == 0)
		dma_set_coherent_mask(dev, mask);
	return rc;
}
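
/*
 * Probe-time mask negotiation sketch (illustrative; my_dev is hypothetical):
 * try the widest mask the hardware supports and fall back to 32 bits if the
 * platform rejects it.
 *
 *	if (dma_set_mask_and_coherent(my_dev, DMA_BIT_MASK(64)) &&
 *	    dma_set_mask_and_coherent(my_dev, DMA_BIT_MASK(32)))
 *		return -EIO;
 */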

/*
 * Similar to the above, except it deals with the case where the device
 * does not have dev->dma_mask appropriately set up.
 */
static inline int dma_coerce_mask_and_coherent(struct device *dev, u64 mask)
{
	dev->dma_mask = &dev->coherent_dma_mask;
	return dma_set_mask_and_coherent(dev, mask);
}

/**
 * dma_addressing_limited - return if the device is addressing limited
 * @dev: device to check
 *
 * Return %true if the device's DMA mask is too small to address all memory in
 * the system, else %false. Lack of addressing bits is the prime reason for
 * bounce buffering, but might not be the only one.
 */
static inline bool dma_addressing_limited(struct device *dev)
{
	return min_not_zero(dma_get_mask(dev), dev->bus_dma_limit) <
			dma_get_required_mask(dev);
}

static inline unsigned int dma_get_max_seg_size(struct device *dev)
{
	if (dev->dma_parms && dev->dma_parms->max_segment_size)
		return dev->dma_parms->max_segment_size;
	return SZ_64K;
}

static inline int dma_set_max_seg_size(struct device *dev, unsigned int size)
{
	if (dev->dma_parms) {
		dev->dma_parms->max_segment_size = size;
		return 0;
	}
	return -EIO;
}
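
/*
 * Segment-limit sketch (illustrative; my_dev is hypothetical): a driver
 * whose bus code allocated dev->dma_parms can cap the size of each
 * scatterlist segment the mapping code may produce. The call fails with
 * -EIO when dma_parms is absent:
 *
 *	if (dma_set_max_seg_size(my_dev, SZ_1M))
 *		dev_warn(my_dev, "no dma_parms, keeping default segment size\n");
 */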

static inline unsigned long dma_get_seg_boundary(struct device *dev)
{
	if (dev->dma_parms && dev->dma_parms->segment_boundary_mask)
		return dev->dma_parms->segment_boundary_mask;
	return ULONG_MAX;
}

/**
 * dma_get_seg_boundary_nr_pages - return the segment boundary in "page" units
 * @dev: device to query the boundary for
 * @page_shift: ilog2() of the IOMMU page size
 *
 * Return the segment boundary in IOMMU page units (which may be different from
 * the CPU page size) for the passed in device.
 *
 * If @dev is NULL a boundary of U32_MAX is assumed, this case is just for
 * non-DMA API callers.
 */
static inline unsigned long dma_get_seg_boundary_nr_pages(struct device *dev,
		unsigned int page_shift)
{
	if (!dev)
		return (U32_MAX >> page_shift) + 1;
	return (dma_get_seg_boundary(dev) >> page_shift) + 1;
}

static inline int dma_set_seg_boundary(struct device *dev, unsigned long mask)
{
	if (dev->dma_parms) {
		dev->dma_parms->segment_boundary_mask = mask;
		return 0;
	}
	return -EIO;
}

static inline unsigned int dma_get_min_align_mask(struct device *dev)
{
	if (dev->dma_parms)
		return dev->dma_parms->min_align_mask;
	return 0;
}

static inline int dma_set_min_align_mask(struct device *dev,
		unsigned int min_align_mask)
{
	if (WARN_ON_ONCE(!dev->dma_parms))
		return -EIO;
	dev->dma_parms->min_align_mask = min_align_mask;
	return 0;
}

static inline int dma_get_cache_alignment(void)
{
#ifdef ARCH_DMA_MINALIGN
	return ARCH_DMA_MINALIGN;
#endif
	return 1;
}

static inline void *dmam_alloc_coherent(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp)
{
	return dmam_alloc_attrs(dev, size, dma_handle, gfp,
			(gfp & __GFP_NOWARN) ? DMA_ATTR_NO_WARN : 0);
}

static inline void *dma_alloc_wc(struct device *dev, size_t size,
		dma_addr_t *dma_addr, gfp_t gfp)
{
	unsigned long attrs = DMA_ATTR_WRITE_COMBINE;

	if (gfp & __GFP_NOWARN)
		attrs |= DMA_ATTR_NO_WARN;

	return dma_alloc_attrs(dev, size, dma_addr, gfp, attrs);
}

static inline void dma_free_wc(struct device *dev, size_t size,
		void *cpu_addr, dma_addr_t dma_addr)
{
	return dma_free_attrs(dev, size, cpu_addr, dma_addr,
			DMA_ATTR_WRITE_COMBINE);
}

static inline int dma_mmap_wc(struct device *dev,
		struct vm_area_struct *vma,
		void *cpu_addr, dma_addr_t dma_addr,
		size_t size)
{
	return dma_mmap_attrs(dev, vma, cpu_addr, dma_addr, size,
			DMA_ATTR_WRITE_COMBINE);
}

#ifdef CONFIG_NEED_DMA_MAP_STATE
#define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME) dma_addr_t ADDR_NAME
#define DEFINE_DMA_UNMAP_LEN(LEN_NAME) __u32 LEN_NAME
#define dma_unmap_addr(PTR, ADDR_NAME) ((PTR)->ADDR_NAME)
#define dma_unmap_addr_set(PTR, ADDR_NAME, VAL) (((PTR)->ADDR_NAME) = (VAL))
#define dma_unmap_len(PTR, LEN_NAME) ((PTR)->LEN_NAME)
#define dma_unmap_len_set(PTR, LEN_NAME, VAL) (((PTR)->LEN_NAME) = (VAL))
#else
#define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME)
#define DEFINE_DMA_UNMAP_LEN(LEN_NAME)
#define dma_unmap_addr(PTR, ADDR_NAME) (0)
#define dma_unmap_addr_set(PTR, ADDR_NAME, VAL) do { } while (0)
#define dma_unmap_len(PTR, LEN_NAME) (0)
#define dma_unmap_len_set(PTR, LEN_NAME, VAL) do { } while (0)
#endif
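
/*
 * Map-state sketch (illustrative; struct my_tx_buf, buf, dma and size are
 * hypothetical): drivers embed the address/length needed for unmap via these
 * macros, so the storage and the accessors compile away entirely when
 * CONFIG_NEED_DMA_MAP_STATE is not set.
 *
 *	struct my_tx_buf {
 *		struct sk_buff *skb;
 *		DEFINE_DMA_UNMAP_ADDR(mapping);
 *		DEFINE_DMA_UNMAP_LEN(len);
 *	};
 *
 *	dma_unmap_addr_set(buf, mapping, dma);
 *	dma_unmap_len_set(buf, len, size);
 *	...
 *	dma_unmap_single(dev, dma_unmap_addr(buf, mapping),
 *			 dma_unmap_len(buf, len), DMA_TO_DEVICE);
 */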

#endif /* _LINUX_DMA_MAPPING_H */