/* SPDX-License-Identifier: GPL-2.0 */
/*
 * This header is for implementations of dma_map_ops and related code.
 * It should not be included in drivers just using the DMA API.
 */
#ifndef _LINUX_DMA_MAP_OPS_H
#define _LINUX_DMA_MAP_OPS_H

#include <linux/dma-mapping.h>
#include <linux/pgtable.h>
#include <linux/slab.h>

struct cma;

/*
 * Values for struct dma_map_ops.flags:
 *
 * DMA_F_PCI_P2PDMA_SUPPORTED: Indicates the dma_map_ops implementation can
 * handle PCI P2PDMA pages in the map_sg/unmap_sg operation.
 */
#define DMA_F_PCI_P2PDMA_SUPPORTED	(1 << 0)

struct dma_map_ops {
	unsigned int flags;

	void *(*alloc)(struct device *dev, size_t size,
			dma_addr_t *dma_handle, gfp_t gfp,
			unsigned long attrs);
	void (*free)(struct device *dev, size_t size, void *vaddr,
			dma_addr_t dma_handle, unsigned long attrs);
	struct page *(*alloc_pages)(struct device *dev, size_t size,
			dma_addr_t *dma_handle, enum dma_data_direction dir,
			gfp_t gfp);
	void (*free_pages)(struct device *dev, size_t size, struct page *vaddr,
			dma_addr_t dma_handle, enum dma_data_direction dir);
	struct sg_table *(*alloc_noncontiguous)(struct device *dev, size_t size,
			enum dma_data_direction dir, gfp_t gfp,
			unsigned long attrs);
	void (*free_noncontiguous)(struct device *dev, size_t size,
			struct sg_table *sgt, enum dma_data_direction dir);
	int (*mmap)(struct device *, struct vm_area_struct *,
			void *, dma_addr_t, size_t, unsigned long attrs);

	int (*get_sgtable)(struct device *dev, struct sg_table *sgt,
			void *cpu_addr, dma_addr_t dma_addr, size_t size,
			unsigned long attrs);

	dma_addr_t (*map_page)(struct device *dev, struct page *page,
			unsigned long offset, size_t size,
			enum dma_data_direction dir, unsigned long attrs);
	void (*unmap_page)(struct device *dev, dma_addr_t dma_handle,
			size_t size, enum dma_data_direction dir,
			unsigned long attrs);
	/*
	 * map_sg should return a negative error code on error. See
	 * dma_map_sgtable() for a list of appropriate error codes
	 * and their meanings.
	 */
	int (*map_sg)(struct device *dev, struct scatterlist *sg, int nents,
			enum dma_data_direction dir, unsigned long attrs);
	void (*unmap_sg)(struct device *dev, struct scatterlist *sg, int nents,
			enum dma_data_direction dir, unsigned long attrs);
	dma_addr_t (*map_resource)(struct device *dev, phys_addr_t phys_addr,
			size_t size, enum dma_data_direction dir,
			unsigned long attrs);
	void (*unmap_resource)(struct device *dev, dma_addr_t dma_handle,
			size_t size, enum dma_data_direction dir,
			unsigned long attrs);
	void (*sync_single_for_cpu)(struct device *dev, dma_addr_t dma_handle,
			size_t size, enum dma_data_direction dir);
	void (*sync_single_for_device)(struct device *dev,
			dma_addr_t dma_handle, size_t size,
			enum dma_data_direction dir);
	void (*sync_sg_for_cpu)(struct device *dev, struct scatterlist *sg,
			int nents, enum dma_data_direction dir);
	void (*sync_sg_for_device)(struct device *dev, struct scatterlist *sg,
			int nents, enum dma_data_direction dir);
	void (*cache_sync)(struct device *dev, void *vaddr, size_t size,
			enum dma_data_direction direction);
	int (*dma_supported)(struct device *dev, u64 mask);
	u64 (*get_required_mask)(struct device *dev);
	size_t (*max_mapping_size)(struct device *dev);
	size_t (*opt_mapping_size)(void);
	unsigned long (*get_merge_boundary)(struct device *dev);
};
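
/*
 * Illustrative sketch only, not part of this header: a bus or platform layer
 * that needs its own mapping behaviour typically implements a subset of the
 * callbacks above and installs them with set_dma_ops() when it discovers the
 * device.  All names below (my_bus_map_page, my_bus_dma_ops) are made up for
 * the example, and the trivial 1:1 mapping stands in for real translation:
 *
 *	static dma_addr_t my_bus_map_page(struct device *dev, struct page *page,
 *			unsigned long offset, size_t size,
 *			enum dma_data_direction dir, unsigned long attrs)
 *	{
 *		return page_to_phys(page) + offset;
 *	}
 *
 *	static const struct dma_map_ops my_bus_dma_ops = {
 *		.map_page	= my_bus_map_page,
 *	};
 *
 *	set_dma_ops(dev, &my_bus_dma_ops);
 */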

#ifdef CONFIG_DMA_OPS
#include <asm/dma-mapping.h>

static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
{
	if (dev->dma_ops)
		return dev->dma_ops;
	return get_arch_dma_ops();
}

static inline void set_dma_ops(struct device *dev,
			       const struct dma_map_ops *dma_ops)
{
	dev->dma_ops = dma_ops;
}
#else /* CONFIG_DMA_OPS */
static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
{
	return NULL;
}
static inline void set_dma_ops(struct device *dev,
			       const struct dma_map_ops *dma_ops)
{
}
#endif /* CONFIG_DMA_OPS */
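
/*
 * For orientation only: the DMA API core in kernel/dma/mapping.c dispatches
 * through get_dma_ops().  A heavily simplified sketch of the map_page path
 * (the real code also considers dma-direct short cuts, bus offsets and
 * debugging) looks roughly like:
 *
 *	const struct dma_map_ops *ops = get_dma_ops(dev);
 *	dma_addr_t addr;
 *
 *	if (!ops)
 *		addr = dma_direct_map_page(dev, page, offset, size, dir, attrs);
 *	else
 *		addr = ops->map_page(dev, page, offset, size, dir, attrs);
 */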

#ifdef CONFIG_DMA_CMA
extern struct cma *dma_contiguous_default_area;

static inline struct cma *dev_get_cma_area(struct device *dev)
{
	if (dev && dev->cma_area)
		return dev->cma_area;
	return dma_contiguous_default_area;
}

void dma_contiguous_reserve(phys_addr_t addr_limit);
int __init dma_contiguous_reserve_area(phys_addr_t size, phys_addr_t base,
		phys_addr_t limit, struct cma **res_cma, bool fixed);

struct page *dma_alloc_from_contiguous(struct device *dev, size_t count,
		unsigned int order, bool no_warn);
bool dma_release_from_contiguous(struct device *dev, struct page *pages,
		int count);
struct page *dma_alloc_contiguous(struct device *dev, size_t size, gfp_t gfp);
void dma_free_contiguous(struct device *dev, struct page *page, size_t size);

void dma_contiguous_early_fixup(phys_addr_t base, unsigned long size);
#else /* CONFIG_DMA_CMA */
static inline struct cma *dev_get_cma_area(struct device *dev)
{
	return NULL;
}
static inline void dma_contiguous_reserve(phys_addr_t limit)
{
}
static inline int dma_contiguous_reserve_area(phys_addr_t size,
		phys_addr_t base, phys_addr_t limit, struct cma **res_cma,
		bool fixed)
{
	return -ENOSYS;
}
static inline struct page *dma_alloc_from_contiguous(struct device *dev,
		size_t count, unsigned int order, bool no_warn)
{
	return NULL;
}
static inline bool dma_release_from_contiguous(struct device *dev,
		struct page *pages, int count)
{
	return false;
}
/* Use fallback alloc() and free() when CONFIG_DMA_CMA=n */
static inline struct page *dma_alloc_contiguous(struct device *dev, size_t size,
		gfp_t gfp)
{
	return NULL;
}
static inline void dma_free_contiguous(struct device *dev, struct page *page,
		size_t size)
{
	__free_pages(page, get_order(size));
}
#endif /* CONFIG_DMA_CMA */
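
/*
 * Typical consumer pattern for the contiguous helpers above, roughly what
 * dma-direct does when allocating backing pages (simplified, error handling
 * and zone/GFP adjustments elided):
 *
 *	struct page *page;
 *
 *	page = dma_alloc_contiguous(dev, size, gfp);
 *	if (!page)
 *		page = alloc_pages(gfp, get_order(size));
 *	...
 *	dma_free_contiguous(dev, page, size);
 *
 * dma_free_contiguous() hands pages back to CMA when they came from there and
 * otherwise falls back to __free_pages(), so the caller does not need to
 * remember which path the allocation took.
 */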

#ifdef CONFIG_DMA_DECLARE_COHERENT
int dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
		dma_addr_t device_addr, size_t size);
void dma_release_coherent_memory(struct device *dev);
int dma_alloc_from_dev_coherent(struct device *dev, ssize_t size,
		dma_addr_t *dma_handle, void **ret);
int dma_release_from_dev_coherent(struct device *dev, int order, void *vaddr);
int dma_mmap_from_dev_coherent(struct device *dev, struct vm_area_struct *vma,
		void *cpu_addr, size_t size, int *ret);
#else
static inline int dma_declare_coherent_memory(struct device *dev,
		phys_addr_t phys_addr, dma_addr_t device_addr, size_t size)
{
	return -ENOSYS;
}

#define dma_alloc_from_dev_coherent(dev, size, handle, ret) (0)
#define dma_release_from_dev_coherent(dev, order, vaddr) (0)
#define dma_mmap_from_dev_coherent(dev, vma, vaddr, order, ret) (0)
static inline void dma_release_coherent_memory(struct device *dev) { }
#endif /* CONFIG_DMA_DECLARE_COHERENT */
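
/*
 * Example of the intended use, with made-up addresses: a platform driver for
 * a device that must satisfy its coherent allocations from a dedicated
 * on-board SRAM window could declare that window at probe time:
 *
 *	ret = dma_declare_coherent_memory(dev, 0x90000000, 0x90000000, SZ_1M);
 *	if (ret)
 *		return ret;
 *
 * dma_alloc_coherent() calls for this device are then satisfied from the
 * declared region, and dma_release_coherent_memory() undoes the declaration
 * (typically on remove).
 */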

#ifdef CONFIG_DMA_GLOBAL_POOL
void *dma_alloc_from_global_coherent(struct device *dev, ssize_t size,
		dma_addr_t *dma_handle);
int dma_release_from_global_coherent(int order, void *vaddr);
int dma_mmap_from_global_coherent(struct vm_area_struct *vma, void *cpu_addr,
		size_t size, int *ret);
int dma_init_global_coherent(phys_addr_t phys_addr, size_t size);
#else
static inline void *dma_alloc_from_global_coherent(struct device *dev,
		ssize_t size, dma_addr_t *dma_handle)
{
	return NULL;
}
static inline int dma_release_from_global_coherent(int order, void *vaddr)
{
	return 0;
}
static inline int dma_mmap_from_global_coherent(struct vm_area_struct *vma,
		void *cpu_addr, size_t size, int *ret)
{
	return 0;
}
#endif /* CONFIG_DMA_GLOBAL_POOL */

/*
 * This is the actual return value from the ->alloc_noncontiguous method.
 * The users of the DMA API should only care about the sg_table, but to make
 * the DMA-API internal vmapping and freeing easier we stash away the page
 * array as well (except for the fallback case). This can go away any time,
 * e.g. when a vmap-variant that takes a scatterlist comes along.
 */
struct dma_sgt_handle {
	struct sg_table sgt;
	struct page **pages;
};
#define sgt_handle(sgt) \
	container_of((sgt), struct dma_sgt_handle, sgt)
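
/*
 * Sketch of how an ->alloc_noncontiguous/->free_noncontiguous pair is
 * expected to use this handle (hypothetical helpers, allocation details
 * elided): the implementation embeds the sg_table in a dma_sgt_handle so
 * that the free side can recover the page array with sgt_handle().
 *
 *	static struct sg_table *my_alloc_noncontiguous(struct device *dev,
 *			size_t size, enum dma_data_direction dir, gfp_t gfp,
 *			unsigned long attrs)
 *	{
 *		struct dma_sgt_handle *sh = kmalloc(sizeof(*sh), gfp);
 *
 *		...allocate sh->pages and build sh->sgt from them...
 *		return sh ? &sh->sgt : NULL;
 *	}
 *
 *	static void my_free_noncontiguous(struct device *dev, size_t size,
 *			struct sg_table *sgt, enum dma_data_direction dir)
 *	{
 *		struct dma_sgt_handle *sh = sgt_handle(sgt);
 *
 *		...unmap and free sh->pages...
 *		kfree(sh);
 *	}
 */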

int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs);
int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs);
struct page *dma_common_alloc_pages(struct device *dev, size_t size,
		dma_addr_t *dma_handle, enum dma_data_direction dir, gfp_t gfp);
void dma_common_free_pages(struct device *dev, size_t size, struct page *vaddr,
		dma_addr_t dma_handle, enum dma_data_direction dir);

struct page **dma_common_find_pages(void *cpu_addr);
void *dma_common_contiguous_remap(struct page *page, size_t size, pgprot_t prot,
		const void *caller);
void *dma_common_pages_remap(struct page **pages, size_t size, pgprot_t prot,
		const void *caller);
void dma_common_free_remap(void *cpu_addr, size_t size);

struct page *dma_alloc_from_pool(struct device *dev, size_t size,
		void **cpu_addr, gfp_t flags,
		bool (*phys_addr_ok)(struct device *, phys_addr_t, size_t));
bool dma_free_from_pool(struct device *dev, void *start, size_t size);

int dma_direct_set_offset(struct device *dev, phys_addr_t cpu_start,
		dma_addr_t dma_start, u64 size);
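
/*
 * dma_direct_set_offset() is meant for arch/firmware glue on systems where
 * RAM shows up at a different address on the device's bus.  Hypothetical
 * example: CPU physical 0x80000000..0xbfffffff is seen by the device at bus
 * address 0:
 *
 *	ret = dma_direct_set_offset(dev, 0x80000000, 0x0, SZ_1G);
 */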

#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE) || \
	defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU) || \
	defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL)
extern bool dma_default_coherent;
static inline bool dev_is_dma_coherent(struct device *dev)
{
	return dev->dma_coherent;
}
#else
#define dma_default_coherent true

static inline bool dev_is_dma_coherent(struct device *dev)
{
	return true;
}
#endif /* CONFIG_ARCH_HAS_SYNC_DMA_FOR_{DEVICE,CPU,CPU_ALL} */

/*
 * Check whether potential kmalloc() buffers are safe for non-coherent DMA.
 */
static inline bool dma_kmalloc_safe(struct device *dev,
				    enum dma_data_direction dir)
{
	/*
	 * If DMA bouncing of kmalloc() buffers is disabled, the kmalloc()
	 * caches have already been aligned to a DMA-safe size.
	 */
	if (!IS_ENABLED(CONFIG_DMA_BOUNCE_UNALIGNED_KMALLOC))
		return true;

	/*
	 * kmalloc() buffers are DMA-safe irrespective of size if the device
	 * is coherent or the direction is DMA_TO_DEVICE (non-destructive
	 * cache maintenance and benign cache line evictions).
	 */
	if (dev_is_dma_coherent(dev) || dir == DMA_TO_DEVICE)
		return true;

	return false;
}

/*
 * Check whether the given size, assuming it is for a kmalloc()'ed buffer, is
 * sufficiently aligned for non-coherent DMA.
 */
static inline bool dma_kmalloc_size_aligned(size_t size)
{
	/*
	 * Larger kmalloc() sizes are guaranteed to be aligned to
	 * ARCH_DMA_MINALIGN.
	 */
	if (size >= 2 * ARCH_DMA_MINALIGN ||
	    IS_ALIGNED(kmalloc_size_roundup(size), dma_get_cache_alignment()))
		return true;

	return false;
}

/*
 * Check whether the given object size may have originated from a kmalloc()
 * buffer with a slab alignment below the DMA-safe alignment and needs
 * bouncing for non-coherent DMA. The pointer alignment is not considered and
 * in-structure DMA-safe offsets are the responsibility of the caller. Such
 * code should use the static ARCH_DMA_MINALIGN for compiler annotations.
 *
 * The heuristics can have false positives, bouncing unnecessarily, though the
 * buffers would be small. False negatives are theoretically possible if, for
 * example, multiple small kmalloc() buffers are coalesced into a larger
 * buffer that passes the alignment check. There are no such known constructs
 * in the kernel.
 */
static inline bool dma_kmalloc_needs_bounce(struct device *dev, size_t size,
					    enum dma_data_direction dir)
{
	return !dma_kmalloc_safe(dev, dir) && !dma_kmalloc_size_aligned(size);
}
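
/*
 * Worked example, assuming an arm64-like configuration where
 * CONFIG_DMA_BOUNCE_UNALIGNED_KMALLOC is enabled and ARCH_DMA_MINALIGN is
 * 128: for a non-coherent device,
 *
 *	buf = kmalloc(96, GFP_KERNEL);
 *	dma_map_single(dev, buf, 96, DMA_FROM_DEVICE);
 *
 * needs bouncing (96 rounds up to a 96-byte slab object, which is not a
 * multiple of the 128-byte DMA alignment), while a 128- or 256-byte
 * allocation, or any DMA_TO_DEVICE mapping, would be mapped in place.
 */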

void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
		gfp_t gfp, unsigned long attrs);
void arch_dma_free(struct device *dev, size_t size, void *cpu_addr,
		dma_addr_t dma_addr, unsigned long attrs);

#ifdef CONFIG_ARCH_HAS_DMA_SET_MASK
void arch_dma_set_mask(struct device *dev, u64 mask);
#else
#define arch_dma_set_mask(dev, mask)	do { } while (0)
#endif

#ifdef CONFIG_MMU
/*
 * Page protection so that devices that can't snoop CPU caches can use the
 * memory coherently. We default to pgprot_noncached which is usually used
 * for ioremap as a safe bet, but architectures can override this with less
 * strict semantics if possible.
 */
#ifndef pgprot_dmacoherent
#define pgprot_dmacoherent(prot)	pgprot_noncached(prot)
#endif

pgprot_t dma_pgprot(struct device *dev, pgprot_t prot, unsigned long attrs);
#else
static inline pgprot_t dma_pgprot(struct device *dev, pgprot_t prot,
		unsigned long attrs)
{
	return prot;	/* no protection bits supported without page tables */
}
#endif /* CONFIG_MMU */
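
/*
 * Rough sketch of how the remapping helpers and dma_pgprot() combine when a
 * non-coherent allocation has to be vmapped with an uncached attribute
 * (simplified from the common remap path):
 *
 *	pgprot_t prot = dma_pgprot(dev, PAGE_KERNEL, attrs);
 *	void *vaddr = dma_common_contiguous_remap(page, size, prot,
 *			__builtin_return_address(0));
 */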

#ifdef CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE
void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
		enum dma_data_direction dir);
#else
static inline void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
		enum dma_data_direction dir)
{
}
#endif /* ARCH_HAS_SYNC_DMA_FOR_DEVICE */

#ifdef CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU
void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
		enum dma_data_direction dir);
#else
static inline void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
		enum dma_data_direction dir)
{
}
#endif /* ARCH_HAS_SYNC_DMA_FOR_CPU */

#ifdef CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL
void arch_sync_dma_for_cpu_all(void);
#else
static inline void arch_sync_dma_for_cpu_all(void)
{
}
#endif /* CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL */
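
/*
 * Rough ordering of the cache maintenance hooks for a streaming mapping on a
 * non-coherent device, as used by dma-direct (simplified):
 *
 *	if (!dev_is_dma_coherent(dev))
 *		arch_sync_dma_for_device(paddr, size, dir);
 *	...device performs DMA...
 *	if (!dev_is_dma_coherent(dev))
 *		arch_sync_dma_for_cpu(paddr, size, dir);
 */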

#ifdef CONFIG_ARCH_HAS_DMA_PREP_COHERENT
void arch_dma_prep_coherent(struct page *page, size_t size);
#else
static inline void arch_dma_prep_coherent(struct page *page, size_t size)
{
}
#endif /* CONFIG_ARCH_HAS_DMA_PREP_COHERENT */

#ifdef CONFIG_ARCH_HAS_DMA_MARK_CLEAN
void arch_dma_mark_clean(phys_addr_t paddr, size_t size);
#else
static inline void arch_dma_mark_clean(phys_addr_t paddr, size_t size)
{
}
#endif /* ARCH_HAS_DMA_MARK_CLEAN */

void *arch_dma_set_uncached(void *addr, size_t size);
void arch_dma_clear_uncached(void *addr, size_t size);

#ifdef CONFIG_ARCH_HAS_DMA_MAP_DIRECT
bool arch_dma_map_page_direct(struct device *dev, phys_addr_t addr);
bool arch_dma_unmap_page_direct(struct device *dev, dma_addr_t dma_handle);
bool arch_dma_map_sg_direct(struct device *dev, struct scatterlist *sg,
		int nents);
bool arch_dma_unmap_sg_direct(struct device *dev, struct scatterlist *sg,
		int nents);
#else
#define arch_dma_map_page_direct(d, a)		(false)
#define arch_dma_unmap_page_direct(d, a)	(false)
#define arch_dma_map_sg_direct(d, s, n)		(false)
#define arch_dma_unmap_sg_direct(d, s, n)	(false)
#endif

#ifdef CONFIG_ARCH_HAS_SETUP_DMA_OPS
void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
		const struct iommu_ops *iommu, bool coherent);
#else
static inline void arch_setup_dma_ops(struct device *dev, u64 dma_base,
		u64 size, const struct iommu_ops *iommu, bool coherent)
{
}
#endif /* CONFIG_ARCH_HAS_SETUP_DMA_OPS */

#ifdef CONFIG_ARCH_HAS_TEARDOWN_DMA_OPS
void arch_teardown_dma_ops(struct device *dev);
#else
static inline void arch_teardown_dma_ops(struct device *dev)
{
}
#endif /* CONFIG_ARCH_HAS_TEARDOWN_DMA_OPS */

#ifdef CONFIG_DMA_API_DEBUG
void dma_debug_add_bus(struct bus_type *bus);
void debug_dma_dump_mappings(struct device *dev);
#else
static inline void dma_debug_add_bus(struct bus_type *bus)
{
}
static inline void debug_dma_dump_mappings(struct device *dev)
{
}
#endif /* CONFIG_DMA_API_DEBUG */

extern const struct dma_map_ops dma_dummy_ops;

enum pci_p2pdma_map_type {
	/*
	 * PCI_P2PDMA_MAP_UNKNOWN: Used internally for indicating the mapping
	 * type hasn't been calculated yet. Functions that return this enum
	 * never return this value.
	 */
	PCI_P2PDMA_MAP_UNKNOWN = 0,

	/*
	 * PCI_P2PDMA_MAP_NOT_SUPPORTED: Indicates the transaction will
	 * traverse the host bridge and the host bridge is not in the
	 * allowlist. DMA Mapping routines should return an error when
	 * this is returned.
	 */
	PCI_P2PDMA_MAP_NOT_SUPPORTED,

	/*
	 * PCI_P2PDMA_MAP_BUS_ADDR: Indicates that two devices can talk to
	 * each other directly through a PCI switch and the transaction will
	 * not traverse the host bridge. Such a mapping should program
	 * the DMA engine with PCI bus addresses.
	 */
	PCI_P2PDMA_MAP_BUS_ADDR,

	/*
	 * PCI_P2PDMA_MAP_THRU_HOST_BRIDGE: Indicates two devices can talk
	 * to each other, but the transaction traverses a host bridge on the
	 * allowlist. In this case, a normal mapping either with CPU physical
	 * addresses (in the case of dma-direct) or IOVA addresses (in the
	 * case of IOMMUs) should be used to program the DMA engine.
	 */
	PCI_P2PDMA_MAP_THRU_HOST_BRIDGE,
};

struct pci_p2pdma_map_state {
	struct dev_pagemap *pgmap;
	int map;
	u64 bus_off;
};

#ifdef CONFIG_PCI_P2PDMA
enum pci_p2pdma_map_type
pci_p2pdma_map_segment(struct pci_p2pdma_map_state *state, struct device *dev,
		       struct scatterlist *sg);
#else /* CONFIG_PCI_P2PDMA */
static inline enum pci_p2pdma_map_type
pci_p2pdma_map_segment(struct pci_p2pdma_map_state *state, struct device *dev,
		       struct scatterlist *sg)
{
	return PCI_P2PDMA_MAP_NOT_SUPPORTED;
}
#endif /* CONFIG_PCI_P2PDMA */
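
/*
 * Rough sketch of how a P2PDMA-aware ->map_sg implementation consults
 * pci_p2pdma_map_segment() for each segment, modelled on the pattern used by
 * the dma-direct and IOMMU mapping code (error unwinding elided).  A
 * PCI_P2PDMA_MAP_BUS_ADDR return means the helper already filled in the bus
 * address, so the segment is skipped; THRU_HOST_BRIDGE segments are mapped
 * like normal memory:
 *
 *	struct pci_p2pdma_map_state p2pdma_state = {};
 *	struct scatterlist *sg;
 *	int i;
 *
 *	for_each_sg(sgl, sg, nents, i) {
 *		if (is_pci_p2pdma_page(sg_page(sg))) {
 *			switch (pci_p2pdma_map_segment(&p2pdma_state, dev, sg)) {
 *			case PCI_P2PDMA_MAP_BUS_ADDR:
 *				continue;
 *			case PCI_P2PDMA_MAP_THRU_HOST_BRIDGE:
 *				break;
 *			default:
 *				return -EREMOTEIO;
 *			}
 *		}
 *		...map this segment through the normal path...
 *	}
 */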

#endif /* _LINUX_DMA_MAP_OPS_H */