// SPDX-License-Identifier: GPL-2.0
/*
 * Helpers for DMA ops implementations. These generally rely on the fact that
 * the allocated memory contains normal pages in the direct kernel mapping.
 */
#include <linux/dma-map-ops.h>
#include <linux/iommu-dma.h>

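/*
 * Return the struct page backing @cpu_addr. The buffer may live in the
 * vmalloc area (e.g. a remapped non-contiguous allocation) or in the
 * kernel direct mapping.
 */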
static struct page *dma_common_vaddr_to_page(void *cpu_addr)
{
        if (is_vmalloc_addr(cpu_addr))
                return vmalloc_to_page(cpu_addr);
        return virt_to_page(cpu_addr);
}

/*
 * Create scatter-list for the already allocated DMA buffer.
 */
int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
                void *cpu_addr, dma_addr_t dma_addr, size_t size,
                unsigned long attrs)
{
        struct page *page = dma_common_vaddr_to_page(cpu_addr);
        int ret;

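        /*
         * The buffer is physically contiguous, so a single scatterlist
         * entry covering the whole (page-aligned) allocation is enough.
         */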
        ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
        if (!ret)
                sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
        return ret;
}

/*
 * Create userspace mapping for the DMA-coherent memory.
 */
int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
                void *cpu_addr, dma_addr_t dma_addr, size_t size,
                unsigned long attrs)
{
#ifdef CONFIG_MMU
        unsigned long user_count = vma_pages(vma);
        unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
        unsigned long off = vma->vm_pgoff;
        struct page *page = dma_common_vaddr_to_page(cpu_addr);
        int ret = -ENXIO;

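        /* Honour mapping attributes such as DMA_ATTR_WRITE_COMBINE. */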
        vma->vm_page_prot = dma_pgprot(dev, vma->vm_page_prot, attrs);

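        /*
         * Buffers from a per-device coherent pool (see
         * dma_declare_coherent_memory()) are mapped by the pool code itself.
         */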
        if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
                return ret;

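        /* Reject windows that start or extend past the end of the buffer. */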
        if (off >= count || user_count > count - off)
                return -ENXIO;

        return remap_pfn_range(vma, vma->vm_start,
                        page_to_pfn(page) + vma->vm_pgoff,
                        user_count << PAGE_SHIFT, vma->vm_page_prot);
#else
        return -ENXIO;
#endif /* CONFIG_MMU */
}

struct page *dma_common_alloc_pages(struct device *dev, size_t size,
                dma_addr_t *dma_handle, enum dma_data_direction dir, gfp_t gfp)
{
        const struct dma_map_ops *ops = get_dma_ops(dev);
        struct page *page;

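        /*
         * Prefer the contiguous allocator (per-device or global CMA areas),
         * then fall back to plain pages from the device's NUMA node.
         */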
        page = dma_alloc_contiguous(dev, size, gfp);
        if (!page)
                page = alloc_pages_node(dev_to_node(dev), gfp, get_order(size));
        if (!page)
                return NULL;

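        /*
         * Establish the device-visible address, either through the IOMMU
         * DMA layer or the arch/bus dma_map_ops. CPU cache maintenance is
         * skipped here; callers are expected to use the dma_sync_*()
         * interfaces around actual DMA transfers.
         */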
        if (use_dma_iommu(dev))
                *dma_handle = iommu_dma_map_page(dev, page, 0, size, dir,
                                                 DMA_ATTR_SKIP_CPU_SYNC);
        else
                *dma_handle = ops->map_page(dev, page, 0, size, dir,
                                            DMA_ATTR_SKIP_CPU_SYNC);
        if (*dma_handle == DMA_MAPPING_ERROR) {
                dma_free_contiguous(dev, page, size);
                return NULL;
        }

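        /*
         * Zero the buffer through the kernel direct mapping; per the file
         * header these pages are always part of it.
         */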
        memset(page_address(page), 0, size);
        return page;
}

void dma_common_free_pages(struct device *dev, size_t size, struct page *page,
                dma_addr_t dma_handle, enum dma_data_direction dir)
{
        const struct dma_map_ops *ops = get_dma_ops(dev);

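        /*
         * Tear down in reverse order: unmap from the device first, then
         * release the pages. dma_free_contiguous() also copes with pages
         * that came from the plain page allocator fallback above.
         */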
        if (use_dma_iommu(dev))
                iommu_dma_unmap_page(dev, dma_handle, size, dir,
                                     DMA_ATTR_SKIP_CPU_SYNC);
        else if (ops->unmap_page)
                ops->unmap_page(dev, dma_handle, size, dir,
                                DMA_ATTR_SKIP_CPU_SYNC);
        dma_free_contiguous(dev, page, size);
}
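
/*
 * Illustrative usage (not part of this file): drivers normally reach these
 * helpers through the generic dma_alloc_pages()/dma_free_pages() interface
 * rather than calling them directly. A minimal sketch, assuming a device
 * "dev" streaming a source buffer "data" of "len" bytes to hardware:
 *
 *	size_t size = 4 * PAGE_SIZE;
 *	dma_addr_t dma_handle;
 *	struct page *page;
 *
 *	page = dma_alloc_pages(dev, size, &dma_handle, DMA_TO_DEVICE,
 *			       GFP_KERNEL);
 *	if (!page)
 *		return -ENOMEM;
 *
 *	memcpy(page_address(page), data, len);
 *	dma_sync_single_for_device(dev, dma_handle, len, DMA_TO_DEVICE);
 *	... kick off the transfer and wait for completion ...
 *
 *	dma_free_pages(dev, size, page, dma_handle, DMA_TO_DEVICE);
 */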