// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2006 Benjamin Herrenschmidt, IBM Corporation
 *
 * Provide default implementations of the DMA mapping callbacks for
 * busses using the iommu infrastructure
 */

#include <linux/dma-direct.h>
#include <linux/pci.h>
#include <asm/iommu.h>

#ifdef CONFIG_ARCH_HAS_DMA_MAP_DIRECT
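/*
 * Hooks used by the generic DMA mapping code when a device has a limited
 * 1:1 (direct) window, e.g. when ibm,pmemory sits above the directly
 * mappable RAM: buffers below dev->bus_dma_limit are mapped directly,
 * anything else falls back to the IOMMU path. A bus_dma_limit of zero
 * means no such limited window exists and these hooks stay out of the way.
 */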
#define can_map_direct(dev, addr) \
	((dev)->bus_dma_limit >= phys_to_dma((dev), (addr)))

bool arch_dma_map_page_direct(struct device *dev, phys_addr_t addr)
{
	if (likely(!dev->bus_dma_limit))
		return false;

	return can_map_direct(dev, addr);
}

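/*
 * A DMA handle at or above the device's DMA offset came from the direct
 * (bypass) window; handles allocated from the IOMMU table sit below it.
 */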
#define is_direct_handle(dev, h) ((h) >= (dev)->archdata.dma_offset)

bool arch_dma_unmap_page_direct(struct device *dev, dma_addr_t dma_handle)
{
	if (likely(!dev->bus_dma_limit))
		return false;

	return is_direct_handle(dev, dma_handle);
}

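/*
 * Map the list directly only if every element fits below the device's bus
 * DMA limit; otherwise the caller falls back to the IOMMU path for the
 * whole scatterlist.
 */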
bool arch_dma_map_sg_direct(struct device *dev, struct scatterlist *sg,
			    int nents)
{
	struct scatterlist *s;
	int i;

	if (likely(!dev->bus_dma_limit))
		return false;

	for_each_sg(sg, s, nents, i) {
		if (!can_map_direct(dev, sg_phys(s) + s->offset + s->length))
			return false;
	}

	return true;
}

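/*
 * Unmap directly only if every handle in the list came from the direct
 * window, mirroring the all-or-nothing decision made at map time.
 */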
bool arch_dma_unmap_sg_direct(struct device *dev, struct scatterlist *sg,
			      int nents)
{
	struct scatterlist *s;
	int i;

	if (likely(!dev->bus_dma_limit))
		return false;

	for_each_sg(sg, s, nents, i) {
		if (!is_direct_handle(dev, s->dma_address + s->length))
			return false;
	}

	return true;
}
#endif /* CONFIG_ARCH_HAS_DMA_MAP_DIRECT */

/*
 * Generic iommu implementation
 */

/* Allocates a contiguous real buffer and creates mappings over it.
 * Returns the virtual address of the buffer and sets dma_handle
 * to the dma address (mapping) of the first page.
 */
static void *dma_iommu_alloc_coherent(struct device *dev, size_t size,
				      dma_addr_t *dma_handle, gfp_t flag,
				      unsigned long attrs)
{
	return iommu_alloc_coherent(dev, get_iommu_table_base(dev), size,
				    dma_handle, dev->coherent_dma_mask, flag,
				    dev_to_node(dev));
}

static void dma_iommu_free_coherent(struct device *dev, size_t size,
				    void *vaddr, dma_addr_t dma_handle,
				    unsigned long attrs)
{
	iommu_free_coherent(get_iommu_table_base(dev), size, vaddr, dma_handle);
}

/* Creates TCEs for a user provided buffer. The user buffer must be
 * contiguous real kernel storage (not vmalloc). The address passed here
 * comprises a page address and offset into that page. The dma_addr_t
 * returned will point to the same byte within the page as was passed in.
 */
static dma_addr_t dma_iommu_map_page(struct device *dev, struct page *page,
				     unsigned long offset, size_t size,
				     enum dma_data_direction direction,
				     unsigned long attrs)
{
	return iommu_map_page(dev, get_iommu_table_base(dev), page, offset,
			      size, dma_get_mask(dev), direction, attrs);
}


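/* Tears down the TCEs created by dma_iommu_map_page(). */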
static void dma_iommu_unmap_page(struct device *dev, dma_addr_t dma_handle,
				 size_t size, enum dma_data_direction direction,
				 unsigned long attrs)
{
	iommu_unmap_page(get_iommu_table_base(dev), dma_handle, size, direction,
			 attrs);
}


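/* Maps a scatter/gather list via TCEs in the device's IOMMU table. */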
static int dma_iommu_map_sg(struct device *dev, struct scatterlist *sglist,
			    int nelems, enum dma_data_direction direction,
			    unsigned long attrs)
{
	return ppc_iommu_map_sg(dev, get_iommu_table_base(dev), sglist, nelems,
				dma_get_mask(dev), direction, attrs);
}

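/* Unmaps a scatter/gather list previously mapped by dma_iommu_map_sg(). */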
static void dma_iommu_unmap_sg(struct device *dev, struct scatterlist *sglist,
			       int nelems, enum dma_data_direction direction,
			       unsigned long attrs)
{
	ppc_iommu_unmap_sg(get_iommu_table_base(dev), sglist, nelems,
			   direction, attrs);
}

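/*
 * Ask the PCI host bridge whether this device can do 64-bit 1:1 (bypass)
 * DMA. Bypass is refused when the fixed mapping is marked weak
 * (iommu_fixed_is_weak) or the PHB provides no iommu_bypass_supported()
 * hook.
 */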
static bool dma_iommu_bypass_supported(struct device *dev, u64 mask)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct pci_controller *phb = pci_bus_to_host(pdev->bus);

	if (iommu_fixed_is_weak || !phb->controller_ops.iommu_bypass_supported)
		return false;
	return phb->controller_ops.iommu_bypass_supported(pdev, mask);
}

/* We support DMA to/from any memory page via the iommu */
int dma_iommu_dma_supported(struct device *dev, u64 mask)
{
	struct iommu_table *tbl;

	if (dev_is_pci(dev) && dma_iommu_bypass_supported(dev, mask)) {
		/*
		 * dma_iommu_bypass_supported() sets dev->bus_dma_limit when
		 * there is a 1:1 mapping but it is somehow limited.
		 * ibm,pmemory is one example.
		 */
		dev->dma_ops_bypass = dev->bus_dma_limit == 0;
		if (!dev->dma_ops_bypass)
			dev_warn(dev,
				 "iommu: 64-bit OK but direct DMA is limited by %llx\n",
				 dev->bus_dma_limit);
		else
			dev_dbg(dev, "iommu: 64-bit OK, using fixed ops\n");
		return 1;
	}

	tbl = get_iommu_table_base(dev);

	if (!tbl) {
		dev_err(dev, "Warning: IOMMU dma not supported: mask 0x%08llx, table unavailable\n", mask);
		return 0;
	}

	if (tbl->it_offset > (mask >> tbl->it_page_shift)) {
		dev_info(dev, "Warning: IOMMU offset too big for device mask\n");
		dev_info(dev, "mask: 0x%08llx, table offset: 0x%08lx\n",
			 mask, tbl->it_offset << tbl->it_page_shift);
		return 0;
	}

	dev_dbg(dev, "iommu: not 64-bit, using default ops\n");
	dev->dma_ops_bypass = false;
	return 1;
}

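/*
 * Report the DMA mask the device would need to address everything it may
 * be handed: the direct-mapping mask when bypass is possible, otherwise a
 * mask covering the end of the IOMMU table's DMA window.
 */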
u64 dma_iommu_get_required_mask(struct device *dev)
{
	struct iommu_table *tbl = get_iommu_table_base(dev);
	u64 mask;

	if (dev_is_pci(dev)) {
		u64 bypass_mask = dma_direct_get_required_mask(dev);

		if (dma_iommu_dma_supported(dev, bypass_mask)) {
			dev_info(dev, "%s: returning bypass mask 0x%llx\n", __func__, bypass_mask);
			return bypass_mask;
		}
	}

	if (!tbl)
		return 0;

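	/*
	 * Build an all-ones mask wide enough to address the end of the
	 * table's DMA window (it_offset + it_size pages, shifted to bytes).
	 */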
	mask = 1ULL << (fls_long(tbl->it_offset + tbl->it_size) +
			tbl->it_page_shift - 1);
	mask += mask - 1;

	return mask;
}

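/*
 * DMA operations used for devices behind an IOMMU: coherent allocations and
 * streaming mappings go through the device's IOMMU table unless the direct
 * bypass path above applies.
 */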
const struct dma_map_ops dma_iommu_ops = {
	.alloc			= dma_iommu_alloc_coherent,
	.free			= dma_iommu_free_coherent,
	.map_sg			= dma_iommu_map_sg,
	.unmap_sg		= dma_iommu_unmap_sg,
	.dma_supported		= dma_iommu_dma_supported,
	.map_page		= dma_iommu_map_page,
	.unmap_page		= dma_iommu_unmap_page,
	.get_required_mask	= dma_iommu_get_required_mask,
	.mmap			= dma_common_mmap,
	.get_sgtable		= dma_common_get_sgtable,
	.alloc_pages		= dma_common_alloc_pages,
	.free_pages		= dma_common_free_pages,
};