// SPDX-License-Identifier: GPL-2.0-only
/*
 * CPU-agnostic ARM page table allocator.
 *
 * ARMv7 Short-descriptor format, supporting
 * - Basic memory attributes
 * - Simplified access permissions (AP[2:1] model)
 * - Backwards-compatible TEX remap
 * - Large pages/supersections (if indicated by the caller)
 *
 * Not supporting:
 * - Legacy access permissions (AP[2:0] model)
 *
 * Almost certainly never supporting:
 * - PXN
 * - Domains
 *
 * Copyright (C) 2014-2015 ARM Limited
 * Copyright (c) 2014-2015 MediaTek Inc.
 */

#define pr_fmt(fmt)	"arm-v7s io-pgtable: " fmt

#include <linux/atomic.h>
#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#include <linux/io-pgtable.h>
#include <linux/iommu.h>
#include <linux/kernel.h>
#include <linux/kmemleak.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>

#include <asm/barrier.h>

/* Struct accessors */
#define io_pgtable_to_data(x)						\
	container_of((x), struct arm_v7s_io_pgtable, iop)

#define io_pgtable_ops_to_data(x)					\
	io_pgtable_to_data(io_pgtable_ops_to_pgtable(x))

/*
 * We have 32 bits total; 12 bits resolved at level 1, 8 bits at level 2,
 * and 12 bits in a page.
 * MediaTek extends this by 2 bits to reach 34 bits: 14 bits at lvl 1 and
 * 8 bits at lvl 2.
 */
#define ARM_V7S_ADDR_BITS		32
#define _ARM_V7S_LVL_BITS(lvl, cfg)	((lvl) == 1 ? ((cfg)->ias - 20) : 8)
#define ARM_V7S_LVL_SHIFT(lvl)		((lvl) == 1 ? 20 : 12)
#define ARM_V7S_TABLE_SHIFT		10

#define ARM_V7S_PTES_PER_LVL(lvl, cfg)	(1 << _ARM_V7S_LVL_BITS(lvl, cfg))
#define ARM_V7S_TABLE_SIZE(lvl, cfg)					\
	(ARM_V7S_PTES_PER_LVL(lvl, cfg) * sizeof(arm_v7s_iopte))

#define ARM_V7S_BLOCK_SIZE(lvl)		(1UL << ARM_V7S_LVL_SHIFT(lvl))
#define ARM_V7S_LVL_MASK(lvl)		((u32)(~0U << ARM_V7S_LVL_SHIFT(lvl)))
#define ARM_V7S_TABLE_MASK		((u32)(~0U << ARM_V7S_TABLE_SHIFT))
#define _ARM_V7S_IDX_MASK(lvl, cfg)	(ARM_V7S_PTES_PER_LVL(lvl, cfg) - 1)
#define ARM_V7S_LVL_IDX(addr, lvl, cfg)	({				\
	int _l = lvl;							\
	((addr) >> ARM_V7S_LVL_SHIFT(_l)) & _ARM_V7S_IDX_MASK(_l, cfg); \
})
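
/*
 * Worked example (standard 32-bit IAS): for IOVA 0x12345000,
 * ARM_V7S_LVL_IDX(iova, 1, cfg) == (0x12345000 >> 20) & 0xfff == 0x123,
 * ARM_V7S_LVL_IDX(iova, 2, cfg) == (0x12345000 >> 12) & 0xff  == 0x45,
 * and the level 1 table is 4096 entries * 4 bytes == 16K.
 */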

/*
 * Large page/supersection entries are effectively a block of 16 page/section
 * entries, along the lines of the LPAE contiguous hint, but all with the
 * same output address. For want of a better common name we'll call them
 * "contiguous" versions of their respective page/section entries here, but
 * noting the distinction (WRT TLB maintenance) that they represent *one*
 * entry repeated 16 times, not 16 separate entries (as in the LPAE case).
 */
#define ARM_V7S_CONT_PAGES		16

/* PTE type bits: these are all mixed up with XN/PXN bits in most cases */
#define ARM_V7S_PTE_TYPE_TABLE		0x1
#define ARM_V7S_PTE_TYPE_PAGE		0x2
#define ARM_V7S_PTE_TYPE_CONT_PAGE	0x1

#define ARM_V7S_PTE_IS_VALID(pte)	(((pte) & 0x3) != 0)
#define ARM_V7S_PTE_IS_TABLE(pte, lvl) \
	((lvl) == 1 && (((pte) & 0x3) == ARM_V7S_PTE_TYPE_TABLE))
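
/*
 * To unpack the encodings above: at level 1, bits [1:0] == 01 denote a table
 * and 10 a section, while at level 2, 1x is a small page (with XN in bit 0)
 * and 01 a large page. A large page thus shares type 0x1 with a table, which
 * is why ARM_V7S_PTE_IS_TABLE() only ever matches at level 1.
 */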

/* Page table bits */
#define ARM_V7S_ATTR_XN(lvl)		BIT(4 * (2 - (lvl)))
#define ARM_V7S_ATTR_B			BIT(2)
#define ARM_V7S_ATTR_C			BIT(3)
#define ARM_V7S_ATTR_NS_TABLE		BIT(3)
#define ARM_V7S_ATTR_NS_SECTION		BIT(19)

#define ARM_V7S_CONT_SECTION		BIT(18)
#define ARM_V7S_CONT_PAGE_XN_SHIFT	15

/*
 * The attribute bits are consistently ordered*, but occupy bits [17:10] of
 * a level 1 PTE vs. bits [11:4] at level 2. Thus we define the individual
 * fields relative to that 8-bit block, plus a total shift relative to the PTE.
 */
#define ARM_V7S_ATTR_SHIFT(lvl)		(16 - (lvl) * 6)

#define ARM_V7S_ATTR_MASK		0xff
#define ARM_V7S_ATTR_AP0		BIT(0)
#define ARM_V7S_ATTR_AP1		BIT(1)
#define ARM_V7S_ATTR_AP2		BIT(5)
#define ARM_V7S_ATTR_S			BIT(6)
#define ARM_V7S_ATTR_NG			BIT(7)
#define ARM_V7S_TEX_SHIFT		2
#define ARM_V7S_TEX_MASK		0x7
#define ARM_V7S_ATTR_TEX(val)		(((val) & ARM_V7S_TEX_MASK) << ARM_V7S_TEX_SHIFT)

/* MediaTek extends the PA with the bits below, carrying PA bits 32/33/34 */
#define ARM_V7S_ATTR_MTK_PA_BIT32	BIT(9)
#define ARM_V7S_ATTR_MTK_PA_BIT33	BIT(4)
#define ARM_V7S_ATTR_MTK_PA_BIT34	BIT(5)

/* *well, except for TEX on level 2 large pages, of course :( */
#define ARM_V7S_CONT_PAGE_TEX_SHIFT	6
#define ARM_V7S_CONT_PAGE_TEX_MASK	(ARM_V7S_TEX_MASK << ARM_V7S_CONT_PAGE_TEX_SHIFT)

/* Simplified access permissions */
#define ARM_V7S_PTE_AF			ARM_V7S_ATTR_AP0
#define ARM_V7S_PTE_AP_UNPRIV		ARM_V7S_ATTR_AP1
#define ARM_V7S_PTE_AP_RDONLY		ARM_V7S_ATTR_AP2
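
/*
 * Under the simplified AP[2:1] model, AP0 serves as the Access flag, AP1
 * grants unprivileged access, and AP2 makes the mapping read-only; hence
 * the aliases above.
 */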

/* Register bits */
#define ARM_V7S_RGN_NC			0
#define ARM_V7S_RGN_WBWA		1
#define ARM_V7S_RGN_WT			2
#define ARM_V7S_RGN_WB			3

#define ARM_V7S_PRRR_TYPE_DEVICE	1
#define ARM_V7S_PRRR_TYPE_NORMAL	2
#define ARM_V7S_PRRR_TR(n, type)	(((type) & 0x3) << ((n) * 2))
#define ARM_V7S_PRRR_DS0		BIT(16)
#define ARM_V7S_PRRR_DS1		BIT(17)
#define ARM_V7S_PRRR_NS0		BIT(18)
#define ARM_V7S_PRRR_NS1		BIT(19)
#define ARM_V7S_PRRR_NOS(n)		BIT((n) + 24)

#define ARM_V7S_NMRR_IR(n, attr)	(((attr) & 0x3) << ((n) * 2))
#define ARM_V7S_NMRR_OR(n, attr)	(((attr) & 0x3) << ((n) * 2 + 16))

#define ARM_V7S_TTBR_S			BIT(1)
#define ARM_V7S_TTBR_NOS		BIT(5)
#define ARM_V7S_TTBR_ORGN_ATTR(attr)	(((attr) & 0x3) << 3)
#define ARM_V7S_TTBR_IRGN_ATTR(attr)					\
	((((attr) & 0x1) << 6) | (((attr) & 0x2) >> 1))
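
/*
 * Note the IRGN contortion: the TTBR inner-cacheability field is split, with
 * IRGN[0] at bit 6 and IRGN[1] at bit 0, so e.g. ARM_V7S_RGN_WBWA (0b01)
 * becomes BIT(6) rather than a contiguous two-bit field like ORGN.
 */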

#ifdef CONFIG_ZONE_DMA32
#define ARM_V7S_TABLE_GFP_DMA GFP_DMA32
#define ARM_V7S_TABLE_SLAB_FLAGS SLAB_CACHE_DMA32
#else
#define ARM_V7S_TABLE_GFP_DMA GFP_DMA
#define ARM_V7S_TABLE_SLAB_FLAGS SLAB_CACHE_DMA
#endif
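
/*
 * Page tables themselves must normally live below 4GB so that their physical
 * address fits in a 32-bit table descriptor; the DMA zone restrictions above
 * enforce that (the MTK TTBR_EXT quirk relaxes it at allocation time).
 */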

typedef u32 arm_v7s_iopte;

static bool selftest_running;

struct arm_v7s_io_pgtable {
	struct io_pgtable	iop;

	arm_v7s_iopte		*pgd;
	struct kmem_cache	*l2_tables;
	spinlock_t		split_lock;
};

static bool arm_v7s_pte_is_cont(arm_v7s_iopte pte, int lvl);

static dma_addr_t __arm_v7s_dma_addr(void *pages)
{
	return (dma_addr_t)virt_to_phys(pages);
}

static bool arm_v7s_is_mtk_enabled(struct io_pgtable_cfg *cfg)
{
	return IS_ENABLED(CONFIG_PHYS_ADDR_T_64BIT) &&
		(cfg->quirks & IO_PGTABLE_QUIRK_ARM_MTK_EXT);
}

static arm_v7s_iopte to_mtk_iopte(phys_addr_t paddr, arm_v7s_iopte pte)
{
	if (paddr & BIT_ULL(32))
		pte |= ARM_V7S_ATTR_MTK_PA_BIT32;
	if (paddr & BIT_ULL(33))
		pte |= ARM_V7S_ATTR_MTK_PA_BIT33;
	if (paddr & BIT_ULL(34))
		pte |= ARM_V7S_ATTR_MTK_PA_BIT34;
	return pte;
}
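
/*
 * For instance, a 34-bit physical address like 0x2_4000_0000 has bit 33 set,
 * so its PTE carries ARM_V7S_ATTR_MTK_PA_BIT33 on top of the usual low-32-bit
 * output address field.
 */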

static arm_v7s_iopte paddr_to_iopte(phys_addr_t paddr, int lvl,
				    struct io_pgtable_cfg *cfg)
{
	arm_v7s_iopte pte = paddr & ARM_V7S_LVL_MASK(lvl);

	if (arm_v7s_is_mtk_enabled(cfg))
		return to_mtk_iopte(paddr, pte);

	return pte;
}

static phys_addr_t iopte_to_paddr(arm_v7s_iopte pte, int lvl,
				  struct io_pgtable_cfg *cfg)
{
	arm_v7s_iopte mask;
	phys_addr_t paddr;

	if (ARM_V7S_PTE_IS_TABLE(pte, lvl))
		mask = ARM_V7S_TABLE_MASK;
	else if (arm_v7s_pte_is_cont(pte, lvl))
		mask = ARM_V7S_LVL_MASK(lvl) * ARM_V7S_CONT_PAGES;
	else
		mask = ARM_V7S_LVL_MASK(lvl);

	paddr = pte & mask;
	if (!arm_v7s_is_mtk_enabled(cfg))
		return paddr;

	if (pte & ARM_V7S_ATTR_MTK_PA_BIT32)
		paddr |= BIT_ULL(32);
	if (pte & ARM_V7S_ATTR_MTK_PA_BIT33)
		paddr |= BIT_ULL(33);
	if (pte & ARM_V7S_ATTR_MTK_PA_BIT34)
		paddr |= BIT_ULL(34);
	return paddr;
}
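
/*
 * The multiplication above is a shift in disguise: in 32 bits,
 * ARM_V7S_LVL_MASK(2) * ARM_V7S_CONT_PAGES == 0xfffff000 * 16 == 0xffff0000,
 * which is exactly the output address mask of a 64K large page (and likewise
 * 0xfff00000 * 16 == 0xff000000 for a 16M supersection).
 */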

static arm_v7s_iopte *iopte_deref(arm_v7s_iopte pte, int lvl,
				  struct arm_v7s_io_pgtable *data)
{
	return phys_to_virt(iopte_to_paddr(pte, lvl, &data->iop.cfg));
}

static void *__arm_v7s_alloc_table(int lvl, gfp_t gfp,
				   struct arm_v7s_io_pgtable *data)
{
	struct io_pgtable_cfg *cfg = &data->iop.cfg;
	struct device *dev = cfg->iommu_dev;
	phys_addr_t phys;
	dma_addr_t dma;
	size_t size = ARM_V7S_TABLE_SIZE(lvl, cfg);
	void *table = NULL;
	gfp_t gfp_l1;

	/*
	 * ARM_MTK_TTBR_EXT extends the translation table base to support a
	 * larger memory address.
	 */
	gfp_l1 = cfg->quirks & IO_PGTABLE_QUIRK_ARM_MTK_TTBR_EXT ?
		 GFP_KERNEL : ARM_V7S_TABLE_GFP_DMA;

	if (lvl == 1)
		table = (void *)__get_free_pages(gfp_l1 | __GFP_ZERO, get_order(size));
	else if (lvl == 2)
		table = kmem_cache_zalloc(data->l2_tables, gfp);

	if (!table)
		return NULL;

	phys = virt_to_phys(table);
	if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_MTK_TTBR_EXT ?
	    phys >= (1ULL << cfg->oas) : phys != (arm_v7s_iopte)phys) {
		/* Doesn't fit in PTE */
		dev_err(dev, "Page table does not fit in PTE: %pa\n", &phys);
		goto out_free;
	}
	if (!cfg->coherent_walk) {
		dma = dma_map_single(dev, table, size, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, dma))
			goto out_free;
		/*
		 * We depend on the IOMMU being able to work with any physical
		 * address directly, so if the DMA layer suggests otherwise by
		 * translating or truncating them, that bodes very badly...
		 */
		if (dma != phys)
			goto out_unmap;
	}
	if (lvl == 2)
		kmemleak_ignore(table);
	return table;

out_unmap:
	dev_err(dev, "Cannot accommodate DMA translation for IOMMU page tables\n");
	dma_unmap_single(dev, dma, size, DMA_TO_DEVICE);
out_free:
	if (lvl == 1)
		free_pages((unsigned long)table, get_order(size));
	else
		kmem_cache_free(data->l2_tables, table);
	return NULL;
}

static void __arm_v7s_free_table(void *table, int lvl,
				 struct arm_v7s_io_pgtable *data)
{
	struct io_pgtable_cfg *cfg = &data->iop.cfg;
	struct device *dev = cfg->iommu_dev;
	size_t size = ARM_V7S_TABLE_SIZE(lvl, cfg);

	if (!cfg->coherent_walk)
		dma_unmap_single(dev, __arm_v7s_dma_addr(table), size,
				 DMA_TO_DEVICE);
	if (lvl == 1)
		free_pages((unsigned long)table, get_order(size));
	else
		kmem_cache_free(data->l2_tables, table);
}

static void __arm_v7s_pte_sync(arm_v7s_iopte *ptep, int num_entries,
			       struct io_pgtable_cfg *cfg)
{
	if (cfg->coherent_walk)
		return;

	dma_sync_single_for_device(cfg->iommu_dev, __arm_v7s_dma_addr(ptep),
				   num_entries * sizeof(*ptep), DMA_TO_DEVICE);
}

static void __arm_v7s_set_pte(arm_v7s_iopte *ptep, arm_v7s_iopte pte,
			      int num_entries, struct io_pgtable_cfg *cfg)
{
	int i;

	for (i = 0; i < num_entries; i++)
		ptep[i] = pte;

	__arm_v7s_pte_sync(ptep, num_entries, cfg);
}

static arm_v7s_iopte arm_v7s_prot_to_pte(int prot, int lvl,
					 struct io_pgtable_cfg *cfg)
{
	bool ap = !(cfg->quirks & IO_PGTABLE_QUIRK_NO_PERMS);
	arm_v7s_iopte pte = ARM_V7S_ATTR_NG | ARM_V7S_ATTR_S;

	if (!(prot & IOMMU_MMIO))
		pte |= ARM_V7S_ATTR_TEX(1);
	if (ap) {
		pte |= ARM_V7S_PTE_AF;
		if (!(prot & IOMMU_PRIV))
			pte |= ARM_V7S_PTE_AP_UNPRIV;
		if (!(prot & IOMMU_WRITE))
			pte |= ARM_V7S_PTE_AP_RDONLY;
	}
	pte <<= ARM_V7S_ATTR_SHIFT(lvl);

	if ((prot & IOMMU_NOEXEC) && ap)
		pte |= ARM_V7S_ATTR_XN(lvl);
	if (prot & IOMMU_MMIO)
		pte |= ARM_V7S_ATTR_B;
	else if (prot & IOMMU_CACHE)
		pte |= ARM_V7S_ATTR_B | ARM_V7S_ATTR_C;

	pte |= ARM_V7S_PTE_TYPE_PAGE;
	if (lvl == 1 && (cfg->quirks & IO_PGTABLE_QUIRK_ARM_NS))
		pte |= ARM_V7S_ATTR_NS_SECTION;

	return pte;
}
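
/*
 * For illustration (no quirks): a level 1 IOMMU_READ | IOMMU_WRITE |
 * IOMMU_CACHE mapping builds nG | S | TEX(1) | AF | AP_UNPRIV == 0xc7,
 * shifts that into bits [17:10] (0x31c00), then ORs in C | B and the
 * section type bits, for a final PTE of 0x00031c0e.
 */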

static int arm_v7s_pte_to_prot(arm_v7s_iopte pte, int lvl)
{
	int prot = IOMMU_READ;
	arm_v7s_iopte attr = pte >> ARM_V7S_ATTR_SHIFT(lvl);

	if (!(attr & ARM_V7S_PTE_AP_RDONLY))
		prot |= IOMMU_WRITE;
	if (!(attr & ARM_V7S_PTE_AP_UNPRIV))
		prot |= IOMMU_PRIV;
	if ((attr & (ARM_V7S_TEX_MASK << ARM_V7S_TEX_SHIFT)) == 0)
		prot |= IOMMU_MMIO;
	else if (pte & ARM_V7S_ATTR_C)
		prot |= IOMMU_CACHE;
	if (pte & ARM_V7S_ATTR_XN(lvl))
		prot |= IOMMU_NOEXEC;

	return prot;
}

static arm_v7s_iopte arm_v7s_pte_to_cont(arm_v7s_iopte pte, int lvl)
{
	if (lvl == 1) {
		pte |= ARM_V7S_CONT_SECTION;
	} else if (lvl == 2) {
		arm_v7s_iopte xn = pte & ARM_V7S_ATTR_XN(lvl);
		arm_v7s_iopte tex = pte & ARM_V7S_CONT_PAGE_TEX_MASK;

		pte ^= xn | tex | ARM_V7S_PTE_TYPE_PAGE;
		pte |= (xn << ARM_V7S_CONT_PAGE_XN_SHIFT) |
		       (tex << ARM_V7S_CONT_PAGE_TEX_SHIFT) |
		       ARM_V7S_PTE_TYPE_CONT_PAGE;
	}
	return pte;
}

static arm_v7s_iopte arm_v7s_cont_to_pte(arm_v7s_iopte pte, int lvl)
{
	if (lvl == 1) {
		pte &= ~ARM_V7S_CONT_SECTION;
	} else if (lvl == 2) {
		arm_v7s_iopte xn = pte & BIT(ARM_V7S_CONT_PAGE_XN_SHIFT);
		arm_v7s_iopte tex = pte & (ARM_V7S_CONT_PAGE_TEX_MASK <<
					   ARM_V7S_CONT_PAGE_TEX_SHIFT);

		pte ^= xn | tex | ARM_V7S_PTE_TYPE_CONT_PAGE;
		pte |= (xn >> ARM_V7S_CONT_PAGE_XN_SHIFT) |
		       (tex >> ARM_V7S_CONT_PAGE_TEX_SHIFT) |
		       ARM_V7S_PTE_TYPE_PAGE;
	}
	return pte;
}
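
/*
 * In the level 2 cases above, the XOR clears the fields from their old
 * positions and flips the type bits, then the OR rewrites them shifted:
 * converting a small page to a large page moves XN from bit 0 up to bit 15
 * and TEX from bits [8:6] up to [14:12]; cont_to_pte does the reverse.
 */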

static bool arm_v7s_pte_is_cont(arm_v7s_iopte pte, int lvl)
{
	if (lvl == 1 && !ARM_V7S_PTE_IS_TABLE(pte, lvl))
		return pte & ARM_V7S_CONT_SECTION;
	else if (lvl == 2)
		return !(pte & ARM_V7S_PTE_TYPE_PAGE);
	return false;
}

static size_t __arm_v7s_unmap(struct arm_v7s_io_pgtable *,
			      struct iommu_iotlb_gather *, unsigned long,
			      size_t, int, arm_v7s_iopte *);

static int arm_v7s_init_pte(struct arm_v7s_io_pgtable *data,
			    unsigned long iova, phys_addr_t paddr, int prot,
			    int lvl, int num_entries, arm_v7s_iopte *ptep)
{
	struct io_pgtable_cfg *cfg = &data->iop.cfg;
	arm_v7s_iopte pte;
	int i;

	for (i = 0; i < num_entries; i++)
		if (ARM_V7S_PTE_IS_TABLE(ptep[i], lvl)) {
			/*
			 * We need to unmap and free the old table before
			 * overwriting it with a block entry.
			 */
			arm_v7s_iopte *tblp;
			size_t sz = ARM_V7S_BLOCK_SIZE(lvl);

			tblp = ptep - ARM_V7S_LVL_IDX(iova, lvl, cfg);
			if (WARN_ON(__arm_v7s_unmap(data, NULL, iova + i * sz,
						    sz, lvl, tblp) != sz))
				return -EINVAL;
		} else if (ptep[i]) {
			/* We require an unmap first */
			WARN_ON(!selftest_running);
			return -EEXIST;
		}

	pte = arm_v7s_prot_to_pte(prot, lvl, cfg);
	if (num_entries > 1)
		pte = arm_v7s_pte_to_cont(pte, lvl);

	pte |= paddr_to_iopte(paddr, lvl, cfg);

	__arm_v7s_set_pte(ptep, pte, num_entries, cfg);
	return 0;
}

static arm_v7s_iopte arm_v7s_install_table(arm_v7s_iopte *table,
					   arm_v7s_iopte *ptep,
					   arm_v7s_iopte curr,
					   struct io_pgtable_cfg *cfg)
{
	phys_addr_t phys = virt_to_phys(table);
	arm_v7s_iopte old, new;

	new = phys | ARM_V7S_PTE_TYPE_TABLE;

	if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_MTK_TTBR_EXT)
		new = to_mtk_iopte(phys, new);

	if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_NS)
		new |= ARM_V7S_ATTR_NS_TABLE;

	/*
	 * Ensure the table itself is visible before its PTE can be.
	 * Whilst we could get away with cmpxchg64_release below, this
	 * doesn't have any ordering semantics when !CONFIG_SMP.
	 */
	dma_wmb();

	old = cmpxchg_relaxed(ptep, curr, new);
	__arm_v7s_pte_sync(ptep, 1, cfg);

	return old;
}

static int __arm_v7s_map(struct arm_v7s_io_pgtable *data, unsigned long iova,
			 phys_addr_t paddr, size_t size, int prot,
			 int lvl, arm_v7s_iopte *ptep, gfp_t gfp)
{
	struct io_pgtable_cfg *cfg = &data->iop.cfg;
	arm_v7s_iopte pte, *cptep;
	int num_entries = size >> ARM_V7S_LVL_SHIFT(lvl);

	/* Find our entry at the current level */
	ptep += ARM_V7S_LVL_IDX(iova, lvl, cfg);

	/* If we can install a leaf entry at this level, then do so */
	if (num_entries)
		return arm_v7s_init_pte(data, iova, paddr, prot,
					lvl, num_entries, ptep);

	/* We can't allocate tables at the final level */
	if (WARN_ON(lvl == 2))
		return -EINVAL;

	/* Grab a pointer to the next level */
	pte = READ_ONCE(*ptep);
	if (!pte) {
		cptep = __arm_v7s_alloc_table(lvl + 1, gfp, data);
		if (!cptep)
			return -ENOMEM;

		pte = arm_v7s_install_table(cptep, ptep, 0, cfg);
		if (pte)
			__arm_v7s_free_table(cptep, lvl + 1, data);
	} else {
		/* We've no easy way of knowing if it's synced yet, so... */
		__arm_v7s_pte_sync(ptep, 1, cfg);
	}

	if (ARM_V7S_PTE_IS_TABLE(pte, lvl)) {
		cptep = iopte_deref(pte, lvl, data);
	} else if (pte) {
		/* We require an unmap first */
		WARN_ON(!selftest_running);
		return -EEXIST;
	}

	/* Rinse, repeat */
	return __arm_v7s_map(data, iova, paddr, size, prot, lvl + 1, cptep, gfp);
}

static int arm_v7s_map_pages(struct io_pgtable_ops *ops, unsigned long iova,
			     phys_addr_t paddr, size_t pgsize, size_t pgcount,
			     int prot, gfp_t gfp, size_t *mapped)
{
	struct arm_v7s_io_pgtable *data = io_pgtable_ops_to_data(ops);
	int ret = -EINVAL;

	if (WARN_ON(iova >= (1ULL << data->iop.cfg.ias) ||
		    paddr >= (1ULL << data->iop.cfg.oas)))
		return -ERANGE;

	/* If no access, then nothing to do */
	if (!(prot & (IOMMU_READ | IOMMU_WRITE)))
		return 0;

	while (pgcount--) {
		ret = __arm_v7s_map(data, iova, paddr, pgsize, prot, 1, data->pgd,
				    gfp);
		if (ret)
			break;

		iova += pgsize;
		paddr += pgsize;
		*mapped += pgsize;
	}
	/*
	 * Synchronise all PTE updates for the new mapping before there's
	 * a chance for anything to kick off a table walk for the new iova.
	 */
	wmb();

	return ret;
}

static void arm_v7s_free_pgtable(struct io_pgtable *iop)
{
	struct arm_v7s_io_pgtable *data = io_pgtable_to_data(iop);
	int i;

	for (i = 0; i < ARM_V7S_PTES_PER_LVL(1, &data->iop.cfg); i++) {
		arm_v7s_iopte pte = data->pgd[i];

		if (ARM_V7S_PTE_IS_TABLE(pte, 1))
			__arm_v7s_free_table(iopte_deref(pte, 1, data),
					     2, data);
	}
	__arm_v7s_free_table(data->pgd, 1, data);
	kmem_cache_destroy(data->l2_tables);
	kfree(data);
}

static arm_v7s_iopte arm_v7s_split_cont(struct arm_v7s_io_pgtable *data,
					unsigned long iova, int idx, int lvl,
					arm_v7s_iopte *ptep)
{
	struct io_pgtable *iop = &data->iop;
	arm_v7s_iopte pte;
	size_t size = ARM_V7S_BLOCK_SIZE(lvl);
	int i;

	/* Check that we didn't lose a race to get the lock */
	pte = *ptep;
	if (!arm_v7s_pte_is_cont(pte, lvl))
		return pte;

	ptep -= idx & (ARM_V7S_CONT_PAGES - 1);
	pte = arm_v7s_cont_to_pte(pte, lvl);
	for (i = 0; i < ARM_V7S_CONT_PAGES; i++)
		ptep[i] = pte + i * size;

	__arm_v7s_pte_sync(ptep, ARM_V7S_CONT_PAGES, &iop->cfg);

	size *= ARM_V7S_CONT_PAGES;
	io_pgtable_tlb_flush_walk(iop, iova, size, size);
	return pte;
}

static size_t arm_v7s_split_blk_unmap(struct arm_v7s_io_pgtable *data,
				      struct iommu_iotlb_gather *gather,
				      unsigned long iova, size_t size,
				      arm_v7s_iopte blk_pte,
				      arm_v7s_iopte *ptep)
{
	struct io_pgtable_cfg *cfg = &data->iop.cfg;
	arm_v7s_iopte pte, *tablep;
	int i, unmap_idx, num_entries, num_ptes;

	tablep = __arm_v7s_alloc_table(2, GFP_ATOMIC, data);
	if (!tablep)
		return 0; /* Bytes unmapped */

	num_ptes = ARM_V7S_PTES_PER_LVL(2, cfg);
	num_entries = size >> ARM_V7S_LVL_SHIFT(2);
	unmap_idx = ARM_V7S_LVL_IDX(iova, 2, cfg);

	pte = arm_v7s_prot_to_pte(arm_v7s_pte_to_prot(blk_pte, 1), 2, cfg);
	if (num_entries > 1)
		pte = arm_v7s_pte_to_cont(pte, 2);

	for (i = 0; i < num_ptes; i += num_entries, pte += size) {
		/* Unmap! */
		if (i == unmap_idx)
			continue;

		__arm_v7s_set_pte(&tablep[i], pte, num_entries, cfg);
	}

	pte = arm_v7s_install_table(tablep, ptep, blk_pte, cfg);
	if (pte != blk_pte) {
		__arm_v7s_free_table(tablep, 2, data);

		if (!ARM_V7S_PTE_IS_TABLE(pte, 1))
			return 0;

		tablep = iopte_deref(pte, 1, data);
		return __arm_v7s_unmap(data, gather, iova, size, 2, tablep);
	}

	io_pgtable_tlb_add_page(&data->iop, gather, iova, size);
	return size;
}

static size_t __arm_v7s_unmap(struct arm_v7s_io_pgtable *data,
			      struct iommu_iotlb_gather *gather,
			      unsigned long iova, size_t size, int lvl,
			      arm_v7s_iopte *ptep)
{
	arm_v7s_iopte pte[ARM_V7S_CONT_PAGES];
	struct io_pgtable *iop = &data->iop;
	int idx, i = 0, num_entries = size >> ARM_V7S_LVL_SHIFT(lvl);

	/* Something went horribly wrong and we ran out of page table */
	if (WARN_ON(lvl > 2))
		return 0;

	idx = ARM_V7S_LVL_IDX(iova, lvl, &iop->cfg);
	ptep += idx;
	do {
		pte[i] = READ_ONCE(ptep[i]);
		if (WARN_ON(!ARM_V7S_PTE_IS_VALID(pte[i])))
			return 0;
	} while (++i < num_entries);

	/*
	 * If we've hit a contiguous 'large page' entry at this level, it
	 * needs splitting first, unless we're unmapping the whole lot.
	 *
	 * For splitting, we can't rewrite 16 PTEs atomically, and since we
	 * can't necessarily assume TEX remap we don't have a software bit to
	 * mark live entries being split. In practice (i.e. DMA API code), we
	 * will never be splitting large pages anyway, so just wrap this edge
	 * case in a lock for the sake of correctness and be done with it.
	 */
	if (num_entries <= 1 && arm_v7s_pte_is_cont(pte[0], lvl)) {
		unsigned long flags;

		spin_lock_irqsave(&data->split_lock, flags);
		pte[0] = arm_v7s_split_cont(data, iova, idx, lvl, ptep);
		spin_unlock_irqrestore(&data->split_lock, flags);
	}

	/* If the size matches this level, we're in the right place */
	if (num_entries) {
		size_t blk_size = ARM_V7S_BLOCK_SIZE(lvl);

		__arm_v7s_set_pte(ptep, 0, num_entries, &iop->cfg);

		for (i = 0; i < num_entries; i++) {
			if (ARM_V7S_PTE_IS_TABLE(pte[i], lvl)) {
				/* Also flush any partial walks */
				io_pgtable_tlb_flush_walk(iop, iova, blk_size,
						ARM_V7S_BLOCK_SIZE(lvl + 1));
				ptep = iopte_deref(pte[i], lvl, data);
				__arm_v7s_free_table(ptep, lvl + 1, data);
			} else if (!iommu_iotlb_gather_queued(gather)) {
				io_pgtable_tlb_add_page(iop, gather, iova, blk_size);
			}
			iova += blk_size;
		}
		return size;
	} else if (lvl == 1 && !ARM_V7S_PTE_IS_TABLE(pte[0], lvl)) {
		/*
		 * Insert a table at the next level to map the old region,
		 * minus the part we want to unmap
		 */
		return arm_v7s_split_blk_unmap(data, gather, iova, size, pte[0],
					       ptep);
	}

	/* Keep on walkin' */
	ptep = iopte_deref(pte[0], lvl, data);
	return __arm_v7s_unmap(data, gather, iova, size, lvl + 1, ptep);
}

static size_t arm_v7s_unmap_pages(struct io_pgtable_ops *ops, unsigned long iova,
				  size_t pgsize, size_t pgcount,
				  struct iommu_iotlb_gather *gather)
{
	struct arm_v7s_io_pgtable *data = io_pgtable_ops_to_data(ops);
	size_t unmapped = 0, ret;

	if (WARN_ON(iova >= (1ULL << data->iop.cfg.ias)))
		return 0;

	while (pgcount--) {
		ret = __arm_v7s_unmap(data, gather, iova, pgsize, 1, data->pgd);
		if (!ret)
			break;

		unmapped += pgsize;
		iova += pgsize;
	}

	return unmapped;
}

static phys_addr_t arm_v7s_iova_to_phys(struct io_pgtable_ops *ops,
					unsigned long iova)
{
	struct arm_v7s_io_pgtable *data = io_pgtable_ops_to_data(ops);
	arm_v7s_iopte *ptep = data->pgd, pte;
	int lvl = 0;
	u32 mask;

	do {
		ptep += ARM_V7S_LVL_IDX(iova, ++lvl, &data->iop.cfg);
		pte = READ_ONCE(*ptep);
		ptep = iopte_deref(pte, lvl, data);
	} while (ARM_V7S_PTE_IS_TABLE(pte, lvl));

	if (!ARM_V7S_PTE_IS_VALID(pte))
		return 0;

	mask = ARM_V7S_LVL_MASK(lvl);
	if (arm_v7s_pte_is_cont(pte, lvl))
		mask *= ARM_V7S_CONT_PAGES;
	return iopte_to_paddr(pte, lvl, &data->iop.cfg) | (iova & ~mask);
}

static struct io_pgtable *arm_v7s_alloc_pgtable(struct io_pgtable_cfg *cfg,
						void *cookie)
{
	struct arm_v7s_io_pgtable *data;
	slab_flags_t slab_flag;
	phys_addr_t paddr;

	if (cfg->ias > (arm_v7s_is_mtk_enabled(cfg) ? 34 : ARM_V7S_ADDR_BITS))
		return NULL;

	if (cfg->oas > (arm_v7s_is_mtk_enabled(cfg) ? 35 : ARM_V7S_ADDR_BITS))
		return NULL;

	if (cfg->quirks & ~(IO_PGTABLE_QUIRK_ARM_NS |
			    IO_PGTABLE_QUIRK_NO_PERMS |
			    IO_PGTABLE_QUIRK_ARM_MTK_EXT |
			    IO_PGTABLE_QUIRK_ARM_MTK_TTBR_EXT))
		return NULL;

	/* If ARM_MTK_EXT is enabled, the NO_PERMS quirk is also expected. */
	if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_MTK_EXT &&
	    !(cfg->quirks & IO_PGTABLE_QUIRK_NO_PERMS))
		return NULL;

	if ((cfg->quirks & IO_PGTABLE_QUIRK_ARM_MTK_TTBR_EXT) &&
	    !arm_v7s_is_mtk_enabled(cfg))
		return NULL;

	data = kmalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return NULL;

	spin_lock_init(&data->split_lock);

	/*
	 * ARM_MTK_TTBR_EXT extends the translation table base to support a
	 * larger memory address.
	 */
	slab_flag = cfg->quirks & IO_PGTABLE_QUIRK_ARM_MTK_TTBR_EXT ?
		    0 : ARM_V7S_TABLE_SLAB_FLAGS;

	data->l2_tables = kmem_cache_create("io-pgtable_armv7s_l2",
					    ARM_V7S_TABLE_SIZE(2, cfg),
					    ARM_V7S_TABLE_SIZE(2, cfg),
					    slab_flag, NULL);
	if (!data->l2_tables)
		goto out_free_data;

	data->iop.ops = (struct io_pgtable_ops) {
		.map_pages	= arm_v7s_map_pages,
		.unmap_pages	= arm_v7s_unmap_pages,
		.iova_to_phys	= arm_v7s_iova_to_phys,
	};

	/* We have to do this early for __arm_v7s_alloc_table to work... */
	data->iop.cfg = *cfg;

	/*
	 * Unless the IOMMU driver indicates supersection support by
	 * having SZ_16M set in the initial bitmap, they won't be used.
	 */
	cfg->pgsize_bitmap &= SZ_4K | SZ_64K | SZ_1M | SZ_16M;

	/* TCR: T0SZ=0, EAE=0 (if applicable) */
	cfg->arm_v7s_cfg.tcr = 0;

	/*
	 * TEX remap: the indices used map to the closest equivalent types
	 * under the non-TEX-remap interpretation of those attribute bits,
	 * excepting various implementation-defined aspects of shareability.
	 */
	cfg->arm_v7s_cfg.prrr = ARM_V7S_PRRR_TR(1, ARM_V7S_PRRR_TYPE_DEVICE) |
				ARM_V7S_PRRR_TR(4, ARM_V7S_PRRR_TYPE_NORMAL) |
				ARM_V7S_PRRR_TR(7, ARM_V7S_PRRR_TYPE_NORMAL) |
				ARM_V7S_PRRR_DS0 | ARM_V7S_PRRR_DS1 |
				ARM_V7S_PRRR_NS1 | ARM_V7S_PRRR_NOS(7);
	cfg->arm_v7s_cfg.nmrr = ARM_V7S_NMRR_IR(7, ARM_V7S_RGN_WBWA) |
				ARM_V7S_NMRR_OR(7, ARM_V7S_RGN_WBWA);
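
	/*
	 * The remapped indices correspond to {TEX[0],C,B}: index 1 (B only)
	 * is the Device type used for IOMMU_MMIO, index 4 (TEX=1) is Normal
	 * Non-Cacheable, and index 7 (TEX=1,C,B) is Normal with inner/outer
	 * write-back write-allocate cacheability per the NMRR value above.
	 */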

	/* Looking good; allocate a pgd */
	data->pgd = __arm_v7s_alloc_table(1, GFP_KERNEL, data);
	if (!data->pgd)
		goto out_free_data;

	/* Ensure the empty pgd is visible before any actual TTBR write */
	wmb();

	/* TTBR */
	paddr = virt_to_phys(data->pgd);
	if (arm_v7s_is_mtk_enabled(cfg))
		cfg->arm_v7s_cfg.ttbr = paddr | upper_32_bits(paddr);
	else
		cfg->arm_v7s_cfg.ttbr = paddr | ARM_V7S_TTBR_S |
					(cfg->coherent_walk ? (ARM_V7S_TTBR_NOS |
					 ARM_V7S_TTBR_IRGN_ATTR(ARM_V7S_RGN_WBWA) |
					 ARM_V7S_TTBR_ORGN_ATTR(ARM_V7S_RGN_WBWA)) :
					(ARM_V7S_TTBR_IRGN_ATTR(ARM_V7S_RGN_NC) |
					 ARM_V7S_TTBR_ORGN_ATTR(ARM_V7S_RGN_NC)));
	return &data->iop;

out_free_data:
	kmem_cache_destroy(data->l2_tables);
	kfree(data);
	return NULL;
}

struct io_pgtable_init_fns io_pgtable_arm_v7s_init_fns = {
	.alloc	= arm_v7s_alloc_pgtable,
	.free	= arm_v7s_free_pgtable,
};

#ifdef CONFIG_IOMMU_IO_PGTABLE_ARMV7S_SELFTEST

static struct io_pgtable_cfg *cfg_cookie __initdata;

static void __init dummy_tlb_flush_all(void *cookie)
{
	WARN_ON(cookie != cfg_cookie);
}

static void __init dummy_tlb_flush(unsigned long iova, size_t size,
				   size_t granule, void *cookie)
{
	WARN_ON(cookie != cfg_cookie);
	WARN_ON(!(size & cfg_cookie->pgsize_bitmap));
}

static void __init dummy_tlb_add_page(struct iommu_iotlb_gather *gather,
				      unsigned long iova, size_t granule,
				      void *cookie)
{
	dummy_tlb_flush(iova, granule, granule, cookie);
}

static const struct iommu_flush_ops dummy_tlb_ops __initconst = {
	.tlb_flush_all	= dummy_tlb_flush_all,
	.tlb_flush_walk	= dummy_tlb_flush,
	.tlb_add_page	= dummy_tlb_add_page,
};

#define __FAIL(ops)	({				\
		WARN(1, "selftest: test failed\n");	\
		selftest_running = false;		\
		-EFAULT;				\
})

static int __init arm_v7s_do_selftests(void)
{
	struct io_pgtable_ops *ops;
	struct io_pgtable_cfg cfg = {
		.tlb = &dummy_tlb_ops,
		.oas = 32,
		.ias = 32,
		.coherent_walk = true,
		.quirks = IO_PGTABLE_QUIRK_ARM_NS,
		.pgsize_bitmap = SZ_4K | SZ_64K | SZ_1M | SZ_16M,
	};
	unsigned int iova, size, iova_start;
	unsigned int i, loopnr = 0;
	size_t mapped;

	selftest_running = true;

	cfg_cookie = &cfg;

	ops = alloc_io_pgtable_ops(ARM_V7S, &cfg, &cfg);
	if (!ops) {
		pr_err("selftest: failed to allocate io pgtable ops\n");
		return -EINVAL;
	}

	/*
	 * Initial sanity checks.
	 * Empty page tables shouldn't provide any translations.
	 */
	if (ops->iova_to_phys(ops, 42))
		return __FAIL(ops);

	if (ops->iova_to_phys(ops, SZ_1G + 42))
		return __FAIL(ops);

	if (ops->iova_to_phys(ops, SZ_2G + 42))
		return __FAIL(ops);

	/*
	 * Distinct mappings of different granule sizes.
	 */
	iova = 0;
	for_each_set_bit(i, &cfg.pgsize_bitmap, BITS_PER_LONG) {
		size = 1UL << i;
		if (ops->map_pages(ops, iova, iova, size, 1,
				   IOMMU_READ | IOMMU_WRITE |
				   IOMMU_NOEXEC | IOMMU_CACHE,
				   GFP_KERNEL, &mapped))
			return __FAIL(ops);

		/* Overlapping mappings */
		if (!ops->map_pages(ops, iova, iova + size, size, 1,
				    IOMMU_READ | IOMMU_NOEXEC, GFP_KERNEL,
				    &mapped))
			return __FAIL(ops);

		if (ops->iova_to_phys(ops, iova + 42) != (iova + 42))
			return __FAIL(ops);

		iova += SZ_16M;
		loopnr++;
	}

	/* Partial unmap */
	i = 1;
	size = 1UL << __ffs(cfg.pgsize_bitmap);
	while (i < loopnr) {
		iova_start = i * SZ_16M;
		if (ops->unmap_pages(ops, iova_start + size, size, 1, NULL) != size)
			return __FAIL(ops);

		/* Remap of partial unmap */
		if (ops->map_pages(ops, iova_start + size, size, size, 1,
				   IOMMU_READ, GFP_KERNEL, &mapped))
			return __FAIL(ops);

		if (ops->iova_to_phys(ops, iova_start + size + 42)
		    != (size + 42))
			return __FAIL(ops);
		i++;
	}

	/* Full unmap */
	iova = 0;
	for_each_set_bit(i, &cfg.pgsize_bitmap, BITS_PER_LONG) {
		size = 1UL << i;

		if (ops->unmap_pages(ops, iova, size, 1, NULL) != size)
			return __FAIL(ops);

		if (ops->iova_to_phys(ops, iova + 42))
			return __FAIL(ops);

		/* Remap full block */
		if (ops->map_pages(ops, iova, iova, size, 1, IOMMU_WRITE,
				   GFP_KERNEL, &mapped))
			return __FAIL(ops);

		if (ops->iova_to_phys(ops, iova + 42) != (iova + 42))
			return __FAIL(ops);

		iova += SZ_16M;
	}

	free_io_pgtable_ops(ops);

	selftest_running = false;

	pr_info("self test ok\n");
	return 0;
}
subsys_initcall(arm_v7s_do_selftests);
#endif