// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2011,2016 Samsung Electronics Co., Ltd.
 *	http://www.samsung.com
 */

#ifdef CONFIG_EXYNOS_IOMMU_DEBUG
#define DEBUG
#endif

#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/iommu.h>
#include <linux/interrupt.h>
#include <linux/kmemleak.h>
#include <linux/list.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>

typedef u32 sysmmu_iova_t;
typedef u32 sysmmu_pte_t;
static struct iommu_domain exynos_identity_domain;

/* We do not consider super section mapping (16MB) */
#define SECT_ORDER 20
#define LPAGE_ORDER 16
#define SPAGE_ORDER 12

#define SECT_SIZE (1 << SECT_ORDER)
#define LPAGE_SIZE (1 << LPAGE_ORDER)
#define SPAGE_SIZE (1 << SPAGE_ORDER)

#define SECT_MASK (~(SECT_SIZE - 1))
#define LPAGE_MASK (~(LPAGE_SIZE - 1))
#define SPAGE_MASK (~(SPAGE_SIZE - 1))

#define lv1ent_fault(sent) ((*(sent) == ZERO_LV2LINK) || \
			   ((*(sent) & 3) == 0) || ((*(sent) & 3) == 3))
#define lv1ent_zero(sent) (*(sent) == ZERO_LV2LINK)
#define lv1ent_page_zero(sent) ((*(sent) & 3) == 1)
#define lv1ent_page(sent) ((*(sent) != ZERO_LV2LINK) && \
			  ((*(sent) & 3) == 1))
#define lv1ent_section(sent) ((*(sent) & 3) == 2)

#define lv2ent_fault(pent) ((*(pent) & 3) == 0)
#define lv2ent_small(pent) ((*(pent) & 2) == 2)
#define lv2ent_large(pent) ((*(pent) & 3) == 1)

/*
 * v1.x - v3.x SYSMMU supports 32bit physical and 32bit virtual address spaces.
 * v5.0 introduced support for a 36bit physical address space by shifting
 * all page entry values by 4 bits.
 * All SYSMMU controllers in the system support address spaces of the same
 * size, so PG_ENT_SHIFT can be initialized at the first SYSMMU probe to the
 * proper value (0 or 4).
 */
static short PG_ENT_SHIFT = -1;
#define SYSMMU_PG_ENT_SHIFT 0
#define SYSMMU_V5_PG_ENT_SHIFT 4
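/*
 * Worked example (illustrative only, not driver logic): with
 * PG_ENT_SHIFT == 4 (v5.0+), the 36bit physical section address
 * 0x1_2340_0000 is stored in a page table entry as
 * 0x1_2340_0000 >> 4 == 0x12340000 (plus type/protection bits), and
 * recovered by sect_to_phys() below as ((phys_addr_t)0x12340000) << 4.
 * With PG_ENT_SHIFT == 0 (v1.x - v3.x) entries hold the address unshifted.
 */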

static const sysmmu_pte_t *LV1_PROT;
static const sysmmu_pte_t SYSMMU_LV1_PROT[] = {
	((0 << 15) | (0 << 10)), /* no access */
	((1 << 15) | (1 << 10)), /* IOMMU_READ only */
	((0 << 15) | (1 << 10)), /* IOMMU_WRITE not supported, use read/write */
	((0 << 15) | (1 << 10)), /* IOMMU_READ | IOMMU_WRITE */
};
static const sysmmu_pte_t SYSMMU_V5_LV1_PROT[] = {
	(0 << 4), /* no access */
	(1 << 4), /* IOMMU_READ only */
	(2 << 4), /* IOMMU_WRITE only */
	(3 << 4), /* IOMMU_READ | IOMMU_WRITE */
};

static const sysmmu_pte_t *LV2_PROT;
static const sysmmu_pte_t SYSMMU_LV2_PROT[] = {
	((0 << 9) | (0 << 4)), /* no access */
	((1 << 9) | (1 << 4)), /* IOMMU_READ only */
	((0 << 9) | (1 << 4)), /* IOMMU_WRITE not supported, use read/write */
	((0 << 9) | (1 << 4)), /* IOMMU_READ | IOMMU_WRITE */
};
static const sysmmu_pte_t SYSMMU_V5_LV2_PROT[] = {
	(0 << 2), /* no access */
	(1 << 2), /* IOMMU_READ only */
	(2 << 2), /* IOMMU_WRITE only */
	(3 << 2), /* IOMMU_READ | IOMMU_WRITE */
};

#define SYSMMU_SUPPORTED_PROT_BITS (IOMMU_READ | IOMMU_WRITE)
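/*
 * Indexing sketch for the tables above (illustrative only): IOMMU_READ is
 * BIT(0) and IOMMU_WRITE is BIT(1), so after masking with
 * SYSMMU_SUPPORTED_PROT_BITS the prot value is used directly as the index:
 *
 *	prot &= SYSMMU_SUPPORTED_PROT_BITS;	// 0..3
 *	pte |= LV1_PROT[prot];			// e.g. (3 << 4) on v5 for RW
 */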

#define sect_to_phys(ent) (((phys_addr_t) ent) << PG_ENT_SHIFT)
#define section_phys(sent) (sect_to_phys(*(sent)) & SECT_MASK)
#define section_offs(iova) (iova & (SECT_SIZE - 1))
#define lpage_phys(pent) (sect_to_phys(*(pent)) & LPAGE_MASK)
#define lpage_offs(iova) (iova & (LPAGE_SIZE - 1))
#define spage_phys(pent) (sect_to_phys(*(pent)) & SPAGE_MASK)
#define spage_offs(iova) (iova & (SPAGE_SIZE - 1))

#define NUM_LV1ENTRIES 4096
#define NUM_LV2ENTRIES (SECT_SIZE / SPAGE_SIZE)

static u32 lv1ent_offset(sysmmu_iova_t iova)
{
	return iova >> SECT_ORDER;
}

static u32 lv2ent_offset(sysmmu_iova_t iova)
{
	return (iova >> SPAGE_ORDER) & (NUM_LV2ENTRIES - 1);
}

#define LV1TABLE_SIZE (NUM_LV1ENTRIES * sizeof(sysmmu_pte_t))
#define LV2TABLE_SIZE (NUM_LV2ENTRIES * sizeof(sysmmu_pte_t))

#define SPAGES_PER_LPAGE (LPAGE_SIZE / SPAGE_SIZE)
#define lv2table_base(sent) (sect_to_phys(*(sent) & 0xFFFFFFC0))

#define mk_lv1ent_sect(pa, prot) ((pa >> PG_ENT_SHIFT) | LV1_PROT[prot] | 2)
#define mk_lv1ent_page(pa) ((pa >> PG_ENT_SHIFT) | 1)
#define mk_lv2ent_lpage(pa, prot) ((pa >> PG_ENT_SHIFT) | LV2_PROT[prot] | 1)
#define mk_lv2ent_spage(pa, prot) ((pa >> PG_ENT_SHIFT) | LV2_PROT[prot] | 2)
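/*
 * Worked example (illustrative only): for iova == 0x12345678,
 * lv1ent_offset() == 0x12345678 >> 20 == 0x123 (section index),
 * lv2ent_offset() == (0x12345678 >> 12) & 0xFF == 0x45 (small page index)
 * and spage_offs() == 0x678 (offset inside the 4KiB page).
 */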

#define CTRL_ENABLE 0x5
#define CTRL_BLOCK 0x7
#define CTRL_DISABLE 0x0

#define CFG_LRU 0x1
#define CFG_EAP (1 << 2)
#define CFG_QOS(n) ((n & 0xF) << 7)
#define CFG_ACGEN (1 << 24) /* System MMU 3.3 only */
#define CFG_SYSSEL (1 << 22) /* System MMU 3.2 only */
#define CFG_FLPDCACHE (1 << 20) /* System MMU 3.2+ only */

#define CTRL_VM_ENABLE BIT(0)
#define CTRL_VM_FAULT_MODE_STALL BIT(3)
#define CAPA0_CAPA1_EXIST BIT(11)
#define CAPA1_VCR_ENABLED BIT(14)

/* common registers */
#define REG_MMU_CTRL 0x000
#define REG_MMU_CFG 0x004
#define REG_MMU_STATUS 0x008
#define REG_MMU_VERSION 0x034

#define MMU_MAJ_VER(val) ((val) >> 7)
#define MMU_MIN_VER(val) ((val) & 0x7F)
#define MMU_RAW_VER(reg) (((reg) >> 21) & ((1 << 11) - 1)) /* 11 bits */

#define MAKE_MMU_VER(maj, min) ((((maj) & 0xF) << 7) | ((min) & 0x7F))
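/*
 * Example decode (illustrative only): a raw version value of 0x183 gives
 * MMU_MAJ_VER(0x183) == 3 and MMU_MIN_VER(0x183) == 3, i.e. System MMU
 * v3.3; MAKE_MMU_VER(3, 3) reconstructs the same 0x183.
 */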

/* v1.x - v3.x registers */
#define REG_PAGE_FAULT_ADDR 0x024
#define REG_AW_FAULT_ADDR 0x028
#define REG_AR_FAULT_ADDR 0x02C
#define REG_DEFAULT_SLAVE_ADDR 0x030

/* v5.x registers */
#define REG_V5_FAULT_AR_VA 0x070
#define REG_V5_FAULT_AW_VA 0x080

/* v7.x registers */
#define REG_V7_CAPA0 0x870
#define REG_V7_CAPA1 0x874
#define REG_V7_CTRL_VM 0x8000

#define has_sysmmu(dev) (dev_iommu_priv_get(dev) != NULL)

static struct device *dma_dev;
static struct kmem_cache *lv2table_kmem_cache;
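/*
 * All unmapped lv1 entries link to this shared, always-empty lv2 table
 * rather than holding a fault entry, so that a speculative page table walk
 * on System MMU v3.x caches a pointer to an empty table instead of a
 * faulty entry (see the FLPD cache notes in alloc_lv2entry() and the
 * *CAUTION* comment above exynos_iommu_map()).
 */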
static sysmmu_pte_t *zero_lv2_table;
#define ZERO_LV2LINK mk_lv1ent_page(virt_to_phys(zero_lv2_table))

static sysmmu_pte_t *section_entry(sysmmu_pte_t *pgtable, sysmmu_iova_t iova)
{
	return pgtable + lv1ent_offset(iova);
}

static sysmmu_pte_t *page_entry(sysmmu_pte_t *sent, sysmmu_iova_t iova)
{
	return (sysmmu_pte_t *)phys_to_virt(
				lv2table_base(sent)) + lv2ent_offset(iova);
}

struct sysmmu_fault {
	sysmmu_iova_t addr; /* IOVA address that caused fault */
	const char *name; /* human readable fault name */
	unsigned int type; /* fault type for report_iommu_fault() */
};

struct sysmmu_v1_fault_info {
	unsigned short addr_reg; /* register to read IOVA fault address */
	const char *name; /* human readable fault name */
	unsigned int type; /* fault type for report_iommu_fault */
};

static const struct sysmmu_v1_fault_info sysmmu_v1_faults[] = {
	{ REG_PAGE_FAULT_ADDR, "PAGE", IOMMU_FAULT_READ },
	{ REG_AR_FAULT_ADDR, "MULTI-HIT", IOMMU_FAULT_READ },
	{ REG_AW_FAULT_ADDR, "MULTI-HIT", IOMMU_FAULT_WRITE },
	{ REG_DEFAULT_SLAVE_ADDR, "BUS ERROR", IOMMU_FAULT_READ },
	{ REG_AR_FAULT_ADDR, "SECURITY PROTECTION", IOMMU_FAULT_READ },
	{ REG_AR_FAULT_ADDR, "ACCESS PROTECTION", IOMMU_FAULT_READ },
	{ REG_AW_FAULT_ADDR, "SECURITY PROTECTION", IOMMU_FAULT_WRITE },
	{ REG_AW_FAULT_ADDR, "ACCESS PROTECTION", IOMMU_FAULT_WRITE },
};

/* SysMMU v5 has the same faults for AR (bits 0..4) and AW (bits 16..20) */
static const char * const sysmmu_v5_fault_names[] = {
	"PTW",
	"PAGE",
	"MULTI-HIT",
	"ACCESS PROTECTION",
	"SECURITY PROTECTION"
};
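/*
 * Example decode (illustrative only): interrupt bit 17 is an AW (write)
 * fault, so exynos_sysmmu_v5_get_fault_info() below maps it to
 * itype - 16 == 1, i.e. a "PAGE" fault reported as IOMMU_FAULT_WRITE with
 * the address taken from REG_V5_FAULT_AW_VA.
 */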

static const char * const sysmmu_v7_fault_names[] = {
	"PTW",
	"PAGE",
	"ACCESS PROTECTION",
	"RESERVED"
};

/*
 * This structure is attached to dev->iommu->priv of the master device
 * on device add. It contains a list of SYSMMU controllers defined by
 * device tree, which are bound to the given master device. It is usually
 * referenced by the 'owner' pointer.
 */
struct exynos_iommu_owner {
	struct list_head controllers; /* list of sysmmu_drvdata.owner_node */
	struct iommu_domain *domain; /* domain this device is attached to */
	struct mutex rpm_lock; /* for runtime pm of all sysmmus */
};

/*
 * This structure is an exynos specific generalization of struct iommu_domain.
 * It contains the list of SYSMMU controllers from all master devices that
 * have been attached to this domain, and the page tables of the IO address
 * space defined by it. It is usually referenced by the 'domain' pointer.
 */
struct exynos_iommu_domain {
	struct list_head clients; /* list of sysmmu_drvdata.domain_node */
	sysmmu_pte_t *pgtable; /* lv1 page table, 16KB */
	short *lv2entcnt; /* free lv2 entry counter for each section */
	spinlock_t lock; /* lock for modifying list of clients */
	spinlock_t pgtablelock; /* lock for modifying page table @ pgtable */
	struct iommu_domain domain; /* generic domain data structure */
};

struct sysmmu_drvdata;

/*
 * SysMMU version specific data. Contains offsets for the registers which can
 * be found in different SysMMU variants, but have different offset values.
 * Also contains version specific callbacks to abstract the hardware.
 */
struct sysmmu_variant {
	u32 pt_base; /* page table base address (physical) */
	u32 flush_all; /* invalidate all TLB entries */
	u32 flush_entry; /* invalidate specific TLB entry */
	u32 flush_range; /* invalidate TLB entries in specified range */
	u32 flush_start; /* start address of range invalidation */
	u32 flush_end; /* end address of range invalidation */
	u32 int_status; /* interrupt status information */
	u32 int_clear; /* clear the interrupt */
	u32 fault_va; /* IOVA address that caused fault */
	u32 fault_info; /* fault transaction info */

	int (*get_fault_info)(struct sysmmu_drvdata *data, unsigned int itype,
			      struct sysmmu_fault *fault);
};

/*
 * This structure holds all data of a single SYSMMU controller. This includes
 * hw resources like registers and clocks, pointers and list nodes to connect
 * it to all other structures, internal state and parameters read from device
 * tree. It is usually referenced by the 'data' pointer.
 */
struct sysmmu_drvdata {
	struct device *sysmmu; /* SYSMMU controller device */
	struct device *master; /* master device (owner) */
	struct device_link *link; /* runtime PM link to master */
	void __iomem *sfrbase; /* our registers */
	struct clk *clk; /* SYSMMU's clock */
	struct clk *aclk; /* SYSMMU's aclk clock */
	struct clk *pclk; /* SYSMMU's pclk clock */
	struct clk *clk_master; /* master's device clock */
	spinlock_t lock; /* lock for modifying state */
	bool active; /* current status */
	struct exynos_iommu_domain *domain; /* domain we belong to */
	struct list_head domain_node; /* node for domain clients list */
	struct list_head owner_node; /* node for owner controllers list */
	phys_addr_t pgtable; /* assigned page table structure */
	unsigned int version; /* our version */

	struct iommu_device iommu; /* IOMMU core handle */
	const struct sysmmu_variant *variant; /* version specific data */

	/* v7 fields */
	bool has_vcr; /* virtual machine control register */
};

#define SYSMMU_REG(data, reg) ((data)->sfrbase + (data)->variant->reg)
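/*
 * Usage sketch (illustrative only): SYSMMU_REG(data, flush_all) resolves
 * to data->sfrbase + 0x0c on v1.x - v3.x, but data->sfrbase + 0x10 on v5+,
 * so callers like __sysmmu_tlb_invalidate() stay version agnostic:
 *
 *	writel(0x1, SYSMMU_REG(data, flush_all));
 */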

static int exynos_sysmmu_v1_get_fault_info(struct sysmmu_drvdata *data,
					   unsigned int itype,
					   struct sysmmu_fault *fault)
{
	const struct sysmmu_v1_fault_info *finfo;

	if (itype >= ARRAY_SIZE(sysmmu_v1_faults))
		return -ENXIO;

	finfo = &sysmmu_v1_faults[itype];
	fault->addr = readl(data->sfrbase + finfo->addr_reg);
	fault->name = finfo->name;
	fault->type = finfo->type;

	return 0;
}

static int exynos_sysmmu_v5_get_fault_info(struct sysmmu_drvdata *data,
					   unsigned int itype,
					   struct sysmmu_fault *fault)
{
	unsigned int addr_reg;

	if (itype < ARRAY_SIZE(sysmmu_v5_fault_names)) {
		fault->type = IOMMU_FAULT_READ;
		addr_reg = REG_V5_FAULT_AR_VA;
	} else if (itype >= 16 && itype <= 20) {
		fault->type = IOMMU_FAULT_WRITE;
		addr_reg = REG_V5_FAULT_AW_VA;
		itype -= 16;
	} else {
		return -ENXIO;
	}

	fault->name = sysmmu_v5_fault_names[itype];
	fault->addr = readl(data->sfrbase + addr_reg);

	return 0;
}

static int exynos_sysmmu_v7_get_fault_info(struct sysmmu_drvdata *data,
					   unsigned int itype,
					   struct sysmmu_fault *fault)
{
	u32 info = readl(SYSMMU_REG(data, fault_info));

	fault->addr = readl(SYSMMU_REG(data, fault_va));
	fault->name = sysmmu_v7_fault_names[itype % 4];
	fault->type = (info & BIT(20)) ? IOMMU_FAULT_WRITE : IOMMU_FAULT_READ;

	return 0;
}

/* SysMMU v1..v3 */
static const struct sysmmu_variant sysmmu_v1_variant = {
	.flush_all = 0x0c,
	.flush_entry = 0x10,
	.pt_base = 0x14,
	.int_status = 0x18,
	.int_clear = 0x1c,

	.get_fault_info = exynos_sysmmu_v1_get_fault_info,
};

/* SysMMU v5 */
static const struct sysmmu_variant sysmmu_v5_variant = {
	.pt_base = 0x0c,
	.flush_all = 0x10,
	.flush_entry = 0x14,
	.flush_range = 0x18,
	.flush_start = 0x20,
	.flush_end = 0x24,
	.int_status = 0x60,
	.int_clear = 0x64,

	.get_fault_info = exynos_sysmmu_v5_get_fault_info,
};

/* SysMMU v7: non-VM capable register layout */
static const struct sysmmu_variant sysmmu_v7_variant = {
	.pt_base = 0x0c,
	.flush_all = 0x10,
	.flush_entry = 0x14,
	.flush_range = 0x18,
	.flush_start = 0x20,
	.flush_end = 0x24,
	.int_status = 0x60,
	.int_clear = 0x64,
	.fault_va = 0x70,
	.fault_info = 0x78,

	.get_fault_info = exynos_sysmmu_v7_get_fault_info,
};

/* SysMMU v7: VM capable register layout */
static const struct sysmmu_variant sysmmu_v7_vm_variant = {
	.pt_base = 0x800c,
	.flush_all = 0x8010,
	.flush_entry = 0x8014,
	.flush_range = 0x8018,
	.flush_start = 0x8020,
	.flush_end = 0x8024,
	.int_status = 0x60,
	.int_clear = 0x64,
	.fault_va = 0x1000,
	.fault_info = 0x1004,

	.get_fault_info = exynos_sysmmu_v7_get_fault_info,
};

static struct exynos_iommu_domain *to_exynos_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct exynos_iommu_domain, domain);
}

static void sysmmu_unblock(struct sysmmu_drvdata *data)
{
	writel(CTRL_ENABLE, data->sfrbase + REG_MMU_CTRL);
}

static bool sysmmu_block(struct sysmmu_drvdata *data)
{
	int i = 120;

	writel(CTRL_BLOCK, data->sfrbase + REG_MMU_CTRL);
	while ((i > 0) && !(readl(data->sfrbase + REG_MMU_STATUS) & 1))
		--i;

	if (!(readl(data->sfrbase + REG_MMU_STATUS) & 1)) {
		sysmmu_unblock(data);
		return false;
	}

	return true;
}

static void __sysmmu_tlb_invalidate(struct sysmmu_drvdata *data)
{
	writel(0x1, SYSMMU_REG(data, flush_all));
}

static void __sysmmu_tlb_invalidate_entry(struct sysmmu_drvdata *data,
					  sysmmu_iova_t iova, unsigned int num_inv)
{
	unsigned int i;

	if (MMU_MAJ_VER(data->version) < 5 || num_inv == 1) {
		for (i = 0; i < num_inv; i++) {
			writel((iova & SPAGE_MASK) | 1,
			       SYSMMU_REG(data, flush_entry));
			iova += SPAGE_SIZE;
		}
	} else {
		writel(iova & SPAGE_MASK, SYSMMU_REG(data, flush_start));
		writel((iova & SPAGE_MASK) + (num_inv - 1) * SPAGE_SIZE,
		       SYSMMU_REG(data, flush_end));
		writel(0x1, SYSMMU_REG(data, flush_range));
	}
}

static void __sysmmu_set_ptbase(struct sysmmu_drvdata *data, phys_addr_t pgd)
{
	u32 pt_base;

	if (MMU_MAJ_VER(data->version) < 5)
		pt_base = pgd;
	else
		pt_base = pgd >> SPAGE_ORDER;

	writel(pt_base, SYSMMU_REG(data, pt_base));
	__sysmmu_tlb_invalidate(data);
}

static void __sysmmu_enable_clocks(struct sysmmu_drvdata *data)
{
	BUG_ON(clk_prepare_enable(data->clk_master));
	BUG_ON(clk_prepare_enable(data->clk));
	BUG_ON(clk_prepare_enable(data->pclk));
	BUG_ON(clk_prepare_enable(data->aclk));
}

static void __sysmmu_disable_clocks(struct sysmmu_drvdata *data)
{
	clk_disable_unprepare(data->aclk);
	clk_disable_unprepare(data->pclk);
	clk_disable_unprepare(data->clk);
	clk_disable_unprepare(data->clk_master);
}

static bool __sysmmu_has_capa1(struct sysmmu_drvdata *data)
{
	u32 capa0 = readl(data->sfrbase + REG_V7_CAPA0);

	return capa0 & CAPA0_CAPA1_EXIST;
}

static void __sysmmu_get_vcr(struct sysmmu_drvdata *data)
{
	u32 capa1 = readl(data->sfrbase + REG_V7_CAPA1);

	data->has_vcr = capa1 & CAPA1_VCR_ENABLED;
}

static void __sysmmu_get_version(struct sysmmu_drvdata *data)
{
	u32 ver;

	__sysmmu_enable_clocks(data);

	ver = readl(data->sfrbase + REG_MMU_VERSION);

	/* controllers on some SoCs don't report a proper version */
	if (ver == 0x80000001u)
		data->version = MAKE_MMU_VER(1, 0);
	else
		data->version = MMU_RAW_VER(ver);

	dev_dbg(data->sysmmu, "hardware version: %d.%d\n",
		MMU_MAJ_VER(data->version), MMU_MIN_VER(data->version));

	if (MMU_MAJ_VER(data->version) < 5) {
		data->variant = &sysmmu_v1_variant;
	} else if (MMU_MAJ_VER(data->version) < 7) {
		data->variant = &sysmmu_v5_variant;
	} else {
		if (__sysmmu_has_capa1(data))
			__sysmmu_get_vcr(data);
		if (data->has_vcr)
			data->variant = &sysmmu_v7_vm_variant;
		else
			data->variant = &sysmmu_v7_variant;
	}

	__sysmmu_disable_clocks(data);
}

static void show_fault_information(struct sysmmu_drvdata *data,
				   const struct sysmmu_fault *fault)
{
	sysmmu_pte_t *ent;

	dev_err(data->sysmmu, "%s: [%s] %s FAULT occurred at %#x\n",
		dev_name(data->master),
		fault->type == IOMMU_FAULT_READ ? "READ" : "WRITE",
		fault->name, fault->addr);
	dev_dbg(data->sysmmu, "Page table base: %pa\n", &data->pgtable);
	ent = section_entry(phys_to_virt(data->pgtable), fault->addr);
	dev_dbg(data->sysmmu, "\tLv1 entry: %#x\n", *ent);
	if (lv1ent_page(ent)) {
		ent = page_entry(ent, fault->addr);
		dev_dbg(data->sysmmu, "\t Lv2 entry: %#x\n", *ent);
	}
}

static irqreturn_t exynos_sysmmu_irq(int irq, void *dev_id)
{
	struct sysmmu_drvdata *data = dev_id;
	unsigned int itype;
	struct sysmmu_fault fault;
	int ret = -ENOSYS;

	WARN_ON(!data->active);

	spin_lock(&data->lock);
	clk_enable(data->clk_master);

	itype = __ffs(readl(SYSMMU_REG(data, int_status)));
	ret = data->variant->get_fault_info(data, itype, &fault);
	if (ret) {
		dev_err(data->sysmmu, "Unhandled interrupt bit %u\n", itype);
		goto out;
	}
	show_fault_information(data, &fault);

	if (data->domain) {
		ret = report_iommu_fault(&data->domain->domain, data->master,
					 fault.addr, fault.type);
	}
	if (ret)
		panic("Unrecoverable System MMU Fault!");

out:
	writel(1 << itype, SYSMMU_REG(data, int_clear));

	/* the SysMMU is in a blocked state while an interrupt is pending */
	sysmmu_unblock(data);
	clk_disable(data->clk_master);
	spin_unlock(&data->lock);

	return IRQ_HANDLED;
}

static void __sysmmu_disable(struct sysmmu_drvdata *data)
{
	unsigned long flags;

	clk_enable(data->clk_master);

	spin_lock_irqsave(&data->lock, flags);
	writel(CTRL_DISABLE, data->sfrbase + REG_MMU_CTRL);
	writel(0, data->sfrbase + REG_MMU_CFG);
	data->active = false;
	spin_unlock_irqrestore(&data->lock, flags);

	__sysmmu_disable_clocks(data);
}

static void __sysmmu_init_config(struct sysmmu_drvdata *data)
{
	unsigned int cfg;

	if (data->version <= MAKE_MMU_VER(3, 1))
		cfg = CFG_LRU | CFG_QOS(15);
	else if (data->version <= MAKE_MMU_VER(3, 2))
		cfg = CFG_LRU | CFG_QOS(15) | CFG_FLPDCACHE | CFG_SYSSEL;
	else
		cfg = CFG_QOS(15) | CFG_FLPDCACHE | CFG_ACGEN;

	cfg |= CFG_EAP; /* enable access protection bits check */

	writel(cfg, data->sfrbase + REG_MMU_CFG);
}

static void __sysmmu_enable_vid(struct sysmmu_drvdata *data)
{
	u32 ctrl;

	if (MMU_MAJ_VER(data->version) < 7 || !data->has_vcr)
		return;

	ctrl = readl(data->sfrbase + REG_V7_CTRL_VM);
	ctrl |= CTRL_VM_ENABLE | CTRL_VM_FAULT_MODE_STALL;
	writel(ctrl, data->sfrbase + REG_V7_CTRL_VM);
}

static void __sysmmu_enable(struct sysmmu_drvdata *data)
{
	unsigned long flags;

	__sysmmu_enable_clocks(data);

	spin_lock_irqsave(&data->lock, flags);
	writel(CTRL_BLOCK, data->sfrbase + REG_MMU_CTRL);
	__sysmmu_init_config(data);
	__sysmmu_set_ptbase(data, data->pgtable);
	__sysmmu_enable_vid(data);
	writel(CTRL_ENABLE, data->sfrbase + REG_MMU_CTRL);
	data->active = true;
	spin_unlock_irqrestore(&data->lock, flags);

	/*
	 * The SYSMMU driver keeps the master's clock enabled only for the
	 * short time it spends accessing the registers. For address
	 * translation during a DMA transaction it relies on the client
	 * driver to keep the clock enabled.
	 */
	clk_disable(data->clk_master);
}

static void sysmmu_tlb_invalidate_flpdcache(struct sysmmu_drvdata *data,
					    sysmmu_iova_t iova)
{
	unsigned long flags;

	spin_lock_irqsave(&data->lock, flags);
	if (data->active && data->version >= MAKE_MMU_VER(3, 3)) {
		clk_enable(data->clk_master);
		if (sysmmu_block(data)) {
			if (data->version >= MAKE_MMU_VER(5, 0))
				__sysmmu_tlb_invalidate(data);
			else
				__sysmmu_tlb_invalidate_entry(data, iova, 1);
			sysmmu_unblock(data);
		}
		clk_disable(data->clk_master);
	}
	spin_unlock_irqrestore(&data->lock, flags);
}

static void sysmmu_tlb_invalidate_entry(struct sysmmu_drvdata *data,
					sysmmu_iova_t iova, size_t size)
{
	unsigned long flags;

	spin_lock_irqsave(&data->lock, flags);
	if (data->active) {
		unsigned int num_inv = 1;

		clk_enable(data->clk_master);

		/*
		 * Required number of L2TLB invalidations:
		 * 4KB page: 1 invalidation
		 * 64KB page: 16 invalidations
		 * 1MB page: 64 invalidations
		 * because the TLB is 8-way set-associative with 64 sets.
		 * A 1MB page can be cached in any of the sets, while a
		 * 64KB page can be in one of 16 consecutive sets.
		 */
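		/*
		 * e.g. (illustrative): unmapping one 64KiB large page on a
		 * v2 SYSMMU issues 64KiB / 4KiB == 16 single-entry flushes.
		 */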
		if (MMU_MAJ_VER(data->version) == 2)
			num_inv = min_t(unsigned int, size / SPAGE_SIZE, 64);

		if (sysmmu_block(data)) {
			__sysmmu_tlb_invalidate_entry(data, iova, num_inv);
			sysmmu_unblock(data);
		}
		clk_disable(data->clk_master);
	}
	spin_unlock_irqrestore(&data->lock, flags);
}

static const struct iommu_ops exynos_iommu_ops;

static int exynos_sysmmu_probe(struct platform_device *pdev)
{
	int irq, ret;
	struct device *dev = &pdev->dev;
	struct sysmmu_drvdata *data;
	struct resource *res;

	data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	data->sfrbase = devm_ioremap_resource(dev, res);
	if (IS_ERR(data->sfrbase))
		return PTR_ERR(data->sfrbase);

	irq = platform_get_irq(pdev, 0);
	if (irq <= 0)
		return irq;

	ret = devm_request_irq(dev, irq, exynos_sysmmu_irq, 0,
			       dev_name(dev), data);
	if (ret) {
		dev_err(dev, "Unable to register handler of irq %d\n", irq);
		return ret;
	}

	data->clk = devm_clk_get_optional(dev, "sysmmu");
	if (IS_ERR(data->clk))
		return PTR_ERR(data->clk);

	data->aclk = devm_clk_get_optional(dev, "aclk");
	if (IS_ERR(data->aclk))
		return PTR_ERR(data->aclk);

	data->pclk = devm_clk_get_optional(dev, "pclk");
	if (IS_ERR(data->pclk))
		return PTR_ERR(data->pclk);

	if (!data->clk && (!data->aclk || !data->pclk)) {
		dev_err(dev, "Failed to get device clock(s)!\n");
		return -ENOSYS;
	}

	data->clk_master = devm_clk_get_optional(dev, "master");
	if (IS_ERR(data->clk_master))
		return PTR_ERR(data->clk_master);

	data->sysmmu = dev;
	spin_lock_init(&data->lock);

	__sysmmu_get_version(data);

	ret = iommu_device_sysfs_add(&data->iommu, &pdev->dev, NULL,
				     dev_name(data->sysmmu));
	if (ret)
		return ret;

	platform_set_drvdata(pdev, data);

	if (PG_ENT_SHIFT < 0) {
		if (MMU_MAJ_VER(data->version) < 5) {
			PG_ENT_SHIFT = SYSMMU_PG_ENT_SHIFT;
			LV1_PROT = SYSMMU_LV1_PROT;
			LV2_PROT = SYSMMU_LV2_PROT;
		} else {
			PG_ENT_SHIFT = SYSMMU_V5_PG_ENT_SHIFT;
			LV1_PROT = SYSMMU_V5_LV1_PROT;
			LV2_PROT = SYSMMU_V5_LV2_PROT;
		}
	}

	if (MMU_MAJ_VER(data->version) >= 5) {
		ret = dma_set_mask(dev, DMA_BIT_MASK(36));
		if (ret) {
			dev_err(dev, "Unable to set DMA mask: %d\n", ret);
			goto err_dma_set_mask;
		}
	}

	/*
	 * Use the first registered sysmmu device for performing
	 * dma mapping operations on iommu page tables (cpu cache flush).
	 */
	if (!dma_dev)
		dma_dev = &pdev->dev;

	pm_runtime_enable(dev);

	ret = iommu_device_register(&data->iommu, &exynos_iommu_ops, dev);
	if (ret)
		goto err_dma_set_mask;

	return 0;

err_dma_set_mask:
	iommu_device_sysfs_remove(&data->iommu);
	return ret;
}

static int __maybe_unused exynos_sysmmu_suspend(struct device *dev)
{
	struct sysmmu_drvdata *data = dev_get_drvdata(dev);
	struct device *master = data->master;

	if (master) {
		struct exynos_iommu_owner *owner = dev_iommu_priv_get(master);

		mutex_lock(&owner->rpm_lock);
		if (&data->domain->domain != &exynos_identity_domain) {
			dev_dbg(data->sysmmu, "saving state\n");
			__sysmmu_disable(data);
		}
		mutex_unlock(&owner->rpm_lock);
	}
	return 0;
}

static int __maybe_unused exynos_sysmmu_resume(struct device *dev)
{
	struct sysmmu_drvdata *data = dev_get_drvdata(dev);
	struct device *master = data->master;

	if (master) {
		struct exynos_iommu_owner *owner = dev_iommu_priv_get(master);

		mutex_lock(&owner->rpm_lock);
		if (&data->domain->domain != &exynos_identity_domain) {
			dev_dbg(data->sysmmu, "restoring state\n");
			__sysmmu_enable(data);
		}
		mutex_unlock(&owner->rpm_lock);
	}
	return 0;
}

static const struct dev_pm_ops sysmmu_pm_ops = {
	SET_RUNTIME_PM_OPS(exynos_sysmmu_suspend, exynos_sysmmu_resume, NULL)
	SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
				pm_runtime_force_resume)
};

static const struct of_device_id sysmmu_of_match[] = {
	{ .compatible = "samsung,exynos-sysmmu", },
	{ },
};

static struct platform_driver exynos_sysmmu_driver __refdata = {
	.probe = exynos_sysmmu_probe,
	.driver = {
		.name = "exynos-sysmmu",
		.of_match_table = sysmmu_of_match,
		.pm = &sysmmu_pm_ops,
		.suppress_bind_attrs = true,
	}
};

static inline void exynos_iommu_set_pte(sysmmu_pte_t *ent, sysmmu_pte_t val)
{
	dma_sync_single_for_cpu(dma_dev, virt_to_phys(ent), sizeof(*ent),
				DMA_TO_DEVICE);
	*ent = cpu_to_le32(val);
	dma_sync_single_for_device(dma_dev, virt_to_phys(ent), sizeof(*ent),
				   DMA_TO_DEVICE);
}

static struct iommu_domain *exynos_iommu_domain_alloc_paging(struct device *dev)
{
	struct exynos_iommu_domain *domain;
	dma_addr_t handle;
	int i;

	/* Check if correct PTE offsets are initialized */
	BUG_ON(PG_ENT_SHIFT < 0 || !dma_dev);

	domain = kzalloc(sizeof(*domain), GFP_KERNEL);
	if (!domain)
		return NULL;

	domain->pgtable = (sysmmu_pte_t *)__get_free_pages(GFP_KERNEL, 2);
	if (!domain->pgtable)
		goto err_pgtable;

	domain->lv2entcnt = (short *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 1);
	if (!domain->lv2entcnt)
		goto err_counter;

	/* Workaround for System MMU v3.3 to prevent caching 1MiB mapping */
	for (i = 0; i < NUM_LV1ENTRIES; i++)
		domain->pgtable[i] = ZERO_LV2LINK;

	handle = dma_map_single(dma_dev, domain->pgtable, LV1TABLE_SIZE,
				DMA_TO_DEVICE);
	/* For mapping page table entries we rely on dma == phys */
	BUG_ON(handle != virt_to_phys(domain->pgtable));
	if (dma_mapping_error(dma_dev, handle))
		goto err_lv2ent;

	spin_lock_init(&domain->lock);
	spin_lock_init(&domain->pgtablelock);
	INIT_LIST_HEAD(&domain->clients);

	domain->domain.geometry.aperture_start = 0;
	domain->domain.geometry.aperture_end = ~0UL;
	domain->domain.geometry.force_aperture = true;

	return &domain->domain;

err_lv2ent:
	free_pages((unsigned long)domain->lv2entcnt, 1);
err_counter:
	free_pages((unsigned long)domain->pgtable, 2);
err_pgtable:
	kfree(domain);
	return NULL;
}

static void exynos_iommu_domain_free(struct iommu_domain *iommu_domain)
{
	struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
	struct sysmmu_drvdata *data, *next;
	unsigned long flags;
	int i;

	WARN_ON(!list_empty(&domain->clients));

	spin_lock_irqsave(&domain->lock, flags);

	list_for_each_entry_safe(data, next, &domain->clients, domain_node) {
		spin_lock(&data->lock);
		__sysmmu_disable(data);
		data->pgtable = 0;
		data->domain = NULL;
		list_del_init(&data->domain_node);
		spin_unlock(&data->lock);
	}

	spin_unlock_irqrestore(&domain->lock, flags);

	dma_unmap_single(dma_dev, virt_to_phys(domain->pgtable), LV1TABLE_SIZE,
			 DMA_TO_DEVICE);

	for (i = 0; i < NUM_LV1ENTRIES; i++)
		if (lv1ent_page(domain->pgtable + i)) {
			phys_addr_t base = lv2table_base(domain->pgtable + i);

			dma_unmap_single(dma_dev, base, LV2TABLE_SIZE,
					 DMA_TO_DEVICE);
			kmem_cache_free(lv2table_kmem_cache,
					phys_to_virt(base));
		}

	free_pages((unsigned long)domain->pgtable, 2);
	free_pages((unsigned long)domain->lv2entcnt, 1);
	kfree(domain);
}

static int exynos_iommu_identity_attach(struct iommu_domain *identity_domain,
					struct device *dev)
{
	struct exynos_iommu_owner *owner = dev_iommu_priv_get(dev);
	struct exynos_iommu_domain *domain;
	phys_addr_t pagetable;
	struct sysmmu_drvdata *data, *next;
	unsigned long flags;

	if (owner->domain == identity_domain)
		return 0;

	domain = to_exynos_domain(owner->domain);
	pagetable = virt_to_phys(domain->pgtable);

	mutex_lock(&owner->rpm_lock);

	list_for_each_entry(data, &owner->controllers, owner_node) {
		pm_runtime_get_noresume(data->sysmmu);
		if (pm_runtime_active(data->sysmmu))
			__sysmmu_disable(data);
		pm_runtime_put(data->sysmmu);
	}

	spin_lock_irqsave(&domain->lock, flags);
	list_for_each_entry_safe(data, next, &domain->clients, domain_node) {
		spin_lock(&data->lock);
		data->pgtable = 0;
		data->domain = NULL;
		list_del_init(&data->domain_node);
		spin_unlock(&data->lock);
	}
	owner->domain = identity_domain;
	spin_unlock_irqrestore(&domain->lock, flags);

	mutex_unlock(&owner->rpm_lock);

	dev_dbg(dev, "%s: Restored IOMMU to IDENTITY from pgtable %pa\n",
		__func__, &pagetable);
	return 0;
}

static struct iommu_domain_ops exynos_identity_ops = {
	.attach_dev = exynos_iommu_identity_attach,
};

static struct iommu_domain exynos_identity_domain = {
	.type = IOMMU_DOMAIN_IDENTITY,
	.ops = &exynos_identity_ops,
};

static int exynos_iommu_attach_device(struct iommu_domain *iommu_domain,
				      struct device *dev)
{
	struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
	struct exynos_iommu_owner *owner = dev_iommu_priv_get(dev);
	struct sysmmu_drvdata *data;
	phys_addr_t pagetable = virt_to_phys(domain->pgtable);
	unsigned long flags;
	int err;

	err = exynos_iommu_identity_attach(&exynos_identity_domain, dev);
	if (err)
		return err;

	mutex_lock(&owner->rpm_lock);

	spin_lock_irqsave(&domain->lock, flags);
	list_for_each_entry(data, &owner->controllers, owner_node) {
		spin_lock(&data->lock);
		data->pgtable = pagetable;
		data->domain = domain;
		list_add_tail(&data->domain_node, &domain->clients);
		spin_unlock(&data->lock);
	}
	owner->domain = iommu_domain;
	spin_unlock_irqrestore(&domain->lock, flags);

	list_for_each_entry(data, &owner->controllers, owner_node) {
		pm_runtime_get_noresume(data->sysmmu);
		if (pm_runtime_active(data->sysmmu))
			__sysmmu_enable(data);
		pm_runtime_put(data->sysmmu);
	}

	mutex_unlock(&owner->rpm_lock);

	dev_dbg(dev, "%s: Attached IOMMU with pgtable %pa\n", __func__,
		&pagetable);

	return 0;
}

static sysmmu_pte_t *alloc_lv2entry(struct exynos_iommu_domain *domain,
		sysmmu_pte_t *sent, sysmmu_iova_t iova, short *pgcounter)
{
	if (lv1ent_section(sent)) {
		WARN(1, "Trying mapping on %#08x mapped with 1MiB page", iova);
		return ERR_PTR(-EADDRINUSE);
	}

	if (lv1ent_fault(sent)) {
		dma_addr_t handle;
		sysmmu_pte_t *pent;
		bool need_flush_flpd_cache = lv1ent_zero(sent);

		pent = kmem_cache_zalloc(lv2table_kmem_cache, GFP_ATOMIC);
		BUG_ON((uintptr_t)pent & (LV2TABLE_SIZE - 1));
		if (!pent)
			return ERR_PTR(-ENOMEM);

		exynos_iommu_set_pte(sent, mk_lv1ent_page(virt_to_phys(pent)));
		kmemleak_ignore(pent);
		*pgcounter = NUM_LV2ENTRIES;
		handle = dma_map_single(dma_dev, pent, LV2TABLE_SIZE,
					DMA_TO_DEVICE);
		if (dma_mapping_error(dma_dev, handle)) {
			kmem_cache_free(lv2table_kmem_cache, pent);
			return ERR_PTR(-EADDRINUSE);
		}

		/*
		 * If the pre-fetched SLPD is the faulty SLPD in zero_l2_table,
		 * the FLPD cache may cache the address of zero_l2_table. This
		 * function replaces the zero_l2_table with a new L2 page table
		 * to write valid mappings.
		 * Accessing the valid area may cause a page fault since the
		 * FLPD cache may still cache zero_l2_table for the valid area
		 * instead of the new L2 page table that has the mapping
		 * information of the valid area.
		 * Thus any replacement of zero_l2_table with another valid L2
		 * page table must involve FLPD cache invalidation for System
		 * MMU v3.3.
		 * FLPD cache invalidation is performed with TLB invalidation
		 * by VPN without blocking. It is safe to invalidate the TLB
		 * without blocking because the target address of the TLB
		 * invalidation is not currently mapped.
		 */
		if (need_flush_flpd_cache) {
			struct sysmmu_drvdata *data;

			spin_lock(&domain->lock);
			list_for_each_entry(data, &domain->clients, domain_node)
				sysmmu_tlb_invalidate_flpdcache(data, iova);
			spin_unlock(&domain->lock);
		}
	}

	return page_entry(sent, iova);
}

static int lv1set_section(struct exynos_iommu_domain *domain,
			  sysmmu_pte_t *sent, sysmmu_iova_t iova,
			  phys_addr_t paddr, int prot, short *pgcnt)
{
	if (lv1ent_section(sent)) {
		WARN(1, "Trying mapping on 1MiB@%#08x that is mapped",
			iova);
		return -EADDRINUSE;
	}

	if (lv1ent_page(sent)) {
		if (*pgcnt != NUM_LV2ENTRIES) {
			WARN(1, "Trying mapping on 1MiB@%#08x that is mapped",
				iova);
			return -EADDRINUSE;
		}

		kmem_cache_free(lv2table_kmem_cache, page_entry(sent, 0));
		*pgcnt = 0;
	}

	exynos_iommu_set_pte(sent, mk_lv1ent_sect(paddr, prot));

	spin_lock(&domain->lock);
	if (lv1ent_page_zero(sent)) {
		struct sysmmu_drvdata *data;
		/*
		 * Flushing FLPD cache in System MMU v3.3 that may cache a FLPD
		 * entry by speculative prefetch of SLPD which has no mapping.
		 */
		list_for_each_entry(data, &domain->clients, domain_node)
			sysmmu_tlb_invalidate_flpdcache(data, iova);
	}
	spin_unlock(&domain->lock);

	return 0;
}

static int lv2set_page(sysmmu_pte_t *pent, phys_addr_t paddr, size_t size,
		       int prot, short *pgcnt)
{
	if (size == SPAGE_SIZE) {
		if (WARN_ON(!lv2ent_fault(pent)))
			return -EADDRINUSE;

		exynos_iommu_set_pte(pent, mk_lv2ent_spage(paddr, prot));
		*pgcnt -= 1;
	} else { /* size == LPAGE_SIZE */
		int i;
		dma_addr_t pent_base = virt_to_phys(pent);

		dma_sync_single_for_cpu(dma_dev, pent_base,
					sizeof(*pent) * SPAGES_PER_LPAGE,
					DMA_TO_DEVICE);
		for (i = 0; i < SPAGES_PER_LPAGE; i++, pent++) {
			if (WARN_ON(!lv2ent_fault(pent))) {
				if (i > 0)
					memset(pent - i, 0, sizeof(*pent) * i);
				return -EADDRINUSE;
			}

			*pent = mk_lv2ent_lpage(paddr, prot);
		}
		dma_sync_single_for_device(dma_dev, pent_base,
					   sizeof(*pent) * SPAGES_PER_LPAGE,
					   DMA_TO_DEVICE);
		*pgcnt -= SPAGES_PER_LPAGE;
	}

	return 0;
}

/*
 * *CAUTION* to the I/O virtual memory managers that support exynos-iommu:
 *
 * System MMU v3.x has advanced logic to improve address translation
 * performance by caching more page table entries during a page table walk.
 * However, the logic has a bug: while caching faulty page table entries,
 * System MMU reports a page fault if the cached fault entry is hit, even
 * though the fault entry was updated to a valid entry after it was cached.
 * To prevent caching faulty page table entries which may be updated to
 * valid entries later, the virtual memory manager must apply the following
 * workaround.
 *
 * Any two consecutive I/O virtual address regions must have a hole of 128KiB
 * at maximum to prevent misbehavior of System MMU 3.x (workaround for h/w bug).
 *
 * Precisely, any start address of an I/O virtual region must be aligned to
 * the following sizes for System MMU v3.1 and v3.2:
 * System MMU v3.1: 128KiB
 * System MMU v3.2: 256KiB
 *
 * Because System MMU v3.3 caches page table entries more aggressively, it
 * needs more workarounds:
 * - Any two consecutive I/O virtual regions must have a hole of size larger
 *   than or equal to 128KiB.
 * - The start address of an I/O virtual region must be aligned by 128KiB.
 */
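/*
 * Example layout honouring the v3.3 rules above (illustrative only): two
 * regions starting at IOVA 0x10000000 and 0x10120000 are both 128KiB
 * (0x20000) aligned, and if the first region is at most 1MiB long the
 * hole between them is at least 0x20000 bytes, i.e. 128KiB.
 */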
static int exynos_iommu_map(struct iommu_domain *iommu_domain,
			    unsigned long l_iova, phys_addr_t paddr, size_t size,
			    size_t count, int prot, gfp_t gfp, size_t *mapped)
{
	struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
	sysmmu_pte_t *entry;
	sysmmu_iova_t iova = (sysmmu_iova_t)l_iova;
	unsigned long flags;
	int ret = -ENOMEM;

	BUG_ON(domain->pgtable == NULL);
	prot &= SYSMMU_SUPPORTED_PROT_BITS;

	spin_lock_irqsave(&domain->pgtablelock, flags);

	entry = section_entry(domain->pgtable, iova);

	if (size == SECT_SIZE) {
		ret = lv1set_section(domain, entry, iova, paddr, prot,
				     &domain->lv2entcnt[lv1ent_offset(iova)]);
	} else {
		sysmmu_pte_t *pent;

		pent = alloc_lv2entry(domain, entry, iova,
				      &domain->lv2entcnt[lv1ent_offset(iova)]);

		if (IS_ERR(pent))
			ret = PTR_ERR(pent);
		else
			ret = lv2set_page(pent, paddr, size, prot,
					  &domain->lv2entcnt[lv1ent_offset(iova)]);
	}

	if (ret)
		pr_err("%s: Failed(%d) to map %#zx bytes @ %#x\n",
		       __func__, ret, size, iova);
	else
		*mapped = size;

	spin_unlock_irqrestore(&domain->pgtablelock, flags);

	return ret;
}

static void exynos_iommu_tlb_invalidate_entry(struct exynos_iommu_domain *domain,
					      sysmmu_iova_t iova, size_t size)
{
	struct sysmmu_drvdata *data;
	unsigned long flags;

	spin_lock_irqsave(&domain->lock, flags);

	list_for_each_entry(data, &domain->clients, domain_node)
		sysmmu_tlb_invalidate_entry(data, iova, size);

	spin_unlock_irqrestore(&domain->lock, flags);
}

static size_t exynos_iommu_unmap(struct iommu_domain *iommu_domain,
				 unsigned long l_iova, size_t size, size_t count,
				 struct iommu_iotlb_gather *gather)
{
	struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
	sysmmu_iova_t iova = (sysmmu_iova_t)l_iova;
	sysmmu_pte_t *ent;
	size_t err_pgsize;
	unsigned long flags;

	BUG_ON(domain->pgtable == NULL);

	spin_lock_irqsave(&domain->pgtablelock, flags);

	ent = section_entry(domain->pgtable, iova);

	if (lv1ent_section(ent)) {
		if (WARN_ON(size < SECT_SIZE)) {
			err_pgsize = SECT_SIZE;
			goto err;
		}

		/* workaround for h/w bug in System MMU v3.3 */
		exynos_iommu_set_pte(ent, ZERO_LV2LINK);
		size = SECT_SIZE;
		goto done;
	}

	if (unlikely(lv1ent_fault(ent))) {
		if (size > SECT_SIZE)
			size = SECT_SIZE;
		goto done;
	}

	/* lv1ent_page(ent) == true here */

	ent = page_entry(ent, iova);

	if (unlikely(lv2ent_fault(ent))) {
		size = SPAGE_SIZE;
		goto done;
	}

	if (lv2ent_small(ent)) {
		exynos_iommu_set_pte(ent, 0);
		size = SPAGE_SIZE;
		domain->lv2entcnt[lv1ent_offset(iova)] += 1;
		goto done;
	}

	/* lv2ent_large(ent) == true here */
	if (WARN_ON(size < LPAGE_SIZE)) {
		err_pgsize = LPAGE_SIZE;
		goto err;
	}

	dma_sync_single_for_cpu(dma_dev, virt_to_phys(ent),
				sizeof(*ent) * SPAGES_PER_LPAGE,
				DMA_TO_DEVICE);
	memset(ent, 0, sizeof(*ent) * SPAGES_PER_LPAGE);
	dma_sync_single_for_device(dma_dev, virt_to_phys(ent),
				   sizeof(*ent) * SPAGES_PER_LPAGE,
				   DMA_TO_DEVICE);
	size = LPAGE_SIZE;
	domain->lv2entcnt[lv1ent_offset(iova)] += SPAGES_PER_LPAGE;
done:
	spin_unlock_irqrestore(&domain->pgtablelock, flags);

	exynos_iommu_tlb_invalidate_entry(domain, iova, size);

	return size;
err:
	spin_unlock_irqrestore(&domain->pgtablelock, flags);

	pr_err("%s: Failed: size(%#zx) @ %#x is smaller than page size %#zx\n",
	       __func__, size, iova, err_pgsize);

	return 0;
}

static phys_addr_t exynos_iommu_iova_to_phys(struct iommu_domain *iommu_domain,
					     dma_addr_t iova)
{
	struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
	sysmmu_pte_t *entry;
	unsigned long flags;
	phys_addr_t phys = 0;

	spin_lock_irqsave(&domain->pgtablelock, flags);

	entry = section_entry(domain->pgtable, iova);

	if (lv1ent_section(entry)) {
		phys = section_phys(entry) + section_offs(iova);
	} else if (lv1ent_page(entry)) {
		entry = page_entry(entry, iova);

		if (lv2ent_large(entry))
			phys = lpage_phys(entry) + lpage_offs(iova);
		else if (lv2ent_small(entry))
			phys = spage_phys(entry) + spage_offs(iova);
	}

	spin_unlock_irqrestore(&domain->pgtablelock, flags);

	return phys;
}

static struct iommu_device *exynos_iommu_probe_device(struct device *dev)
{
	struct exynos_iommu_owner *owner = dev_iommu_priv_get(dev);
	struct sysmmu_drvdata *data;

	if (!has_sysmmu(dev))
		return ERR_PTR(-ENODEV);

	list_for_each_entry(data, &owner->controllers, owner_node) {
		/*
		 * SYSMMU will be runtime activated via device link
		 * (dependency) to its master device, so there are no
		 * direct calls to pm_runtime_get/put in this driver.
		 */
		data->link = device_link_add(dev, data->sysmmu,
					     DL_FLAG_STATELESS |
					     DL_FLAG_PM_RUNTIME);
	}

	/* There is always at least one entry, see exynos_iommu_of_xlate() */
	data = list_first_entry(&owner->controllers,
				struct sysmmu_drvdata, owner_node);

	return &data->iommu;
}

static void exynos_iommu_release_device(struct device *dev)
{
	struct exynos_iommu_owner *owner = dev_iommu_priv_get(dev);
	struct sysmmu_drvdata *data;

	WARN_ON(exynos_iommu_identity_attach(&exynos_identity_domain, dev));

	list_for_each_entry(data, &owner->controllers, owner_node)
		device_link_del(data->link);
}

static int exynos_iommu_of_xlate(struct device *dev,
				 const struct of_phandle_args *spec)
{
	struct platform_device *sysmmu = of_find_device_by_node(spec->np);
	struct exynos_iommu_owner *owner = dev_iommu_priv_get(dev);
	struct sysmmu_drvdata *data, *entry;

	if (!sysmmu)
		return -ENODEV;

	data = platform_get_drvdata(sysmmu);
	if (!data) {
		put_device(&sysmmu->dev);
		return -ENODEV;
	}

	if (!owner) {
		owner = kzalloc(sizeof(*owner), GFP_KERNEL);
		if (!owner) {
			put_device(&sysmmu->dev);
			return -ENOMEM;
		}

		INIT_LIST_HEAD(&owner->controllers);
		mutex_init(&owner->rpm_lock);
		owner->domain = &exynos_identity_domain;
		dev_iommu_priv_set(dev, owner);
	}

	list_for_each_entry(entry, &owner->controllers, owner_node)
		if (entry == data)
			return 0;

	list_add_tail(&data->owner_node, &owner->controllers);
	data->master = dev;

	return 0;
}

static const struct iommu_ops exynos_iommu_ops = {
	.identity_domain = &exynos_identity_domain,
	.domain_alloc_paging = exynos_iommu_domain_alloc_paging,
	.device_group = generic_device_group,
	.probe_device = exynos_iommu_probe_device,
	.release_device = exynos_iommu_release_device,
	.pgsize_bitmap = SECT_SIZE | LPAGE_SIZE | SPAGE_SIZE,
	.of_xlate = exynos_iommu_of_xlate,
	.default_domain_ops = &(const struct iommu_domain_ops) {
		.attach_dev = exynos_iommu_attach_device,
		.map_pages = exynos_iommu_map,
		.unmap_pages = exynos_iommu_unmap,
		.iova_to_phys = exynos_iommu_iova_to_phys,
		.free = exynos_iommu_domain_free,
	}
};

static int __init exynos_iommu_init(void)
{
	struct device_node *np;
	int ret;

	np = of_find_matching_node(NULL, sysmmu_of_match);
	if (!np)
		return 0;

	of_node_put(np);

	lv2table_kmem_cache = kmem_cache_create("exynos-iommu-lv2table",
				LV2TABLE_SIZE, LV2TABLE_SIZE, 0, NULL);
	if (!lv2table_kmem_cache) {
		pr_err("%s: Failed to create kmem cache\n", __func__);
		return -ENOMEM;
	}

	zero_lv2_table = kmem_cache_zalloc(lv2table_kmem_cache, GFP_KERNEL);
	if (zero_lv2_table == NULL) {
		pr_err("%s: Failed to allocate zero level2 page table\n",
		       __func__);
		ret = -ENOMEM;
		goto err_zero_lv2;
	}

	ret = platform_driver_register(&exynos_sysmmu_driver);
	if (ret) {
		pr_err("%s: Failed to register driver\n", __func__);
		goto err_reg_driver;
	}

	return 0;
err_reg_driver:
	kmem_cache_free(lv2table_kmem_cache, zero_lv2_table);
err_zero_lv2:
	kmem_cache_destroy(lv2table_kmem_cache);
	return ret;
}
core_initcall(exynos_iommu_init);