1 | // SPDX-License-Identifier: GPL-2.0-only |
2 | /* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved. |
3 | * |
4 | * Author: Stepan Moskovchenko <stepanm@codeaurora.org> |
5 | */ |
6 | |
7 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt |
8 | #include <linux/kernel.h> |
9 | #include <linux/init.h> |
10 | #include <linux/platform_device.h> |
11 | #include <linux/errno.h> |
12 | #include <linux/io.h> |
13 | #include <linux/io-pgtable.h> |
14 | #include <linux/interrupt.h> |
15 | #include <linux/list.h> |
16 | #include <linux/spinlock.h> |
17 | #include <linux/slab.h> |
18 | #include <linux/iommu.h> |
19 | #include <linux/clk.h> |
20 | #include <linux/err.h> |
21 | |
22 | #include <asm/cacheflush.h> |
23 | #include <linux/sizes.h> |
24 | |
25 | #include "msm_iommu_hw-8xxx.h" |
26 | #include "msm_iommu.h" |
27 | |
28 | #define MRC(reg, processor, op1, crn, crm, op2) \ |
29 | __asm__ __volatile__ ( \ |
30 | " mrc " #processor "," #op1 ", %0," #crn "," #crm "," #op2 "\n" \ |
31 | : "=r" (reg)) |
32 | |
33 | /* bitmap of the page sizes currently supported */ |
34 | #define MSM_IOMMU_PGSIZES (SZ_4K | SZ_64K | SZ_1M | SZ_16M) |
35 | |
36 | static DEFINE_SPINLOCK(msm_iommu_lock); |
37 | static LIST_HEAD(qcom_iommu_devices); |
38 | static struct iommu_ops msm_iommu_ops; |
39 | |
40 | struct msm_priv { |
41 | struct list_head list_attached; |
42 | struct iommu_domain domain; |
43 | struct io_pgtable_cfg cfg; |
44 | struct io_pgtable_ops *iop; |
45 | struct device *dev; |
46 | spinlock_t pgtlock; /* pagetable lock */ |
47 | }; |
48 | |
49 | static struct msm_priv *to_msm_priv(struct iommu_domain *dom) |
50 | { |
51 | return container_of(dom, struct msm_priv, domain); |
52 | } |
53 | |
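/*
 * Enable the IOMMU clocks: pclk is always required, while the core
 * clock (clk) is optional and only enabled when present.
 */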
54 | static int __enable_clocks(struct msm_iommu_dev *iommu) |
55 | { |
56 | int ret; |
57 | |
	ret = clk_enable(iommu->pclk);
	if (ret)
		goto fail;

	if (iommu->clk) {
		ret = clk_enable(iommu->clk);
		if (ret)
			clk_disable(iommu->pclk);
66 | } |
67 | fail: |
68 | return ret; |
69 | } |
70 | |
71 | static void __disable_clocks(struct msm_iommu_dev *iommu) |
72 | { |
73 | if (iommu->clk) |
		clk_disable(iommu->clk);
	clk_disable(iommu->pclk);
76 | } |
77 | |
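/*
 * Bring the IOMMU into a known state: clear the global configuration
 * registers and reset every context bank.
 */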
78 | static void msm_iommu_reset(void __iomem *base, int ncb) |
79 | { |
80 | int ctx; |
81 | |
82 | SET_RPUE(base, 0); |
83 | SET_RPUEIE(base, 0); |
84 | SET_ESRRESTORE(base, 0); |
85 | SET_TBE(base, 0); |
86 | SET_CR(base, 0); |
87 | SET_SPDMBE(base, 0); |
88 | SET_TESTBUSCR(base, 0); |
89 | SET_TLBRSW(base, 0); |
90 | SET_GLOBAL_TLBIALL(base, 0); |
91 | SET_RPU_ACR(base, 0); |
92 | SET_TLBLKCRWE(base, 1); |
93 | |
94 | for (ctx = 0; ctx < ncb; ctx++) { |
95 | SET_BPRCOSH(base, ctx, 0); |
96 | SET_BPRCISH(base, ctx, 0); |
97 | SET_BPRCNSH(base, ctx, 0); |
98 | SET_BPSHCFG(base, ctx, 0); |
99 | SET_BPMTCFG(base, ctx, 0); |
100 | SET_ACTLR(base, ctx, 0); |
101 | SET_SCTLR(base, ctx, 0); |
102 | SET_FSRRESTORE(base, ctx, 0); |
103 | SET_TTBR0(base, ctx, 0); |
104 | SET_TTBR1(base, ctx, 0); |
105 | SET_TTBCR(base, ctx, 0); |
106 | SET_BFBCR(base, ctx, 0); |
107 | SET_PAR(base, ctx, 0); |
108 | SET_FAR(base, ctx, 0); |
109 | SET_CTX_TLBIALL(base, ctx, 0); |
110 | SET_TLBFLPTER(base, ctx, 0); |
111 | SET_TLBSLPTER(base, ctx, 0); |
112 | SET_TLBLKCR(base, ctx, 0); |
113 | SET_CONTEXTIDR(base, ctx, 0); |
114 | } |
115 | } |
116 | |
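/*
 * Invalidate the entire TLB of every context bank attached to the
 * domain, on each IOMMU instance the domain is attached to.
 */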
117 | static void __flush_iotlb(void *cookie) |
118 | { |
119 | struct msm_priv *priv = cookie; |
120 | struct msm_iommu_dev *iommu = NULL; |
121 | struct msm_iommu_ctx_dev *master; |
122 | int ret = 0; |
123 | |
124 | list_for_each_entry(iommu, &priv->list_attached, dom_node) { |
125 | ret = __enable_clocks(iommu); |
126 | if (ret) |
127 | goto fail; |
128 | |
129 | list_for_each_entry(master, &iommu->ctx_list, list) |
130 | SET_CTX_TLBIALL(iommu->base, master->num, 0); |
131 | |
132 | __disable_clocks(iommu); |
133 | } |
134 | fail: |
135 | return; |
136 | } |
137 | |
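/*
 * Invalidate the TLB by virtual address, one granule at a time, for
 * every context attached to the domain. Each invalidation is tagged
 * with the context's ASID so that only that context's entries are
 * dropped.
 */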
138 | static void __flush_iotlb_range(unsigned long iova, size_t size, |
139 | size_t granule, bool leaf, void *cookie) |
140 | { |
141 | struct msm_priv *priv = cookie; |
142 | struct msm_iommu_dev *iommu = NULL; |
143 | struct msm_iommu_ctx_dev *master; |
144 | int ret = 0; |
145 | int temp_size; |
146 | |
147 | list_for_each_entry(iommu, &priv->list_attached, dom_node) { |
148 | ret = __enable_clocks(iommu); |
149 | if (ret) |
150 | goto fail; |
151 | |
152 | list_for_each_entry(master, &iommu->ctx_list, list) { |
153 | temp_size = size; |
154 | do { |
155 | iova &= TLBIVA_VA; |
156 | iova |= GET_CONTEXTIDR_ASID(iommu->base, |
157 | master->num); |
158 | SET_TLBIVA(iommu->base, master->num, iova); |
159 | iova += granule; |
160 | } while (temp_size -= granule); |
161 | } |
162 | |
163 | __disable_clocks(iommu); |
164 | } |
165 | |
166 | fail: |
167 | return; |
168 | } |
169 | |
170 | static void __flush_iotlb_walk(unsigned long iova, size_t size, |
171 | size_t granule, void *cookie) |
172 | { |
	__flush_iotlb_range(iova, size, granule, false, cookie);
174 | } |
175 | |
176 | static void __flush_iotlb_page(struct iommu_iotlb_gather *gather, |
177 | unsigned long iova, size_t granule, void *cookie) |
178 | { |
	__flush_iotlb_range(iova, granule, granule, true, cookie);
180 | } |
181 | |
182 | static const struct iommu_flush_ops msm_iommu_flush_ops = { |
183 | .tlb_flush_all = __flush_iotlb, |
184 | .tlb_flush_walk = __flush_iotlb_walk, |
185 | .tlb_add_page = __flush_iotlb_page, |
186 | }; |
187 | |
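/*
 * Claim a free context bank number from the allocation bitmap, or
 * return -ENOSPC if all banks are in use.
 */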
188 | static int msm_iommu_alloc_ctx(unsigned long *map, int start, int end) |
189 | { |
190 | int idx; |
191 | |
192 | do { |
		idx = find_next_zero_bit(map, end, start);
		if (idx == end)
			return -ENOSPC;
	} while (test_and_set_bit(idx, map));
197 | |
198 | return idx; |
199 | } |
200 | |
201 | static void msm_iommu_free_ctx(unsigned long *map, int idx) |
202 | { |
	clear_bit(idx, map);
204 | } |
205 | |
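/*
 * Route each of the master's MIDs to its context bank: program the
 * context number, VMID and ASID for the MID and mark it non-secure.
 */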
206 | static void config_mids(struct msm_iommu_dev *iommu, |
207 | struct msm_iommu_ctx_dev *master) |
208 | { |
209 | int mid, ctx, i; |
210 | |
211 | for (i = 0; i < master->num_mids; i++) { |
212 | mid = master->mids[i]; |
213 | ctx = master->num; |
214 | |
215 | SET_M2VCBR_N(iommu->base, mid, 0); |
216 | SET_CBACR_N(iommu->base, ctx, 0); |
217 | |
218 | /* Set VMID = 0 */ |
219 | SET_VMID(iommu->base, mid, 0); |
220 | |
221 | /* Set the context number for that MID to this context */ |
222 | SET_CBNDX(iommu->base, mid, ctx); |
223 | |
224 | /* Set MID associated with this context bank to 0*/ |
225 | SET_CBVMID(iommu->base, ctx, 0); |
226 | |
227 | /* Set the ASID for TLB tagging for this context */ |
228 | SET_CONTEXTIDR_ASID(iommu->base, ctx, ctx); |
229 | |
230 | /* Set security bit override to be Non-secure */ |
231 | SET_NSCFG(iommu->base, mid, 3); |
232 | } |
233 | } |
234 | |
235 | static void __reset_context(void __iomem *base, int ctx) |
236 | { |
237 | SET_BPRCOSH(base, ctx, 0); |
238 | SET_BPRCISH(base, ctx, 0); |
239 | SET_BPRCNSH(base, ctx, 0); |
240 | SET_BPSHCFG(base, ctx, 0); |
241 | SET_BPMTCFG(base, ctx, 0); |
242 | SET_ACTLR(base, ctx, 0); |
243 | SET_SCTLR(base, ctx, 0); |
244 | SET_FSRRESTORE(base, ctx, 0); |
245 | SET_TTBR0(base, ctx, 0); |
246 | SET_TTBR1(base, ctx, 0); |
247 | SET_TTBCR(base, ctx, 0); |
248 | SET_BFBCR(base, ctx, 0); |
249 | SET_PAR(base, ctx, 0); |
250 | SET_FAR(base, ctx, 0); |
251 | SET_CTX_TLBIALL(base, ctx, 0); |
252 | SET_TLBFLPTER(base, ctx, 0); |
253 | SET_TLBSLPTER(base, ctx, 0); |
254 | SET_TLBLKCR(base, ctx, 0); |
255 | } |
256 | |
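/*
 * Program a context bank for translation: install the page table base
 * and memory attributes from the io-pgtable configuration, invalidate
 * the context's TLB, enable fault reporting and finally turn on the
 * MMU for this context.
 */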
257 | static void __program_context(void __iomem *base, int ctx, |
258 | struct msm_priv *priv) |
259 | { |
260 | __reset_context(base, ctx); |
261 | |
262 | /* Turn on TEX Remap */ |
263 | SET_TRE(base, ctx, 1); |
264 | SET_AFE(base, ctx, 1); |
265 | |
266 | /* Set up HTW mode */ |
267 | /* TLB miss configuration: perform HTW on miss */ |
268 | SET_TLBMCFG(base, ctx, 0x3); |
269 | |
270 | /* V2P configuration: HTW for access */ |
271 | SET_V2PCFG(base, ctx, 0x3); |
272 | |
273 | SET_TTBCR(base, ctx, priv->cfg.arm_v7s_cfg.tcr); |
274 | SET_TTBR0(base, ctx, priv->cfg.arm_v7s_cfg.ttbr); |
275 | SET_TTBR1(base, ctx, 0); |
276 | |
277 | /* Set prrr and nmrr */ |
278 | SET_PRRR(base, ctx, priv->cfg.arm_v7s_cfg.prrr); |
279 | SET_NMRR(base, ctx, priv->cfg.arm_v7s_cfg.nmrr); |
280 | |
281 | /* Invalidate the TLB for this context */ |
282 | SET_CTX_TLBIALL(base, ctx, 0); |
283 | |
284 | /* Set interrupt number to "secure" interrupt */ |
285 | SET_IRPTNDX(base, ctx, 0); |
286 | |
287 | /* Enable context fault interrupt */ |
288 | SET_CFEIE(base, ctx, 1); |
289 | |
290 | /* Stall access on a context fault and let the handler deal with it */ |
291 | SET_CFCFG(base, ctx, 1); |
292 | |
293 | /* Redirect all cacheable requests to L2 slave port. */ |
294 | SET_RCISH(base, ctx, 1); |
295 | SET_RCOSH(base, ctx, 1); |
296 | SET_RCNSH(base, ctx, 1); |
297 | |
298 | /* Turn on BFB prefetch */ |
299 | SET_BFBDFE(base, ctx, 1); |
300 | |
301 | /* Enable the MMU */ |
302 | SET_M(base, ctx, 1); |
303 | } |
304 | |
305 | static struct iommu_domain *msm_iommu_domain_alloc_paging(struct device *dev) |
306 | { |
307 | struct msm_priv *priv; |
308 | |
	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		goto fail_nomem;

	INIT_LIST_HEAD(&priv->list_attached);
314 | |
315 | priv->domain.geometry.aperture_start = 0; |
316 | priv->domain.geometry.aperture_end = (1ULL << 32) - 1; |
317 | priv->domain.geometry.force_aperture = true; |
318 | |
319 | return &priv->domain; |
320 | |
321 | fail_nomem: |
	kfree(priv);
323 | return NULL; |
324 | } |
325 | |
326 | static void msm_iommu_domain_free(struct iommu_domain *domain) |
327 | { |
328 | struct msm_priv *priv; |
329 | unsigned long flags; |
330 | |
331 | spin_lock_irqsave(&msm_iommu_lock, flags); |
	priv = to_msm_priv(domain);
	kfree(priv);
	spin_unlock_irqrestore(&msm_iommu_lock, flags);
335 | } |
336 | |
337 | static int msm_iommu_domain_config(struct msm_priv *priv) |
338 | { |
339 | spin_lock_init(&priv->pgtlock); |
340 | |
341 | priv->cfg = (struct io_pgtable_cfg) { |
342 | .pgsize_bitmap = msm_iommu_ops.pgsize_bitmap, |
343 | .ias = 32, |
344 | .oas = 32, |
345 | .tlb = &msm_iommu_flush_ops, |
346 | .iommu_dev = priv->dev, |
347 | }; |
348 | |
	priv->iop = alloc_io_pgtable_ops(ARM_V7S, &priv->cfg, priv);
	if (!priv->iop) {
		dev_err(priv->dev, "Failed to allocate pgtable\n");
352 | return -EINVAL; |
353 | } |
354 | |
355 | msm_iommu_ops.pgsize_bitmap = priv->cfg.pgsize_bitmap; |
356 | |
357 | return 0; |
358 | } |
359 | |
360 | /* Must be called under msm_iommu_lock */ |
361 | static struct msm_iommu_dev *find_iommu_for_dev(struct device *dev) |
362 | { |
363 | struct msm_iommu_dev *iommu, *ret = NULL; |
364 | struct msm_iommu_ctx_dev *master; |
365 | |
366 | list_for_each_entry(iommu, &qcom_iommu_devices, dev_node) { |
367 | master = list_first_entry(&iommu->ctx_list, |
368 | struct msm_iommu_ctx_dev, |
369 | list); |
370 | if (master->of_node == dev->of_node) { |
371 | ret = iommu; |
372 | break; |
373 | } |
374 | } |
375 | |
376 | return ret; |
377 | } |
378 | |
379 | static struct iommu_device *msm_iommu_probe_device(struct device *dev) |
380 | { |
381 | struct msm_iommu_dev *iommu; |
382 | unsigned long flags; |
383 | |
384 | spin_lock_irqsave(&msm_iommu_lock, flags); |
385 | iommu = find_iommu_for_dev(dev); |
	spin_unlock_irqrestore(&msm_iommu_lock, flags);
387 | |
388 | if (!iommu) |
		return ERR_PTR(-ENODEV);
390 | |
391 | return &iommu->iommu; |
392 | } |
393 | |
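/*
 * Attach a device to a paging domain: allocate the domain's io-pgtable,
 * then claim and program a context bank for each master behind the
 * IOMMU that serves this device.
 */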
394 | static int msm_iommu_attach_dev(struct iommu_domain *domain, struct device *dev) |
395 | { |
396 | int ret = 0; |
397 | unsigned long flags; |
398 | struct msm_iommu_dev *iommu; |
	struct msm_priv *priv = to_msm_priv(domain);
400 | struct msm_iommu_ctx_dev *master; |
401 | |
402 | priv->dev = dev; |
403 | msm_iommu_domain_config(priv); |
404 | |
405 | spin_lock_irqsave(&msm_iommu_lock, flags); |
406 | list_for_each_entry(iommu, &qcom_iommu_devices, dev_node) { |
407 | master = list_first_entry(&iommu->ctx_list, |
408 | struct msm_iommu_ctx_dev, |
409 | list); |
410 | if (master->of_node == dev->of_node) { |
411 | ret = __enable_clocks(iommu); |
412 | if (ret) |
413 | goto fail; |
414 | |
415 | list_for_each_entry(master, &iommu->ctx_list, list) { |
416 | if (master->num) { |
					dev_err(dev, "domain already attached");
418 | ret = -EEXIST; |
419 | goto fail; |
420 | } |
				master->num =
					msm_iommu_alloc_ctx(iommu->context_map,
							    0, iommu->ncb);
424 | if (IS_ERR_VALUE(master->num)) { |
425 | ret = -ENODEV; |
426 | goto fail; |
427 | } |
428 | config_mids(iommu, master); |
				__program_context(iommu->base, master->num,
						  priv);
431 | } |
432 | __disable_clocks(iommu); |
			list_add(&iommu->dom_node, &priv->list_attached);
434 | } |
435 | } |
436 | |
437 | fail: |
	spin_unlock_irqrestore(&msm_iommu_lock, flags);
439 | |
440 | return ret; |
441 | } |
442 | |
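/*
 * Attaching to the identity domain effectively detaches the device from
 * its paging domain: the io-pgtable is freed and every context bank the
 * domain was using is released and reset.
 */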
443 | static int msm_iommu_identity_attach(struct iommu_domain *identity_domain, |
444 | struct device *dev) |
445 | { |
446 | struct iommu_domain *domain = iommu_get_domain_for_dev(dev); |
447 | struct msm_priv *priv; |
448 | unsigned long flags; |
449 | struct msm_iommu_dev *iommu; |
450 | struct msm_iommu_ctx_dev *master; |
451 | int ret = 0; |
452 | |
453 | if (domain == identity_domain || !domain) |
454 | return 0; |
455 | |
	priv = to_msm_priv(domain);
	free_io_pgtable_ops(priv->iop);
458 | |
459 | spin_lock_irqsave(&msm_iommu_lock, flags); |
460 | list_for_each_entry(iommu, &priv->list_attached, dom_node) { |
461 | ret = __enable_clocks(iommu); |
462 | if (ret) |
463 | goto fail; |
464 | |
465 | list_for_each_entry(master, &iommu->ctx_list, list) { |
			msm_iommu_free_ctx(iommu->context_map, master->num);
			__reset_context(iommu->base, master->num);
468 | } |
469 | __disable_clocks(iommu); |
470 | } |
471 | fail: |
	spin_unlock_irqrestore(&msm_iommu_lock, flags);
473 | return ret; |
474 | } |
475 | |
476 | static struct iommu_domain_ops msm_iommu_identity_ops = { |
477 | .attach_dev = msm_iommu_identity_attach, |
478 | }; |
479 | |
480 | static struct iommu_domain msm_iommu_identity_domain = { |
481 | .type = IOMMU_DOMAIN_IDENTITY, |
482 | .ops = &msm_iommu_identity_ops, |
483 | }; |
484 | |
485 | static int msm_iommu_map(struct iommu_domain *domain, unsigned long iova, |
486 | phys_addr_t pa, size_t pgsize, size_t pgcount, |
487 | int prot, gfp_t gfp, size_t *mapped) |
488 | { |
	struct msm_priv *priv = to_msm_priv(domain);
490 | unsigned long flags; |
491 | int ret; |
492 | |
493 | spin_lock_irqsave(&priv->pgtlock, flags); |
494 | ret = priv->iop->map_pages(priv->iop, iova, pa, pgsize, pgcount, prot, |
495 | GFP_ATOMIC, mapped); |
	spin_unlock_irqrestore(&priv->pgtlock, flags);
497 | |
498 | return ret; |
499 | } |
500 | |
501 | static int msm_iommu_sync_map(struct iommu_domain *domain, unsigned long iova, |
502 | size_t size) |
503 | { |
	struct msm_priv *priv = to_msm_priv(domain);

	__flush_iotlb_range(iova, size, SZ_4K, false, priv);
507 | return 0; |
508 | } |
509 | |
510 | static size_t msm_iommu_unmap(struct iommu_domain *domain, unsigned long iova, |
511 | size_t pgsize, size_t pgcount, |
512 | struct iommu_iotlb_gather *gather) |
513 | { |
	struct msm_priv *priv = to_msm_priv(domain);
515 | unsigned long flags; |
516 | size_t ret; |
517 | |
518 | spin_lock_irqsave(&priv->pgtlock, flags); |
519 | ret = priv->iop->unmap_pages(priv->iop, iova, pgsize, pgcount, gather); |
	spin_unlock_irqrestore(&priv->pgtlock, flags);
521 | |
522 | return ret; |
523 | } |
524 | |
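/*
 * Translate an IOVA with the hardware V2P engine of the first attached
 * context bank and assemble the physical address from the PAR register.
 * Returns 0 if the hardware reports a translation fault.
 */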
525 | static phys_addr_t msm_iommu_iova_to_phys(struct iommu_domain *domain, |
526 | dma_addr_t va) |
527 | { |
528 | struct msm_priv *priv; |
529 | struct msm_iommu_dev *iommu; |
530 | struct msm_iommu_ctx_dev *master; |
531 | unsigned int par; |
532 | unsigned long flags; |
533 | phys_addr_t ret = 0; |
534 | |
535 | spin_lock_irqsave(&msm_iommu_lock, flags); |
536 | |
	priv = to_msm_priv(domain);
538 | iommu = list_first_entry(&priv->list_attached, |
539 | struct msm_iommu_dev, dom_node); |
540 | |
	if (list_empty(&iommu->ctx_list))
542 | goto fail; |
543 | |
544 | master = list_first_entry(&iommu->ctx_list, |
545 | struct msm_iommu_ctx_dev, list); |
546 | if (!master) |
547 | goto fail; |
548 | |
549 | ret = __enable_clocks(iommu); |
550 | if (ret) |
551 | goto fail; |
552 | |
553 | /* Invalidate context TLB */ |
554 | SET_CTX_TLBIALL(iommu->base, master->num, 0); |
555 | SET_V2PPR(iommu->base, master->num, va & V2Pxx_VA); |
556 | |
557 | par = GET_PAR(iommu->base, master->num); |
558 | |
559 | /* We are dealing with a supersection */ |
560 | if (GET_NOFAULT_SS(iommu->base, master->num)) |
561 | ret = (par & 0xFF000000) | (va & 0x00FFFFFF); |
562 | else /* Upper 20 bits from PAR, lower 12 from VA */ |
563 | ret = (par & 0xFFFFF000) | (va & 0x00000FFF); |
564 | |
565 | if (GET_FAULT(iommu->base, master->num)) |
566 | ret = 0; |
567 | |
568 | __disable_clocks(iommu); |
569 | fail: |
	spin_unlock_irqrestore(&msm_iommu_lock, flags);
571 | return ret; |
572 | } |
573 | |
574 | static void print_ctx_regs(void __iomem *base, int ctx) |
575 | { |
576 | unsigned int fsr = GET_FSR(base, ctx); |
	pr_err("FAR = %08x PAR = %08x\n",
	       GET_FAR(base, ctx), GET_PAR(base, ctx));
	pr_err("FSR = %08x [%s%s%s%s%s%s%s%s%s%s]\n", fsr,
	       (fsr & 0x02) ? "TF " : "",
	       (fsr & 0x04) ? "AFF " : "",
	       (fsr & 0x08) ? "APF " : "",
	       (fsr & 0x10) ? "TLBMF " : "",
	       (fsr & 0x20) ? "HTWDEEF " : "",
	       (fsr & 0x40) ? "HTWSEEF " : "",
	       (fsr & 0x80) ? "MHF " : "",
	       (fsr & 0x10000) ? "SL " : "",
	       (fsr & 0x40000000) ? "SS " : "",
	       (fsr & 0x80000000) ? "MULTI " : "");

	pr_err("FSYNR0 = %08x FSYNR1 = %08x\n",
	       GET_FSYNR0(base, ctx), GET_FSYNR1(base, ctx));
	pr_err("TTBR0 = %08x TTBR1 = %08x\n",
	       GET_TTBR0(base, ctx), GET_TTBR1(base, ctx));
	pr_err("SCTLR = %08x ACTLR = %08x\n",
	       GET_SCTLR(base, ctx), GET_ACTLR(base, ctx));
597 | } |
598 | |
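/*
 * Record a stream ID from the device tree against the master attached
 * to this IOMMU, allocating the master structure on first use and
 * ignoring duplicate IDs.
 */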
599 | static int insert_iommu_master(struct device *dev, |
600 | struct msm_iommu_dev **iommu, |
601 | const struct of_phandle_args *spec) |
602 | { |
603 | struct msm_iommu_ctx_dev *master = dev_iommu_priv_get(dev); |
604 | int sid; |
605 | |
	if (list_empty(&(*iommu)->ctx_list)) {
		master = kzalloc(sizeof(*master), GFP_ATOMIC);
		if (!master) {
			dev_err(dev, "Failed to allocate iommu_master\n");
			return -ENOMEM;
		}
		master->of_node = dev->of_node;
		list_add(&master->list, &(*iommu)->ctx_list);
		dev_iommu_priv_set(dev, master);
615 | } |
616 | |
617 | for (sid = 0; sid < master->num_mids; sid++) |
618 | if (master->mids[sid] == spec->args[0]) { |
			dev_warn(dev, "Stream ID 0x%x repeated; ignoring\n",
				 sid);
621 | return 0; |
622 | } |
623 | |
624 | master->mids[master->num_mids++] = spec->args[0]; |
625 | return 0; |
626 | } |
627 | |
628 | static int qcom_iommu_of_xlate(struct device *dev, |
629 | const struct of_phandle_args *spec) |
630 | { |
631 | struct msm_iommu_dev *iommu = NULL, *iter; |
632 | unsigned long flags; |
633 | int ret = 0; |
634 | |
635 | spin_lock_irqsave(&msm_iommu_lock, flags); |
636 | list_for_each_entry(iter, &qcom_iommu_devices, dev_node) { |
637 | if (iter->dev->of_node == spec->np) { |
638 | iommu = iter; |
639 | break; |
640 | } |
641 | } |
642 | |
643 | if (!iommu) { |
644 | ret = -ENODEV; |
645 | goto fail; |
646 | } |
647 | |
	ret = insert_iommu_master(dev, &iommu, spec);
fail:
	spin_unlock_irqrestore(&msm_iommu_lock, flags);
651 | |
652 | return ret; |
653 | } |
654 | |
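/*
 * Context fault handler: dump the fault registers of every context bank
 * that reports a fault and then clear its fault status.
 */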
655 | irqreturn_t msm_iommu_fault_handler(int irq, void *dev_id) |
656 | { |
657 | struct msm_iommu_dev *iommu = dev_id; |
658 | unsigned int fsr; |
659 | int i, ret; |
660 | |
	spin_lock(&msm_iommu_lock);
662 | |
663 | if (!iommu) { |
		pr_err("Invalid device ID in context interrupt handler\n");
665 | goto fail; |
666 | } |
667 | |
	pr_err("Unexpected IOMMU page fault!\n");
	pr_err("base = %08x\n", (unsigned int)iommu->base);
670 | |
671 | ret = __enable_clocks(iommu); |
672 | if (ret) |
673 | goto fail; |
674 | |
675 | for (i = 0; i < iommu->ncb; i++) { |
676 | fsr = GET_FSR(iommu->base, i); |
677 | if (fsr) { |
			pr_err("Fault occurred in context %d.\n", i);
			pr_err("Interesting registers:\n");
			print_ctx_regs(iommu->base, i);
681 | SET_FSR(iommu->base, i, 0x4000000F); |
682 | } |
683 | } |
684 | __disable_clocks(iommu); |
685 | fail: |
	spin_unlock(&msm_iommu_lock);
687 | return 0; |
688 | } |
689 | |
690 | static struct iommu_ops msm_iommu_ops = { |
691 | .identity_domain = &msm_iommu_identity_domain, |
692 | .domain_alloc_paging = msm_iommu_domain_alloc_paging, |
693 | .probe_device = msm_iommu_probe_device, |
694 | .device_group = generic_device_group, |
695 | .pgsize_bitmap = MSM_IOMMU_PGSIZES, |
696 | .of_xlate = qcom_iommu_of_xlate, |
697 | .default_domain_ops = &(const struct iommu_domain_ops) { |
698 | .attach_dev = msm_iommu_attach_dev, |
699 | .map_pages = msm_iommu_map, |
700 | .unmap_pages = msm_iommu_unmap, |
701 | /* |
702 | * Nothing is needed here, the barrier to guarantee |
703 | * completion of the tlb sync operation is implicitly |
704 | * taken care when the iommu client does a writel before |
705 | * kick starting the other master. |
706 | */ |
707 | .iotlb_sync = NULL, |
708 | .iotlb_sync_map = msm_iommu_sync_map, |
709 | .iova_to_phys = msm_iommu_iova_to_phys, |
710 | .free = msm_iommu_domain_free, |
711 | } |
712 | }; |
713 | |
714 | static int msm_iommu_probe(struct platform_device *pdev) |
715 | { |
716 | struct resource *r; |
717 | resource_size_t ioaddr; |
718 | struct msm_iommu_dev *iommu; |
719 | int ret, par, val; |
720 | |
	iommu = devm_kzalloc(&pdev->dev, sizeof(*iommu), GFP_KERNEL);
722 | if (!iommu) |
723 | return -ENODEV; |
724 | |
725 | iommu->dev = &pdev->dev; |
	INIT_LIST_HEAD(&iommu->ctx_list);

	iommu->pclk = devm_clk_get(iommu->dev, "smmu_pclk");
	if (IS_ERR(iommu->pclk))
		return dev_err_probe(iommu->dev, PTR_ERR(iommu->pclk),
				     "could not get smmu_pclk\n");
732 | |
	ret = clk_prepare(iommu->pclk);
	if (ret)
		return dev_err_probe(iommu->dev, ret,
				     "could not prepare smmu_pclk\n");
737 | |
	iommu->clk = devm_clk_get(iommu->dev, "iommu_clk");
	if (IS_ERR(iommu->clk)) {
		clk_unprepare(iommu->pclk);
		return dev_err_probe(iommu->dev, PTR_ERR(iommu->clk),
				     "could not get iommu_clk\n");
743 | } |
744 | |
	ret = clk_prepare(iommu->clk);
	if (ret) {
		clk_unprepare(iommu->pclk);
		return dev_err_probe(iommu->dev, ret, "could not prepare iommu_clk\n");
749 | } |
750 | |
751 | r = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
	iommu->base = devm_ioremap_resource(iommu->dev, r);
	if (IS_ERR(iommu->base)) {
		ret = dev_err_probe(iommu->dev, PTR_ERR(iommu->base), "could not get iommu base\n");
755 | goto fail; |
756 | } |
757 | ioaddr = r->start; |
758 | |
759 | iommu->irq = platform_get_irq(pdev, 0); |
760 | if (iommu->irq < 0) { |
761 | ret = -ENODEV; |
762 | goto fail; |
763 | } |
764 | |
	ret = of_property_read_u32(iommu->dev->of_node, "qcom,ncb", &val);
	if (ret) {
		dev_err(iommu->dev, "could not get ncb\n");
768 | goto fail; |
769 | } |
770 | iommu->ncb = val; |
771 | |
	msm_iommu_reset(iommu->base, iommu->ncb);
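
	/*
	 * Basic sanity check: enable the MMU on context 0, run a V2P
	 * translation of address 0 and verify that PAR reads back
	 * non-zero; otherwise the probe fails with -ENODEV.
	 */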
773 | SET_M(iommu->base, 0, 1); |
774 | SET_PAR(iommu->base, 0, 0); |
775 | SET_V2PCFG(iommu->base, 0, 1); |
776 | SET_V2PPR(iommu->base, 0, 0); |
777 | par = GET_PAR(iommu->base, 0); |
778 | SET_V2PCFG(iommu->base, 0, 0); |
779 | SET_M(iommu->base, 0, 0); |
780 | |
781 | if (!par) { |
		pr_err("Invalid PAR value detected\n");
783 | ret = -ENODEV; |
784 | goto fail; |
785 | } |
786 | |
	ret = devm_request_threaded_irq(iommu->dev, iommu->irq, NULL,
					msm_iommu_fault_handler,
					IRQF_ONESHOT | IRQF_SHARED,
					"msm_iommu_secure_irpt_handler",
					iommu);
	if (ret) {
		pr_err("Request IRQ %d failed with ret=%d\n", iommu->irq, ret);
794 | goto fail; |
795 | } |
796 | |
	list_add(&iommu->dev_node, &qcom_iommu_devices);

	ret = iommu_device_sysfs_add(&iommu->iommu, iommu->dev, NULL,
				     "msm-smmu.%pa", &ioaddr);
	if (ret) {
		pr_err("Could not add msm-smmu at %pa to sysfs\n", &ioaddr);
803 | goto fail; |
804 | } |
805 | |
	ret = iommu_device_register(&iommu->iommu, &msm_iommu_ops, &pdev->dev);
	if (ret) {
		pr_err("Could not register msm-smmu at %pa\n", &ioaddr);
809 | goto fail; |
810 | } |
811 | |
	pr_info("device mapped at %p, irq %d with %d ctx banks\n",
813 | iommu->base, iommu->irq, iommu->ncb); |
814 | |
815 | return ret; |
816 | fail: |
	clk_unprepare(iommu->clk);
	clk_unprepare(iommu->pclk);
819 | return ret; |
820 | } |
821 | |
822 | static const struct of_device_id msm_iommu_dt_match[] = { |
823 | { .compatible = "qcom,apq8064-iommu" }, |
824 | {} |
825 | }; |
826 | |
827 | static void msm_iommu_remove(struct platform_device *pdev) |
828 | { |
829 | struct msm_iommu_dev *iommu = platform_get_drvdata(pdev); |
830 | |
	clk_unprepare(iommu->clk);
	clk_unprepare(iommu->pclk);
833 | } |
834 | |
835 | static struct platform_driver msm_iommu_driver = { |
836 | .driver = { |
		.name = "msm_iommu",
838 | .of_match_table = msm_iommu_dt_match, |
839 | }, |
840 | .probe = msm_iommu_probe, |
841 | .remove_new = msm_iommu_remove, |
842 | }; |
843 | builtin_platform_driver(msm_iommu_driver); |
844 | |