1 | // SPDX-License-Identifier: GPL-2.0-only |
2 | /* |
3 | * Copyright (C) 2011-2014 NVIDIA CORPORATION. All rights reserved. |
4 | */ |
5 | |
6 | #include <linux/bitops.h> |
7 | #include <linux/debugfs.h> |
8 | #include <linux/err.h> |
9 | #include <linux/iommu.h> |
10 | #include <linux/kernel.h> |
11 | #include <linux/of.h> |
12 | #include <linux/of_platform.h> |
13 | #include <linux/pci.h> |
14 | #include <linux/platform_device.h> |
15 | #include <linux/slab.h> |
16 | #include <linux/spinlock.h> |
17 | #include <linux/dma-mapping.h> |
18 | |
19 | #include <soc/tegra/ahb.h> |
20 | #include <soc/tegra/mc.h> |
21 | |
22 | struct tegra_smmu_group { |
23 | struct list_head list; |
24 | struct tegra_smmu *smmu; |
25 | const struct tegra_smmu_group_soc *soc; |
26 | struct iommu_group *group; |
27 | unsigned int swgroup; |
28 | }; |
29 | |
30 | struct tegra_smmu { |
31 | void __iomem *regs; |
32 | struct device *dev; |
33 | |
34 | struct tegra_mc *mc; |
35 | const struct tegra_smmu_soc *soc; |
36 | |
37 | struct list_head groups; |
38 | |
39 | unsigned long pfn_mask; |
40 | unsigned long tlb_mask; |
41 | |
42 | unsigned long *asids; |
43 | struct mutex lock; |
44 | |
45 | struct list_head list; |
46 | |
47 | struct dentry *debugfs; |
48 | |
49 | struct iommu_device iommu; /* IOMMU Core code handle */ |
50 | }; |
51 | |
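/*
 * struct tegra_smmu_as - per-ASID address space
 *
 * @pd is the page directory page and @pd_dma its DMA address, @pts
 * holds the page-table page for each PDE, @count tracks how many PTEs
 * are in use within each page table so that empty tables can be freed,
 * and @use_count counts active users so the hardware is programmed
 * only once.
 */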
52 | struct tegra_smmu_as { |
53 | struct iommu_domain domain; |
54 | struct tegra_smmu *smmu; |
55 | unsigned int use_count; |
56 | spinlock_t lock; |
57 | u32 *count; |
58 | struct page **pts; |
59 | struct page *pd; |
60 | dma_addr_t pd_dma; |
61 | unsigned id; |
62 | u32 attr; |
63 | }; |
64 | |
65 | static struct tegra_smmu_as *to_smmu_as(struct iommu_domain *dom) |
66 | { |
67 | return container_of(dom, struct tegra_smmu_as, domain); |
68 | } |
69 | |
70 | static inline void smmu_writel(struct tegra_smmu *smmu, u32 value, |
71 | unsigned long offset) |
72 | { |
	writel(value, smmu->regs + offset);
74 | } |
75 | |
76 | static inline u32 smmu_readl(struct tegra_smmu *smmu, unsigned long offset) |
77 | { |
	return readl(smmu->regs + offset);
79 | } |
80 | |
81 | #define SMMU_CONFIG 0x010 |
82 | #define SMMU_CONFIG_ENABLE (1 << 0) |
83 | |
84 | #define SMMU_TLB_CONFIG 0x14 |
85 | #define SMMU_TLB_CONFIG_HIT_UNDER_MISS (1 << 29) |
86 | #define SMMU_TLB_CONFIG_ROUND_ROBIN_ARBITRATION (1 << 28) |
87 | #define SMMU_TLB_CONFIG_ACTIVE_LINES(smmu) \ |
88 | ((smmu)->soc->num_tlb_lines & (smmu)->tlb_mask) |
89 | |
90 | #define SMMU_PTC_CONFIG 0x18 |
91 | #define SMMU_PTC_CONFIG_ENABLE (1 << 29) |
92 | #define SMMU_PTC_CONFIG_REQ_LIMIT(x) (((x) & 0x0f) << 24) |
93 | #define SMMU_PTC_CONFIG_INDEX_MAP(x) ((x) & 0x3f) |
94 | |
95 | #define SMMU_PTB_ASID 0x01c |
96 | #define SMMU_PTB_ASID_VALUE(x) ((x) & 0x7f) |
97 | |
98 | #define SMMU_PTB_DATA 0x020 |
99 | #define SMMU_PTB_DATA_VALUE(dma, attr) ((dma) >> 12 | (attr)) |
100 | |
101 | #define SMMU_MK_PDE(dma, attr) ((dma) >> SMMU_PTE_SHIFT | (attr)) |
102 | |
103 | #define SMMU_TLB_FLUSH 0x030 |
104 | #define SMMU_TLB_FLUSH_VA_MATCH_ALL (0 << 0) |
105 | #define SMMU_TLB_FLUSH_VA_MATCH_SECTION (2 << 0) |
106 | #define SMMU_TLB_FLUSH_VA_MATCH_GROUP (3 << 0) |
107 | #define SMMU_TLB_FLUSH_VA_SECTION(addr) ((((addr) & 0xffc00000) >> 12) | \ |
108 | SMMU_TLB_FLUSH_VA_MATCH_SECTION) |
109 | #define SMMU_TLB_FLUSH_VA_GROUP(addr) ((((addr) & 0xffffc000) >> 12) | \ |
110 | SMMU_TLB_FLUSH_VA_MATCH_GROUP) |
111 | #define SMMU_TLB_FLUSH_ASID_MATCH (1 << 31) |
112 | |
113 | #define SMMU_PTC_FLUSH 0x034 |
114 | #define SMMU_PTC_FLUSH_TYPE_ALL (0 << 0) |
115 | #define SMMU_PTC_FLUSH_TYPE_ADR (1 << 0) |
116 | |
117 | #define SMMU_PTC_FLUSH_HI 0x9b8 |
118 | #define SMMU_PTC_FLUSH_HI_MASK 0x3 |
119 | |
120 | /* per-SWGROUP SMMU_*_ASID register */ |
121 | #define SMMU_ASID_ENABLE (1 << 31) |
122 | #define SMMU_ASID_MASK 0x7f |
123 | #define SMMU_ASID_VALUE(x) ((x) & SMMU_ASID_MASK) |
124 | |
125 | /* page table definitions */ |
126 | #define SMMU_NUM_PDE 1024 |
127 | #define SMMU_NUM_PTE 1024 |
128 | |
129 | #define SMMU_SIZE_PD (SMMU_NUM_PDE * 4) |
130 | #define SMMU_SIZE_PT (SMMU_NUM_PTE * 4) |
131 | |
132 | #define SMMU_PDE_SHIFT 22 |
133 | #define SMMU_PTE_SHIFT 12 |
134 | |
135 | #define SMMU_PAGE_MASK (~(SMMU_SIZE_PT-1)) |
136 | #define SMMU_OFFSET_IN_PAGE(x) ((unsigned long)(x) & ~SMMU_PAGE_MASK) |
137 | #define SMMU_PFN_PHYS(x) ((phys_addr_t)(x) << SMMU_PTE_SHIFT) |
138 | #define SMMU_PHYS_PFN(x) ((unsigned long)((x) >> SMMU_PTE_SHIFT)) |
139 | |
140 | #define SMMU_PD_READABLE (1 << 31) |
141 | #define SMMU_PD_WRITABLE (1 << 30) |
142 | #define SMMU_PD_NONSECURE (1 << 29) |
143 | |
144 | #define SMMU_PDE_READABLE (1 << 31) |
145 | #define SMMU_PDE_WRITABLE (1 << 30) |
146 | #define SMMU_PDE_NONSECURE (1 << 29) |
147 | #define SMMU_PDE_NEXT (1 << 28) |
148 | |
149 | #define SMMU_PTE_READABLE (1 << 31) |
150 | #define SMMU_PTE_WRITABLE (1 << 30) |
151 | #define SMMU_PTE_NONSECURE (1 << 29) |
152 | |
153 | #define SMMU_PDE_ATTR (SMMU_PDE_READABLE | SMMU_PDE_WRITABLE | \ |
154 | SMMU_PDE_NONSECURE) |
155 | |
156 | static unsigned int iova_pd_index(unsigned long iova) |
157 | { |
158 | return (iova >> SMMU_PDE_SHIFT) & (SMMU_NUM_PDE - 1); |
159 | } |
160 | |
161 | static unsigned int iova_pt_index(unsigned long iova) |
162 | { |
163 | return (iova >> SMMU_PTE_SHIFT) & (SMMU_NUM_PTE - 1); |
164 | } |
165 | |
166 | static bool smmu_dma_addr_valid(struct tegra_smmu *smmu, dma_addr_t addr) |
167 | { |
168 | addr >>= 12; |
169 | return (addr & smmu->pfn_mask) == addr; |
170 | } |
171 | |
172 | static dma_addr_t smmu_pde_to_dma(struct tegra_smmu *smmu, u32 pde) |
173 | { |
174 | return (dma_addr_t)(pde & smmu->pfn_mask) << 12; |
175 | } |
176 | |
177 | static void smmu_flush_ptc_all(struct tegra_smmu *smmu) |
178 | { |
179 | smmu_writel(smmu, SMMU_PTC_FLUSH_TYPE_ALL, SMMU_PTC_FLUSH); |
180 | } |
181 | |
182 | static inline void smmu_flush_ptc(struct tegra_smmu *smmu, dma_addr_t dma, |
183 | unsigned long offset) |
184 | { |
185 | u32 value; |
186 | |
187 | offset &= ~(smmu->mc->soc->atom_size - 1); |
188 | |
189 | if (smmu->mc->soc->num_address_bits > 32) { |
190 | #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT |
191 | value = (dma >> 32) & SMMU_PTC_FLUSH_HI_MASK; |
192 | #else |
193 | value = 0; |
194 | #endif |
195 | smmu_writel(smmu, value, SMMU_PTC_FLUSH_HI); |
196 | } |
197 | |
198 | value = (dma + offset) | SMMU_PTC_FLUSH_TYPE_ADR; |
199 | smmu_writel(smmu, value, SMMU_PTC_FLUSH); |
200 | } |
201 | |
202 | static inline void smmu_flush_tlb(struct tegra_smmu *smmu) |
203 | { |
204 | smmu_writel(smmu, SMMU_TLB_FLUSH_VA_MATCH_ALL, SMMU_TLB_FLUSH); |
205 | } |
206 | |
207 | static inline void smmu_flush_tlb_asid(struct tegra_smmu *smmu, |
208 | unsigned long asid) |
209 | { |
210 | u32 value; |
211 | |
212 | if (smmu->soc->num_asids == 4) |
213 | value = (asid & 0x3) << 29; |
214 | else |
215 | value = (asid & 0x7f) << 24; |
216 | |
217 | value |= SMMU_TLB_FLUSH_ASID_MATCH | SMMU_TLB_FLUSH_VA_MATCH_ALL; |
218 | smmu_writel(smmu, value, SMMU_TLB_FLUSH); |
219 | } |
220 | |
221 | static inline void smmu_flush_tlb_section(struct tegra_smmu *smmu, |
222 | unsigned long asid, |
223 | unsigned long iova) |
224 | { |
225 | u32 value; |
226 | |
227 | if (smmu->soc->num_asids == 4) |
228 | value = (asid & 0x3) << 29; |
229 | else |
230 | value = (asid & 0x7f) << 24; |
231 | |
232 | value |= SMMU_TLB_FLUSH_ASID_MATCH | SMMU_TLB_FLUSH_VA_SECTION(iova); |
233 | smmu_writel(smmu, value, SMMU_TLB_FLUSH); |
234 | } |
235 | |
236 | static inline void smmu_flush_tlb_group(struct tegra_smmu *smmu, |
237 | unsigned long asid, |
238 | unsigned long iova) |
239 | { |
240 | u32 value; |
241 | |
242 | if (smmu->soc->num_asids == 4) |
243 | value = (asid & 0x3) << 29; |
244 | else |
245 | value = (asid & 0x7f) << 24; |
246 | |
247 | value |= SMMU_TLB_FLUSH_ASID_MATCH | SMMU_TLB_FLUSH_VA_GROUP(iova); |
248 | smmu_writel(smmu, value, SMMU_TLB_FLUSH); |
249 | } |
250 | |
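/*
 * Read back an arbitrary SMMU register so that all preceding register
 * writes are known to have reached the hardware before continuing.
 */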
251 | static inline void smmu_flush(struct tegra_smmu *smmu) |
252 | { |
253 | smmu_readl(smmu, SMMU_PTB_ASID); |
254 | } |
255 | |
256 | static int tegra_smmu_alloc_asid(struct tegra_smmu *smmu, unsigned int *idp) |
257 | { |
258 | unsigned long id; |
259 | |
	id = find_first_zero_bit(smmu->asids, smmu->soc->num_asids);
261 | if (id >= smmu->soc->num_asids) |
262 | return -ENOSPC; |
263 | |
	set_bit(id, smmu->asids);
265 | *idp = id; |
266 | |
267 | return 0; |
268 | } |
269 | |
270 | static void tegra_smmu_free_asid(struct tegra_smmu *smmu, unsigned int id) |
271 | { |
	clear_bit(id, smmu->asids);
273 | } |
274 | |
275 | static struct iommu_domain *tegra_smmu_domain_alloc_paging(struct device *dev) |
276 | { |
277 | struct tegra_smmu_as *as; |
278 | |
	as = kzalloc(sizeof(*as), GFP_KERNEL);
280 | if (!as) |
281 | return NULL; |
282 | |
283 | as->attr = SMMU_PD_READABLE | SMMU_PD_WRITABLE | SMMU_PD_NONSECURE; |
284 | |
285 | as->pd = alloc_page(GFP_KERNEL | __GFP_DMA | __GFP_ZERO); |
286 | if (!as->pd) { |
		kfree(as);
288 | return NULL; |
289 | } |
290 | |
	as->count = kcalloc(SMMU_NUM_PDE, sizeof(u32), GFP_KERNEL);
292 | if (!as->count) { |
293 | __free_page(as->pd); |
		kfree(as);
295 | return NULL; |
296 | } |
297 | |
	as->pts = kcalloc(SMMU_NUM_PDE, sizeof(*as->pts), GFP_KERNEL);
299 | if (!as->pts) { |
		kfree(as->count);
		__free_page(as->pd);
		kfree(as);
303 | return NULL; |
304 | } |
305 | |
306 | spin_lock_init(&as->lock); |
307 | |
308 | /* setup aperture */ |
309 | as->domain.geometry.aperture_start = 0; |
310 | as->domain.geometry.aperture_end = 0xffffffff; |
311 | as->domain.geometry.force_aperture = true; |
312 | |
313 | return &as->domain; |
314 | } |
315 | |
316 | static void tegra_smmu_domain_free(struct iommu_domain *domain) |
317 | { |
	struct tegra_smmu_as *as = to_smmu_as(domain);
319 | |
320 | /* TODO: free page directory and page tables */ |
321 | |
322 | WARN_ON_ONCE(as->use_count); |
	kfree(as->count);
	kfree(as->pts);
	kfree(as);
326 | } |
327 | |
328 | static const struct tegra_smmu_swgroup * |
329 | tegra_smmu_find_swgroup(struct tegra_smmu *smmu, unsigned int swgroup) |
330 | { |
331 | const struct tegra_smmu_swgroup *group = NULL; |
332 | unsigned int i; |
333 | |
334 | for (i = 0; i < smmu->soc->num_swgroups; i++) { |
335 | if (smmu->soc->swgroups[i].swgroup == swgroup) { |
336 | group = &smmu->soc->swgroups[i]; |
337 | break; |
338 | } |
339 | } |
340 | |
341 | return group; |
342 | } |
343 | |
344 | static void tegra_smmu_enable(struct tegra_smmu *smmu, unsigned int swgroup, |
345 | unsigned int asid) |
346 | { |
347 | const struct tegra_smmu_swgroup *group; |
348 | unsigned int i; |
349 | u32 value; |
350 | |
351 | group = tegra_smmu_find_swgroup(smmu, swgroup); |
352 | if (group) { |
		value = smmu_readl(smmu, group->reg);
354 | value &= ~SMMU_ASID_MASK; |
355 | value |= SMMU_ASID_VALUE(asid); |
356 | value |= SMMU_ASID_ENABLE; |
		smmu_writel(smmu, value, group->reg);
358 | } else { |
		pr_warn("%s group from swgroup %u not found\n", __func__,
			swgroup);
361 | /* No point moving ahead if group was not found */ |
362 | return; |
363 | } |
364 | |
365 | for (i = 0; i < smmu->soc->num_clients; i++) { |
366 | const struct tegra_mc_client *client = &smmu->soc->clients[i]; |
367 | |
368 | if (client->swgroup != swgroup) |
369 | continue; |
370 | |
		value = smmu_readl(smmu, client->regs.smmu.reg);
		value |= BIT(client->regs.smmu.bit);
		smmu_writel(smmu, value, client->regs.smmu.reg);
374 | } |
375 | } |
376 | |
377 | static void tegra_smmu_disable(struct tegra_smmu *smmu, unsigned int swgroup, |
378 | unsigned int asid) |
379 | { |
380 | const struct tegra_smmu_swgroup *group; |
381 | unsigned int i; |
382 | u32 value; |
383 | |
384 | group = tegra_smmu_find_swgroup(smmu, swgroup); |
385 | if (group) { |
		value = smmu_readl(smmu, group->reg);
		value &= ~SMMU_ASID_MASK;
		value |= SMMU_ASID_VALUE(asid);
		value &= ~SMMU_ASID_ENABLE;
		smmu_writel(smmu, value, group->reg);
391 | } |
392 | |
393 | for (i = 0; i < smmu->soc->num_clients; i++) { |
394 | const struct tegra_mc_client *client = &smmu->soc->clients[i]; |
395 | |
396 | if (client->swgroup != swgroup) |
397 | continue; |
398 | |
		value = smmu_readl(smmu, client->regs.smmu.reg);
		value &= ~BIT(client->regs.smmu.bit);
		smmu_writel(smmu, value, client->regs.smmu.reg);
402 | } |
403 | } |
404 | |
405 | static int tegra_smmu_as_prepare(struct tegra_smmu *smmu, |
406 | struct tegra_smmu_as *as) |
407 | { |
408 | u32 value; |
409 | int err = 0; |
410 | |
411 | mutex_lock(&smmu->lock); |
412 | |
413 | if (as->use_count > 0) { |
414 | as->use_count++; |
415 | goto unlock; |
416 | } |
417 | |
418 | as->pd_dma = dma_map_page(smmu->dev, as->pd, 0, SMMU_SIZE_PD, |
419 | DMA_TO_DEVICE); |
	if (dma_mapping_error(smmu->dev, as->pd_dma)) {
421 | err = -ENOMEM; |
422 | goto unlock; |
423 | } |
424 | |
425 | /* We can't handle 64-bit DMA addresses */ |
	if (!smmu_dma_addr_valid(smmu, as->pd_dma)) {
427 | err = -ENOMEM; |
428 | goto err_unmap; |
429 | } |
430 | |
	err = tegra_smmu_alloc_asid(smmu, &as->id);
432 | if (err < 0) |
433 | goto err_unmap; |
434 | |
	smmu_flush_ptc(smmu, as->pd_dma, 0);
	smmu_flush_tlb_asid(smmu, as->id);
437 | |
	smmu_writel(smmu, as->id & 0x7f, SMMU_PTB_ASID);
439 | value = SMMU_PTB_DATA_VALUE(as->pd_dma, as->attr); |
440 | smmu_writel(smmu, value, SMMU_PTB_DATA); |
441 | smmu_flush(smmu); |
442 | |
443 | as->smmu = smmu; |
444 | as->use_count++; |
445 | |
	mutex_unlock(&smmu->lock);
447 | |
448 | return 0; |
449 | |
450 | err_unmap: |
451 | dma_unmap_page(smmu->dev, as->pd_dma, SMMU_SIZE_PD, DMA_TO_DEVICE); |
452 | unlock: |
	mutex_unlock(&smmu->lock);
454 | |
455 | return err; |
456 | } |
457 | |
458 | static void tegra_smmu_as_unprepare(struct tegra_smmu *smmu, |
459 | struct tegra_smmu_as *as) |
460 | { |
461 | mutex_lock(&smmu->lock); |
462 | |
463 | if (--as->use_count > 0) { |
		mutex_unlock(&smmu->lock);
465 | return; |
466 | } |
467 | |
	tegra_smmu_free_asid(smmu, as->id);
469 | |
470 | dma_unmap_page(smmu->dev, as->pd_dma, SMMU_SIZE_PD, DMA_TO_DEVICE); |
471 | |
472 | as->smmu = NULL; |
473 | |
	mutex_unlock(&smmu->lock);
475 | } |
476 | |
477 | static int tegra_smmu_attach_dev(struct iommu_domain *domain, |
478 | struct device *dev) |
479 | { |
480 | struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev); |
481 | struct tegra_smmu *smmu = dev_iommu_priv_get(dev); |
	struct tegra_smmu_as *as = to_smmu_as(domain);
483 | unsigned int index; |
484 | int err; |
485 | |
486 | if (!fwspec) |
487 | return -ENOENT; |
488 | |
489 | for (index = 0; index < fwspec->num_ids; index++) { |
490 | err = tegra_smmu_as_prepare(smmu, as); |
491 | if (err) |
492 | goto disable; |
493 | |
		tegra_smmu_enable(smmu, fwspec->ids[index], as->id);
495 | } |
496 | |
497 | if (index == 0) |
498 | return -ENODEV; |
499 | |
500 | return 0; |
501 | |
502 | disable: |
503 | while (index--) { |
		tegra_smmu_disable(smmu, fwspec->ids[index], as->id);
505 | tegra_smmu_as_unprepare(smmu, as); |
506 | } |
507 | |
508 | return err; |
509 | } |
510 | |
511 | static int tegra_smmu_identity_attach(struct iommu_domain *identity_domain, |
512 | struct device *dev) |
513 | { |
514 | struct iommu_domain *domain = iommu_get_domain_for_dev(dev); |
515 | struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev); |
516 | struct tegra_smmu_as *as; |
517 | struct tegra_smmu *smmu; |
518 | unsigned int index; |
519 | |
520 | if (!fwspec) |
521 | return -ENODEV; |
522 | |
523 | if (domain == identity_domain || !domain) |
524 | return 0; |
525 | |
	as = to_smmu_as(domain);
527 | smmu = as->smmu; |
528 | for (index = 0; index < fwspec->num_ids; index++) { |
		tegra_smmu_disable(smmu, fwspec->ids[index], as->id);
530 | tegra_smmu_as_unprepare(smmu, as); |
531 | } |
532 | return 0; |
533 | } |
534 | |
535 | static struct iommu_domain_ops tegra_smmu_identity_ops = { |
536 | .attach_dev = tegra_smmu_identity_attach, |
537 | }; |
538 | |
539 | static struct iommu_domain tegra_smmu_identity_domain = { |
540 | .type = IOMMU_DOMAIN_IDENTITY, |
541 | .ops = &tegra_smmu_identity_ops, |
542 | }; |
543 | |
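/*
 * Descriptor updates below follow a common sequence: write the entry in
 * memory, sync that memory for the device, flush the page table cache
 * (PTC) line that may hold a stale copy, flush the affected TLB
 * entries, and finally read back a register (smmu_flush()) so the
 * writes are known to have completed.
 */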
544 | static void tegra_smmu_set_pde(struct tegra_smmu_as *as, unsigned long iova, |
545 | u32 value) |
546 | { |
547 | unsigned int pd_index = iova_pd_index(iova); |
548 | struct tegra_smmu *smmu = as->smmu; |
549 | u32 *pd = page_address(as->pd); |
550 | unsigned long offset = pd_index * sizeof(*pd); |
551 | |
552 | /* Set the page directory entry first */ |
553 | pd[pd_index] = value; |
554 | |
	/* Then flush the page directory entry from caches */
	dma_sync_single_range_for_device(smmu->dev, as->pd_dma, offset,
					 sizeof(*pd), DMA_TO_DEVICE);
558 | |
559 | /* And flush the iommu */ |
	smmu_flush_ptc(smmu, as->pd_dma, offset);
	smmu_flush_tlb_section(smmu, as->id, iova);
562 | smmu_flush(smmu); |
563 | } |
564 | |
565 | static u32 *tegra_smmu_pte_offset(struct page *pt_page, unsigned long iova) |
566 | { |
567 | u32 *pt = page_address(pt_page); |
568 | |
569 | return pt + iova_pt_index(iova); |
570 | } |
571 | |
572 | static u32 *tegra_smmu_pte_lookup(struct tegra_smmu_as *as, unsigned long iova, |
573 | dma_addr_t *dmap) |
574 | { |
575 | unsigned int pd_index = iova_pd_index(iova); |
576 | struct tegra_smmu *smmu = as->smmu; |
577 | struct page *pt_page; |
578 | u32 *pd; |
579 | |
580 | pt_page = as->pts[pd_index]; |
581 | if (!pt_page) |
582 | return NULL; |
583 | |
584 | pd = page_address(as->pd); |
	*dmap = smmu_pde_to_dma(smmu, pd[pd_index]);
586 | |
587 | return tegra_smmu_pte_offset(pt_page, iova); |
588 | } |
589 | |
590 | static u32 *as_get_pte(struct tegra_smmu_as *as, dma_addr_t iova, |
591 | dma_addr_t *dmap, struct page *page) |
592 | { |
593 | unsigned int pde = iova_pd_index(iova); |
594 | struct tegra_smmu *smmu = as->smmu; |
595 | |
596 | if (!as->pts[pde]) { |
597 | dma_addr_t dma; |
598 | |
599 | dma = dma_map_page(smmu->dev, page, 0, SMMU_SIZE_PT, |
600 | DMA_TO_DEVICE); |
		if (dma_mapping_error(smmu->dev, dma)) {
602 | __free_page(page); |
603 | return NULL; |
604 | } |
605 | |
		if (!smmu_dma_addr_valid(smmu, dma)) {
607 | dma_unmap_page(smmu->dev, dma, SMMU_SIZE_PT, |
608 | DMA_TO_DEVICE); |
609 | __free_page(page); |
610 | return NULL; |
611 | } |
612 | |
613 | as->pts[pde] = page; |
614 | |
615 | tegra_smmu_set_pde(as, iova, SMMU_MK_PDE(dma, SMMU_PDE_ATTR | |
616 | SMMU_PDE_NEXT)); |
617 | |
618 | *dmap = dma; |
619 | } else { |
620 | u32 *pd = page_address(as->pd); |
621 | |
		*dmap = smmu_pde_to_dma(smmu, pd[pde]);
623 | } |
624 | |
	return tegra_smmu_pte_offset(as->pts[pde], iova);
626 | } |
627 | |
628 | static void tegra_smmu_pte_get_use(struct tegra_smmu_as *as, unsigned long iova) |
629 | { |
630 | unsigned int pd_index = iova_pd_index(iova); |
631 | |
632 | as->count[pd_index]++; |
633 | } |
634 | |
635 | static void tegra_smmu_pte_put_use(struct tegra_smmu_as *as, unsigned long iova) |
636 | { |
637 | unsigned int pde = iova_pd_index(iova); |
638 | struct page *page = as->pts[pde]; |
639 | |
640 | /* |
641 | * When no entries in this page table are used anymore, return the |
642 | * memory page to the system. |
643 | */ |
644 | if (--as->count[pde] == 0) { |
645 | struct tegra_smmu *smmu = as->smmu; |
646 | u32 *pd = page_address(as->pd); |
		dma_addr_t pte_dma = smmu_pde_to_dma(smmu, pd[pde]);
648 | |
		tegra_smmu_set_pde(as, iova, 0);
650 | |
651 | dma_unmap_page(smmu->dev, pte_dma, SMMU_SIZE_PT, DMA_TO_DEVICE); |
652 | __free_page(page); |
653 | as->pts[pde] = NULL; |
654 | } |
655 | } |
656 | |
657 | static void tegra_smmu_set_pte(struct tegra_smmu_as *as, unsigned long iova, |
658 | u32 *pte, dma_addr_t pte_dma, u32 val) |
659 | { |
660 | struct tegra_smmu *smmu = as->smmu; |
661 | unsigned long offset = SMMU_OFFSET_IN_PAGE(pte); |
662 | |
663 | *pte = val; |
664 | |
	dma_sync_single_range_for_device(smmu->dev, pte_dma, offset,
					 4, DMA_TO_DEVICE);
	smmu_flush_ptc(smmu, pte_dma, offset);
	smmu_flush_tlb_group(smmu, as->id, iova);
669 | smmu_flush(smmu); |
670 | } |
671 | |
672 | static struct page *as_get_pde_page(struct tegra_smmu_as *as, |
673 | unsigned long iova, gfp_t gfp, |
674 | unsigned long *flags) |
675 | { |
676 | unsigned int pde = iova_pd_index(iova); |
677 | struct page *page = as->pts[pde]; |
678 | |
	/* first check whether allocation needs to be done at all */
680 | if (page) |
681 | return page; |
682 | |
683 | /* |
	 * In order to prevent exhaustion of the atomic memory pool, we
	 * allocate the page in a sleeping context if the GFP flags permit.
	 * Hence the spinlock needs to be unlocked and re-locked around the
	 * allocation.
687 | */ |
	if (gfpflags_allow_blocking(gfp))
		spin_unlock_irqrestore(&as->lock, *flags);
690 | |
691 | page = alloc_page(gfp | __GFP_DMA | __GFP_ZERO); |
692 | |
	if (gfpflags_allow_blocking(gfp))
694 | spin_lock_irqsave(&as->lock, *flags); |
695 | |
696 | /* |
	 * In the case of a blocking allocation, a concurrent mapping may win
	 * the PDE allocation. The page allocated here is then not needed if
	 * the concurrent allocation succeeded, and an allocation failure
	 * here isn't fatal.
700 | */ |
701 | if (as->pts[pde]) { |
702 | if (page) |
703 | __free_page(page); |
704 | |
705 | page = as->pts[pde]; |
706 | } |
707 | |
708 | return page; |
709 | } |
710 | |
711 | static int |
712 | __tegra_smmu_map(struct iommu_domain *domain, unsigned long iova, |
713 | phys_addr_t paddr, size_t size, int prot, gfp_t gfp, |
714 | unsigned long *flags) |
715 | { |
	struct tegra_smmu_as *as = to_smmu_as(domain);
717 | dma_addr_t pte_dma; |
718 | struct page *page; |
719 | u32 pte_attrs; |
720 | u32 *pte; |
721 | |
722 | page = as_get_pde_page(as, iova, gfp, flags); |
723 | if (!page) |
724 | return -ENOMEM; |
725 | |
	pte = as_get_pte(as, iova, &pte_dma, page);
727 | if (!pte) |
728 | return -ENOMEM; |
729 | |
730 | /* If we aren't overwriting a pre-existing entry, increment use */ |
731 | if (*pte == 0) |
732 | tegra_smmu_pte_get_use(as, iova); |
733 | |
734 | pte_attrs = SMMU_PTE_NONSECURE; |
735 | |
736 | if (prot & IOMMU_READ) |
737 | pte_attrs |= SMMU_PTE_READABLE; |
738 | |
739 | if (prot & IOMMU_WRITE) |
740 | pte_attrs |= SMMU_PTE_WRITABLE; |
741 | |
742 | tegra_smmu_set_pte(as, iova, pte, pte_dma, |
743 | SMMU_PHYS_PFN(paddr) | pte_attrs); |
744 | |
745 | return 0; |
746 | } |
747 | |
748 | static size_t |
749 | __tegra_smmu_unmap(struct iommu_domain *domain, unsigned long iova, |
750 | size_t size, struct iommu_iotlb_gather *gather) |
751 | { |
	struct tegra_smmu_as *as = to_smmu_as(domain);
753 | dma_addr_t pte_dma; |
754 | u32 *pte; |
755 | |
	pte = tegra_smmu_pte_lookup(as, iova, &pte_dma);
757 | if (!pte || !*pte) |
758 | return 0; |
759 | |
	tegra_smmu_set_pte(as, iova, pte, pte_dma, 0);
761 | tegra_smmu_pte_put_use(as, iova); |
762 | |
763 | return size; |
764 | } |
765 | |
766 | static int tegra_smmu_map(struct iommu_domain *domain, unsigned long iova, |
767 | phys_addr_t paddr, size_t size, size_t count, |
768 | int prot, gfp_t gfp, size_t *mapped) |
769 | { |
	struct tegra_smmu_as *as = to_smmu_as(domain);
771 | unsigned long flags; |
772 | int ret; |
773 | |
774 | spin_lock_irqsave(&as->lock, flags); |
	ret = __tegra_smmu_map(domain, iova, paddr, size, prot, gfp, &flags);
	spin_unlock_irqrestore(&as->lock, flags);
777 | |
778 | if (!ret) |
779 | *mapped = size; |
780 | |
781 | return ret; |
782 | } |
783 | |
784 | static size_t tegra_smmu_unmap(struct iommu_domain *domain, unsigned long iova, |
785 | size_t size, size_t count, struct iommu_iotlb_gather *gather) |
786 | { |
	struct tegra_smmu_as *as = to_smmu_as(domain);
788 | unsigned long flags; |
789 | |
790 | spin_lock_irqsave(&as->lock, flags); |
791 | size = __tegra_smmu_unmap(domain, iova, size, gather); |
	spin_unlock_irqrestore(&as->lock, flags);
793 | |
794 | return size; |
795 | } |
796 | |
797 | static phys_addr_t tegra_smmu_iova_to_phys(struct iommu_domain *domain, |
798 | dma_addr_t iova) |
799 | { |
	struct tegra_smmu_as *as = to_smmu_as(domain);
801 | unsigned long pfn; |
802 | dma_addr_t pte_dma; |
803 | u32 *pte; |
804 | |
	pte = tegra_smmu_pte_lookup(as, iova, &pte_dma);
806 | if (!pte || !*pte) |
807 | return 0; |
808 | |
809 | pfn = *pte & as->smmu->pfn_mask; |
810 | |
811 | return SMMU_PFN_PHYS(pfn) + SMMU_OFFSET_IN_PAGE(iova); |
812 | } |
813 | |
814 | static struct tegra_smmu *tegra_smmu_find(struct device_node *np) |
815 | { |
816 | struct platform_device *pdev; |
817 | struct tegra_mc *mc; |
818 | |
819 | pdev = of_find_device_by_node(np); |
820 | if (!pdev) |
821 | return NULL; |
822 | |
823 | mc = platform_get_drvdata(pdev); |
824 | if (!mc) { |
		put_device(&pdev->dev);
826 | return NULL; |
827 | } |
828 | |
829 | return mc->smmu; |
830 | } |
831 | |
832 | static int tegra_smmu_configure(struct tegra_smmu *smmu, struct device *dev, |
833 | const struct of_phandle_args *args) |
834 | { |
835 | const struct iommu_ops *ops = smmu->iommu.ops; |
836 | int err; |
837 | |
	err = iommu_fwspec_init(dev, &dev->of_node->fwnode, ops);
839 | if (err < 0) { |
		dev_err(dev, "failed to initialize fwspec: %d\n", err);
841 | return err; |
842 | } |
843 | |
844 | err = ops->of_xlate(dev, args); |
845 | if (err < 0) { |
		dev_err(dev, "failed to parse SW group ID: %d\n", err);
847 | iommu_fwspec_free(dev); |
848 | return err; |
849 | } |
850 | |
851 | return 0; |
852 | } |
853 | |
854 | static struct iommu_device *tegra_smmu_probe_device(struct device *dev) |
855 | { |
856 | struct device_node *np = dev->of_node; |
857 | struct tegra_smmu *smmu = NULL; |
858 | struct of_phandle_args args; |
859 | unsigned int index = 0; |
860 | int err; |
861 | |
	while (of_parse_phandle_with_args(np, "iommus", "#iommu-cells", index,
					  &args) == 0) {
		smmu = tegra_smmu_find(args.np);
865 | if (smmu) { |
			err = tegra_smmu_configure(smmu, dev, &args);
867 | |
868 | if (err < 0) { |
				of_node_put(args.np);
				return ERR_PTR(err);
871 | } |
872 | } |
873 | |
		of_node_put(args.np);
875 | index++; |
876 | } |
877 | |
878 | smmu = dev_iommu_priv_get(dev); |
879 | if (!smmu) |
		return ERR_PTR(-ENODEV);
881 | |
882 | return &smmu->iommu; |
883 | } |
884 | |
885 | static const struct tegra_smmu_group_soc * |
886 | tegra_smmu_find_group(struct tegra_smmu *smmu, unsigned int swgroup) |
887 | { |
888 | unsigned int i, j; |
889 | |
890 | for (i = 0; i < smmu->soc->num_groups; i++) |
891 | for (j = 0; j < smmu->soc->groups[i].num_swgroups; j++) |
892 | if (smmu->soc->groups[i].swgroups[j] == swgroup) |
893 | return &smmu->soc->groups[i]; |
894 | |
895 | return NULL; |
896 | } |
897 | |
898 | static void tegra_smmu_group_release(void *iommu_data) |
899 | { |
900 | struct tegra_smmu_group *group = iommu_data; |
901 | struct tegra_smmu *smmu = group->smmu; |
902 | |
903 | mutex_lock(&smmu->lock); |
	list_del(&group->list);
	mutex_unlock(&smmu->lock);
906 | } |
907 | |
908 | static struct iommu_group *tegra_smmu_device_group(struct device *dev) |
909 | { |
910 | struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev); |
911 | struct tegra_smmu *smmu = dev_iommu_priv_get(dev); |
912 | const struct tegra_smmu_group_soc *soc; |
913 | unsigned int swgroup = fwspec->ids[0]; |
914 | struct tegra_smmu_group *group; |
915 | struct iommu_group *grp; |
916 | |
	/* Find the group_soc associated with the swgroup */
918 | soc = tegra_smmu_find_group(smmu, swgroup); |
919 | |
920 | mutex_lock(&smmu->lock); |
921 | |
	/* Find an existing iommu_group associated with the swgroup or group_soc */
923 | list_for_each_entry(group, &smmu->groups, list) |
924 | if ((group->swgroup == swgroup) || (soc && group->soc == soc)) { |
			grp = iommu_group_ref_get(group->group);
			mutex_unlock(&smmu->lock);
927 | return grp; |
928 | } |
929 | |
	group = devm_kzalloc(smmu->dev, sizeof(*group), GFP_KERNEL);
931 | if (!group) { |
		mutex_unlock(&smmu->lock);
933 | return NULL; |
934 | } |
935 | |
	INIT_LIST_HEAD(&group->list);
937 | group->swgroup = swgroup; |
938 | group->smmu = smmu; |
939 | group->soc = soc; |
940 | |
941 | if (dev_is_pci(dev)) |
942 | group->group = pci_device_group(dev); |
943 | else |
944 | group->group = generic_device_group(dev); |
945 | |
	if (IS_ERR(group->group)) {
		devm_kfree(smmu->dev, group);
		mutex_unlock(&smmu->lock);
949 | return NULL; |
950 | } |
951 | |
	iommu_group_set_iommudata(group->group, group, tegra_smmu_group_release);
	if (soc)
		iommu_group_set_name(group->group, soc->name);
	list_add_tail(&group->list, &smmu->groups);
	mutex_unlock(&smmu->lock);
957 | |
958 | return group->group; |
959 | } |
960 | |
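/*
 * A hypothetical device tree node, for illustration only: a client
 * device references the memory controller with a single swgroup ID
 * cell, which tegra_smmu_of_xlate() below records in the fwspec:
 *
 *	host1x@50000000 {
 *		...
 *		iommus = <&mc TEGRA_SWGROUP_HC>;
 *	};
 */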
961 | static int tegra_smmu_of_xlate(struct device *dev, |
962 | const struct of_phandle_args *args) |
963 | { |
	struct platform_device *iommu_pdev = of_find_device_by_node(args->np);
	struct tegra_mc *mc = platform_get_drvdata(iommu_pdev);
966 | u32 id = args->args[0]; |
967 | |
968 | /* |
969 | * Note: we are here releasing the reference of &iommu_pdev->dev, which |
970 | * is mc->dev. Although some functions in tegra_smmu_ops may keep using |
971 | * its private data beyond this point, it's still safe to do so because |
972 | * the SMMU parent device is the same as the MC, so the reference count |
973 | * isn't strictly necessary. |
974 | */ |
	put_device(&iommu_pdev->dev);
976 | |
	dev_iommu_priv_set(dev, mc->smmu);
978 | |
	return iommu_fwspec_add_ids(dev, &id, 1);
980 | } |
981 | |
982 | static int tegra_smmu_def_domain_type(struct device *dev) |
983 | { |
984 | /* |
985 | * FIXME: For now we want to run all translation in IDENTITY mode, due |
986 | * to some device quirks. Better would be to just quirk the troubled |
987 | * devices. |
988 | */ |
989 | return IOMMU_DOMAIN_IDENTITY; |
990 | } |
991 | |
992 | static const struct iommu_ops tegra_smmu_ops = { |
993 | .identity_domain = &tegra_smmu_identity_domain, |
994 | .def_domain_type = &tegra_smmu_def_domain_type, |
995 | .domain_alloc_paging = tegra_smmu_domain_alloc_paging, |
996 | .probe_device = tegra_smmu_probe_device, |
997 | .device_group = tegra_smmu_device_group, |
998 | .of_xlate = tegra_smmu_of_xlate, |
999 | .pgsize_bitmap = SZ_4K, |
1000 | .default_domain_ops = &(const struct iommu_domain_ops) { |
1001 | .attach_dev = tegra_smmu_attach_dev, |
1002 | .map_pages = tegra_smmu_map, |
1003 | .unmap_pages = tegra_smmu_unmap, |
1004 | .iova_to_phys = tegra_smmu_iova_to_phys, |
1005 | .free = tegra_smmu_domain_free, |
1006 | } |
1007 | }; |
1008 | |
1009 | static void tegra_smmu_ahb_enable(void) |
1010 | { |
1011 | static const struct of_device_id ahb_match[] = { |
		{ .compatible = "nvidia,tegra30-ahb", },
1013 | { } |
1014 | }; |
1015 | struct device_node *ahb; |
1016 | |
	ahb = of_find_matching_node(NULL, ahb_match);
1018 | if (ahb) { |
1019 | tegra_ahb_enable_smmu(ahb); |
		of_node_put(ahb);
1021 | } |
1022 | } |
1023 | |
1024 | static int tegra_smmu_swgroups_show(struct seq_file *s, void *data) |
1025 | { |
1026 | struct tegra_smmu *smmu = s->private; |
1027 | unsigned int i; |
1028 | u32 value; |
1029 | |
	seq_printf(s, "swgroup enabled ASID\n");
	seq_printf(s, "------------------------\n");
1032 | |
1033 | for (i = 0; i < smmu->soc->num_swgroups; i++) { |
1034 | const struct tegra_smmu_swgroup *group = &smmu->soc->swgroups[i]; |
1035 | const char *status; |
1036 | unsigned int asid; |
1037 | |
		value = smmu_readl(smmu, group->reg);
1039 | |
1040 | if (value & SMMU_ASID_ENABLE) |
			status = "yes";
		else
			status = "no";
1044 | |
1045 | asid = value & SMMU_ASID_MASK; |
1046 | |
		seq_printf(s, "%-9s %-7s %#04x\n", group->name, status,
1048 | asid); |
1049 | } |
1050 | |
1051 | return 0; |
1052 | } |
1053 | |
1054 | DEFINE_SHOW_ATTRIBUTE(tegra_smmu_swgroups); |
1055 | |
1056 | static int tegra_smmu_clients_show(struct seq_file *s, void *data) |
1057 | { |
1058 | struct tegra_smmu *smmu = s->private; |
1059 | unsigned int i; |
1060 | u32 value; |
1061 | |
	seq_printf(s, "client enabled\n");
	seq_printf(s, "--------------------\n");
1064 | |
1065 | for (i = 0; i < smmu->soc->num_clients; i++) { |
1066 | const struct tegra_mc_client *client = &smmu->soc->clients[i]; |
1067 | const char *status; |
1068 | |
		value = smmu_readl(smmu, client->regs.smmu.reg);
1070 | |
1071 | if (value & BIT(client->regs.smmu.bit)) |
			status = "yes";
		else
			status = "no";
1075 | |
		seq_printf(s, "%-12s %s\n", client->name, status);
1077 | } |
1078 | |
1079 | return 0; |
1080 | } |
1081 | |
1082 | DEFINE_SHOW_ATTRIBUTE(tegra_smmu_clients); |
1083 | |
1084 | static void tegra_smmu_debugfs_init(struct tegra_smmu *smmu) |
1085 | { |
	smmu->debugfs = debugfs_create_dir("smmu", NULL);
1087 | |
	debugfs_create_file("swgroups", S_IRUGO, smmu->debugfs, smmu,
			    &tegra_smmu_swgroups_fops);
	debugfs_create_file("clients", S_IRUGO, smmu->debugfs, smmu,
			    &tegra_smmu_clients_fops);
1092 | } |
1093 | |
1094 | static void tegra_smmu_debugfs_exit(struct tegra_smmu *smmu) |
1095 | { |
	debugfs_remove_recursive(smmu->debugfs);
1097 | } |
1098 | |
1099 | struct tegra_smmu *tegra_smmu_probe(struct device *dev, |
1100 | const struct tegra_smmu_soc *soc, |
1101 | struct tegra_mc *mc) |
1102 | { |
1103 | struct tegra_smmu *smmu; |
1104 | u32 value; |
1105 | int err; |
1106 | |
	smmu = devm_kzalloc(dev, sizeof(*smmu), GFP_KERNEL);
1108 | if (!smmu) |
		return ERR_PTR(-ENOMEM);
1110 | |
1111 | /* |
1112 | * This is a bit of a hack. Ideally we'd want to simply return this |
1113 | * value. However iommu_device_register() will attempt to add |
1114 | * all devices to the IOMMU before we get that far. In order |
1115 | * not to rely on global variables to track the IOMMU instance, we |
1116 | * set it here so that it can be looked up from the .probe_device() |
1117 | * callback via the IOMMU device's .drvdata field. |
1118 | */ |
1119 | mc->smmu = smmu; |
1120 | |
	smmu->asids = devm_bitmap_zalloc(dev, soc->num_asids, GFP_KERNEL);
1122 | if (!smmu->asids) |
		return ERR_PTR(-ENOMEM);
1124 | |
	INIT_LIST_HEAD(&smmu->groups);
1126 | mutex_init(&smmu->lock); |
1127 | |
1128 | smmu->regs = mc->regs; |
1129 | smmu->soc = soc; |
1130 | smmu->dev = dev; |
1131 | smmu->mc = mc; |
1132 | |
1133 | smmu->pfn_mask = |
1134 | BIT_MASK(mc->soc->num_address_bits - SMMU_PTE_SHIFT) - 1; |
	dev_dbg(dev, "address bits: %u, PFN mask: %#lx\n",
1136 | mc->soc->num_address_bits, smmu->pfn_mask); |
	smmu->tlb_mask = (1 << fls(smmu->soc->num_tlb_lines)) - 1;
	dev_dbg(dev, "TLB lines: %u, mask: %#lx\n", smmu->soc->num_tlb_lines,
1139 | smmu->tlb_mask); |
1140 | |
1141 | value = SMMU_PTC_CONFIG_ENABLE | SMMU_PTC_CONFIG_INDEX_MAP(0x3f); |
1142 | |
1143 | if (soc->supports_request_limit) |
1144 | value |= SMMU_PTC_CONFIG_REQ_LIMIT(8); |
1145 | |
1146 | smmu_writel(smmu, value, SMMU_PTC_CONFIG); |
1147 | |
1148 | value = SMMU_TLB_CONFIG_HIT_UNDER_MISS | |
1149 | SMMU_TLB_CONFIG_ACTIVE_LINES(smmu); |
1150 | |
1151 | if (soc->supports_round_robin_arbitration) |
1152 | value |= SMMU_TLB_CONFIG_ROUND_ROBIN_ARBITRATION; |
1153 | |
1154 | smmu_writel(smmu, value, SMMU_TLB_CONFIG); |
1155 | |
1156 | smmu_flush_ptc_all(smmu); |
1157 | smmu_flush_tlb(smmu); |
1158 | smmu_writel(smmu, SMMU_CONFIG_ENABLE, SMMU_CONFIG); |
1159 | smmu_flush(smmu); |
1160 | |
1161 | tegra_smmu_ahb_enable(); |
1162 | |
	err = iommu_device_sysfs_add(&smmu->iommu, dev, NULL, dev_name(dev));
1164 | if (err) |
		return ERR_PTR(err);
1166 | |
	err = iommu_device_register(&smmu->iommu, &tegra_smmu_ops, dev);
1168 | if (err) { |
		iommu_device_sysfs_remove(&smmu->iommu);
		return ERR_PTR(err);
1171 | } |
1172 | |
1173 | if (IS_ENABLED(CONFIG_DEBUG_FS)) |
1174 | tegra_smmu_debugfs_init(smmu); |
1175 | |
1176 | return smmu; |
1177 | } |
1178 | |
1179 | void tegra_smmu_remove(struct tegra_smmu *smmu) |
1180 | { |
	iommu_device_unregister(&smmu->iommu);
	iommu_device_sysfs_remove(&smmu->iommu);
1183 | |
1184 | if (IS_ENABLED(CONFIG_DEBUG_FS)) |
1185 | tegra_smmu_debugfs_exit(smmu); |
1186 | } |
1187 | |