1 | // SPDX-License-Identifier: GPL-2.0-only |
2 | /* |
3 | * Tegra host1x driver |
4 | * |
5 | * Copyright (c) 2010-2013, NVIDIA Corporation. |
6 | */ |
7 | |
8 | #include <linux/clk.h> |
9 | #include <linux/delay.h> |
10 | #include <linux/dma-mapping.h> |
11 | #include <linux/io.h> |
12 | #include <linux/list.h> |
13 | #include <linux/module.h> |
14 | #include <linux/of.h> |
15 | #include <linux/of_platform.h> |
16 | #include <linux/platform_device.h> |
17 | #include <linux/pm_runtime.h> |
18 | #include <linux/slab.h> |
19 | |
20 | #include <soc/tegra/common.h> |
21 | |
22 | #define CREATE_TRACE_POINTS |
23 | #include <trace/events/host1x.h> |
24 | #undef CREATE_TRACE_POINTS |
25 | |
26 | #if IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU) |
27 | #include <asm/dma-iommu.h> |
28 | #endif |
29 | |
30 | #include "bus.h" |
31 | #include "channel.h" |
32 | #include "context.h" |
33 | #include "debug.h" |
34 | #include "dev.h" |
35 | #include "intr.h" |
36 | |
37 | #include "hw/host1x01.h" |
38 | #include "hw/host1x02.h" |
39 | #include "hw/host1x04.h" |
40 | #include "hw/host1x05.h" |
41 | #include "hw/host1x06.h" |
42 | #include "hw/host1x07.h" |
43 | #include "hw/host1x08.h" |
44 | |
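/*
 * Register accessors for the host1x apertures: the common aperture (where
 * available), the hypervisor aperture (Tegra186 and later), the sync
 * registers located at a per-SoC offset within the main aperture, and the
 * per-channel registers.
 */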
void host1x_common_writel(struct host1x *host1x, u32 v, u32 r)
{
	writel(v, host1x->common_regs + r);
}

void host1x_hypervisor_writel(struct host1x *host1x, u32 v, u32 r)
{
	writel(v, host1x->hv_regs + r);
}

u32 host1x_hypervisor_readl(struct host1x *host1x, u32 r)
{
	return readl(host1x->hv_regs + r);
}

void host1x_sync_writel(struct host1x *host1x, u32 v, u32 r)
{
	void __iomem *sync_regs = host1x->regs + host1x->info->sync_offset;

	writel(v, sync_regs + r);
}

u32 host1x_sync_readl(struct host1x *host1x, u32 r)
{
	void __iomem *sync_regs = host1x->regs + host1x->info->sync_offset;

	return readl(sync_regs + r);
}

void host1x_ch_writel(struct host1x_channel *ch, u32 v, u32 r)
{
	writel(v, ch->regs + r);
}

u32 host1x_ch_readl(struct host1x_channel *ch, u32 r)
{
	return readl(ch->regs + r);
}
83 | |
84 | static const struct host1x_info host1x01_info = { |
85 | .nb_channels = 8, |
86 | .nb_pts = 32, |
87 | .nb_mlocks = 16, |
88 | .nb_bases = 8, |
89 | .init = host1x01_init, |
90 | .sync_offset = 0x3000, |
91 | .dma_mask = DMA_BIT_MASK(32), |
92 | .has_wide_gather = false, |
93 | .has_hypervisor = false, |
94 | .num_sid_entries = 0, |
95 | .sid_table = NULL, |
96 | .reserve_vblank_syncpts = true, |
97 | }; |
98 | |
99 | static const struct host1x_info host1x02_info = { |
100 | .nb_channels = 9, |
101 | .nb_pts = 32, |
102 | .nb_mlocks = 16, |
103 | .nb_bases = 12, |
104 | .init = host1x02_init, |
105 | .sync_offset = 0x3000, |
106 | .dma_mask = DMA_BIT_MASK(32), |
107 | .has_wide_gather = false, |
108 | .has_hypervisor = false, |
109 | .num_sid_entries = 0, |
110 | .sid_table = NULL, |
111 | .reserve_vblank_syncpts = true, |
112 | }; |
113 | |
114 | static const struct host1x_info host1x04_info = { |
115 | .nb_channels = 12, |
116 | .nb_pts = 192, |
117 | .nb_mlocks = 16, |
118 | .nb_bases = 64, |
119 | .init = host1x04_init, |
120 | .sync_offset = 0x2100, |
121 | .dma_mask = DMA_BIT_MASK(34), |
122 | .has_wide_gather = false, |
123 | .has_hypervisor = false, |
124 | .num_sid_entries = 0, |
125 | .sid_table = NULL, |
126 | .reserve_vblank_syncpts = false, |
127 | }; |
128 | |
129 | static const struct host1x_info host1x05_info = { |
130 | .nb_channels = 14, |
131 | .nb_pts = 192, |
132 | .nb_mlocks = 16, |
133 | .nb_bases = 64, |
134 | .init = host1x05_init, |
135 | .sync_offset = 0x2100, |
136 | .dma_mask = DMA_BIT_MASK(34), |
137 | .has_wide_gather = false, |
138 | .has_hypervisor = false, |
139 | .num_sid_entries = 0, |
140 | .sid_table = NULL, |
141 | .reserve_vblank_syncpts = false, |
142 | }; |
143 | |
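/*
 * Stream ID protection table for Tegra186: for each engine, the offset and
 * limit values are programmed into the hypervisor registers at base and
 * base + 4 by host1x_setup_virtualization_tables().
 */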
144 | static const struct host1x_sid_entry tegra186_sid_table[] = { |
145 | { |
146 | /* VIC */ |
147 | .base = 0x1af0, |
148 | .offset = 0x30, |
149 | .limit = 0x34 |
150 | }, |
151 | { |
152 | /* NVDEC */ |
153 | .base = 0x1b00, |
154 | .offset = 0x30, |
155 | .limit = 0x34 |
156 | }, |
157 | }; |
158 | |
159 | static const struct host1x_info host1x06_info = { |
160 | .nb_channels = 63, |
161 | .nb_pts = 576, |
162 | .nb_mlocks = 24, |
163 | .nb_bases = 16, |
164 | .init = host1x06_init, |
165 | .sync_offset = 0x0, |
166 | .dma_mask = DMA_BIT_MASK(40), |
167 | .has_wide_gather = true, |
168 | .has_hypervisor = true, |
169 | .num_sid_entries = ARRAY_SIZE(tegra186_sid_table), |
170 | .sid_table = tegra186_sid_table, |
171 | .reserve_vblank_syncpts = false, |
172 | .skip_reset_assert = true, |
173 | }; |
174 | |
175 | static const struct host1x_sid_entry tegra194_sid_table[] = { |
176 | { |
177 | /* VIC */ |
178 | .base = 0x1af0, |
179 | .offset = 0x30, |
180 | .limit = 0x34 |
181 | }, |
182 | { |
183 | /* NVDEC */ |
184 | .base = 0x1b00, |
185 | .offset = 0x30, |
186 | .limit = 0x34 |
187 | }, |
188 | { |
189 | /* NVDEC1 */ |
190 | .base = 0x1bc0, |
191 | .offset = 0x30, |
192 | .limit = 0x34 |
193 | }, |
194 | }; |
195 | |
196 | static const struct host1x_info host1x07_info = { |
197 | .nb_channels = 63, |
198 | .nb_pts = 704, |
199 | .nb_mlocks = 32, |
200 | .nb_bases = 0, |
201 | .init = host1x07_init, |
202 | .sync_offset = 0x0, |
203 | .dma_mask = DMA_BIT_MASK(40), |
204 | .has_wide_gather = true, |
205 | .has_hypervisor = true, |
206 | .num_sid_entries = ARRAY_SIZE(tegra194_sid_table), |
207 | .sid_table = tegra194_sid_table, |
208 | .reserve_vblank_syncpts = false, |
209 | }; |
210 | |
211 | /* |
212 | * Tegra234 has two stream ID protection tables, one for setting stream IDs |
213 | * through the channel path via SETSTREAMID, and one for setting them via |
214 | * MMIO. We program each engine's data stream ID in the channel path table |
215 | * and firmware stream ID in the MMIO path table. |
216 | */ |
217 | static const struct host1x_sid_entry tegra234_sid_table[] = { |
218 | { |
219 | /* VIC channel */ |
220 | .base = 0x17b8, |
221 | .offset = 0x30, |
222 | .limit = 0x30 |
223 | }, |
224 | { |
225 | /* VIC MMIO */ |
226 | .base = 0x1688, |
227 | .offset = 0x34, |
228 | .limit = 0x34 |
229 | }, |
230 | { |
231 | /* NVDEC channel */ |
232 | .base = 0x17c8, |
233 | .offset = 0x30, |
234 | .limit = 0x30, |
235 | }, |
236 | { |
237 | /* NVDEC MMIO */ |
238 | .base = 0x1698, |
239 | .offset = 0x34, |
240 | .limit = 0x34, |
241 | }, |
242 | }; |
243 | |
244 | static const struct host1x_info host1x08_info = { |
245 | .nb_channels = 63, |
246 | .nb_pts = 1024, |
247 | .nb_mlocks = 24, |
248 | .nb_bases = 0, |
249 | .init = host1x08_init, |
250 | .sync_offset = 0x0, |
251 | .dma_mask = DMA_BIT_MASK(40), |
252 | .has_wide_gather = true, |
253 | .has_hypervisor = true, |
254 | .has_common = true, |
255 | .num_sid_entries = ARRAY_SIZE(tegra234_sid_table), |
256 | .sid_table = tegra234_sid_table, |
257 | .streamid_vm_table = { 0x1004, 128 }, |
258 | .classid_vm_table = { 0x1404, 25 }, |
259 | .mmio_vm_table = { 0x1504, 25 }, |
260 | .reserve_vblank_syncpts = false, |
261 | }; |
262 | |
263 | static const struct of_device_id host1x_of_match[] = { |
264 | { .compatible = "nvidia,tegra234-host1x" , .data = &host1x08_info, }, |
265 | { .compatible = "nvidia,tegra194-host1x" , .data = &host1x07_info, }, |
266 | { .compatible = "nvidia,tegra186-host1x" , .data = &host1x06_info, }, |
267 | { .compatible = "nvidia,tegra210-host1x" , .data = &host1x05_info, }, |
268 | { .compatible = "nvidia,tegra124-host1x" , .data = &host1x04_info, }, |
269 | { .compatible = "nvidia,tegra114-host1x" , .data = &host1x02_info, }, |
270 | { .compatible = "nvidia,tegra30-host1x" , .data = &host1x01_info, }, |
271 | { .compatible = "nvidia,tegra20-host1x" , .data = &host1x01_info, }, |
272 | { }, |
273 | }; |
274 | MODULE_DEVICE_TABLE(of, host1x_of_match); |
275 | |
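/*
 * Program the stream ID protection and VM tables through the hypervisor
 * aperture. This is a no-op on SoCs without hypervisor support. It is also
 * called on runtime resume, after the resets have been deasserted, so that
 * the configuration is reapplied.
 */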
276 | static void host1x_setup_virtualization_tables(struct host1x *host) |
277 | { |
278 | const struct host1x_info *info = host->info; |
279 | unsigned int i; |
280 | |
281 | if (!info->has_hypervisor) |
282 | return; |
283 | |
284 | for (i = 0; i < info->num_sid_entries; i++) { |
285 | const struct host1x_sid_entry *entry = &info->sid_table[i]; |
286 | |
		host1x_hypervisor_writel(host, entry->offset, entry->base);
		host1x_hypervisor_writel(host, entry->limit, entry->base + 4);
289 | } |
290 | |
291 | for (i = 0; i < info->streamid_vm_table.count; i++) { |
		/* Allow all VMs access to all stream IDs. */
		host1x_hypervisor_writel(host, 0xff, info->streamid_vm_table.base + 4 * i);
294 | } |
295 | |
296 | for (i = 0; i < info->classid_vm_table.count; i++) { |
		/* Allow all VMs access to all classes. */
		host1x_hypervisor_writel(host, 0xff, info->classid_vm_table.base + 4 * i);
299 | } |
300 | |
301 | for (i = 0; i < info->mmio_vm_table.count; i++) { |
		/* Use VM1 (that's us) as the originator VMID for engine MMIO accesses. */
		host1x_hypervisor_writel(host, 0x1, info->mmio_vm_table.base + 4 * i);
304 | } |
305 | } |
306 | |
307 | static bool host1x_wants_iommu(struct host1x *host1x) |
308 | { |
309 | /* Our IOMMU usage policy doesn't currently play well with GART */ |
	if (of_machine_is_compatible("nvidia,tegra20"))
311 | return false; |
312 | |
313 | /* |
314 | * If we support addressing a maximum of 32 bits of physical memory |
315 | * and if the host1x firewall is enabled, there's no need to enable |
316 | * IOMMU support. This can happen for example on Tegra20, Tegra30 |
317 | * and Tegra114. |
318 | * |
319 | * Tegra124 and later can address up to 34 bits of physical memory and |
320 | * many platforms come equipped with more than 2 GiB of system memory, |
321 | * which requires crossing the 4 GiB boundary. But there's a catch: on |
322 | * SoCs before Tegra186 (i.e. Tegra124 and Tegra210), the host1x can |
323 | * only address up to 32 bits of memory in GATHER opcodes, which means |
324 | * that command buffers need to either be in the first 2 GiB of system |
325 | * memory (which could quickly lead to memory exhaustion), or command |
326 | * buffers need to be treated differently from other buffers (which is |
327 | * not possible with the current ABI). |
328 | * |
329 | * A third option is to use the IOMMU in these cases to make sure all |
330 | * buffers will be mapped into a 32-bit IOVA space that host1x can |
331 | * address. This allows all of the system memory to be used and works |
332 | * within the limitations of the host1x on these SoCs. |
333 | * |
334 | * In summary, default to enable IOMMU on Tegra124 and later. For any |
335 | * of the earlier SoCs, only use the IOMMU for additional safety when |
336 | * the host1x firewall is disabled. |
337 | */ |
338 | if (host1x->info->dma_mask <= DMA_BIT_MASK(32)) { |
339 | if (IS_ENABLED(CONFIG_TEGRA_HOST1X_FIREWALL)) |
340 | return false; |
341 | } |
342 | |
343 | return true; |
344 | } |
345 | |
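/*
 * Attach host1x to an IOMMU domain, if desired. Any pre-existing ARM DMA
 * API mapping is torn down first so that an explicit domain can be used
 * instead. When an explicit domain is created, an IOVA allocator is set up
 * covering the part of the domain's aperture that host1x can address.
 * Returns the domain in use, NULL when no IOMMU is used, or an ERR_PTR()
 * on failure.
 */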
346 | static struct iommu_domain *host1x_iommu_attach(struct host1x *host) |
347 | { |
	struct iommu_domain *domain = iommu_get_domain_for_dev(host->dev);
349 | int err; |
350 | |
351 | #if IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU) |
352 | if (host->dev->archdata.mapping) { |
353 | struct dma_iommu_mapping *mapping = |
354 | to_dma_iommu_mapping(host->dev); |
355 | arm_iommu_detach_device(host->dev); |
356 | arm_iommu_release_mapping(mapping); |
357 | |
358 | domain = iommu_get_domain_for_dev(host->dev); |
359 | } |
360 | #endif |
361 | |
362 | /* |
363 | * We may not always want to enable IOMMU support (for example if the |
364 | * host1x firewall is already enabled and we don't support addressing |
365 | * more than 32 bits of physical memory), so check for that first. |
366 | * |
367 | * Similarly, if host1x is already attached to an IOMMU (via the DMA |
368 | * API), don't try to attach again. |
369 | */ |
	if (!host1x_wants_iommu(host) || domain)
371 | return domain; |
372 | |
	host->group = iommu_group_get(host->dev);
374 | if (host->group) { |
375 | struct iommu_domain_geometry *geometry; |
376 | dma_addr_t start, end; |
377 | unsigned long order; |
378 | |
379 | err = iova_cache_get(); |
380 | if (err < 0) |
381 | goto put_group; |
382 | |
		host->domain = iommu_domain_alloc(&platform_bus_type);
384 | if (!host->domain) { |
385 | err = -ENOMEM; |
386 | goto put_cache; |
387 | } |
388 | |
		err = iommu_attach_group(host->domain, host->group);
390 | if (err) { |
391 | if (err == -ENODEV) |
392 | err = 0; |
393 | |
394 | goto free_domain; |
395 | } |
396 | |
397 | geometry = &host->domain->geometry; |
398 | start = geometry->aperture_start & host->info->dma_mask; |
399 | end = geometry->aperture_end & host->info->dma_mask; |
400 | |
401 | order = __ffs(host->domain->pgsize_bitmap); |
		init_iova_domain(&host->iova, 1UL << order, start >> order);
403 | host->iova_end = end; |
404 | |
405 | domain = host->domain; |
406 | } |
407 | |
408 | return domain; |
409 | |
410 | free_domain: |
	iommu_domain_free(host->domain);
412 | host->domain = NULL; |
413 | put_cache: |
414 | iova_cache_put(); |
415 | put_group: |
	iommu_group_put(host->group);
417 | host->group = NULL; |
418 | |
	return ERR_PTR(err);
420 | } |
421 | |
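/*
 * Set up IOMMU translation and the device's DMA mask. Without an IOMMU,
 * and without support for the wide GATHER opcode, the mask is capped at
 * 32 bits so that push buffers remain addressable by GATHER.
 */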
422 | static int host1x_iommu_init(struct host1x *host) |
423 | { |
424 | u64 mask = host->info->dma_mask; |
425 | struct iommu_domain *domain; |
426 | int err; |
427 | |
428 | domain = host1x_iommu_attach(host); |
	if (IS_ERR(domain)) {
		err = PTR_ERR(domain);
		dev_err(host->dev, "failed to attach to IOMMU: %d\n", err);
432 | return err; |
433 | } |
434 | |
435 | /* |
436 | * If we're not behind an IOMMU make sure we don't get push buffers |
437 | * that are allocated outside of the range addressable by the GATHER |
438 | * opcode. |
439 | * |
440 | * Newer generations of Tegra (Tegra186 and later) support a wide |
441 | * variant of the GATHER opcode that allows addressing more bits. |
442 | */ |
443 | if (!domain && !host->info->has_wide_gather) |
444 | mask = DMA_BIT_MASK(32); |
445 | |
	err = dma_coerce_mask_and_coherent(host->dev, mask);
447 | if (err < 0) { |
448 | dev_err(host->dev, "failed to set DMA mask: %d\n" , err); |
449 | return err; |
450 | } |
451 | |
452 | return 0; |
453 | } |
454 | |
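/* Tear down the explicit IOMMU domain and IOVA allocator, if one was set up. */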
455 | static void host1x_iommu_exit(struct host1x *host) |
456 | { |
457 | if (host->domain) { |
		put_iova_domain(&host->iova);
		iommu_detach_group(host->domain, host->group);

		iommu_domain_free(host->domain);
		host->domain = NULL;

		iova_cache_put();

		iommu_group_put(host->group);
467 | host->group = NULL; |
468 | } |
469 | } |
470 | |
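/*
 * Look up the (optional) "mc" and "host1x" reset controls. They are
 * requested in released state so that the runtime PM callbacks can acquire
 * and release them around each power cycle.
 */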
471 | static int host1x_get_resets(struct host1x *host) |
472 | { |
473 | int err; |
474 | |
475 | host->resets[0].id = "mc" ; |
476 | host->resets[1].id = "host1x" ; |
477 | host->nresets = ARRAY_SIZE(host->resets); |
478 | |
479 | err = devm_reset_control_bulk_get_optional_exclusive_released( |
480 | dev: host->dev, num_rstcs: host->nresets, rstcs: host->resets); |
481 | if (err) { |
482 | dev_err(host->dev, "failed to get reset: %d\n" , err); |
483 | return err; |
484 | } |
485 | |
486 | return 0; |
487 | } |
488 | |
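/*
 * Probe maps the register apertures, collects the per-syncpoint interrupts
 * (falling back to a single shared interrupt on device trees without named
 * IRQs), then brings up the core services in order: IOMMU, channel list,
 * memory contexts, syncpoints, interrupts, runtime PM, debugfs, and finally
 * the host1x bus plus any child devices.
 */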
489 | static int host1x_probe(struct platform_device *pdev) |
490 | { |
491 | struct host1x *host; |
492 | int err, i; |
493 | |
	host = devm_kzalloc(&pdev->dev, sizeof(*host), GFP_KERNEL);
495 | if (!host) |
496 | return -ENOMEM; |
497 | |
	host->info = of_device_get_match_data(&pdev->dev);
499 | |
500 | if (host->info->has_hypervisor) { |
501 | host->regs = devm_platform_ioremap_resource_byname(pdev, name: "vm" ); |
502 | if (IS_ERR(ptr: host->regs)) |
503 | return PTR_ERR(ptr: host->regs); |
504 | |
505 | host->hv_regs = devm_platform_ioremap_resource_byname(pdev, name: "hypervisor" ); |
506 | if (IS_ERR(ptr: host->hv_regs)) |
507 | return PTR_ERR(ptr: host->hv_regs); |
508 | |
509 | if (host->info->has_common) { |
510 | host->common_regs = devm_platform_ioremap_resource_byname(pdev, name: "common" ); |
511 | if (IS_ERR(ptr: host->common_regs)) |
512 | return PTR_ERR(ptr: host->common_regs); |
513 | } |
514 | } else { |
515 | host->regs = devm_platform_ioremap_resource(pdev, index: 0); |
516 | if (IS_ERR(ptr: host->regs)) |
517 | return PTR_ERR(ptr: host->regs); |
518 | } |
519 | |
520 | for (i = 0; i < ARRAY_SIZE(host->syncpt_irqs); i++) { |
521 | char irq_name[] = "syncptX" ; |
522 | |
523 | sprintf(buf: irq_name, fmt: "syncpt%d" , i); |
524 | |
525 | err = platform_get_irq_byname_optional(dev: pdev, name: irq_name); |
526 | if (err == -ENXIO) |
527 | break; |
528 | if (err < 0) |
529 | return err; |
530 | |
531 | host->syncpt_irqs[i] = err; |
532 | } |
533 | |
534 | host->num_syncpt_irqs = i; |
535 | |
536 | /* Device tree without irq names */ |
537 | if (i == 0) { |
538 | host->syncpt_irqs[0] = platform_get_irq(pdev, 0); |
539 | if (host->syncpt_irqs[0] < 0) |
540 | return host->syncpt_irqs[0]; |
541 | |
542 | host->num_syncpt_irqs = 1; |
543 | } |
544 | |
545 | mutex_init(&host->devices_lock); |
	INIT_LIST_HEAD(&host->devices);
	INIT_LIST_HEAD(&host->list);
548 | host->dev = &pdev->dev; |
549 | |
550 | /* set common host1x device data */ |
	platform_set_drvdata(pdev, host);
552 | |
553 | host->dev->dma_parms = &host->dma_parms; |
	dma_set_max_seg_size(host->dev, UINT_MAX);
555 | |
556 | if (host->info->init) { |
557 | err = host->info->init(host); |
558 | if (err) |
559 | return err; |
560 | } |
561 | |
	host->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(host->clk)) {
		err = PTR_ERR(host->clk);

		if (err != -EPROBE_DEFER)
			dev_err(&pdev->dev, "failed to get clock: %d\n", err);
568 | |
569 | return err; |
570 | } |
571 | |
572 | err = host1x_get_resets(host); |
573 | if (err) |
574 | return err; |
575 | |
	host1x_bo_cache_init(&host->cache);
577 | |
578 | err = host1x_iommu_init(host); |
579 | if (err < 0) { |
580 | dev_err(&pdev->dev, "failed to setup IOMMU: %d\n" , err); |
581 | goto destroy_cache; |
582 | } |
583 | |
	err = host1x_channel_list_init(&host->channel_list,
				       host->info->nb_channels);
	if (err) {
		dev_err(&pdev->dev, "failed to initialize channel list\n");
588 | goto iommu_exit; |
589 | } |
590 | |
	err = host1x_memory_context_list_init(host);
	if (err) {
		dev_err(&pdev->dev, "failed to initialize context list\n");
594 | goto free_channels; |
595 | } |
596 | |
597 | err = host1x_syncpt_init(host); |
598 | if (err) { |
599 | dev_err(&pdev->dev, "failed to initialize syncpts\n" ); |
600 | goto free_contexts; |
601 | } |
602 | |
603 | err = host1x_intr_init(host); |
604 | if (err) { |
605 | dev_err(&pdev->dev, "failed to initialize interrupts\n" ); |
606 | goto deinit_syncpt; |
607 | } |
608 | |
	pm_runtime_enable(&pdev->dev);
610 | |
	err = devm_tegra_core_dev_init_opp_table_common(&pdev->dev);
612 | if (err) |
613 | goto pm_disable; |
614 | |
	/* the driver's code isn't ready yet for dynamic runtime PM */
	err = pm_runtime_resume_and_get(&pdev->dev);
617 | if (err) |
618 | goto pm_disable; |
619 | |
	host1x_debug_init(host);

	err = host1x_register(host);
	if (err < 0)
		goto deinit_debugfs;

	err = devm_of_platform_populate(&pdev->dev);
627 | if (err < 0) |
628 | goto unregister; |
629 | |
630 | return 0; |
631 | |
632 | unregister: |
	host1x_unregister(host);
deinit_debugfs:
	host1x_debug_deinit(host);

	pm_runtime_put_sync_suspend(&pdev->dev);
pm_disable:
	pm_runtime_disable(&pdev->dev);

	host1x_intr_deinit(host);
deinit_syncpt:
	host1x_syncpt_deinit(host);
free_contexts:
	host1x_memory_context_list_free(&host->context_list);
free_channels:
	host1x_channel_list_free(&host->channel_list);
iommu_exit:
	host1x_iommu_exit(host);
destroy_cache:
	host1x_bo_cache_destroy(&host->cache);
652 | |
653 | return err; |
654 | } |
655 | |
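/* Undo everything done in host1x_probe(), in reverse order. */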
656 | static int host1x_remove(struct platform_device *pdev) |
657 | { |
658 | struct host1x *host = platform_get_drvdata(pdev); |
659 | |
	host1x_unregister(host);
	host1x_debug_deinit(host);

	pm_runtime_force_suspend(&pdev->dev);

	host1x_intr_deinit(host);
	host1x_syncpt_deinit(host);
	host1x_memory_context_list_free(&host->context_list);
	host1x_channel_list_free(&host->channel_list);
	host1x_iommu_exit(host);
	host1x_bo_cache_destroy(&host->cache);
671 | |
672 | return 0; |
673 | } |
674 | |
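/*
 * On runtime suspend, stop all channels and interrupt handling, save the
 * syncpoint state so it can be restored on resume, and then (unless the
 * SoC requires the resets to stay deasserted) assert the resets before
 * gating the clock and releasing the reset controls.
 */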
675 | static int __maybe_unused host1x_runtime_suspend(struct device *dev) |
676 | { |
677 | struct host1x *host = dev_get_drvdata(dev); |
678 | int err; |
679 | |
680 | host1x_channel_stop_all(host); |
681 | host1x_intr_stop(host); |
682 | host1x_syncpt_save(host); |
683 | |
684 | if (!host->info->skip_reset_assert) { |
		err = reset_control_bulk_assert(host->nresets, host->resets);
		if (err) {
			dev_err(dev, "failed to assert reset: %d\n", err);
			goto resume_host1x;
		}

		usleep_range(1000, 2000);
	}

	clk_disable_unprepare(host->clk);
	reset_control_bulk_release(host->nresets, host->resets);
696 | |
697 | return 0; |
698 | |
699 | resume_host1x: |
700 | host1x_setup_virtualization_tables(host); |
701 | host1x_syncpt_restore(host); |
702 | host1x_intr_start(host); |
703 | |
704 | return err; |
705 | } |
706 | |
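/*
 * Runtime resume reverses the suspend sequence: reacquire the resets,
 * ungate the clock, deassert the resets, then reprogram the virtualization
 * tables and restore syncpoint state before re-enabling interrupt handling.
 */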
707 | static int __maybe_unused host1x_runtime_resume(struct device *dev) |
708 | { |
709 | struct host1x *host = dev_get_drvdata(dev); |
710 | int err; |
711 | |
	err = reset_control_bulk_acquire(host->nresets, host->resets);
	if (err) {
		dev_err(dev, "failed to acquire reset: %d\n", err);
		return err;
	}

	err = clk_prepare_enable(host->clk);
	if (err) {
		dev_err(dev, "failed to enable clock: %d\n", err);
		goto release_reset;
	}

	err = reset_control_bulk_deassert(host->nresets, host->resets);
	if (err < 0) {
		dev_err(dev, "failed to deassert reset: %d\n", err);
		goto disable_clk;
	}
727 | goto disable_clk; |
728 | } |
729 | |
730 | host1x_setup_virtualization_tables(host); |
731 | host1x_syncpt_restore(host); |
732 | host1x_intr_start(host); |
733 | |
734 | return 0; |
735 | |
736 | disable_clk: |
	clk_disable_unprepare(host->clk);
release_reset:
	reset_control_bulk_release(host->nresets, host->resets);
740 | |
741 | return err; |
742 | } |
743 | |
744 | static const struct dev_pm_ops host1x_pm_ops = { |
745 | SET_RUNTIME_PM_OPS(host1x_runtime_suspend, host1x_runtime_resume, |
746 | NULL) |
747 | SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, pm_runtime_force_resume) |
748 | }; |
749 | |
750 | static struct platform_driver tegra_host1x_driver = { |
751 | .driver = { |
752 | .name = "tegra-host1x" , |
753 | .of_match_table = host1x_of_match, |
754 | .pm = &host1x_pm_ops, |
755 | }, |
756 | .probe = host1x_probe, |
757 | .remove = host1x_remove, |
758 | }; |
759 | |
760 | static struct platform_driver * const drivers[] = { |
761 | &tegra_host1x_driver, |
762 | &tegra_mipi_driver, |
763 | }; |
764 | |
765 | static int __init tegra_host1x_init(void) |
766 | { |
767 | int err; |
768 | |
	err = bus_register(&host1x_bus_type);
770 | if (err < 0) |
771 | return err; |
772 | |
773 | err = platform_register_drivers(drivers, ARRAY_SIZE(drivers)); |
774 | if (err < 0) |
		bus_unregister(&host1x_bus_type);
776 | |
777 | return err; |
778 | } |
779 | module_init(tegra_host1x_init); |
780 | |
781 | static void __exit tegra_host1x_exit(void) |
782 | { |
783 | platform_unregister_drivers(drivers, ARRAY_SIZE(drivers)); |
	bus_unregister(&host1x_bus_type);
785 | } |
786 | module_exit(tegra_host1x_exit); |
787 | |
788 | /** |
789 | * host1x_get_dma_mask() - query the supported DMA mask for host1x |
790 | * @host1x: host1x instance |
791 | * |
792 | * Note that this returns the supported DMA mask for host1x, which can be |
793 | * different from the applicable DMA mask under certain circumstances. |
794 | */ |
795 | u64 host1x_get_dma_mask(struct host1x *host1x) |
796 | { |
797 | return host1x->info->dma_mask; |
798 | } |
799 | EXPORT_SYMBOL(host1x_get_dma_mask); |
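/*
 * A minimal usage sketch (hypothetical client driver, not part of this
 * file): clamp the client's own addressing capability against the host1x
 * mask before programming its DMA mask.
 *
 *	u64 mask = min_t(u64, dma_get_mask(client->dev),
 *			 host1x_get_dma_mask(host));
 *
 *	err = dma_set_mask_and_coherent(client->dev, mask);
 */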
800 | |
801 | MODULE_AUTHOR("Thierry Reding <thierry.reding@avionic-design.de>" ); |
802 | MODULE_AUTHOR("Terje Bergstrom <tbergstrom@nvidia.com>" ); |
803 | MODULE_DESCRIPTION("Host1x driver for Tegra products" ); |
804 | MODULE_LICENSE("GPL" ); |
805 | |