// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2015-2022, NVIDIA Corporation.
 */

#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/host1x.h>
#include <linux/iommu.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>

#include <soc/tegra/mc.h>

#include "drm.h"
#include "falcon.h"
#include "riscv.h"
#include "vic.h"

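/*
 * NVDEC_FALCON_DEBUGINFO is a scratch register used here as a boot
 * handshake: the driver writes a magic value before starting the engine
 * and the firmware is expected to clear it once it has booted.
 * NVDEC_TFBIF_TRANSCFG selects the stream IDs used for the engine's
 * memory transactions.
 */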
#define NVDEC_FALCON_DEBUGINFO		0x1094
#define NVDEC_TFBIF_TRANSCFG		0x2c44

struct nvdec_config {
	const char *firmware;
	unsigned int version;
	bool supports_sid;
	bool has_riscv;
	bool has_extra_clocks;
};

struct nvdec {
	struct falcon falcon;

	void __iomem *regs;
	struct tegra_drm_client client;
	struct host1x_channel *channel;
	struct device *dev;
	struct clk_bulk_data clks[3];
	unsigned int num_clks;
	struct reset_control *reset;

	/* Platform configuration */
	const struct nvdec_config *config;

	/* RISC-V specific data */
	struct tegra_drm_riscv riscv;
	phys_addr_t carveout_base;
};

static inline struct nvdec *to_nvdec(struct tegra_drm_client *client)
{
	return container_of(client, struct nvdec, client);
}

static inline void nvdec_writel(struct nvdec *nvdec, u32 value,
				unsigned int offset)
{
	writel(value, nvdec->regs + offset);
}

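/*
 * Boot the Falcon microcontroller. If the SoC supports stream IDs and one
 * has been assigned to the device, program it into the TFBIF and THI
 * registers first so the engine's DMA traffic is translated by the SMMU,
 * then start the Falcon and wait for it to go idle.
 */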
static int nvdec_boot_falcon(struct nvdec *nvdec)
{
	u32 stream_id;
	int err;

	if (nvdec->config->supports_sid && tegra_dev_iommu_get_stream_id(nvdec->dev, &stream_id)) {
		u32 value;

		value = TRANSCFG_ATT(1, TRANSCFG_SID_FALCON) | TRANSCFG_ATT(0, TRANSCFG_SID_HW);
		nvdec_writel(nvdec, value, NVDEC_TFBIF_TRANSCFG);

		nvdec_writel(nvdec, stream_id, VIC_THI_STREAMID0);
		nvdec_writel(nvdec, stream_id, VIC_THI_STREAMID1);
	}

	err = falcon_boot(&nvdec->falcon);
	if (err < 0)
		return err;

	err = falcon_wait_idle(&nvdec->falcon);
	if (err < 0) {
		dev_err(nvdec->dev, "falcon boot timed out\n");
		return err;
	}

	return 0;
}

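/*
 * Poll the DEBUGINFO scratch register until the firmware clears it,
 * signalling that the given boot phase has completed. Times out after
 * 100 ms.
 */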
static int nvdec_wait_debuginfo(struct nvdec *nvdec, const char *phase)
{
	int err;
	u32 val;

	err = readl_poll_timeout(nvdec->regs + NVDEC_FALCON_DEBUGINFO, val, val == 0x0, 10, 100000);
	if (err) {
		dev_err(nvdec->dev, "failed to boot %s, debuginfo=0x%x\n", phase, val);
		return err;
	}

	return 0;
}

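/*
 * Boot the RISC-V core in two stages: first the bootloader described by
 * bl_desc, then, after resetting the engine, the firmware described by
 * os_desc. Both images are expected to reside in the memory-controller
 * carveout at carveout_base, and each stage is gated on the DEBUGINFO
 * handshake. The reset is only held acquired for the duration of the
 * boot sequence.
 */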
static int nvdec_boot_riscv(struct nvdec *nvdec)
{
	int err;

	err = reset_control_acquire(nvdec->reset);
	if (err)
		return err;

	nvdec_writel(nvdec, 0xabcd1234, NVDEC_FALCON_DEBUGINFO);

	err = tegra_drm_riscv_boot_bootrom(&nvdec->riscv, nvdec->carveout_base, 1,
					   &nvdec->riscv.bl_desc);
	if (err) {
		dev_err(nvdec->dev, "failed to execute bootloader\n");
		goto release_reset;
	}

	err = nvdec_wait_debuginfo(nvdec, "bootloader");
	if (err)
		goto release_reset;

	err = reset_control_reset(nvdec->reset);
	if (err)
		goto release_reset;

	nvdec_writel(nvdec, 0xabcd1234, NVDEC_FALCON_DEBUGINFO);

	err = tegra_drm_riscv_boot_bootrom(&nvdec->riscv, nvdec->carveout_base, 1,
					   &nvdec->riscv.os_desc);
	if (err) {
		dev_err(nvdec->dev, "failed to execute firmware\n");
		goto release_reset;
	}

	err = nvdec_wait_debuginfo(nvdec, "firmware");
	if (err)
		goto release_reset;

release_reset:
	reset_control_release(nvdec->reset);

	return err;
}

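/*
 * host1x client initialization: attach to the IOMMU domain (if any),
 * request a channel and a syncpoint, and register the engine with the
 * Tegra DRM core.
 */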
static int nvdec_init(struct host1x_client *client)
{
	struct tegra_drm_client *drm = host1x_to_drm_client(client);
	struct drm_device *dev = dev_get_drvdata(client->host);
	struct tegra_drm *tegra = dev->dev_private;
	struct nvdec *nvdec = to_nvdec(drm);
	int err;

	err = host1x_client_iommu_attach(client);
	if (err < 0 && err != -ENODEV) {
		dev_err(nvdec->dev, "failed to attach to domain: %d\n", err);
		return err;
	}

	nvdec->channel = host1x_channel_request(client);
	if (!nvdec->channel) {
		err = -ENOMEM;
		goto detach;
	}

	client->syncpts[0] = host1x_syncpt_request(client, 0);
	if (!client->syncpts[0]) {
		err = -ENOMEM;
		goto free_channel;
	}

	err = tegra_drm_register_client(tegra, drm);
	if (err < 0)
		goto free_syncpt;

	/*
	 * Inherit the DMA parameters (such as maximum segment size) from the
	 * parent host1x device.
	 */
	client->dev->dma_parms = client->host->dma_parms;

	return 0;

free_syncpt:
	host1x_syncpt_put(client->syncpts[0]);
free_channel:
	host1x_channel_put(nvdec->channel);
detach:
	host1x_client_iommu_detach(client);

	return err;
}

static int nvdec_exit(struct host1x_client *client)
{
	struct tegra_drm_client *drm = host1x_to_drm_client(client);
	struct drm_device *dev = dev_get_drvdata(client->host);
	struct tegra_drm *tegra = dev->dev_private;
	struct nvdec *nvdec = to_nvdec(drm);
	int err;

	/* avoid a dangling pointer just in case this disappears */
	client->dev->dma_parms = NULL;

	err = tegra_drm_unregister_client(tegra, drm);
	if (err < 0)
		return err;

	pm_runtime_dont_use_autosuspend(client->dev);
	pm_runtime_force_suspend(client->dev);

	host1x_syncpt_put(client->syncpts[0]);
	host1x_channel_put(nvdec->channel);
	host1x_client_iommu_detach(client);

	nvdec->channel = NULL;

	if (client->group) {
		dma_unmap_single(nvdec->dev, nvdec->falcon.firmware.phys,
				 nvdec->falcon.firmware.size, DMA_TO_DEVICE);
		tegra_drm_free(tegra, nvdec->falcon.firmware.size,
			       nvdec->falcon.firmware.virt,
			       nvdec->falcon.firmware.iova);
	} else {
		dma_free_coherent(nvdec->dev, nvdec->falcon.firmware.size,
				  nvdec->falcon.firmware.virt,
				  nvdec->falcon.firmware.iova);
	}

	return 0;
}

static const struct host1x_client_ops nvdec_client_ops = {
	.init = nvdec_init,
	.exit = nvdec_exit,
};

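/*
 * Read the Falcon firmware from the filesystem and copy it into a DMA'able
 * buffer. When the client is attached to the shared IOMMU domain (i.e.
 * client->group is set), the buffer is allocated from that domain via
 * tegra_drm_alloc() and additionally mapped with dma_map_single() so the
 * CPU caches can be flushed before the Falcon fetches the image; otherwise
 * a coherent DMA allocation is used.
 */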
static int nvdec_load_falcon_firmware(struct nvdec *nvdec)
{
	struct host1x_client *client = &nvdec->client.base;
	struct tegra_drm *tegra = nvdec->client.drm;
	dma_addr_t iova;
	size_t size;
	void *virt;
	int err;

	if (nvdec->falcon.firmware.virt)
		return 0;

	err = falcon_read_firmware(&nvdec->falcon, nvdec->config->firmware);
	if (err < 0)
		return err;

	size = nvdec->falcon.firmware.size;

	if (!client->group) {
		virt = dma_alloc_coherent(nvdec->dev, size, &iova, GFP_KERNEL);

		err = dma_mapping_error(nvdec->dev, iova);
		if (err < 0)
			return err;
	} else {
		virt = tegra_drm_alloc(tegra, size, &iova);
		if (IS_ERR(virt))
			return PTR_ERR(virt);
	}

	nvdec->falcon.firmware.virt = virt;
	nvdec->falcon.firmware.iova = iova;

	err = falcon_load_firmware(&nvdec->falcon);
	if (err < 0)
		goto cleanup;

	/*
	 * In this case we have received an IOVA from the shared domain, so we
	 * need to make sure to get the physical address so that the DMA API
	 * knows what memory pages to flush the cache for.
	 */
	if (client->group) {
		dma_addr_t phys;

		phys = dma_map_single(nvdec->dev, virt, size, DMA_TO_DEVICE);

		err = dma_mapping_error(nvdec->dev, phys);
		if (err < 0)
			goto cleanup;

		nvdec->falcon.firmware.phys = phys;
	}

	return 0;

cleanup:
	if (!client->group)
		dma_free_coherent(nvdec->dev, size, virt, iova);
	else
		tegra_drm_free(tegra, size, virt, iova);

	return err;
}

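/*
 * Runtime PM resume: enable the clocks and (re)boot the engine. On RISC-V
 * capable SoCs the firmware is booted from the carveout; otherwise the
 * Falcon firmware is loaded (once) and the Falcon is booted.
 */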
static __maybe_unused int nvdec_runtime_resume(struct device *dev)
{
	struct nvdec *nvdec = dev_get_drvdata(dev);
	int err;

	err = clk_bulk_prepare_enable(nvdec->num_clks, nvdec->clks);
	if (err < 0)
		return err;

	usleep_range(10, 20);

	if (nvdec->config->has_riscv) {
		err = nvdec_boot_riscv(nvdec);
		if (err < 0)
			goto disable;
	} else {
		err = nvdec_load_falcon_firmware(nvdec);
		if (err < 0)
			goto disable;

		err = nvdec_boot_falcon(nvdec);
		if (err < 0)
			goto disable;
	}

	return 0;

disable:
	clk_bulk_disable_unprepare(nvdec->num_clks, nvdec->clks);
	return err;
}

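/*
 * Runtime PM suspend: stop the channel so no job is left in flight before
 * the clocks are gated.
 */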
static __maybe_unused int nvdec_runtime_suspend(struct device *dev)
{
	struct nvdec *nvdec = dev_get_drvdata(dev);

	host1x_channel_stop(nvdec->channel);

	clk_bulk_disable_unprepare(nvdec->num_clks, nvdec->clks);

	return 0;
}

static int nvdec_open_channel(struct tegra_drm_client *client,
			      struct tegra_drm_context *context)
{
	struct nvdec *nvdec = to_nvdec(client);

	context->channel = host1x_channel_get(nvdec->channel);
	if (!context->channel)
		return -ENOMEM;

	return 0;
}

static void nvdec_close_channel(struct tegra_drm_context *context)
{
	host1x_channel_put(context->channel);
}

static int nvdec_can_use_memory_ctx(struct tegra_drm_client *client, bool *supported)
{
	*supported = true;

	return 0;
}

static const struct tegra_drm_client_ops nvdec_ops = {
	.open_channel = nvdec_open_channel,
	.close_channel = nvdec_close_channel,
	.submit = tegra_drm_submit,
	.get_streamid_offset = tegra_drm_get_streamid_offset_thi,
	.can_use_memory_ctx = nvdec_can_use_memory_ctx,
};

#define NVIDIA_TEGRA_210_NVDEC_FIRMWARE "nvidia/tegra210/nvdec.bin"

static const struct nvdec_config nvdec_t210_config = {
	.firmware = NVIDIA_TEGRA_210_NVDEC_FIRMWARE,
	.version = 0x21,
	.supports_sid = false,
};

#define NVIDIA_TEGRA_186_NVDEC_FIRMWARE "nvidia/tegra186/nvdec.bin"

static const struct nvdec_config nvdec_t186_config = {
	.firmware = NVIDIA_TEGRA_186_NVDEC_FIRMWARE,
	.version = 0x18,
	.supports_sid = true,
};

#define NVIDIA_TEGRA_194_NVDEC_FIRMWARE "nvidia/tegra194/nvdec.bin"

static const struct nvdec_config nvdec_t194_config = {
	.firmware = NVIDIA_TEGRA_194_NVDEC_FIRMWARE,
	.version = 0x19,
	.supports_sid = true,
};

static const struct nvdec_config nvdec_t234_config = {
	.version = 0x23,
	.supports_sid = true,
	.has_riscv = true,
	.has_extra_clocks = true,
};

static const struct of_device_id tegra_nvdec_of_match[] = {
	{ .compatible = "nvidia,tegra210-nvdec", .data = &nvdec_t210_config },
	{ .compatible = "nvidia,tegra186-nvdec", .data = &nvdec_t186_config },
	{ .compatible = "nvidia,tegra194-nvdec", .data = &nvdec_t194_config },
	{ .compatible = "nvidia,tegra234-nvdec", .data = &nvdec_t234_config },
	{ },
};
MODULE_DEVICE_TABLE(of, tegra_nvdec_of_match);

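/*
 * Probe: inherit the DMA mask from the host1x parent, map the registers and
 * set up clocks, then initialize either the RISC-V helpers (where the
 * firmware resides in a memory-controller carveout) or the Falcon helpers
 * (where a firmware file is loaded), and finally register the host1x client
 * and enable runtime PM with a 500 ms autosuspend delay.
 */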
static int nvdec_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct host1x_syncpt **syncpts;
	struct nvdec *nvdec;
	u32 host_class;
	int err;

	/* inherit DMA mask from host1x parent */
	err = dma_coerce_mask_and_coherent(dev, *dev->parent->dma_mask);
	if (err < 0) {
		dev_err(&pdev->dev, "failed to set DMA mask: %d\n", err);
		return err;
	}

	nvdec = devm_kzalloc(dev, sizeof(*nvdec), GFP_KERNEL);
	if (!nvdec)
		return -ENOMEM;

	nvdec->config = of_device_get_match_data(dev);

	syncpts = devm_kzalloc(dev, sizeof(*syncpts), GFP_KERNEL);
	if (!syncpts)
		return -ENOMEM;

	nvdec->regs = devm_platform_get_and_ioremap_resource(pdev, 0, NULL);
	if (IS_ERR(nvdec->regs))
		return PTR_ERR(nvdec->regs);

	nvdec->clks[0].id = "nvdec";
	nvdec->num_clks = 1;

	if (nvdec->config->has_extra_clocks) {
		nvdec->num_clks = 3;
		nvdec->clks[1].id = "fuse";
		nvdec->clks[2].id = "tsec_pka";
	}

	err = devm_clk_bulk_get(dev, nvdec->num_clks, nvdec->clks);
	if (err) {
		dev_err(&pdev->dev, "failed to get clock(s)\n");
		return err;
	}

	err = clk_set_rate(nvdec->clks[0].clk, ULONG_MAX);
	if (err < 0) {
		dev_err(&pdev->dev, "failed to set clock rate\n");
		return err;
	}

	err = of_property_read_u32(dev->of_node, "nvidia,host1x-class", &host_class);
	if (err < 0)
		host_class = HOST1X_CLASS_NVDEC;

	if (nvdec->config->has_riscv) {
		struct tegra_mc *mc;

		mc = devm_tegra_memory_controller_get(dev);
		if (IS_ERR(mc)) {
			dev_err_probe(dev, PTR_ERR(mc),
				      "failed to get memory controller handle\n");
			return PTR_ERR(mc);
		}

		err = tegra_mc_get_carveout_info(mc, 1, &nvdec->carveout_base, NULL);
		if (err) {
			dev_err(dev, "failed to get carveout info: %d\n", err);
			return err;
		}

		nvdec->reset = devm_reset_control_get_exclusive_released(dev, "nvdec");
		if (IS_ERR(nvdec->reset)) {
			dev_err_probe(dev, PTR_ERR(nvdec->reset), "failed to get reset\n");
			return PTR_ERR(nvdec->reset);
		}

		nvdec->riscv.dev = dev;
		nvdec->riscv.regs = nvdec->regs;

		err = tegra_drm_riscv_read_descriptors(&nvdec->riscv);
		if (err < 0)
			return err;
	} else {
		nvdec->falcon.dev = dev;
		nvdec->falcon.regs = nvdec->regs;

		err = falcon_init(&nvdec->falcon);
		if (err < 0)
			return err;
	}

	platform_set_drvdata(pdev, nvdec);

	INIT_LIST_HEAD(&nvdec->client.base.list);
	nvdec->client.base.ops = &nvdec_client_ops;
	nvdec->client.base.dev = dev;
	nvdec->client.base.class = host_class;
	nvdec->client.base.syncpts = syncpts;
	nvdec->client.base.num_syncpts = 1;
	nvdec->dev = dev;

	INIT_LIST_HEAD(&nvdec->client.list);
	nvdec->client.version = nvdec->config->version;
	nvdec->client.ops = &nvdec_ops;

	err = host1x_client_register(&nvdec->client.base);
	if (err < 0) {
		dev_err(dev, "failed to register host1x client: %d\n", err);
		goto exit_falcon;
	}

	pm_runtime_enable(dev);
	pm_runtime_use_autosuspend(dev);
	pm_runtime_set_autosuspend_delay(dev, 500);

	return 0;

exit_falcon:
	falcon_exit(&nvdec->falcon);

	return err;
}

static void nvdec_remove(struct platform_device *pdev)
{
	struct nvdec *nvdec = platform_get_drvdata(pdev);

	pm_runtime_disable(&pdev->dev);
	host1x_client_unregister(&nvdec->client.base);
	falcon_exit(&nvdec->falcon);
}

static const struct dev_pm_ops nvdec_pm_ops = {
	SET_RUNTIME_PM_OPS(nvdec_runtime_suspend, nvdec_runtime_resume, NULL)
	SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
				pm_runtime_force_resume)
};

struct platform_driver tegra_nvdec_driver = {
	.driver = {
		.name = "tegra-nvdec",
		.of_match_table = tegra_nvdec_of_match,
		.pm = &nvdec_pm_ops
	},
	.probe = nvdec_probe,
	.remove_new = nvdec_remove,
};

#if IS_ENABLED(CONFIG_ARCH_TEGRA_210_SOC)
MODULE_FIRMWARE(NVIDIA_TEGRA_210_NVDEC_FIRMWARE);
#endif
#if IS_ENABLED(CONFIG_ARCH_TEGRA_186_SOC)
MODULE_FIRMWARE(NVIDIA_TEGRA_186_NVDEC_FIRMWARE);
#endif
#if IS_ENABLED(CONFIG_ARCH_TEGRA_194_SOC)
MODULE_FIRMWARE(NVIDIA_TEGRA_194_NVDEC_FIRMWARE);
#endif
