1 | // SPDX-License-Identifier: GPL-2.0-only |
2 | /* |
3 | * TI K3 R5F (MCU) Remote Processor driver |
4 | * |
5 | * Copyright (C) 2017-2022 Texas Instruments Incorporated - https://www.ti.com/ |
6 | * Suman Anna <s-anna@ti.com> |
7 | */ |
8 | |
9 | #include <linux/dma-mapping.h> |
10 | #include <linux/err.h> |
11 | #include <linux/interrupt.h> |
12 | #include <linux/kernel.h> |
13 | #include <linux/mailbox_client.h> |
14 | #include <linux/module.h> |
15 | #include <linux/of.h> |
16 | #include <linux/of_address.h> |
17 | #include <linux/of_reserved_mem.h> |
18 | #include <linux/of_platform.h> |
19 | #include <linux/omap-mailbox.h> |
20 | #include <linux/platform_device.h> |
21 | #include <linux/pm_runtime.h> |
22 | #include <linux/remoteproc.h> |
23 | #include <linux/reset.h> |
24 | #include <linux/slab.h> |
25 | |
26 | #include "omap_remoteproc.h" |
27 | #include "remoteproc_internal.h" |
28 | #include "ti_sci_proc.h" |
29 | |
30 | /* This address can either be for ATCM or BTCM with the other at address 0x0 */ |
31 | #define K3_R5_TCM_DEV_ADDR 0x41010000 |
32 | |
33 | /* R5 TI-SCI Processor Configuration Flags */ |
34 | #define PROC_BOOT_CFG_FLAG_R5_DBG_EN 0x00000001 |
35 | #define PROC_BOOT_CFG_FLAG_R5_DBG_NIDEN 0x00000002 |
36 | #define PROC_BOOT_CFG_FLAG_R5_LOCKSTEP 0x00000100 |
37 | #define PROC_BOOT_CFG_FLAG_R5_TEINIT 0x00000200 |
38 | #define PROC_BOOT_CFG_FLAG_R5_NMFI_EN 0x00000400 |
39 | #define PROC_BOOT_CFG_FLAG_R5_TCM_RSTBASE 0x00000800 |
40 | #define PROC_BOOT_CFG_FLAG_R5_BTCM_EN 0x00001000 |
41 | #define PROC_BOOT_CFG_FLAG_R5_ATCM_EN 0x00002000 |
42 | /* Available from J7200 SoCs onwards */ |
43 | #define PROC_BOOT_CFG_FLAG_R5_MEM_INIT_DIS 0x00004000 |
44 | /* Applicable to only AM64x SoCs */ |
45 | #define PROC_BOOT_CFG_FLAG_R5_SINGLE_CORE 0x00008000 |
46 | |
47 | /* R5 TI-SCI Processor Control Flags */ |
48 | #define PROC_BOOT_CTRL_FLAG_R5_CORE_HALT 0x00000001 |
49 | |
50 | /* R5 TI-SCI Processor Status Flags */ |
51 | #define PROC_BOOT_STATUS_FLAG_R5_WFE 0x00000001 |
52 | #define PROC_BOOT_STATUS_FLAG_R5_WFI 0x00000002 |
53 | #define PROC_BOOT_STATUS_FLAG_R5_CLK_GATED 0x00000004 |
54 | #define PROC_BOOT_STATUS_FLAG_R5_LOCKSTEP_PERMITTED 0x00000100 |
55 | /* Applicable to only AM64x SoCs */ |
56 | #define PROC_BOOT_STATUS_FLAG_R5_SINGLECORE_ONLY 0x00000200 |
57 | |
58 | /** |
59 | * struct k3_r5_mem - internal memory structure |
60 | * @cpu_addr: MPU virtual address of the memory region |
61 | * @bus_addr: Bus address used to access the memory region |
62 | * @dev_addr: Device address from remoteproc view |
63 | * @size: Size of the memory region |
64 | */ |
65 | struct k3_r5_mem { |
66 | void __iomem *cpu_addr; |
67 | phys_addr_t bus_addr; |
68 | u32 dev_addr; |
69 | size_t size; |
70 | }; |
71 | |
72 | /* |
 * Not all cluster modes are applicable on all SoCs. The following are the
 * modes supported on various SoCs:
75 | * Split mode : AM65x, J721E, J7200 and AM64x SoCs |
76 | * LockStep mode : AM65x, J721E and J7200 SoCs |
77 | * Single-CPU mode : AM64x SoCs only |
78 | * Single-Core mode : AM62x, AM62A SoCs |
79 | */ |
80 | enum cluster_mode { |
81 | CLUSTER_MODE_SPLIT = 0, |
82 | CLUSTER_MODE_LOCKSTEP, |
83 | CLUSTER_MODE_SINGLECPU, |
84 | CLUSTER_MODE_SINGLECORE |
85 | }; |
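
/*
 * For reference, the cluster mode is normally selected through the R5FSS
 * cluster node in DT. A minimal sketch (the property name follows the TI K3
 * R5F binding; the node name and value shown are illustrative assumptions):
 *
 *	&mcu_r5fss0 {
 *		ti,cluster-mode = <1>;	(CLUSTER_MODE_LOCKSTEP)
 *	};
 */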
86 | |
87 | /** |
88 | * struct k3_r5_soc_data - match data to handle SoC variations |
89 | * @tcm_is_double: flag to denote the larger unified TCMs in certain modes |
90 | * @tcm_ecc_autoinit: flag to denote the auto-initialization of TCMs for ECC |
91 | * @single_cpu_mode: flag to denote if SoC/IP supports Single-CPU mode |
92 | * @is_single_core: flag to denote if SoC/IP has only single core R5 |
93 | */ |
94 | struct k3_r5_soc_data { |
95 | bool tcm_is_double; |
96 | bool tcm_ecc_autoinit; |
97 | bool single_cpu_mode; |
98 | bool is_single_core; |
99 | }; |
100 | |
101 | /** |
102 | * struct k3_r5_cluster - K3 R5F Cluster structure |
103 | * @dev: cached device pointer |
 * @mode: Mode to configure the Cluster - Split, LockStep, Single-CPU or Single-Core
105 | * @cores: list of R5 cores within the cluster |
106 | * @soc_data: SoC-specific feature data for a R5FSS |
107 | */ |
108 | struct k3_r5_cluster { |
109 | struct device *dev; |
110 | enum cluster_mode mode; |
111 | struct list_head cores; |
112 | const struct k3_r5_soc_data *soc_data; |
113 | }; |
114 | |
115 | /** |
116 | * struct k3_r5_core - K3 R5 core structure |
117 | * @elem: linked list item |
118 | * @dev: cached device pointer |
119 | * @rproc: rproc handle representing this core |
120 | * @mem: internal memory regions data |
121 | * @sram: on-chip SRAM memory regions data |
122 | * @num_mems: number of internal memory regions |
123 | * @num_sram: number of on-chip SRAM memory regions |
124 | * @reset: reset control handle |
125 | * @tsp: TI-SCI processor control handle |
126 | * @ti_sci: TI-SCI handle |
127 | * @ti_sci_id: TI-SCI device identifier |
128 | * @atcm_enable: flag to control ATCM enablement |
129 | * @btcm_enable: flag to control BTCM enablement |
130 | * @loczrama: flag to dictate which TCM is at device address 0x0 |
131 | */ |
132 | struct k3_r5_core { |
133 | struct list_head elem; |
134 | struct device *dev; |
135 | struct rproc *rproc; |
136 | struct k3_r5_mem *mem; |
137 | struct k3_r5_mem *sram; |
138 | int num_mems; |
139 | int num_sram; |
140 | struct reset_control *reset; |
141 | struct ti_sci_proc *tsp; |
142 | const struct ti_sci_handle *ti_sci; |
143 | u32 ti_sci_id; |
144 | u32 atcm_enable; |
145 | u32 btcm_enable; |
146 | u32 loczrama; |
147 | }; |
148 | |
149 | /** |
150 | * struct k3_r5_rproc - K3 remote processor state |
151 | * @dev: cached device pointer |
152 | * @cluster: cached pointer to parent cluster structure |
153 | * @mbox: mailbox channel handle |
154 | * @client: mailbox client to request the mailbox channel |
155 | * @rproc: rproc handle |
156 | * @core: cached pointer to r5 core structure being used |
157 | * @rmem: reserved memory regions data |
158 | * @num_rmems: number of reserved memory regions |
159 | */ |
160 | struct k3_r5_rproc { |
161 | struct device *dev; |
162 | struct k3_r5_cluster *cluster; |
163 | struct mbox_chan *mbox; |
164 | struct mbox_client client; |
165 | struct rproc *rproc; |
166 | struct k3_r5_core *core; |
167 | struct k3_r5_mem *rmem; |
168 | int num_rmems; |
169 | }; |
170 | |
171 | /** |
172 | * k3_r5_rproc_mbox_callback() - inbound mailbox message handler |
173 | * @client: mailbox client pointer used for requesting the mailbox channel |
174 | * @data: mailbox payload |
175 | * |
176 | * This handler is invoked by the OMAP mailbox driver whenever a mailbox |
177 | * message is received. Usually, the mailbox payload simply contains |
178 | * the index of the virtqueue that is kicked by the remote processor, |
179 | * and we let remoteproc core handle it. |
180 | * |
181 | * In addition to virtqueue indices, we also have some out-of-band values |
182 | * that indicate different events. Those values are deliberately very |
183 | * large so they don't coincide with virtqueue indices. |
184 | */ |
185 | static void k3_r5_rproc_mbox_callback(struct mbox_client *client, void *data) |
186 | { |
187 | struct k3_r5_rproc *kproc = container_of(client, struct k3_r5_rproc, |
188 | client); |
189 | struct device *dev = kproc->rproc->dev.parent; |
190 | const char *name = kproc->rproc->name; |
191 | u32 msg = omap_mbox_message(data); |
192 | |
	dev_dbg(dev, "mbox msg: 0x%x\n", msg);
194 | |
195 | switch (msg) { |
196 | case RP_MBOX_CRASH: |
197 | /* |
198 | * remoteproc detected an exception, but error recovery is not |
199 | * supported. So, just log this for now |
200 | */ |
		dev_err(dev, "K3 R5F rproc %s crashed\n", name);
202 | break; |
203 | case RP_MBOX_ECHO_REPLY: |
		dev_info(dev, "received echo reply from %s\n", name);
205 | break; |
206 | default: |
207 | /* silently handle all other valid messages */ |
208 | if (msg >= RP_MBOX_READY && msg < RP_MBOX_END_MSG) |
209 | return; |
210 | if (msg > kproc->rproc->max_notifyid) { |
			dev_dbg(dev, "dropping unknown message 0x%x", msg);
212 | return; |
213 | } |
214 | /* msg contains the index of the triggered vring */ |
		if (rproc_vq_interrupt(kproc->rproc, msg) == IRQ_NONE)
			dev_dbg(dev, "no message was found in vqid %d\n", msg);
217 | } |
218 | } |
219 | |
220 | /* kick a virtqueue */ |
221 | static void k3_r5_rproc_kick(struct rproc *rproc, int vqid) |
222 | { |
223 | struct k3_r5_rproc *kproc = rproc->priv; |
224 | struct device *dev = rproc->dev.parent; |
225 | mbox_msg_t msg = (mbox_msg_t)vqid; |
226 | int ret; |
227 | |
228 | /* send the index of the triggered virtqueue in the mailbox payload */ |
	ret = mbox_send_message(kproc->mbox, (void *)msg);
	if (ret < 0)
		dev_err(dev, "failed to send mailbox message, status = %d\n",
			ret);
233 | } |
234 | |
235 | static int k3_r5_split_reset(struct k3_r5_core *core) |
236 | { |
237 | int ret; |
238 | |
	ret = reset_control_assert(core->reset);
	if (ret) {
		dev_err(core->dev, "local-reset assert failed, ret = %d\n",
			ret);
		return ret;
	}

	ret = core->ti_sci->ops.dev_ops.put_device(core->ti_sci,
						   core->ti_sci_id);
	if (ret) {
		dev_err(core->dev, "module-reset assert failed, ret = %d\n",
			ret);
		if (reset_control_deassert(core->reset))
			dev_warn(core->dev, "local-reset deassert back failed\n");
253 | } |
254 | |
255 | return ret; |
256 | } |
257 | |
258 | static int k3_r5_split_release(struct k3_r5_core *core) |
259 | { |
260 | int ret; |
261 | |
262 | ret = core->ti_sci->ops.dev_ops.get_device(core->ti_sci, |
263 | core->ti_sci_id); |
264 | if (ret) { |
		dev_err(core->dev, "module-reset deassert failed, ret = %d\n",
			ret);
		return ret;
	}

	ret = reset_control_deassert(core->reset);
	if (ret) {
		dev_err(core->dev, "local-reset deassert failed, ret = %d\n",
			ret);
		if (core->ti_sci->ops.dev_ops.put_device(core->ti_sci,
							 core->ti_sci_id))
			dev_warn(core->dev, "module-reset assert back failed\n");
277 | } |
278 | |
279 | return ret; |
280 | } |
281 | |
282 | static int k3_r5_lockstep_reset(struct k3_r5_cluster *cluster) |
283 | { |
284 | struct k3_r5_core *core; |
285 | int ret; |
286 | |
287 | /* assert local reset on all applicable cores */ |
288 | list_for_each_entry(core, &cluster->cores, elem) { |
		ret = reset_control_assert(core->reset);
		if (ret) {
			dev_err(core->dev, "local-reset assert failed, ret = %d\n",
				ret);
293 | core = list_prev_entry(core, elem); |
294 | goto unroll_local_reset; |
295 | } |
296 | } |
297 | |
298 | /* disable PSC modules on all applicable cores */ |
299 | list_for_each_entry(core, &cluster->cores, elem) { |
300 | ret = core->ti_sci->ops.dev_ops.put_device(core->ti_sci, |
301 | core->ti_sci_id); |
302 | if (ret) { |
			dev_err(core->dev, "module-reset assert failed, ret = %d\n",
				ret);
305 | goto unroll_module_reset; |
306 | } |
307 | } |
308 | |
309 | return 0; |
310 | |
311 | unroll_module_reset: |
312 | list_for_each_entry_continue_reverse(core, &cluster->cores, elem) { |
313 | if (core->ti_sci->ops.dev_ops.put_device(core->ti_sci, |
314 | core->ti_sci_id)) |
			dev_warn(core->dev, "module-reset assert back failed\n");
316 | } |
317 | core = list_last_entry(&cluster->cores, struct k3_r5_core, elem); |
318 | unroll_local_reset: |
319 | list_for_each_entry_from_reverse(core, &cluster->cores, elem) { |
		if (reset_control_deassert(core->reset))
			dev_warn(core->dev, "local-reset deassert back failed\n");
322 | } |
323 | |
324 | return ret; |
325 | } |
326 | |
327 | static int k3_r5_lockstep_release(struct k3_r5_cluster *cluster) |
328 | { |
329 | struct k3_r5_core *core; |
330 | int ret; |
331 | |
332 | /* enable PSC modules on all applicable cores */ |
333 | list_for_each_entry_reverse(core, &cluster->cores, elem) { |
334 | ret = core->ti_sci->ops.dev_ops.get_device(core->ti_sci, |
335 | core->ti_sci_id); |
336 | if (ret) { |
			dev_err(core->dev, "module-reset deassert failed, ret = %d\n",
				ret);
339 | core = list_next_entry(core, elem); |
340 | goto unroll_module_reset; |
341 | } |
342 | } |
343 | |
344 | /* deassert local reset on all applicable cores */ |
345 | list_for_each_entry_reverse(core, &cluster->cores, elem) { |
		ret = reset_control_deassert(core->reset);
		if (ret) {
			dev_err(core->dev, "local-reset deassert failed, ret = %d\n",
				ret);
350 | goto unroll_local_reset; |
351 | } |
352 | } |
353 | |
354 | return 0; |
355 | |
356 | unroll_local_reset: |
357 | list_for_each_entry_continue(core, &cluster->cores, elem) { |
		if (reset_control_assert(core->reset))
			dev_warn(core->dev, "local-reset assert back failed\n");
360 | } |
361 | core = list_first_entry(&cluster->cores, struct k3_r5_core, elem); |
362 | unroll_module_reset: |
363 | list_for_each_entry_from(core, &cluster->cores, elem) { |
364 | if (core->ti_sci->ops.dev_ops.put_device(core->ti_sci, |
365 | core->ti_sci_id)) |
			dev_warn(core->dev, "module-reset assert back failed\n");
367 | } |
368 | |
369 | return ret; |
370 | } |
371 | |
372 | static inline int k3_r5_core_halt(struct k3_r5_core *core) |
373 | { |
	return ti_sci_proc_set_control(core->tsp,
				       PROC_BOOT_CTRL_FLAG_R5_CORE_HALT, 0);
376 | } |
377 | |
378 | static inline int k3_r5_core_run(struct k3_r5_core *core) |
379 | { |
	return ti_sci_proc_set_control(core->tsp,
				       0, PROC_BOOT_CTRL_FLAG_R5_CORE_HALT);
382 | } |
383 | |
384 | static int k3_r5_rproc_request_mbox(struct rproc *rproc) |
385 | { |
386 | struct k3_r5_rproc *kproc = rproc->priv; |
387 | struct mbox_client *client = &kproc->client; |
388 | struct device *dev = kproc->dev; |
389 | int ret; |
390 | |
391 | client->dev = dev; |
392 | client->tx_done = NULL; |
393 | client->rx_callback = k3_r5_rproc_mbox_callback; |
394 | client->tx_block = false; |
395 | client->knows_txdone = false; |
396 | |
	kproc->mbox = mbox_request_channel(client, 0);
	if (IS_ERR(kproc->mbox)) {
		ret = -EBUSY;
		dev_err(dev, "mbox_request_channel failed: %ld\n",
			PTR_ERR(kproc->mbox));
402 | return ret; |
403 | } |
404 | |
405 | /* |
406 | * Ping the remote processor, this is only for sanity-sake for now; |
407 | * there is no functional effect whatsoever. |
408 | * |
409 | * Note that the reply will _not_ arrive immediately: this message |
410 | * will wait in the mailbox fifo until the remote processor is booted. |
411 | */ |
	ret = mbox_send_message(kproc->mbox, (void *)RP_MBOX_ECHO_REQUEST);
	if (ret < 0) {
		dev_err(dev, "mbox_send_message failed: %d\n", ret);
		mbox_free_channel(kproc->mbox);
416 | return ret; |
417 | } |
418 | |
419 | return 0; |
420 | } |
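
/*
 * Note: the mailbox channel requested above comes from the R5F core's DT
 * node via the generic mailbox bindings. A minimal sketch of the expected
 * wiring (controller and channel names are illustrative, board-specific
 * assumptions):
 *
 *	mboxes = <&mailbox0_cluster0 &mbox_mcu_r5fss0_core0>;
 */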
421 | |
422 | /* |
423 | * The R5F cores have controls for both a reset and a halt/run. The code |
424 | * execution from DDR requires the initial boot-strapping code to be run |
425 | * from the internal TCMs. This function is used to release the resets on |
426 | * applicable cores to allow loading into the TCMs. The .prepare() ops is |
427 | * invoked by remoteproc core before any firmware loading, and is followed |
428 | * by the .start() ops after loading to actually let the R5 cores run. |
429 | * |
430 | * The Single-CPU mode on applicable SoCs (eg: AM64x) only uses Core0 to |
431 | * execute code, but combines the TCMs from both cores. The resets for both |
432 | * cores need to be released to make this possible, as the TCMs are in general |
433 | * private to each core. Only Core0 needs to be unhalted for running the |
434 | * cluster in this mode. The function uses the same reset logic as LockStep |
435 | * mode for this (though the behavior is agnostic of the reset release order). |
436 | * This callback is invoked only in remoteproc mode. |
437 | */ |
438 | static int k3_r5_rproc_prepare(struct rproc *rproc) |
439 | { |
440 | struct k3_r5_rproc *kproc = rproc->priv; |
441 | struct k3_r5_cluster *cluster = kproc->cluster; |
442 | struct k3_r5_core *core = kproc->core; |
443 | struct device *dev = kproc->dev; |
444 | u32 ctrl = 0, cfg = 0, stat = 0; |
445 | u64 boot_vec = 0; |
446 | bool mem_init_dis; |
447 | int ret; |
448 | |
	ret = ti_sci_proc_get_status(core->tsp, &boot_vec, &cfg, &ctrl, &stat);
450 | if (ret < 0) |
451 | return ret; |
452 | mem_init_dis = !!(cfg & PROC_BOOT_CFG_FLAG_R5_MEM_INIT_DIS); |
453 | |
454 | /* Re-use LockStep-mode reset logic for Single-CPU mode */ |
455 | ret = (cluster->mode == CLUSTER_MODE_LOCKSTEP || |
456 | cluster->mode == CLUSTER_MODE_SINGLECPU) ? |
457 | k3_r5_lockstep_release(cluster) : k3_r5_split_release(core); |
458 | if (ret) { |
		dev_err(dev, "unable to enable cores for TCM loading, ret = %d\n",
			ret);
461 | return ret; |
462 | } |
463 | |
464 | /* |
465 | * Newer IP revisions like on J7200 SoCs support h/w auto-initialization |
466 | * of TCMs, so there is no need to perform the s/w memzero. This bit is |
467 | * configurable through System Firmware, the default value does perform |
468 | * auto-init, but account for it in case it is disabled |
469 | */ |
470 | if (cluster->soc_data->tcm_ecc_autoinit && !mem_init_dis) { |
		dev_dbg(dev, "leveraging h/w init for TCM memories\n");
472 | return 0; |
473 | } |
474 | |
475 | /* |
476 | * Zero out both TCMs unconditionally (access from v8 Arm core is not |
477 | * affected by ATCM & BTCM enable configuration values) so that ECC |
478 | * can be effective on all TCM addresses. |
479 | */ |
	dev_dbg(dev, "zeroing out ATCM memory\n");
	memset(core->mem[0].cpu_addr, 0x00, core->mem[0].size);

	dev_dbg(dev, "zeroing out BTCM memory\n");
	memset(core->mem[1].cpu_addr, 0x00, core->mem[1].size);
485 | |
486 | return 0; |
487 | } |
488 | |
489 | /* |
 * This function implements the .unprepare() ops and performs the complementary
491 | * operations to that of the .prepare() ops. The function is used to assert the |
492 | * resets on all applicable cores for the rproc device (depending on LockStep |
493 | * or Split mode). This completes the second portion of powering down the R5F |
494 | * cores. The cores themselves are only halted in the .stop() ops, and the |
495 | * .unprepare() ops is invoked by the remoteproc core after the remoteproc is |
496 | * stopped. |
497 | * |
498 | * The Single-CPU mode on applicable SoCs (eg: AM64x) combines the TCMs from |
499 | * both cores. The access is made possible only with releasing the resets for |
500 | * both cores, but with only Core0 unhalted. This function re-uses the same |
501 | * reset assert logic as LockStep mode for this mode (though the behavior is |
502 | * agnostic of the reset assert order). This callback is invoked only in |
503 | * remoteproc mode. |
504 | */ |
505 | static int k3_r5_rproc_unprepare(struct rproc *rproc) |
506 | { |
507 | struct k3_r5_rproc *kproc = rproc->priv; |
508 | struct k3_r5_cluster *cluster = kproc->cluster; |
509 | struct k3_r5_core *core = kproc->core; |
510 | struct device *dev = kproc->dev; |
511 | int ret; |
512 | |
513 | /* Re-use LockStep-mode reset logic for Single-CPU mode */ |
514 | ret = (cluster->mode == CLUSTER_MODE_LOCKSTEP || |
515 | cluster->mode == CLUSTER_MODE_SINGLECPU) ? |
516 | k3_r5_lockstep_reset(cluster) : k3_r5_split_reset(core); |
517 | if (ret) |
		dev_err(dev, "unable to disable cores, ret = %d\n", ret);
519 | |
520 | return ret; |
521 | } |
522 | |
523 | /* |
524 | * The R5F start sequence includes two different operations |
525 | * 1. Configure the boot vector for R5F core(s) |
526 | * 2. Unhalt/Run the R5F core(s) |
527 | * |
528 | * The sequence is different between LockStep and Split modes. The LockStep |
529 | * mode requires the boot vector to be configured only for Core0, and then |
530 | * unhalt both the cores to start the execution - Core1 needs to be unhalted |
 * first followed by Core0. The Split-mode requires that Core0 be maintained
 * always in a higher power state than Core1 (implying Core1 needs to be
 * started only after Core0 is started).
534 | * |
535 | * The Single-CPU mode on applicable SoCs (eg: AM64x) only uses Core0 to execute |
536 | * code, so only Core0 needs to be unhalted. The function uses the same logic |
537 | * flow as Split-mode for this. This callback is invoked only in remoteproc |
538 | * mode. |
539 | */ |
540 | static int k3_r5_rproc_start(struct rproc *rproc) |
541 | { |
542 | struct k3_r5_rproc *kproc = rproc->priv; |
543 | struct k3_r5_cluster *cluster = kproc->cluster; |
544 | struct device *dev = kproc->dev; |
545 | struct k3_r5_core *core; |
546 | u32 boot_addr; |
547 | int ret; |
548 | |
549 | ret = k3_r5_rproc_request_mbox(rproc); |
550 | if (ret) |
551 | return ret; |
552 | |
553 | boot_addr = rproc->bootaddr; |
554 | /* TODO: add boot_addr sanity checking */ |
	dev_dbg(dev, "booting R5F core using boot addr = 0x%x\n", boot_addr);
556 | |
557 | /* boot vector need not be programmed for Core1 in LockStep mode */ |
558 | core = kproc->core; |
	ret = ti_sci_proc_set_config(core->tsp, boot_addr, 0, 0);
560 | if (ret) |
561 | goto put_mbox; |
562 | |
563 | /* unhalt/run all applicable cores */ |
564 | if (cluster->mode == CLUSTER_MODE_LOCKSTEP) { |
565 | list_for_each_entry_reverse(core, &cluster->cores, elem) { |
566 | ret = k3_r5_core_run(core); |
567 | if (ret) |
568 | goto unroll_core_run; |
569 | } |
570 | } else { |
571 | ret = k3_r5_core_run(core); |
572 | if (ret) |
573 | goto put_mbox; |
574 | } |
575 | |
576 | return 0; |
577 | |
578 | unroll_core_run: |
579 | list_for_each_entry_continue(core, &cluster->cores, elem) { |
580 | if (k3_r5_core_halt(core)) |
			dev_warn(core->dev, "core halt back failed\n");
582 | } |
583 | put_mbox: |
	mbox_free_channel(kproc->mbox);
585 | return ret; |
586 | } |
587 | |
588 | /* |
589 | * The R5F stop function includes the following operations |
590 | * 1. Halt R5F core(s) |
591 | * |
 * The sequence is different between LockStep and Split modes, and the order
 * in which the operations are performed on the cores is, in general, the
 * reverse of that in the start function. The LockStep mode requires each
 * operation to be performed first on Core0 followed by Core1. The Split-mode
 * requires that Core0 be maintained always in a higher power state than Core1
 * (implying Core1 needs to be stopped before Core0).
598 | * |
599 | * The Single-CPU mode on applicable SoCs (eg: AM64x) only uses Core0 to execute |
600 | * code, so only Core0 needs to be halted. The function uses the same logic |
601 | * flow as Split-mode for this. |
602 | * |
603 | * Note that the R5F halt operation in general is not effective when the R5F |
604 | * core is running, but is needed to make sure the core won't run after |
605 | * deasserting the reset the subsequent time. The asserting of reset can |
606 | * be done here, but is preferred to be done in the .unprepare() ops - this |
607 | * maintains the symmetric behavior between the .start(), .stop(), .prepare() |
608 | * and .unprepare() ops, and also balances them well between sysfs 'state' |
609 | * flow and device bind/unbind or module removal. This callback is invoked |
610 | * only in remoteproc mode. |
611 | */ |
612 | static int k3_r5_rproc_stop(struct rproc *rproc) |
613 | { |
614 | struct k3_r5_rproc *kproc = rproc->priv; |
615 | struct k3_r5_cluster *cluster = kproc->cluster; |
616 | struct k3_r5_core *core = kproc->core; |
617 | int ret; |
618 | |
619 | /* halt all applicable cores */ |
620 | if (cluster->mode == CLUSTER_MODE_LOCKSTEP) { |
621 | list_for_each_entry(core, &cluster->cores, elem) { |
622 | ret = k3_r5_core_halt(core); |
623 | if (ret) { |
624 | core = list_prev_entry(core, elem); |
625 | goto unroll_core_halt; |
626 | } |
627 | } |
628 | } else { |
629 | ret = k3_r5_core_halt(core); |
630 | if (ret) |
631 | goto out; |
632 | } |
633 | |
	mbox_free_channel(kproc->mbox);
635 | |
636 | return 0; |
637 | |
638 | unroll_core_halt: |
639 | list_for_each_entry_from_reverse(core, &cluster->cores, elem) { |
640 | if (k3_r5_core_run(core)) |
			dev_warn(core->dev, "core run back failed\n");
642 | } |
643 | out: |
644 | return ret; |
645 | } |
646 | |
647 | /* |
648 | * Attach to a running R5F remote processor (IPC-only mode) |
649 | * |
650 | * The R5F attach callback only needs to request the mailbox, the remote |
651 | * processor is already booted, so there is no need to issue any TI-SCI |
652 | * commands to boot the R5F cores in IPC-only mode. This callback is invoked |
653 | * only in IPC-only mode. |
654 | */ |
655 | static int k3_r5_rproc_attach(struct rproc *rproc) |
656 | { |
657 | struct k3_r5_rproc *kproc = rproc->priv; |
658 | struct device *dev = kproc->dev; |
659 | int ret; |
660 | |
661 | ret = k3_r5_rproc_request_mbox(rproc); |
662 | if (ret) |
663 | return ret; |
664 | |
	dev_info(dev, "R5F core initialized in IPC-only mode\n");
666 | return 0; |
667 | } |
668 | |
669 | /* |
670 | * Detach from a running R5F remote processor (IPC-only mode) |
671 | * |
672 | * The R5F detach callback performs the opposite operation to attach callback |
673 | * and only needs to release the mailbox, the R5F cores are not stopped and |
674 | * will be left in booted state in IPC-only mode. This callback is invoked |
675 | * only in IPC-only mode. |
676 | */ |
677 | static int k3_r5_rproc_detach(struct rproc *rproc) |
678 | { |
679 | struct k3_r5_rproc *kproc = rproc->priv; |
680 | struct device *dev = kproc->dev; |
681 | |
	mbox_free_channel(kproc->mbox);
	dev_info(dev, "R5F core deinitialized in IPC-only mode\n");
684 | return 0; |
685 | } |
686 | |
687 | /* |
688 | * This function implements the .get_loaded_rsc_table() callback and is used |
689 | * to provide the resource table for the booted R5F in IPC-only mode. The K3 R5F |
690 | * firmwares follow a design-by-contract approach and are expected to have the |
691 | * resource table at the base of the DDR region reserved for firmware usage. |
692 | * This provides flexibility for the remote processor to be booted by different |
693 | * bootloaders that may or may not have the ability to publish the resource table |
694 | * address and size through a DT property. This callback is invoked only in |
695 | * IPC-only mode. |
696 | */ |
697 | static struct resource_table *k3_r5_get_loaded_rsc_table(struct rproc *rproc, |
698 | size_t *rsc_table_sz) |
699 | { |
700 | struct k3_r5_rproc *kproc = rproc->priv; |
701 | struct device *dev = kproc->dev; |
702 | |
703 | if (!kproc->rmem[0].cpu_addr) { |
		dev_err(dev, "memory-region #1 does not exist, loaded rsc table can't be found");
		return ERR_PTR(-ENOMEM);
706 | } |
707 | |
708 | /* |
709 | * NOTE: The resource table size is currently hard-coded to a maximum |
710 | * of 256 bytes. The most common resource table usage for K3 firmwares |
711 | * is to only have the vdev resource entry and an optional trace entry. |
712 | * The exact size could be computed based on resource table address, but |
713 | * the hard-coded value suffices to support the IPC-only mode. |
714 | */ |
715 | *rsc_table_sz = 256; |
716 | return (struct resource_table *)kproc->rmem[0].cpu_addr; |
717 | } |
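
/*
 * For illustration only: the design-by-contract described above implies the
 * firmware links its resource table at the very start of its DDR carveout.
 * On the firmware side this is typically achieved with a dedicated section
 * placed first by the linker script, along these lines (section name and
 * placement are firmware-build assumptions, not mandated by this driver):
 *
 *	struct resource_table rsc_table
 *		__attribute__((section(".resource_table")));
 */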
718 | |
719 | /* |
720 | * Internal Memory translation helper |
721 | * |
722 | * Custom function implementing the rproc .da_to_va ops to provide address |
723 | * translation (device address to kernel virtual address) for internal RAMs |
 * present in an R5F device. The translated addresses can be used
725 | * either by the remoteproc core for loading, or by any rpmsg bus drivers. |
726 | */ |
727 | static void *k3_r5_rproc_da_to_va(struct rproc *rproc, u64 da, size_t len, bool *is_iomem) |
728 | { |
729 | struct k3_r5_rproc *kproc = rproc->priv; |
730 | struct k3_r5_core *core = kproc->core; |
731 | void __iomem *va = NULL; |
732 | phys_addr_t bus_addr; |
733 | u32 dev_addr, offset; |
734 | size_t size; |
735 | int i; |
736 | |
737 | if (len == 0) |
738 | return NULL; |
739 | |
740 | /* handle both R5 and SoC views of ATCM and BTCM */ |
741 | for (i = 0; i < core->num_mems; i++) { |
742 | bus_addr = core->mem[i].bus_addr; |
743 | dev_addr = core->mem[i].dev_addr; |
744 | size = core->mem[i].size; |
745 | |
746 | /* handle R5-view addresses of TCMs */ |
747 | if (da >= dev_addr && ((da + len) <= (dev_addr + size))) { |
748 | offset = da - dev_addr; |
749 | va = core->mem[i].cpu_addr + offset; |
750 | return (__force void *)va; |
751 | } |
752 | |
753 | /* handle SoC-view addresses of TCMs */ |
754 | if (da >= bus_addr && ((da + len) <= (bus_addr + size))) { |
755 | offset = da - bus_addr; |
756 | va = core->mem[i].cpu_addr + offset; |
757 | return (__force void *)va; |
758 | } |
759 | } |
760 | |
761 | /* handle any SRAM regions using SoC-view addresses */ |
762 | for (i = 0; i < core->num_sram; i++) { |
763 | dev_addr = core->sram[i].dev_addr; |
764 | size = core->sram[i].size; |
765 | |
766 | if (da >= dev_addr && ((da + len) <= (dev_addr + size))) { |
767 | offset = da - dev_addr; |
768 | va = core->sram[i].cpu_addr + offset; |
769 | return (__force void *)va; |
770 | } |
771 | } |
772 | |
773 | /* handle static DDR reserved memory regions */ |
774 | for (i = 0; i < kproc->num_rmems; i++) { |
775 | dev_addr = kproc->rmem[i].dev_addr; |
776 | size = kproc->rmem[i].size; |
777 | |
778 | if (da >= dev_addr && ((da + len) <= (dev_addr + size))) { |
779 | offset = da - dev_addr; |
780 | va = kproc->rmem[i].cpu_addr + offset; |
781 | return (__force void *)va; |
782 | } |
783 | } |
784 | |
785 | return NULL; |
786 | } |
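
/*
 * Worked example for the translation above (values assumed purely for
 * illustration): with loczrama = 1 the ATCM is at device address 0x0, so a
 * da of 0x100 with len 0x20 falls within core->mem[0] and resolves to
 * core->mem[0].cpu_addr + 0x100. The same ATCM word can also be reached via
 * its SoC bus address (e.g. 0x41000100 on an AM65x-class map), which the
 * SoC-view check resolves to the identical kernel virtual address.
 */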
787 | |
788 | static const struct rproc_ops k3_r5_rproc_ops = { |
789 | .prepare = k3_r5_rproc_prepare, |
790 | .unprepare = k3_r5_rproc_unprepare, |
791 | .start = k3_r5_rproc_start, |
792 | .stop = k3_r5_rproc_stop, |
793 | .kick = k3_r5_rproc_kick, |
794 | .da_to_va = k3_r5_rproc_da_to_va, |
795 | }; |
796 | |
797 | /* |
798 | * Internal R5F Core configuration |
799 | * |
800 | * Each R5FSS has a cluster-level setting for configuring the processor |
801 | * subsystem either in a safety/fault-tolerant LockStep mode or a performance |
 * oriented Split mode on most SoCs. A few SoCs support a non-safety mode
 * as an alternate for LockStep mode that exercises only a single R5F core,
 * called Single-CPU mode. Each R5F core has a number of settings to
 * enable/disable each of the TCMs and to control which TCM appears at the
 * R5F core's address 0x0. These settings need to be configured before the
 * resets for the corresponding core are released. These settings are all
 * protected and managed by the System Processor.
809 | * |
810 | * This function is used to pre-configure these settings for each R5F core, and |
811 | * the configuration is all done through various ti_sci_proc functions that |
812 | * communicate with the System Processor. The function also ensures that both |
813 | * the cores are halted before the .prepare() step. |
814 | * |
815 | * The function is called from k3_r5_cluster_rproc_init() and is invoked either |
816 | * once (in LockStep mode or Single-CPU modes) or twice (in Split mode). Support |
817 | * for LockStep-mode is dictated by an eFUSE register bit, and the config |
818 | * settings retrieved from DT are adjusted accordingly as per the permitted |
819 | * cluster mode. Another eFUSE register bit dictates if the R5F cluster only |
820 | * supports a Single-CPU mode. All cluster level settings like Cluster mode and |
821 | * TEINIT (exception handling state dictating ARM or Thumb mode) can only be set |
822 | * and retrieved using Core0. |
823 | * |
824 | * The function behavior is different based on the cluster mode. The R5F cores |
825 | * are configured independently as per their individual settings in Split mode. |
826 | * They are identically configured in LockStep mode using the primary Core0 |
827 | * settings. However, some individual settings cannot be set in LockStep mode. |
828 | * This is overcome by switching to Split-mode initially and then programming |
 * both the cores with the same settings, before reconfiguring again for
830 | * LockStep mode. |
831 | */ |
832 | static int k3_r5_rproc_configure(struct k3_r5_rproc *kproc) |
833 | { |
834 | struct k3_r5_cluster *cluster = kproc->cluster; |
835 | struct device *dev = kproc->dev; |
836 | struct k3_r5_core *core0, *core, *temp; |
837 | u32 ctrl = 0, cfg = 0, stat = 0; |
838 | u32 set_cfg = 0, clr_cfg = 0; |
839 | u64 boot_vec = 0; |
840 | bool lockstep_en; |
841 | bool single_cpu; |
842 | int ret; |
843 | |
844 | core0 = list_first_entry(&cluster->cores, struct k3_r5_core, elem); |
845 | if (cluster->mode == CLUSTER_MODE_LOCKSTEP || |
846 | cluster->mode == CLUSTER_MODE_SINGLECPU || |
847 | cluster->mode == CLUSTER_MODE_SINGLECORE) { |
848 | core = core0; |
849 | } else { |
850 | core = kproc->core; |
851 | } |
852 | |
	ret = ti_sci_proc_get_status(core->tsp, &boot_vec, &cfg, &ctrl,
				     &stat);
855 | if (ret < 0) |
856 | return ret; |
857 | |
	dev_dbg(dev, "boot_vector = 0x%llx, cfg = 0x%x ctrl = 0x%x stat = 0x%x\n",
		boot_vec, cfg, ctrl, stat);
860 | |
861 | single_cpu = !!(stat & PROC_BOOT_STATUS_FLAG_R5_SINGLECORE_ONLY); |
862 | lockstep_en = !!(stat & PROC_BOOT_STATUS_FLAG_R5_LOCKSTEP_PERMITTED); |
863 | |
864 | /* Override to single CPU mode if set in status flag */ |
865 | if (single_cpu && cluster->mode == CLUSTER_MODE_SPLIT) { |
		dev_err(cluster->dev, "split-mode not permitted, force configuring for single-cpu mode\n");
867 | cluster->mode = CLUSTER_MODE_SINGLECPU; |
868 | } |
869 | |
870 | /* Override to split mode if lockstep enable bit is not set in status flag */ |
871 | if (!lockstep_en && cluster->mode == CLUSTER_MODE_LOCKSTEP) { |
		dev_err(cluster->dev, "lockstep mode not permitted, force configuring for split-mode\n");
873 | cluster->mode = CLUSTER_MODE_SPLIT; |
874 | } |
875 | |
876 | /* always enable ARM mode and set boot vector to 0 */ |
877 | boot_vec = 0x0; |
878 | if (core == core0) { |
879 | clr_cfg = PROC_BOOT_CFG_FLAG_R5_TEINIT; |
880 | /* |
881 | * Single-CPU configuration bit can only be configured |
882 | * on Core0 and system firmware will NACK any requests |
883 | * with the bit configured, so program it only on |
884 | * permitted cores |
885 | */ |
886 | if (cluster->mode == CLUSTER_MODE_SINGLECPU || |
887 | cluster->mode == CLUSTER_MODE_SINGLECORE) { |
888 | set_cfg = PROC_BOOT_CFG_FLAG_R5_SINGLE_CORE; |
889 | } else { |
890 | /* |
891 | * LockStep configuration bit is Read-only on Split-mode |
892 | * _only_ devices and system firmware will NACK any |
893 | * requests with the bit configured, so program it only |
894 | * on permitted devices |
895 | */ |
896 | if (lockstep_en) |
897 | clr_cfg |= PROC_BOOT_CFG_FLAG_R5_LOCKSTEP; |
898 | } |
899 | } |
900 | |
901 | if (core->atcm_enable) |
902 | set_cfg |= PROC_BOOT_CFG_FLAG_R5_ATCM_EN; |
903 | else |
904 | clr_cfg |= PROC_BOOT_CFG_FLAG_R5_ATCM_EN; |
905 | |
906 | if (core->btcm_enable) |
907 | set_cfg |= PROC_BOOT_CFG_FLAG_R5_BTCM_EN; |
908 | else |
909 | clr_cfg |= PROC_BOOT_CFG_FLAG_R5_BTCM_EN; |
910 | |
911 | if (core->loczrama) |
912 | set_cfg |= PROC_BOOT_CFG_FLAG_R5_TCM_RSTBASE; |
913 | else |
914 | clr_cfg |= PROC_BOOT_CFG_FLAG_R5_TCM_RSTBASE; |
915 | |
916 | if (cluster->mode == CLUSTER_MODE_LOCKSTEP) { |
917 | /* |
918 | * work around system firmware limitations to make sure both |
919 | * cores are programmed symmetrically in LockStep. LockStep |
920 | * and TEINIT config is only allowed with Core0. |
921 | */ |
922 | list_for_each_entry(temp, &cluster->cores, elem) { |
			ret = k3_r5_core_halt(temp);
924 | if (ret) |
925 | goto out; |
926 | |
927 | if (temp != core) { |
928 | clr_cfg &= ~PROC_BOOT_CFG_FLAG_R5_LOCKSTEP; |
929 | clr_cfg &= ~PROC_BOOT_CFG_FLAG_R5_TEINIT; |
930 | } |
			ret = ti_sci_proc_set_config(temp->tsp, boot_vec,
						     set_cfg, clr_cfg);
933 | if (ret) |
934 | goto out; |
935 | } |
936 | |
937 | set_cfg = PROC_BOOT_CFG_FLAG_R5_LOCKSTEP; |
938 | clr_cfg = 0; |
		ret = ti_sci_proc_set_config(core->tsp, boot_vec,
					     set_cfg, clr_cfg);
941 | } else { |
942 | ret = k3_r5_core_halt(core); |
943 | if (ret) |
944 | goto out; |
945 | |
		ret = ti_sci_proc_set_config(core->tsp, boot_vec,
					     set_cfg, clr_cfg);
948 | } |
949 | |
950 | out: |
951 | return ret; |
952 | } |
953 | |
954 | static int k3_r5_reserved_mem_init(struct k3_r5_rproc *kproc) |
955 | { |
956 | struct device *dev = kproc->dev; |
957 | struct device_node *np = dev_of_node(dev); |
958 | struct device_node *rmem_np; |
959 | struct reserved_mem *rmem; |
960 | int num_rmems; |
961 | int ret, i; |
962 | |
	num_rmems = of_property_count_elems_of_size(np, "memory-region",
						    sizeof(phandle));
965 | if (num_rmems <= 0) { |
		dev_err(dev, "device does not have reserved memory regions, ret = %d\n",
			num_rmems);
968 | return -EINVAL; |
969 | } |
970 | if (num_rmems < 2) { |
		dev_err(dev, "device needs at least two memory regions to be defined, num = %d\n",
			num_rmems);
973 | return -EINVAL; |
974 | } |
975 | |
976 | /* use reserved memory region 0 for vring DMA allocations */ |
	ret = of_reserved_mem_device_init_by_idx(dev, np, 0);
	if (ret) {
		dev_err(dev, "device cannot initialize DMA pool, ret = %d\n",
			ret);
981 | return ret; |
982 | } |
983 | |
984 | num_rmems--; |
	kproc->rmem = kcalloc(num_rmems, sizeof(*kproc->rmem), GFP_KERNEL);
986 | if (!kproc->rmem) { |
987 | ret = -ENOMEM; |
988 | goto release_rmem; |
989 | } |
990 | |
991 | /* use remaining reserved memory regions for static carveouts */ |
992 | for (i = 0; i < num_rmems; i++) { |
		rmem_np = of_parse_phandle(np, "memory-region", i + 1);
994 | if (!rmem_np) { |
995 | ret = -EINVAL; |
996 | goto unmap_rmem; |
997 | } |
998 | |
		rmem = of_reserved_mem_lookup(rmem_np);
		if (!rmem) {
			of_node_put(rmem_np);
			ret = -EINVAL;
			goto unmap_rmem;
		}
		of_node_put(rmem_np);
1006 | |
1007 | kproc->rmem[i].bus_addr = rmem->base; |
1008 | /* |
1009 | * R5Fs do not have an MMU, but have a Region Address Translator |
1010 | * (RAT) module that provides a fixed entry translation between |
1011 | * the 32-bit processor addresses to 64-bit bus addresses. The |
1012 | * RAT is programmable only by the R5F cores. Support for RAT |
1013 | * is currently not supported, so 64-bit address regions are not |
1014 | * supported. The absence of MMUs implies that the R5F device |
1015 | * addresses/supported memory regions are restricted to 32-bit |
1016 | * bus addresses, and are identical |
1017 | */ |
1018 | kproc->rmem[i].dev_addr = (u32)rmem->base; |
1019 | kproc->rmem[i].size = rmem->size; |
		kproc->rmem[i].cpu_addr = ioremap_wc(rmem->base, rmem->size);
		if (!kproc->rmem[i].cpu_addr) {
			dev_err(dev, "failed to map reserved memory#%d at %pa of size %pa\n",
				i + 1, &rmem->base, &rmem->size);
1024 | ret = -ENOMEM; |
1025 | goto unmap_rmem; |
1026 | } |
1027 | |
		dev_dbg(dev, "reserved memory%d: bus addr %pa size 0x%zx va %pK da 0x%x\n",
1029 | i + 1, &kproc->rmem[i].bus_addr, |
1030 | kproc->rmem[i].size, kproc->rmem[i].cpu_addr, |
1031 | kproc->rmem[i].dev_addr); |
1032 | } |
1033 | kproc->num_rmems = num_rmems; |
1034 | |
1035 | return 0; |
1036 | |
1037 | unmap_rmem: |
1038 | for (i--; i >= 0; i--) |
		iounmap(kproc->rmem[i].cpu_addr);
	kfree(kproc->rmem);
1041 | release_rmem: |
1042 | of_reserved_mem_device_release(dev); |
1043 | return ret; |
1044 | } |
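
/*
 * For reference, the layout expected by k3_r5_reserved_mem_init(): the first
 * memory-region entry backs the vring/virtio DMA pool and all remaining
 * entries become static carveouts. An illustrative, board-specific dts
 * fragment (node names are assumptions) could look like:
 *
 *	memory-region = <&mcu_r5fss0_core0_dma_memory_region>,
 *			<&mcu_r5fss0_core0_memory_region>;
 */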
1045 | |
1046 | static void k3_r5_reserved_mem_exit(struct k3_r5_rproc *kproc) |
1047 | { |
1048 | int i; |
1049 | |
1050 | for (i = 0; i < kproc->num_rmems; i++) |
		iounmap(kproc->rmem[i].cpu_addr);
	kfree(kproc->rmem);

	of_reserved_mem_device_release(kproc->dev);
1055 | } |
1056 | |
1057 | /* |
1058 | * Each R5F core within a typical R5FSS instance has a total of 64 KB of TCMs, |
1059 | * split equally into two 32 KB banks between ATCM and BTCM. The TCMs from both |
1060 | * cores are usable in Split-mode, but only the Core0 TCMs can be used in |
 * LockStep-mode. The newer revisions of the R5FSS IP maximize these TCMs by
1062 | * leveraging the Core1 TCMs as well in certain modes where they would have |
1063 | * otherwise been unusable (Eg: LockStep-mode on J7200 SoCs, Single-CPU mode on |
1064 | * AM64x SoCs). This is done by making a Core1 TCM visible immediately after the |
1065 | * corresponding Core0 TCM. The SoC memory map uses the larger 64 KB sizes for |
1066 | * the Core0 TCMs, and the dts representation reflects this increased size on |
1067 | * supported SoCs. The Core0 TCM sizes therefore have to be adjusted to only |
1068 | * half the original size in Split mode. |
1069 | */ |
1070 | static void k3_r5_adjust_tcm_sizes(struct k3_r5_rproc *kproc) |
1071 | { |
1072 | struct k3_r5_cluster *cluster = kproc->cluster; |
1073 | struct k3_r5_core *core = kproc->core; |
1074 | struct device *cdev = core->dev; |
1075 | struct k3_r5_core *core0; |
1076 | |
1077 | if (cluster->mode == CLUSTER_MODE_LOCKSTEP || |
1078 | cluster->mode == CLUSTER_MODE_SINGLECPU || |
1079 | cluster->mode == CLUSTER_MODE_SINGLECORE || |
1080 | !cluster->soc_data->tcm_is_double) |
1081 | return; |
1082 | |
1083 | core0 = list_first_entry(&cluster->cores, struct k3_r5_core, elem); |
1084 | if (core == core0) { |
1085 | WARN_ON(core->mem[0].size != SZ_64K); |
1086 | WARN_ON(core->mem[1].size != SZ_64K); |
1087 | |
1088 | core->mem[0].size /= 2; |
1089 | core->mem[1].size /= 2; |
1090 | |
		dev_dbg(cdev, "adjusted TCM sizes, ATCM = 0x%zx BTCM = 0x%zx\n",
1092 | core->mem[0].size, core->mem[1].size); |
1093 | } |
1094 | } |
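
/*
 * Example of the adjustment above (sizes taken from the comment preceding
 * k3_r5_adjust_tcm_sizes()): on a tcm_is_double SoC running in Split mode,
 * Core0's dts-described 64 KB ATCM and BTCM are halved here to 32 KB each,
 * since the upper halves correspond to Core1 TCMs that are not combined with
 * Core0 in this mode.
 */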
1095 | |
1096 | /* |
1097 | * This function checks and configures a R5F core for IPC-only or remoteproc |
1098 | * mode. The driver is configured to be in IPC-only mode for a R5F core when |
1099 | * the core has been loaded and started by a bootloader. The IPC-only mode is |
1100 | * detected by querying the System Firmware for reset, power on and halt status |
 * and ensuring that the core is running. Any incomplete steps performed by the
 * bootloader are detected and errored out.
1103 | * |
1104 | * In IPC-only mode, the driver state flags for ATCM, BTCM and LOCZRAMA settings |
1105 | * and cluster mode parsed originally from kernel DT are updated to reflect the |
1106 | * actual values configured by bootloader. The driver internal device memory |
1107 | * addresses for TCMs are also updated. |
1108 | */ |
1109 | static int k3_r5_rproc_configure_mode(struct k3_r5_rproc *kproc) |
1110 | { |
1111 | struct k3_r5_cluster *cluster = kproc->cluster; |
1112 | struct k3_r5_core *core = kproc->core; |
1113 | struct device *cdev = core->dev; |
1114 | bool r_state = false, c_state = false, lockstep_en = false, single_cpu = false; |
1115 | u32 ctrl = 0, cfg = 0, stat = 0, halted = 0; |
1116 | u64 boot_vec = 0; |
1117 | u32 atcm_enable, btcm_enable, loczrama; |
1118 | struct k3_r5_core *core0; |
1119 | enum cluster_mode mode = cluster->mode; |
1120 | int ret; |
1121 | |
1122 | core0 = list_first_entry(&cluster->cores, struct k3_r5_core, elem); |
1123 | |
1124 | ret = core->ti_sci->ops.dev_ops.is_on(core->ti_sci, core->ti_sci_id, |
1125 | &r_state, &c_state); |
1126 | if (ret) { |
		dev_err(cdev, "failed to get initial state, mode cannot be determined, ret = %d\n",
			ret);
1129 | return ret; |
1130 | } |
1131 | if (r_state != c_state) { |
		dev_warn(cdev, "R5F core may have been powered on by a different host, programmed state (%d) != actual state (%d)\n",
			 r_state, c_state);
1134 | } |
1135 | |
	ret = reset_control_status(core->reset);
1137 | if (ret < 0) { |
		dev_err(cdev, "failed to get initial local reset status, ret = %d\n",
			ret);
1140 | return ret; |
1141 | } |
1142 | |
	ret = ti_sci_proc_get_status(core->tsp, &boot_vec, &cfg, &ctrl,
				     &stat);
1145 | if (ret < 0) { |
		dev_err(cdev, "failed to get initial processor status, ret = %d\n",
			ret);
1148 | return ret; |
1149 | } |
1150 | atcm_enable = cfg & PROC_BOOT_CFG_FLAG_R5_ATCM_EN ? 1 : 0; |
1151 | btcm_enable = cfg & PROC_BOOT_CFG_FLAG_R5_BTCM_EN ? 1 : 0; |
1152 | loczrama = cfg & PROC_BOOT_CFG_FLAG_R5_TCM_RSTBASE ? 1 : 0; |
1153 | single_cpu = cfg & PROC_BOOT_CFG_FLAG_R5_SINGLE_CORE ? 1 : 0; |
1154 | lockstep_en = cfg & PROC_BOOT_CFG_FLAG_R5_LOCKSTEP ? 1 : 0; |
1155 | |
1156 | if (single_cpu && mode != CLUSTER_MODE_SINGLECORE) |
1157 | mode = CLUSTER_MODE_SINGLECPU; |
1158 | if (lockstep_en) |
1159 | mode = CLUSTER_MODE_LOCKSTEP; |
1160 | |
1161 | halted = ctrl & PROC_BOOT_CTRL_FLAG_R5_CORE_HALT; |
1162 | |
1163 | /* |
1164 | * IPC-only mode detection requires both local and module resets to |
1165 | * be deasserted and R5F core to be unhalted. Local reset status is |
1166 | * irrelevant if module reset is asserted (POR value has local reset |
1167 | * deasserted), and is deemed as remoteproc mode |
1168 | */ |
1169 | if (c_state && !ret && !halted) { |
		dev_info(cdev, "configured R5F for IPC-only mode\n");
1171 | kproc->rproc->state = RPROC_DETACHED; |
1172 | ret = 1; |
1173 | /* override rproc ops with only required IPC-only mode ops */ |
1174 | kproc->rproc->ops->prepare = NULL; |
1175 | kproc->rproc->ops->unprepare = NULL; |
1176 | kproc->rproc->ops->start = NULL; |
1177 | kproc->rproc->ops->stop = NULL; |
1178 | kproc->rproc->ops->attach = k3_r5_rproc_attach; |
1179 | kproc->rproc->ops->detach = k3_r5_rproc_detach; |
1180 | kproc->rproc->ops->get_loaded_rsc_table = |
1181 | k3_r5_get_loaded_rsc_table; |
1182 | } else if (!c_state) { |
		dev_info(cdev, "configured R5F for remoteproc mode\n");
1184 | ret = 0; |
1185 | } else { |
		dev_err(cdev, "mismatched mode: local_reset = %s, module_reset = %s, core_state = %s\n",
			!ret ? "deasserted" : "asserted",
			c_state ? "deasserted" : "asserted",
			halted ? "halted" : "unhalted");
1190 | ret = -EINVAL; |
1191 | } |
1192 | |
1193 | /* fixup TCMs, cluster & core flags to actual values in IPC-only mode */ |
1194 | if (ret > 0) { |
1195 | if (core == core0) |
1196 | cluster->mode = mode; |
1197 | core->atcm_enable = atcm_enable; |
1198 | core->btcm_enable = btcm_enable; |
1199 | core->loczrama = loczrama; |
1200 | core->mem[0].dev_addr = loczrama ? 0 : K3_R5_TCM_DEV_ADDR; |
1201 | core->mem[1].dev_addr = loczrama ? K3_R5_TCM_DEV_ADDR : 0; |
1202 | } |
1203 | |
1204 | return ret; |
1205 | } |
1206 | |
1207 | static int k3_r5_cluster_rproc_init(struct platform_device *pdev) |
1208 | { |
1209 | struct k3_r5_cluster *cluster = platform_get_drvdata(pdev); |
1210 | struct device *dev = &pdev->dev; |
1211 | struct k3_r5_rproc *kproc; |
1212 | struct k3_r5_core *core, *core1; |
1213 | struct device *cdev; |
1214 | const char *fw_name; |
1215 | struct rproc *rproc; |
1216 | int ret, ret1; |
1217 | |
1218 | core1 = list_last_entry(&cluster->cores, struct k3_r5_core, elem); |
1219 | list_for_each_entry(core, &cluster->cores, elem) { |
1220 | cdev = core->dev; |
		ret = rproc_of_parse_firmware(cdev, 0, &fw_name);
		if (ret) {
			dev_err(dev, "failed to parse firmware-name property, ret = %d\n",
1224 | ret); |
1225 | goto out; |
1226 | } |
1227 | |
		rproc = rproc_alloc(cdev, dev_name(cdev), &k3_r5_rproc_ops,
				    fw_name, sizeof(*kproc));
1230 | if (!rproc) { |
1231 | ret = -ENOMEM; |
1232 | goto out; |
1233 | } |
1234 | |
1235 | /* K3 R5s have a Region Address Translator (RAT) but no MMU */ |
1236 | rproc->has_iommu = false; |
1237 | /* error recovery is not supported at present */ |
1238 | rproc->recovery_disabled = true; |
1239 | |
1240 | kproc = rproc->priv; |
1241 | kproc->cluster = cluster; |
1242 | kproc->core = core; |
1243 | kproc->dev = cdev; |
1244 | kproc->rproc = rproc; |
1245 | core->rproc = rproc; |
1246 | |
1247 | ret = k3_r5_rproc_configure_mode(kproc); |
1248 | if (ret < 0) |
1249 | goto err_config; |
1250 | if (ret) |
1251 | goto init_rmem; |
1252 | |
1253 | ret = k3_r5_rproc_configure(kproc); |
1254 | if (ret) { |
			dev_err(dev, "initial configure failed, ret = %d\n",
				ret);
1257 | goto err_config; |
1258 | } |
1259 | |
1260 | init_rmem: |
1261 | k3_r5_adjust_tcm_sizes(kproc); |
1262 | |
1263 | ret = k3_r5_reserved_mem_init(kproc); |
1264 | if (ret) { |
			dev_err(dev, "reserved memory init failed, ret = %d\n",
				ret);
1267 | goto err_config; |
1268 | } |
1269 | |
1270 | ret = rproc_add(rproc); |
1271 | if (ret) { |
			dev_err(dev, "rproc_add failed, ret = %d\n", ret);
1273 | goto err_add; |
1274 | } |
1275 | |
		/*
		 * create only one rproc in lockstep, single-cpu or
		 * single-core mode
		 */
1279 | if (cluster->mode == CLUSTER_MODE_LOCKSTEP || |
1280 | cluster->mode == CLUSTER_MODE_SINGLECPU || |
1281 | cluster->mode == CLUSTER_MODE_SINGLECORE) |
1282 | break; |
1283 | } |
1284 | |
1285 | return 0; |
1286 | |
1287 | err_split: |
1288 | if (rproc->state == RPROC_ATTACHED) { |
1289 | ret1 = rproc_detach(rproc); |
1290 | if (ret1) { |
			dev_err(kproc->dev, "failed to detach rproc, ret = %d\n",
				ret1);
1293 | return ret1; |
1294 | } |
1295 | } |
1296 | |
1297 | rproc_del(rproc); |
1298 | err_add: |
1299 | k3_r5_reserved_mem_exit(kproc); |
1300 | err_config: |
1301 | rproc_free(rproc); |
1302 | core->rproc = NULL; |
1303 | out: |
1304 | /* undo core0 upon any failures on core1 in split-mode */ |
1305 | if (cluster->mode == CLUSTER_MODE_SPLIT && core == core1) { |
1306 | core = list_prev_entry(core, elem); |
1307 | rproc = core->rproc; |
1308 | kproc = rproc->priv; |
1309 | goto err_split; |
1310 | } |
1311 | return ret; |
1312 | } |
1313 | |
1314 | static void k3_r5_cluster_rproc_exit(void *data) |
1315 | { |
	struct k3_r5_cluster *cluster = platform_get_drvdata(data);
1317 | struct k3_r5_rproc *kproc; |
1318 | struct k3_r5_core *core; |
1319 | struct rproc *rproc; |
1320 | int ret; |
1321 | |
1322 | /* |
1323 | * lockstep mode and single-cpu modes have only one rproc associated |
1324 | * with first core, whereas split-mode has two rprocs associated with |
1325 | * each core, and requires that core1 be powered down first |
1326 | */ |
1327 | core = (cluster->mode == CLUSTER_MODE_LOCKSTEP || |
1328 | cluster->mode == CLUSTER_MODE_SINGLECPU) ? |
1329 | list_first_entry(&cluster->cores, struct k3_r5_core, elem) : |
1330 | list_last_entry(&cluster->cores, struct k3_r5_core, elem); |
1331 | |
1332 | list_for_each_entry_from_reverse(core, &cluster->cores, elem) { |
1333 | rproc = core->rproc; |
1334 | kproc = rproc->priv; |
1335 | |
1336 | if (rproc->state == RPROC_ATTACHED) { |
1337 | ret = rproc_detach(rproc); |
1338 | if (ret) { |
				dev_err(kproc->dev, "failed to detach rproc, ret = %d\n", ret);
1340 | return; |
1341 | } |
1342 | } |
1343 | |
1344 | rproc_del(rproc); |
1345 | |
1346 | k3_r5_reserved_mem_exit(kproc); |
1347 | |
1348 | rproc_free(rproc); |
1349 | core->rproc = NULL; |
1350 | } |
1351 | } |
1352 | |
1353 | static int k3_r5_core_of_get_internal_memories(struct platform_device *pdev, |
1354 | struct k3_r5_core *core) |
1355 | { |
	static const char * const mem_names[] = {"atcm", "btcm"};
1357 | struct device *dev = &pdev->dev; |
1358 | struct resource *res; |
1359 | int num_mems; |
1360 | int i; |
1361 | |
1362 | num_mems = ARRAY_SIZE(mem_names); |
	core->mem = devm_kcalloc(dev, num_mems, sizeof(*core->mem), GFP_KERNEL);
1364 | if (!core->mem) |
1365 | return -ENOMEM; |
1366 | |
1367 | for (i = 0; i < num_mems; i++) { |
1368 | res = platform_get_resource_byname(pdev, IORESOURCE_MEM, |
1369 | mem_names[i]); |
1370 | if (!res) { |
			dev_err(dev, "found no memory resource for %s\n",
				mem_names[i]);
1373 | return -EINVAL; |
1374 | } |
1375 | if (!devm_request_mem_region(dev, res->start, |
1376 | resource_size(res), |
1377 | dev_name(dev))) { |
			dev_err(dev, "could not request %s region for resource\n",
				mem_names[i]);
1380 | return -EBUSY; |
1381 | } |
1382 | |
1383 | /* |
1384 | * TCMs are designed in general to support RAM-like backing |
1385 | * memories. So, map these as Normal Non-Cached memories. This |
1386 | * also avoids/fixes any potential alignment faults due to |
1387 | * unaligned data accesses when using memcpy() or memset() |
1388 | * functions (normally seen with device type memory). |
1389 | */ |
		core->mem[i].cpu_addr = devm_ioremap_wc(dev, res->start,
							resource_size(res));
1392 | if (!core->mem[i].cpu_addr) { |
			dev_err(dev, "failed to map %s memory\n", mem_names[i]);
1394 | return -ENOMEM; |
1395 | } |
1396 | core->mem[i].bus_addr = res->start; |
1397 | |
1398 | /* |
1399 | * TODO: |
	 * The R5F cores can place ATCM & BTCM anywhere in their address map
1401 | * based on the corresponding Region Registers in the System |
1402 | * Control coprocessor. For now, place ATCM and BTCM at |
1403 | * addresses 0 and 0x41010000 (same as the bus address on AM65x |
1404 | * SoCs) based on loczrama setting |
1405 | */ |
		if (!strcmp(mem_names[i], "atcm")) {
1407 | core->mem[i].dev_addr = core->loczrama ? |
1408 | 0 : K3_R5_TCM_DEV_ADDR; |
1409 | } else { |
1410 | core->mem[i].dev_addr = core->loczrama ? |
1411 | K3_R5_TCM_DEV_ADDR : 0; |
1412 | } |
1413 | core->mem[i].size = resource_size(res); |
1414 | |
		dev_dbg(dev, "memory %5s: bus addr %pa size 0x%zx va %pK da 0x%x\n",
1416 | mem_names[i], &core->mem[i].bus_addr, |
1417 | core->mem[i].size, core->mem[i].cpu_addr, |
1418 | core->mem[i].dev_addr); |
1419 | } |
1420 | core->num_mems = num_mems; |
1421 | |
1422 | return 0; |
1423 | } |
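
/*
 * A minimal, illustrative dts fragment matching the "atcm"/"btcm" lookups
 * above (the addresses echo the AM65x bus addresses mentioned in the TODO
 * note and are assumptions, not authoritative for any particular SoC):
 *
 *	reg = <0x00 0x41000000 0x00 0x8000>,
 *	      <0x00 0x41010000 0x00 0x8000>;
 *	reg-names = "atcm", "btcm";
 */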
1424 | |
1425 | static int k3_r5_core_of_get_sram_memories(struct platform_device *pdev, |
1426 | struct k3_r5_core *core) |
1427 | { |
1428 | struct device_node *np = pdev->dev.of_node; |
1429 | struct device *dev = &pdev->dev; |
1430 | struct device_node *sram_np; |
1431 | struct resource res; |
1432 | int num_sram; |
1433 | int i, ret; |
1434 | |
	num_sram = of_property_count_elems_of_size(np, "sram", sizeof(phandle));
1436 | if (num_sram <= 0) { |
		dev_dbg(dev, "device does not use reserved on-chip memories, num_sram = %d\n",
			num_sram);
1439 | return 0; |
1440 | } |
1441 | |
	core->sram = devm_kcalloc(dev, num_sram, sizeof(*core->sram), GFP_KERNEL);
1443 | if (!core->sram) |
1444 | return -ENOMEM; |
1445 | |
1446 | for (i = 0; i < num_sram; i++) { |
		sram_np = of_parse_phandle(np, "sram", i);
1448 | if (!sram_np) |
1449 | return -EINVAL; |
1450 | |
		if (!of_device_is_available(sram_np)) {
			of_node_put(sram_np);
			return -EINVAL;
		}

		ret = of_address_to_resource(sram_np, 0, &res);
		of_node_put(sram_np);
1458 | if (ret) |
1459 | return -EINVAL; |
1460 | |
1461 | core->sram[i].bus_addr = res.start; |
1462 | core->sram[i].dev_addr = res.start; |
		core->sram[i].size = resource_size(&res);
		core->sram[i].cpu_addr = devm_ioremap_wc(dev, res.start,
							 resource_size(&res));
1466 | if (!core->sram[i].cpu_addr) { |
			dev_err(dev, "failed to parse and map sram%d memory at %pad\n",
				i, &res.start);
1469 | return -ENOMEM; |
1470 | } |
1471 | |
		dev_dbg(dev, "memory sram%d: bus addr %pa size 0x%zx va %pK da 0x%x\n",
1473 | i, &core->sram[i].bus_addr, |
1474 | core->sram[i].size, core->sram[i].cpu_addr, |
1475 | core->sram[i].dev_addr); |
1476 | } |
1477 | core->num_sram = num_sram; |
1478 | |
1479 | return 0; |
1480 | } |
1481 | |
1482 | static |
1483 | struct ti_sci_proc *k3_r5_core_of_get_tsp(struct device *dev, |
1484 | const struct ti_sci_handle *sci) |
1485 | { |
1486 | struct ti_sci_proc *tsp; |
1487 | u32 temp[2]; |
1488 | int ret; |
1489 | |
	ret = of_property_read_u32_array(dev_of_node(dev), "ti,sci-proc-ids",
					 temp, 2);
	if (ret < 0)
		return ERR_PTR(ret);

	tsp = devm_kzalloc(dev, sizeof(*tsp), GFP_KERNEL);
	if (!tsp)
		return ERR_PTR(-ENOMEM);
1498 | |
1499 | tsp->dev = dev; |
1500 | tsp->sci = sci; |
1501 | tsp->ops = &sci->ops.proc_ops; |
1502 | tsp->proc_id = temp[0]; |
1503 | tsp->host_id = temp[1]; |
1504 | |
1505 | return tsp; |
1506 | } |
1507 | |
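/*
 * Initialize a single R5F core from its device-tree node. The optional
 * "ti,atcm-enable", "ti,btcm-enable" and "ti,loczrama" properties override
 * the power-on-reset defaults set below; the TI-SCI handle, device id,
 * processor ids and a reset control are required. A minimal, purely
 * illustrative core-node sketch (names and values are examples, not from a
 * real board file):
 *
 *	r5f@41000000 {
 *		ti,sci = <&dmsc>;
 *		ti,sci-dev-id = <159>;
 *		ti,sci-proc-ids = <0x01 0xff>;
 *		resets = <&k3_reset 159 1>;
 *		ti,atcm-enable = <1>;
 *		ti,btcm-enable = <1>;
 *		ti,loczrama = <1>;
 *	};
 */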
1508 | static int k3_r5_core_of_init(struct platform_device *pdev) |
1509 | { |
1510 | struct device *dev = &pdev->dev; |
1511 | struct device_node *np = dev_of_node(dev); |
1512 | struct k3_r5_core *core; |
1513 | int ret; |
1514 | |
	if (!devres_open_group(dev, k3_r5_core_of_init, GFP_KERNEL))
1516 | return -ENOMEM; |
1517 | |
	core = devm_kzalloc(dev, sizeof(*core), GFP_KERNEL);
1519 | if (!core) { |
1520 | ret = -ENOMEM; |
1521 | goto err; |
1522 | } |
1523 | |
1524 | core->dev = dev; |
1525 | /* |
1526 | * Use SoC Power-on-Reset values as default if no DT properties are |
1527 | * used to dictate the TCM configurations |
1528 | */ |
1529 | core->atcm_enable = 0; |
1530 | core->btcm_enable = 1; |
1531 | core->loczrama = 1; |
1532 | |
	ret = of_property_read_u32(np, "ti,atcm-enable", &core->atcm_enable);
	if (ret < 0 && ret != -EINVAL) {
		dev_err(dev, "invalid format for ti,atcm-enable, ret = %d\n",
			ret);
		goto err;
	}

	ret = of_property_read_u32(np, "ti,btcm-enable", &core->btcm_enable);
	if (ret < 0 && ret != -EINVAL) {
		dev_err(dev, "invalid format for ti,btcm-enable, ret = %d\n",
			ret);
		goto err;
	}

	ret = of_property_read_u32(np, "ti,loczrama", &core->loczrama);
	if (ret < 0 && ret != -EINVAL) {
		dev_err(dev, "invalid format for ti,loczrama, ret = %d\n", ret);
1550 | goto err; |
1551 | } |
1552 | |
	core->ti_sci = devm_ti_sci_get_by_phandle(dev, "ti,sci");
	if (IS_ERR(core->ti_sci)) {
		ret = PTR_ERR(core->ti_sci);
		if (ret != -EPROBE_DEFER) {
			dev_err(dev, "failed to get ti-sci handle, ret = %d\n",
				ret);
		}
		core->ti_sci = NULL;
		goto err;
	}

	ret = of_property_read_u32(np, "ti,sci-dev-id", &core->ti_sci_id);
	if (ret) {
		dev_err(dev, "missing 'ti,sci-dev-id' property\n");
1567 | goto err; |
1568 | } |
1569 | |
1570 | core->reset = devm_reset_control_get_exclusive(dev, NULL); |
	if (IS_ERR_OR_NULL(core->reset)) {
		ret = PTR_ERR_OR_ZERO(core->reset);
		if (!ret)
			ret = -ENODEV;
		if (ret != -EPROBE_DEFER) {
			dev_err(dev, "failed to get reset handle, ret = %d\n",
				ret);
		}
		goto err;
	}

	core->tsp = k3_r5_core_of_get_tsp(dev, core->ti_sci);
	if (IS_ERR(core->tsp)) {
		ret = PTR_ERR(core->tsp);
		dev_err(dev, "failed to construct ti-sci proc control, ret = %d\n",
1586 | ret); |
1587 | goto err; |
1588 | } |
1589 | |
1590 | ret = k3_r5_core_of_get_internal_memories(pdev, core); |
1591 | if (ret) { |
1592 | dev_err(dev, "failed to get internal memories, ret = %d\n" , |
1593 | ret); |
1594 | goto err; |
1595 | } |
1596 | |
1597 | ret = k3_r5_core_of_get_sram_memories(pdev, core); |
1598 | if (ret) { |
1599 | dev_err(dev, "failed to get sram memories, ret = %d\n" , ret); |
1600 | goto err; |
1601 | } |
1602 | |
	ret = ti_sci_proc_request(core->tsp);
	if (ret < 0) {
		dev_err(dev, "ti_sci_proc_request failed, ret = %d\n", ret);
1606 | goto err; |
1607 | } |
1608 | |
	platform_set_drvdata(pdev, core);
	devres_close_group(dev, k3_r5_core_of_init);
1611 | |
1612 | return 0; |
1613 | |
1614 | err: |
	devres_release_group(dev, k3_r5_core_of_init);
1616 | return ret; |
1617 | } |
1618 | |
1619 | /* |
1620 | * free the resources explicitly since driver model is not being used |
1621 | * for the child R5F devices |
1622 | */ |
1623 | static void k3_r5_core_of_exit(struct platform_device *pdev) |
1624 | { |
1625 | struct k3_r5_core *core = platform_get_drvdata(pdev); |
1626 | struct device *dev = &pdev->dev; |
1627 | int ret; |
1628 | |
	ret = ti_sci_proc_release(core->tsp);
	if (ret)
		dev_err(dev, "failed to release proc, ret = %d\n", ret);
1632 | |
1633 | platform_set_drvdata(pdev, NULL); |
	devres_release_group(dev, k3_r5_core_of_init);
1635 | } |
1636 | |
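/*
 * Tear the cluster down in the reverse order of registration: remove each
 * core from the cluster list (newest first) and release its per-core
 * resources via k3_r5_core_of_exit().
 */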
1637 | static void k3_r5_cluster_of_exit(void *data) |
1638 | { |
	struct k3_r5_cluster *cluster = platform_get_drvdata(data);
1640 | struct platform_device *cpdev; |
1641 | struct k3_r5_core *core, *temp; |
1642 | |
1643 | list_for_each_entry_safe_reverse(core, temp, &cluster->cores, elem) { |
		list_del(&core->elem);
		cpdev = to_platform_device(core->dev);
		k3_r5_core_of_exit(cpdev);
1647 | } |
1648 | } |
1649 | |
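/*
 * Walk all enabled child nodes of the cluster, look up the platform device
 * created for each R5F core, initialize it and append it to the cluster's
 * core list. On any failure, already-initialized cores are unwound through
 * k3_r5_cluster_of_exit().
 */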
1650 | static int k3_r5_cluster_of_init(struct platform_device *pdev) |
1651 | { |
1652 | struct k3_r5_cluster *cluster = platform_get_drvdata(pdev); |
1653 | struct device *dev = &pdev->dev; |
1654 | struct device_node *np = dev_of_node(dev); |
1655 | struct platform_device *cpdev; |
1656 | struct device_node *child; |
1657 | struct k3_r5_core *core; |
1658 | int ret; |
1659 | |
1660 | for_each_available_child_of_node(np, child) { |
		cpdev = of_find_device_by_node(child);
		if (!cpdev) {
			ret = -ENODEV;
			dev_err(dev, "could not get R5 core platform device\n");
			of_node_put(child);
1666 | goto fail; |
1667 | } |
1668 | |
		ret = k3_r5_core_of_init(cpdev);
		if (ret) {
			dev_err(dev, "k3_r5_core_of_init failed, ret = %d\n",
				ret);
			put_device(&cpdev->dev);
			of_node_put(child);
1675 | goto fail; |
1676 | } |
1677 | |
		core = platform_get_drvdata(cpdev);
		put_device(&cpdev->dev);
		list_add_tail(&core->elem, &cluster->cores);
1681 | } |
1682 | |
1683 | return 0; |
1684 | |
1685 | fail: |
	k3_r5_cluster_of_exit(pdev);
1687 | return ret; |
1688 | } |
1689 | |
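/*
 * Cluster probe: match the SoC data, read the optional "ti,cluster-mode"
 * property (its value is used directly as enum cluster_mode), check the
 * requested mode and the number of enabled child core nodes against the
 * SoC's capabilities, then populate the child R5F core devices and set up
 * their remoteproc instances.
 */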
1690 | static int k3_r5_probe(struct platform_device *pdev) |
1691 | { |
1692 | struct device *dev = &pdev->dev; |
1693 | struct device_node *np = dev_of_node(dev); |
1694 | struct k3_r5_cluster *cluster; |
1695 | const struct k3_r5_soc_data *data; |
1696 | int ret; |
1697 | int num_cores; |
1698 | |
	data = of_device_get_match_data(&pdev->dev);
	if (!data) {
		dev_err(dev, "SoC-specific data is not defined\n");
1702 | return -ENODEV; |
1703 | } |
1704 | |
	cluster = devm_kzalloc(dev, sizeof(*cluster), GFP_KERNEL);
1706 | if (!cluster) |
1707 | return -ENOMEM; |
1708 | |
1709 | cluster->dev = dev; |
1710 | cluster->soc_data = data; |
	INIT_LIST_HEAD(&cluster->cores);

	ret = of_property_read_u32(np, "ti,cluster-mode", &cluster->mode);
	if (ret < 0 && ret != -EINVAL) {
		dev_err(dev, "invalid format for ti,cluster-mode, ret = %d\n",
1716 | ret); |
1717 | return ret; |
1718 | } |
1719 | |
1720 | if (ret == -EINVAL) { |
1721 | /* |
1722 | * default to most common efuse configurations - Split-mode on AM64x |
1723 | * and LockStep-mode on all others |
1724 | * default to most common efuse configurations - |
1725 | * Split-mode on AM64x |
1726 | * Single core on AM62x |
1727 | * LockStep-mode on all others |
1728 | */ |
1729 | if (!data->is_single_core) |
1730 | cluster->mode = data->single_cpu_mode ? |
1731 | CLUSTER_MODE_SPLIT : CLUSTER_MODE_LOCKSTEP; |
1732 | else |
1733 | cluster->mode = CLUSTER_MODE_SINGLECORE; |
1734 | } |
1735 | |
1736 | if ((cluster->mode == CLUSTER_MODE_SINGLECPU && !data->single_cpu_mode) || |
1737 | (cluster->mode == CLUSTER_MODE_SINGLECORE && !data->is_single_core)) { |
1738 | dev_err(dev, "Cluster mode = %d is not supported on this SoC\n" , cluster->mode); |
1739 | return -EINVAL; |
1740 | } |
1741 | |
1742 | num_cores = of_get_available_child_count(np); |
1743 | if (num_cores != 2 && !data->is_single_core) { |
1744 | dev_err(dev, "MCU cluster requires both R5F cores to be enabled but num_cores is set to = %d\n" , |
1745 | num_cores); |
1746 | return -ENODEV; |
1747 | } |
1748 | |
1749 | if (num_cores != 1 && data->is_single_core) { |
1750 | dev_err(dev, "SoC supports only single core R5 but num_cores is set to %d\n" , |
1751 | num_cores); |
1752 | return -ENODEV; |
1753 | } |
1754 | |
	platform_set_drvdata(pdev, cluster);
1756 | |
1757 | ret = devm_of_platform_populate(dev); |
1758 | if (ret) { |
1759 | dev_err(dev, "devm_of_platform_populate failed, ret = %d\n" , |
1760 | ret); |
1761 | return ret; |
1762 | } |
1763 | |
1764 | ret = k3_r5_cluster_of_init(pdev); |
1765 | if (ret) { |
1766 | dev_err(dev, "k3_r5_cluster_of_init failed, ret = %d\n" , ret); |
1767 | return ret; |
1768 | } |
1769 | |
1770 | ret = devm_add_action_or_reset(dev, k3_r5_cluster_of_exit, pdev); |
1771 | if (ret) |
1772 | return ret; |
1773 | |
1774 | ret = k3_r5_cluster_rproc_init(pdev); |
1775 | if (ret) { |
1776 | dev_err(dev, "k3_r5_cluster_rproc_init failed, ret = %d\n" , |
1777 | ret); |
1778 | return ret; |
1779 | } |
1780 | |
1781 | ret = devm_add_action_or_reset(dev, k3_r5_cluster_rproc_exit, pdev); |
1782 | if (ret) |
1783 | return ret; |
1784 | |
1785 | return 0; |
1786 | } |
1787 | |
1788 | static const struct k3_r5_soc_data am65_j721e_soc_data = { |
1789 | .tcm_is_double = false, |
1790 | .tcm_ecc_autoinit = false, |
1791 | .single_cpu_mode = false, |
1792 | .is_single_core = false, |
1793 | }; |
1794 | |
1795 | static const struct k3_r5_soc_data j7200_j721s2_soc_data = { |
1796 | .tcm_is_double = true, |
1797 | .tcm_ecc_autoinit = true, |
1798 | .single_cpu_mode = false, |
1799 | .is_single_core = false, |
1800 | }; |
1801 | |
1802 | static const struct k3_r5_soc_data am64_soc_data = { |
1803 | .tcm_is_double = true, |
1804 | .tcm_ecc_autoinit = true, |
1805 | .single_cpu_mode = true, |
1806 | .is_single_core = false, |
1807 | }; |
1808 | |
1809 | static const struct k3_r5_soc_data am62_soc_data = { |
1810 | .tcm_is_double = false, |
1811 | .tcm_ecc_autoinit = true, |
1812 | .single_cpu_mode = false, |
1813 | .is_single_core = true, |
1814 | }; |
1815 | |
1816 | static const struct of_device_id k3_r5_of_match[] = { |
1817 | { .compatible = "ti,am654-r5fss" , .data = &am65_j721e_soc_data, }, |
1818 | { .compatible = "ti,j721e-r5fss" , .data = &am65_j721e_soc_data, }, |
1819 | { .compatible = "ti,j7200-r5fss" , .data = &j7200_j721s2_soc_data, }, |
1820 | { .compatible = "ti,am64-r5fss" , .data = &am64_soc_data, }, |
1821 | { .compatible = "ti,am62-r5fss" , .data = &am62_soc_data, }, |
1822 | { .compatible = "ti,j721s2-r5fss" , .data = &j7200_j721s2_soc_data, }, |
1823 | { /* sentinel */ }, |
1824 | }; |
1825 | MODULE_DEVICE_TABLE(of, k3_r5_of_match); |
1826 | |
1827 | static struct platform_driver k3_r5_rproc_driver = { |
1828 | .probe = k3_r5_probe, |
1829 | .driver = { |
		.name = "k3_r5_rproc",
1831 | .of_match_table = k3_r5_of_match, |
1832 | }, |
1833 | }; |
1834 | |
1835 | module_platform_driver(k3_r5_rproc_driver); |
1836 | |
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("TI K3 R5F remote processor driver");
MODULE_AUTHOR("Suman Anna <s-anna@ti.com>");
1840 | |