1 | // SPDX-License-Identifier: GPL-2.0-only |
2 | /* |
3 | * Common prep/pmac/chrp boot and setup code. |
4 | */ |
5 | |
6 | #include <linux/module.h> |
7 | #include <linux/string.h> |
8 | #include <linux/sched.h> |
9 | #include <linux/init.h> |
10 | #include <linux/kernel.h> |
11 | #include <linux/reboot.h> |
12 | #include <linux/delay.h> |
13 | #include <linux/initrd.h> |
14 | #include <linux/tty.h> |
15 | #include <linux/seq_file.h> |
16 | #include <linux/root_dev.h> |
17 | #include <linux/cpu.h> |
18 | #include <linux/console.h> |
19 | #include <linux/memblock.h> |
20 | #include <linux/export.h> |
21 | #include <linux/nvram.h> |
22 | #include <linux/pgtable.h> |
23 | #include <linux/of_fdt.h> |
24 | #include <linux/irq.h> |
25 | |
26 | #include <asm/io.h> |
27 | #include <asm/processor.h> |
28 | #include <asm/setup.h> |
29 | #include <asm/smp.h> |
30 | #include <asm/elf.h> |
31 | #include <asm/cputable.h> |
32 | #include <asm/bootx.h> |
33 | #include <asm/btext.h> |
34 | #include <asm/machdep.h> |
35 | #include <linux/uaccess.h> |
36 | #include <asm/pmac_feature.h> |
37 | #include <asm/sections.h> |
38 | #include <asm/nvram.h> |
39 | #include <asm/xmon.h> |
40 | #include <asm/time.h> |
41 | #include <asm/serial.h> |
42 | #include <asm/udbg.h> |
43 | #include <asm/code-patching.h> |
44 | #include <asm/cpu_has_feature.h> |
45 | #include <asm/asm-prototypes.h> |
46 | #include <asm/kdump.h> |
47 | #include <asm/feature-fixups.h> |
48 | #include <asm/early_ioremap.h> |
49 | |
50 | #include "setup.h" |
51 | |
/* Early-boot debug print macro; expands to nothing (compiled out). */
#define DBG(fmt...)

/* BootX entry point; defined elsewhere — NOTE(review): presumably in the pmac platform code. */
extern void bootx_init(unsigned long r4, unsigned long phys);

/* Physical (hardware) id of the CPU we booted on. */
int boot_cpuid_phys;
EXPORT_SYMBOL_GPL(boot_cpuid_phys);

/* Logical-to-hardware CPU id mapping. */
int smp_hw_index[NR_CPUS];
EXPORT_SYMBOL(smp_hw_index);

/* ISA DMA mode command values, filled in by platform setup code. */
unsigned int DMA_MODE_READ;
unsigned int DMA_MODE_WRITE;

EXPORT_SYMBOL(DMA_MODE_READ);
EXPORT_SYMBOL(DMA_MODE_WRITE);
67 | |
68 | /* |
69 | * This is run before start_kernel(), the kernel has been relocated |
70 | * and we are running with enough of the MMU enabled to have our |
71 | * proper kernel virtual addresses |
72 | * |
73 | * We do the initial parsing of the flat device-tree and prepares |
74 | * for the MMU to be fully initialized. |
75 | */ |
76 | notrace void __init machine_init(u64 dt_ptr) |
77 | { |
78 | u32 *addr = (u32 *)patch_site_addr(&patch__memset_nocache); |
79 | ppc_inst_t insn; |
80 | |
81 | /* Configure static keys first, now that we're relocated. */ |
82 | setup_feature_keys(); |
83 | |
84 | early_ioremap_init(); |
85 | |
86 | /* Enable early debugging if any specified (see udbg.h) */ |
87 | udbg_early_init(); |
88 | |
89 | patch_instruction_site(&patch__memcpy_nocache, ppc_inst(PPC_RAW_NOP())); |
90 | |
91 | create_cond_branch(&insn, addr, branch_target(addr), 0x820000); |
92 | patch_instruction(addr, insn); /* replace b by bne cr0 */ |
93 | |
94 | /* Do some early initialization based on the flat device tree */ |
95 | early_init_devtree(__va(dt_ptr)); |
96 | |
97 | early_init_mmu(); |
98 | |
99 | setup_kdump_trampoline(); |
100 | } |
101 | |
102 | /* Checks "l2cr=xxxx" command-line option */ |
103 | static int __init ppc_setup_l2cr(char *str) |
104 | { |
105 | if (cpu_has_feature(CPU_FTR_L2CR)) { |
106 | unsigned long val = simple_strtoul(str, NULL, 0); |
107 | printk(KERN_INFO "l2cr set to %lx\n" , val); |
108 | _set_L2CR(0); /* force invalidate by disable cache */ |
109 | _set_L2CR(val); /* and enable it */ |
110 | } |
111 | return 1; |
112 | } |
113 | __setup("l2cr=" , ppc_setup_l2cr); |
114 | |
115 | /* Checks "l3cr=xxxx" command-line option */ |
116 | static int __init ppc_setup_l3cr(char *str) |
117 | { |
118 | if (cpu_has_feature(CPU_FTR_L3CR)) { |
119 | unsigned long val = simple_strtoul(str, NULL, 0); |
120 | printk(KERN_INFO "l3cr set to %lx\n" , val); |
121 | _set_L3CR(val); /* and enable it */ |
122 | } |
123 | return 1; |
124 | } |
125 | __setup("l3cr=" , ppc_setup_l3cr); |
126 | |
127 | static int __init ppc_init(void) |
128 | { |
129 | /* clear the progress line */ |
130 | if (ppc_md.progress) |
131 | ppc_md.progress(" " , 0xffff); |
132 | |
133 | /* call platform init */ |
134 | if (ppc_md.init != NULL) { |
135 | ppc_md.init(); |
136 | } |
137 | return 0; |
138 | } |
139 | arch_initcall(ppc_init); |
140 | |
141 | static void *__init alloc_stack(void) |
142 | { |
143 | void *ptr = memblock_alloc(THREAD_SIZE, THREAD_ALIGN); |
144 | |
145 | if (!ptr) |
146 | panic(fmt: "cannot allocate %d bytes for stack at %pS\n" , |
147 | THREAD_SIZE, (void *)_RET_IP_); |
148 | |
149 | return ptr; |
150 | } |
151 | |
152 | void __init irqstack_early_init(void) |
153 | { |
154 | unsigned int i; |
155 | |
156 | if (IS_ENABLED(CONFIG_VMAP_STACK)) |
157 | return; |
158 | |
159 | /* interrupt stacks must be in lowmem, we get that for free on ppc32 |
160 | * as the memblock is limited to lowmem by default */ |
161 | for_each_possible_cpu(i) { |
162 | softirq_ctx[i] = alloc_stack(); |
163 | hardirq_ctx[i] = alloc_stack(); |
164 | } |
165 | } |
166 | |
#ifdef CONFIG_VMAP_STACK
/*
 * Per-CPU emergency stacks. CPU 0 starts out pointing at init_stack
 * so there is a usable entry before emergency_stack_init() runs;
 * afterwards every possible CPU gets its own allocation.
 */
void *emergency_ctx[NR_CPUS] __ro_after_init = {[0] = &init_stack};

/* Allocate an emergency stack for each possible CPU. */
void __init emergency_stack_init(void)
{
	unsigned int i;

	for_each_possible_cpu(i)
		emergency_ctx[i] = alloc_stack();
}
#endif
178 | |
#ifdef CONFIG_BOOKE_OR_40x
/*
 * Allocate exception-level stacks for every possible CPU: a critical
 * interrupt stack always, plus debug and machine-check stacks on BOOKE.
 * The arrays are indexed by hardware CPU id (0 on non-SMP builds).
 */
void __init exc_lvl_early_init(void)
{
	unsigned int i, hw_cpu;

	/* interrupt stacks must be in lowmem, we get that for free on ppc32
	 * as the memblock is limited to lowmem by MEMBLOCK_REAL_LIMIT */
	for_each_possible_cpu(i) {
#ifdef CONFIG_SMP
		hw_cpu = get_hard_smp_processor_id(i);
#else
		hw_cpu = 0;
#endif

		critirq_ctx[hw_cpu] = alloc_stack();
#ifdef CONFIG_BOOKE
		dbgirq_ctx[hw_cpu] = alloc_stack();
		mcheckirq_ctx[hw_cpu] = alloc_stack();
#endif
	}
}
#endif
201 | |
/*
 * Install the platform idle routine in ppc_md.power_save when the CPU
 * advertises a low-power state (doze or nap). If neither feature is
 * present, ppc_md.power_save is left untouched.
 */
void __init setup_power_save(void)
{
#ifdef CONFIG_PPC_BOOK3S_32
	/* 6xx-class Book3S 32-bit CPUs */
	if (cpu_has_feature(CPU_FTR_CAN_DOZE) ||
	    cpu_has_feature(CPU_FTR_CAN_NAP))
		ppc_md.power_save = ppc6xx_idle;
#endif

#ifdef CONFIG_PPC_E500
	/* e500-class CPUs */
	if (cpu_has_feature(CPU_FTR_CAN_DOZE) ||
	    cpu_has_feature(CPU_FTR_CAN_NAP))
		ppc_md.power_save = e500_idle;
#endif
}
216 | |
217 | __init void initialize_cache_info(void) |
218 | { |
219 | /* |
220 | * Set cache line size based on type of cpu as a default. |
221 | * Systems with OF can look in the properties on the cpu node(s) |
222 | * for a possibly more accurate value. |
223 | */ |
224 | dcache_bsize = cur_cpu_spec->dcache_bsize; |
225 | icache_bsize = cur_cpu_spec->icache_bsize; |
226 | } |
227 | |