// SPDX-License-Identifier: GPL-2.0-only
/*
 * SMP initialisation and IPI support
 * Based on arch/arm64/kernel/smp.c
 *
 * Copyright (C) 2012 ARM Ltd.
 * Copyright (C) 2015 Regents of the University of California
 * Copyright (C) 2017 SiFive
 */

#include <linux/acpi.h>
#include <linux/arch_topology.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/kernel_stat.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/percpu.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/irq.h>
#include <linux/of.h>
#include <linux/sched/task_stack.h>
#include <linux/sched/mm.h>

#include <asm/cacheflush.h>
#include <asm/cpu_ops.h>
#include <asm/irq.h>
#include <asm/mmu_context.h>
#include <asm/numa.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>
#include <asm/smp.h>
#include <uapi/asm/hwcap.h>
#include <asm/vector.h>

#include "head.h"

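/*
 * Completed by an incoming secondary from smp_callin() so that __cpu_up()
 * on the boot CPU knows the hart made it into the kernel.
 */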
static DECLARE_COMPLETION(cpu_running);

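/*
 * Runs once on the boot CPU before any secondaries are started: record the
 * boot CPU's topology and NUMA info, then mark every other possible CPU as
 * present so the hotplug core will try to bring it online (unless "nosmp"
 * forced max_cpus to 0).
 */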
void __init smp_prepare_cpus(unsigned int max_cpus)
{
	int cpuid;
	unsigned int curr_cpuid;

	init_cpu_topology();

	curr_cpuid = smp_processor_id();
	store_cpu_topology(curr_cpuid);
	numa_store_cpu_info(curr_cpuid);
	numa_add_cpu(curr_cpuid);

	/* This covers the non-SMP use case mandated by the "nosmp" option */
	if (max_cpus == 0)
		return;

	for_each_possible_cpu(cpuid) {
		if (cpuid == curr_cpuid)
			continue;
		set_cpu_present(cpuid, true);
		numa_store_cpu_info(cpuid);
	}
}

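/*
 * With ACPI, harts are enumerated from the MADT: each enabled RINTC entry
 * describes one hart, whose hart ID is recorded in the cpuid<->hartid map.
 * Logical CPU 0 is reserved for the boot hart.
 */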
#ifdef CONFIG_ACPI
static unsigned int cpu_count = 1;

static int __init acpi_parse_rintc(union acpi_subtable_headers *header, const unsigned long end)
{
	unsigned long hart;
	static bool found_boot_cpu;
	struct acpi_madt_rintc *processor = (struct acpi_madt_rintc *)header;

	/*
	 * Each RINTC structure in the MADT carries a flags field. If the
	 * ACPI_MADT_ENABLED bit is not set, the OS must not try to enable
	 * the cpu to which this RINTC belongs.
	 */
	if (!(processor->flags & ACPI_MADT_ENABLED))
		return 0;

	if (BAD_MADT_ENTRY(processor, end))
		return -EINVAL;

	acpi_table_print_madt_entry(&header->common);

	hart = processor->hart_id;
	if (hart == INVALID_HARTID) {
		pr_warn("Invalid hartid\n");
		return 0;
	}

	if (hart == cpuid_to_hartid_map(0)) {
		BUG_ON(found_boot_cpu);
		found_boot_cpu = true;
		return 0;
	}

	if (cpu_count >= NR_CPUS) {
		pr_warn("NR_CPUS is too small for the number of ACPI tables.\n");
		return 0;
	}

	cpuid_to_hartid_map(cpu_count) = hart;
	cpu_count++;

	return 0;
}

static void __init acpi_parse_and_init_cpus(void)
{
	acpi_table_parse_madt(ACPI_MADT_TYPE_RINTC, acpi_parse_rintc, 0);
}
#else
#define acpi_parse_and_init_cpus(...)	do { } while (0)
#endif

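/*
 * Without ACPI, harts come from the devicetree instead: resolve each cpu
 * node's hart ID, pin logical CPU 0 to the boot hart, and hand out the
 * remaining logical IDs in discovery order. A sketch of the kind of node
 * this walker consumes (property values are illustrative only):
 *
 *	cpu@1 {
 *		device_type = "cpu";
 *		compatible = "riscv";
 *		reg = <1>;	// hart ID
 *	};
 */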
static void __init of_parse_and_init_cpus(void)
{
	struct device_node *dn;
	unsigned long hart;
	bool found_boot_cpu = false;
	int cpuid = 1;
	int rc;

	for_each_of_cpu_node(dn) {
		rc = riscv_early_of_processor_hartid(dn, &hart);
		if (rc < 0)
			continue;

		if (hart == cpuid_to_hartid_map(0)) {
			BUG_ON(found_boot_cpu);
			found_boot_cpu = true;
			early_map_cpu_to_node(0, of_node_to_nid(dn));
			continue;
		}
		if (cpuid >= NR_CPUS) {
			pr_warn("Invalid cpuid [%d] for hartid [%lu]\n",
				cpuid, hart);
			continue;
		}

		cpuid_to_hartid_map(cpuid) = hart;
		early_map_cpu_to_node(cpuid, of_node_to_nid(dn));
		cpuid++;
	}

	BUG_ON(!found_boot_cpu);

	if (cpuid > nr_cpu_ids)
		pr_warn("Total number of cpus [%d] is greater than nr_cpus option value [%d]\n",
			cpuid, nr_cpu_ids);
}

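/*
 * Early SMP setup, called from setup_arch(): pick the CPU operations,
 * enumerate harts from ACPI or the devicetree, and mark every hart that
 * received a valid mapping as a possible CPU.
 */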
void __init setup_smp(void)
{
	int cpuid;

	cpu_set_ops();

	if (acpi_disabled)
		of_parse_and_init_cpus();
	else
		acpi_parse_and_init_cpus();

	for (cpuid = 1; cpuid < nr_cpu_ids; cpuid++)
		if (cpuid_to_hartid_map(cpuid) != INVALID_HARTID)
			set_cpu_possible(cpuid, true);
}

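/*
 * Kick a secondary hart through the registered cpu_ops (on RISC-V this is
 * typically the SBI HSM extension, or the legacy spinwait method).
 */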
static int start_secondary_cpu(int cpu, struct task_struct *tidle)
{
	if (cpu_ops->cpu_start)
		return cpu_ops->cpu_start(cpu, tidle);

	return -EOPNOTSUPP;
}

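/*
 * Arch hook used by the CPU hotplug core to bring one secondary online:
 * start the hart on the idle task's stack, then wait (up to one second)
 * for it to signal cpu_running from smp_callin().
 */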
int __cpu_up(unsigned int cpu, struct task_struct *tidle)
{
	int ret = 0;

	tidle->thread_info.cpu = cpu;

	ret = start_secondary_cpu(cpu, tidle);
	if (!ret) {
		wait_for_completion_timeout(&cpu_running,
					    msecs_to_jiffies(1000));

		if (!cpu_online(cpu)) {
			pr_crit("CPU%u: failed to come online\n", cpu);
			ret = -EIO;
		}
	} else {
		pr_crit("CPU%u: failed to start\n", cpu);
	}

	return ret;
}

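/* Hook required by the generic SMP code; nothing left to do on RISC-V. */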
void __init smp_cpus_done(unsigned int max_cpus)
{
}

/*
 * C entry point for a secondary processor, reached from the low-level
 * startup code once the hart is running on the kernel page tables.
 */
asmlinkage __visible void smp_callin(void)
{
	struct mm_struct *mm = &init_mm;
	unsigned int curr_cpuid = smp_processor_id();

	if (has_vector()) {
		/*
		 * Return as early as possible so a hart with a mismatching
		 * vlen won't boot.
		 */
		if (riscv_v_setup_vsize())
			return;
	}

	/* All kernel threads share the same mm context. */
	mmgrab(mm);
	current->active_mm = mm;

	store_cpu_topology(curr_cpuid);
	notify_cpu_starting(curr_cpuid);

	riscv_ipi_enable();

	numa_add_cpu(curr_cpuid);

	pr_debug("CPU%u: Booted secondary hartid %lu\n", curr_cpuid,
		 cpuid_to_hartid_map(curr_cpuid));

	set_cpu_online(curr_cpuid, true);

	/*
	 * Remote cache and TLB flushes are ignored while the CPU is offline,
	 * so flush them both right now just in case.
	 */
	local_flush_icache_all();
	local_flush_tlb_all();
	complete(&cpu_running);

	/*
	 * Disable preemption before enabling interrupts, so we don't try to
	 * schedule a CPU that hasn't actually started yet.
	 */
	local_irq_enable();
	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
}