// SPDX-License-Identifier: GPL-2.0
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/memblock.h>
#include <linux/percpu.h>
#include <linux/kexec.h>
#include <linux/crash_dump.h>
#include <linux/smp.h>
#include <linux/topology.h>
#include <linux/pfn.h>
#include <linux/stackprotector.h>
#include <asm/sections.h>
#include <asm/processor.h>
#include <asm/desc.h>
#include <asm/setup.h>
#include <asm/mpspec.h>
#include <asm/apicdef.h>
#include <asm/highmem.h>
#include <asm/proto.h>
#include <asm/cpumask.h>
#include <asm/cpu.h>

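/*
 * On x86_64 the percpu section is linked at virtual address 0, so the
 * boot CPU's initial percpu offset is simply the section's load address
 * (__per_cpu_load).  On x86_32 percpu variables live at their link-time
 * addresses and the boot offset starts out as 0.
 */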
#ifdef CONFIG_X86_64
#define BOOT_PERCPU_OFFSET ((unsigned long)__per_cpu_load)
#else
#define BOOT_PERCPU_OFFSET 0
#endif

DEFINE_PER_CPU_READ_MOSTLY(unsigned long, this_cpu_off) = BOOT_PERCPU_OFFSET;
EXPORT_PER_CPU_SYMBOL(this_cpu_off);

unsigned long __per_cpu_offset[NR_CPUS] __ro_after_init = {
	[0 ... NR_CPUS-1] = BOOT_PERCPU_OFFSET,
};
EXPORT_SYMBOL(__per_cpu_offset);
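
/*
 * These offsets are consumed by the generic percpu accessors:
 * per_cpu_offset(cpu) expands to __per_cpu_offset[cpu], and
 * per_cpu(var, cpu) adds that offset to var's link-time address,
 * roughly:
 *
 *	&per_cpu(var, cpu) == (void *)&var + __per_cpu_offset[cpu]
 *
 * this_cpu_off caches the same value in each CPU's own area so the
 * this_cpu_*() operations can reach it segment-relative without first
 * looking up the CPU number.
 */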

/*
 * On x86_64 symbols referenced from code should be reachable using
 * 32bit relocations.  Reserve space for static percpu variables in
 * modules so that they are always served from the first chunk which
 * is located at the percpu segment base.  On x86_32, anything can
 * address anywhere.  No need to reserve space in the first chunk.
 */
#ifdef CONFIG_X86_64
#define PERCPU_FIRST_CHUNK_RESERVE	PERCPU_MODULE_RESERVE
#else
#define PERCPU_FIRST_CHUNK_RESERVE	0
#endif
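
/*
 * With CONFIG_MODULES enabled, PERCPU_MODULE_RESERVE is a small fixed
 * budget (8KB at the time of writing) carved out of the first chunk
 * for static percpu variables declared in modules.
 */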

#ifdef CONFIG_X86_32
/**
 * pcpu_need_numa - determine whether percpu allocation must consider NUMA
 *
 * If NUMA is not configured or there is only one NUMA node available,
 * there is no reason to consider NUMA.  This function determines
 * whether percpu allocation should consider NUMA or not.
 *
 * RETURNS:
 * true if NUMA should be considered; otherwise, false.
 */
static bool __init pcpu_need_numa(void)
{
#ifdef CONFIG_NUMA
	pg_data_t *last = NULL;
	unsigned int cpu;

	for_each_possible_cpu(cpu) {
		int node = early_cpu_to_node(cpu);

		if (node_online(node) && NODE_DATA(node) &&
		    last && last != NODE_DATA(node))
			return true;

		last = NODE_DATA(node);
	}
#endif
	return false;
}
#endif

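/*
 * Distance callback for the embed allocator.  It only needs to tell
 * "same node" from "different node", so the standard ACPI SLIT values
 * (LOCAL_DISTANCE == 10, REMOTE_DISTANCE == 20) are enough for percpu
 * to group CPUs into allocation units.
 */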
static int __init pcpu_cpu_distance(unsigned int from, unsigned int to)
{
#ifdef CONFIG_NUMA
	if (early_cpu_to_node(from) == early_cpu_to_node(to))
		return LOCAL_DISTANCE;
	else
		return REMOTE_DISTANCE;
#else
	return LOCAL_DISTANCE;
#endif
}

static int __init pcpu_cpu_to_node(int cpu)
{
	return early_cpu_to_node(cpu);
}

void __init pcpu_populate_pte(unsigned long addr)
{
	populate_extra_pte(addr);
}

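/*
 * On 32-bit, percpu data is reached through a dedicated GDT segment
 * whose base is the CPU's percpu offset.  The 0x8092 flags describe a
 * present, ring-0, writable data segment with 4KiB granularity, so the
 * 0xFFFFF limit spans the whole 4GiB address space.
 */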
static inline void setup_percpu_segment(int cpu)
{
#ifdef CONFIG_X86_32
	struct desc_struct d = GDT_ENTRY_INIT(0x8092, per_cpu_offset(cpu),
					      0xFFFFF);

	write_gdt_entry(get_cpu_gdt_rw(cpu), GDT_ENTRY_PERCPU, &d, DESCTYPE_S);
#endif
}

void __init setup_per_cpu_areas(void)
{
	unsigned int cpu;
	unsigned long delta;
	int rc;

	pr_info("NR_CPUS:%d nr_cpumask_bits:%d nr_cpu_ids:%u nr_node_ids:%u\n",
		NR_CPUS, nr_cpumask_bits, nr_cpu_ids, nr_node_ids);

	/*
	 * Allocate percpu area.  Embedding allocator is our favorite;
	 * however, on NUMA configurations, it can result in very
	 * sparse unit mapping and vmalloc area isn't spacious enough
	 * on 32bit.  Use page in that case.
	 */
#ifdef CONFIG_X86_32
	if (pcpu_chosen_fc == PCPU_FC_AUTO && pcpu_need_numa())
		pcpu_chosen_fc = PCPU_FC_PAGE;
#endif
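	/*
	 * pcpu_chosen_fc defaults to PCPU_FC_AUTO and can be overridden
	 * on the kernel command line with percpu_alloc= ("embed" or
	 * "page").
	 */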
	rc = -EINVAL;
	if (pcpu_chosen_fc != PCPU_FC_PAGE) {
		const size_t dyn_size = PERCPU_MODULE_RESERVE +
			PERCPU_DYNAMIC_RESERVE - PERCPU_FIRST_CHUNK_RESERVE;
		size_t atom_size;

		/*
		 * On 64bit, use PMD_SIZE for atom_size so that embedded
		 * percpu areas are aligned to PMD.  This, in the future,
		 * can also allow using PMD mappings in vmalloc area.  Use
		 * PAGE_SIZE on 32bit as vmalloc space is highly contended
		 * and large vmalloc area allocs can easily fail.
		 */
#ifdef CONFIG_X86_64
		atom_size = PMD_SIZE;
#else
		atom_size = PAGE_SIZE;
#endif
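		/* PMD_SIZE is 2MiB on x86_64 with the usual 4KiB base pages. */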
		rc = pcpu_embed_first_chunk(PERCPU_FIRST_CHUNK_RESERVE,
					    dyn_size, atom_size,
					    pcpu_cpu_distance,
					    pcpu_cpu_to_node);
		if (rc < 0)
			pr_warn("%s allocator failed (%d), falling back to page size\n",
				pcpu_fc_names[pcpu_chosen_fc], rc);
	}
	if (rc < 0)
		rc = pcpu_page_first_chunk(PERCPU_FIRST_CHUNK_RESERVE,
					   pcpu_cpu_to_node);
	if (rc < 0)
		panic("cannot initialize percpu area (err=%d)", rc);

	/* alrighty, percpu areas up and running */
	delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
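	/*
	 * delta rebases the link-time .data..percpu addresses onto the
	 * newly allocated first chunk; adding each CPU's unit offset then
	 * yields the final per-CPU offset consumed by per_cpu() and
	 * friends.
	 */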
	for_each_possible_cpu(cpu) {
		per_cpu_offset(cpu) = delta + pcpu_unit_offsets[cpu];
		per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);
		per_cpu(pcpu_hot.cpu_number, cpu) = cpu;
		setup_percpu_segment(cpu);
		/*
		 * Copy data used in early init routines from the
		 * initial arrays to the per cpu data areas.  These
		 * arrays then become expendable and the *_early_ptr's
		 * are zeroed indicating that the static arrays are
		 * gone.
		 */
#ifdef CONFIG_X86_LOCAL_APIC
		per_cpu(x86_cpu_to_apicid, cpu) =
			early_per_cpu_map(x86_cpu_to_apicid, cpu);
		per_cpu(x86_cpu_to_acpiid, cpu) =
			early_per_cpu_map(x86_cpu_to_acpiid, cpu);
#endif
#ifdef CONFIG_NUMA
		per_cpu(x86_cpu_to_node_map, cpu) =
			early_per_cpu_map(x86_cpu_to_node_map, cpu);
		/*
		 * Ensure that the boot cpu numa_node is correct when the boot
		 * cpu is on a node that doesn't have memory installed.
		 * Also cpu_up() will call cpu_to_node() for APs when
		 * MEMORY_HOTPLUG is defined, before per_cpu(numa_node) is set
		 * up later with c_init aka intel_init/amd_init.
		 * So set them all (boot cpu and all APs).
		 */
		set_cpu_numa_node(cpu, early_cpu_to_node(cpu));
#endif
		/*
		 * Up to this point, the boot CPU has been using .init.data
		 * area.  Reload any changed state for the boot CPU.
		 */
		if (!cpu)
			switch_gdt_and_percpu_base(cpu);
	}

	/* indicate the early static arrays will soon be gone */
#ifdef CONFIG_X86_LOCAL_APIC
	early_per_cpu_ptr(x86_cpu_to_apicid) = NULL;
	early_per_cpu_ptr(x86_cpu_to_acpiid) = NULL;
#endif
#ifdef CONFIG_NUMA
	early_per_cpu_ptr(x86_cpu_to_node_map) = NULL;
#endif

	/* Setup node to cpumask map */
	setup_node_to_cpumask_map();

	/* Setup cpu initialized, callin, callout masks */
	setup_cpu_local_masks();

	/*
	 * Sync back kernel address range again.  We already did this in
	 * setup_arch(), but percpu data also needs to be available in
	 * the smpboot asm and arch_sync_kernel_mappings() doesn't sync to
	 * swapper_pg_dir on 32-bit.  The per-cpu mappings need to be
	 * available there too.
	 *
	 * FIXME: Can the later sync in setup_cpu_entry_areas() replace
	 * this call?
	 */
	sync_initial_page_table();
}