// SPDX-License-Identifier: GPL-2.0-only
/*
 * Based on arch/arm/mm/init.c
 *
 * Copyright (C) 1995-2005 Russell King
 * Copyright (C) 2012 ARM Ltd.
 */

#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/errno.h>
#include <linux/swap.h>
#include <linux/init.h>
#include <linux/cache.h>
#include <linux/mman.h>
#include <linux/nodemask.h>
#include <linux/initrd.h>
#include <linux/gfp.h>
#include <linux/math.h>
#include <linux/memblock.h>
#include <linux/sort.h>
#include <linux/of.h>
#include <linux/of_fdt.h>
#include <linux/dma-direct.h>
#include <linux/dma-map-ops.h>
#include <linux/efi.h>
#include <linux/swiotlb.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/kexec.h>
#include <linux/crash_dump.h>
#include <linux/hugetlb.h>
#include <linux/acpi_iort.h>
#include <linux/kmemleak.h>

#include <asm/boot.h>
#include <asm/fixmap.h>
#include <asm/kasan.h>
#include <asm/kernel-pgtable.h>
#include <asm/kvm_host.h>
#include <asm/memory.h>
#include <asm/numa.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <linux/sizes.h>
#include <asm/tlb.h>
#include <asm/alternative.h>
#include <asm/xen/swiotlb-xen.h>

/*
 * We need to be able to catch inadvertent references to memstart_addr
 * that occur (potentially in generic code) before arm64_memblock_init()
 * executes, which assigns it its actual value. So use a default value
 * that cannot be mistaken for a real physical address.
 */
s64 memstart_addr __ro_after_init = -1;
EXPORT_SYMBOL(memstart_addr);
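
/*
 * Editorial note for orientation: memstart_addr holds the physical
 * address that the kernel maps at PAGE_OFFSET, so, roughly, a linear-map
 * virtual address translates as pa = va - PAGE_OFFSET + memstart_addr
 * (cf. __lm_to_phys() in <asm/memory.h>).
 */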

/*
 * If the corresponding config options are enabled, we create both ZONE_DMA
 * and ZONE_DMA32. By default ZONE_DMA covers the 32-bit addressable memory,
 * unless restricted on specific platforms (e.g. 30-bit on Raspberry Pi 4).
 * In that case, ZONE_DMA32 covers the rest of the 32-bit addressable
 * memory; otherwise it is empty.
 */
phys_addr_t __ro_after_init arm64_dma_phys_limit;

/*
 * To make optimal use of block mappings when laying out the linear
 * mapping, round down the base of physical memory to a size that can
 * be mapped efficiently, i.e., either PUD_SIZE (4k granule) or PMD_SIZE
 * (64k granule), or a multiple that can be mapped using contiguous bits
 * in the page tables: 32 * PMD_SIZE (16k granule)
 */
#if defined(CONFIG_ARM64_4K_PAGES)
#define ARM64_MEMSTART_SHIFT		PUD_SHIFT
#elif defined(CONFIG_ARM64_16K_PAGES)
#define ARM64_MEMSTART_SHIFT		CONT_PMD_SHIFT
#else
#define ARM64_MEMSTART_SHIFT		PMD_SHIFT
#endif

/*
 * sparsemem vmemmap imposes an additional requirement on the alignment of
 * memstart_addr, due to the fact that the base of the vmemmap region
 * has a direct correspondence with the base of physical memory, and thus
 * needs to appear sufficiently aligned in the virtual address space.
 */
#if ARM64_MEMSTART_SHIFT < SECTION_SIZE_BITS
#define ARM64_MEMSTART_ALIGN	(1UL << SECTION_SIZE_BITS)
#else
#define ARM64_MEMSTART_ALIGN	(1UL << ARM64_MEMSTART_SHIFT)
#endif
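
/*
 * For illustration (exact values depend on the configured granule and on
 * SECTION_SIZE_BITS): with 4k pages PUD_SHIFT is 30, so physical memory
 * is aligned down to a 1 GiB boundary; with 16k pages CONT_PMD_SHIFT is
 * also 30 (32 contiguous 32 MiB PMDs), again 1 GiB; with 64k pages
 * PMD_SHIFT is 29, i.e. 512 MiB.
 */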

static void __init arch_reserve_crashkernel(void)
{
	unsigned long long low_size = 0;
	unsigned long long crash_base, crash_size;
	char *cmdline = boot_command_line;
	bool high = false;
	int ret;

	if (!IS_ENABLED(CONFIG_KEXEC_CORE))
		return;

	ret = parse_crashkernel(cmdline, memblock_phys_mem_size(),
				&crash_size, &crash_base,
				&low_size, &high);
	if (ret)
		return;

	reserve_crashkernel_generic(cmdline, crash_size, crash_base,
				    low_size, high);
}
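
/*
 * Command-line examples accepted by parse_crashkernel() above (see
 * Documentation/admin-guide/kernel-parameters.txt for the full syntax):
 *
 *   crashkernel=256M        reserve 256 MiB, typically below the DMA limit
 *   crashkernel=512M,high   reserve 512 MiB anywhere (usually above 4 GiB),
 *                           plus a separate low allocation for DMA
 */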

/*
 * Return the maximum physical address for a zone that is addressable with
 * the given number of bits. If DRAM starts above the 32-bit boundary,
 * expand the zone to cover all available memory; otherwise cap it at the
 * 32-bit boundary.
 */
static phys_addr_t __init max_zone_phys(unsigned int zone_bits)
{
	phys_addr_t zone_mask = DMA_BIT_MASK(zone_bits);
	phys_addr_t phys_start = memblock_start_of_DRAM();

	if (phys_start > U32_MAX)
		zone_mask = PHYS_ADDR_MAX;
	else if (phys_start > zone_mask)
		zone_mask = U32_MAX;

	return min(zone_mask, memblock_end_of_DRAM() - 1) + 1;
}
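
/*
 * Worked example (assuming a Raspberry Pi 4-like layout): with DRAM
 * starting at 0x0 and zone_bits == 30, zone_mask is 0x3fffffff, so the
 * function returns min(0x3fffffff, end - 1) + 1, i.e. ZONE_DMA covers at
 * most the first 1 GiB. If DRAM instead started above 4 GiB, the first
 * branch would expand the zone to PHYS_ADDR_MAX.
 */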

static void __init zone_sizes_init(void)
{
	unsigned long max_zone_pfns[MAX_NR_ZONES] = {0};
	unsigned int __maybe_unused acpi_zone_dma_bits;
	unsigned int __maybe_unused dt_zone_dma_bits;
	phys_addr_t __maybe_unused dma32_phys_limit = max_zone_phys(32);

#ifdef CONFIG_ZONE_DMA
	acpi_zone_dma_bits = fls64(acpi_iort_dma_get_max_cpu_address());
	dt_zone_dma_bits = fls64(of_dma_get_max_cpu_address(NULL));
	zone_dma_bits = min3(32U, dt_zone_dma_bits, acpi_zone_dma_bits);
	arm64_dma_phys_limit = max_zone_phys(zone_dma_bits);
	max_zone_pfns[ZONE_DMA] = PFN_DOWN(arm64_dma_phys_limit);
#endif
#ifdef CONFIG_ZONE_DMA32
	max_zone_pfns[ZONE_DMA32] = PFN_DOWN(dma32_phys_limit);
	if (!arm64_dma_phys_limit)
		arm64_dma_phys_limit = dma32_phys_limit;
#endif
	if (!arm64_dma_phys_limit)
		arm64_dma_phys_limit = PHYS_MASK + 1;
	max_zone_pfns[ZONE_NORMAL] = max_pfn;

	free_area_init(max_zone_pfns);
}
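
/*
 * Example of the "bogus PFN" check in pfn_is_map_memory() below: with
 * PAGE_SHIFT == 12, a pfn such as (1UL << 55) shifts its set bit past
 * bit 63 of the 64-bit phys_addr_t, so PFN_PHYS() wraps to 0 and
 * PHYS_PFN() of the result no longer equals the original pfn.
 */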

int pfn_is_map_memory(unsigned long pfn)
{
	phys_addr_t addr = PFN_PHYS(pfn);

	/* avoid false positives for bogus PFNs, see comment in pfn_valid() */
	if (PHYS_PFN(addr) != pfn)
		return 0;

	return memblock_is_map_memory(addr);
}
EXPORT_SYMBOL(pfn_is_map_memory);

static phys_addr_t memory_limit __ro_after_init = PHYS_ADDR_MAX;

/*
 * Limit the memory size that was specified via FDT.
 */
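/*
 * Example: booting with "mem=768M" on the kernel command line caps the
 * usable memory at 768 MiB. memparse() also understands suffixes such as
 * K, M and G, and the result is truncated to a page boundary via PAGE_MASK.
 */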
static int __init early_mem(char *p)
{
	if (!p)
		return 1;

	memory_limit = memparse(p, &p) & PAGE_MASK;
	pr_notice("Memory limited to %lldMB\n", memory_limit >> 20);

	return 0;
}
early_param("mem", early_mem);

void __init arm64_memblock_init(void)
{
	s64 linear_region_size = PAGE_END - _PAGE_OFFSET(vabits_actual);

	/*
	 * Corner case: 52-bit VA capable systems running KVM in nVHE mode may
	 * be limited in their ability to support a linear map that exceeds 51
	 * bits of VA space, depending on the placement of the ID map. Given
	 * that the placement of the ID map may be randomized, let's simply
	 * limit the kernel's linear map to 51 bits as well if we detect this
	 * configuration.
	 */
	if (IS_ENABLED(CONFIG_KVM) && vabits_actual == 52 &&
	    is_hyp_mode_available() && !is_kernel_in_hyp_mode()) {
		pr_info("Capping linear region to 51 bits for KVM in nVHE mode on LVA capable hardware.\n");
		linear_region_size = min_t(u64, linear_region_size, BIT(51));
	}

	/* Remove memory above our supported physical address size */
	memblock_remove(1ULL << PHYS_MASK_SHIFT, ULLONG_MAX);

	/*
	 * Select a suitable value for the base of physical memory.
	 */
	memstart_addr = round_down(memblock_start_of_DRAM(),
				   ARM64_MEMSTART_ALIGN);
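	/*
	 * For example, if DRAM started at 0x4a000000 and ARM64_MEMSTART_ALIGN
	 * were 1 GiB (4k granule), memstart_addr would be rounded down to
	 * 0x40000000; the linear map then simply has no memory mapped in
	 * [0x40000000, 0x4a000000).
	 */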

	if ((memblock_end_of_DRAM() - memstart_addr) > linear_region_size)
		pr_warn("Memory doesn't fit in the linear mapping, VA_BITS too small\n");

	/*
	 * Remove the memory that we will not be able to cover with the
	 * linear mapping. Take care not to clip the kernel which may be
	 * high in memory.
	 */
	memblock_remove(max_t(u64, memstart_addr + linear_region_size,
			      __pa_symbol(_end)), ULLONG_MAX);
	if (memstart_addr + linear_region_size < memblock_end_of_DRAM()) {
		/* ensure that memstart_addr remains sufficiently aligned */
		memstart_addr = round_up(memblock_end_of_DRAM() - linear_region_size,
					 ARM64_MEMSTART_ALIGN);
		memblock_remove(0, memstart_addr);
	}

	/*
	 * If we are running with a 52-bit kernel VA config on a system that
	 * does not support it, we have to place the available physical
	 * memory in the 48-bit addressable part of the linear region, i.e.,
	 * we have to move it upward. Since memstart_addr represents the
	 * physical address of PAGE_OFFSET, we have to *subtract* from it.
	 */
	if (IS_ENABLED(CONFIG_ARM64_VA_BITS_52) && (vabits_actual != 52))
		memstart_addr -= _PAGE_OFFSET(48) - _PAGE_OFFSET(52);
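
	/*
	 * Concretely, _PAGE_OFFSET(va) is -(1UL << va), so the adjustment
	 * above subtracts _PAGE_OFFSET(48) - _PAGE_OFFSET(52) ==
	 * 0x000f000000000000, shifting the linear mapping of physical
	 * memory up into the 48-bit addressable upper part of the region.
	 */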

	/*
	 * Apply the memory limit if it was set. Since the kernel may be loaded
	 * high up in memory, add back the kernel region that must be accessible
	 * via the linear mapping.
	 */
	if (memory_limit != PHYS_ADDR_MAX) {
		memblock_mem_limit_remove_map(memory_limit);
		memblock_add(__pa_symbol(_text), (u64)(_end - _text));
	}

	if (IS_ENABLED(CONFIG_BLK_DEV_INITRD) && phys_initrd_size) {
		/*
		 * Add back the memory we just removed if its removal would
		 * make the initrd inaccessible via the linear mapping.
		 * Otherwise, this is a no-op.
		 */
		u64 base = phys_initrd_start & PAGE_MASK;
		u64 size = PAGE_ALIGN(phys_initrd_start + phys_initrd_size) - base;

		/*
		 * We can only add back the initrd memory if we don't end up
		 * with more memory than we can address via the linear mapping.
		 * It is up to the bootloader to position the kernel and the
		 * initrd reasonably close to each other (i.e., within 32 GB of
		 * each other) so that all granule/#levels combinations can
		 * always access both.
		 */
		if (WARN(base < memblock_start_of_DRAM() ||
			 base + size > memblock_start_of_DRAM() +
				       linear_region_size,
			"initrd not fully accessible via the linear mapping -- please check your bootloader ...\n")) {
			phys_initrd_size = 0;
		} else {
			memblock_add(base, size);
			memblock_clear_nomap(base, size);
			memblock_reserve(base, size);
		}
	}

	if (IS_ENABLED(CONFIG_RANDOMIZE_BASE)) {
		extern u16 memstart_offset_seed;
		u64 mmfr0 = read_cpuid(ID_AA64MMFR0_EL1);
		int parange = cpuid_feature_extract_unsigned_field(
					mmfr0, ID_AA64MMFR0_EL1_PARANGE_SHIFT);
		s64 range = linear_region_size -
			    BIT(id_aa64mmfr0_parange_to_phys_shift(parange));

		/*
		 * If the size of the linear region exceeds, by a sufficient
		 * margin, the size of the region that the physical memory can
		 * span, randomize the linear region as well.
		 */
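		/*
		 * memstart_offset_seed is a 16-bit random value, so the
		 * expression (range * memstart_offset_seed) >> 16 below picks
		 * one of 'range' possible ARM64_MEMSTART_ALIGN-sized slots
		 * roughly uniformly.
		 */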
		if (memstart_offset_seed > 0 && range >= (s64)ARM64_MEMSTART_ALIGN) {
			range /= ARM64_MEMSTART_ALIGN;
			memstart_addr -= ARM64_MEMSTART_ALIGN *
					 ((range * memstart_offset_seed) >> 16);
		}
	}

	/*
	 * Register the kernel text, kernel data, initrd, and initial
	 * pagetables with memblock.
	 */
	memblock_reserve(__pa_symbol(_stext), _end - _stext);
	if (IS_ENABLED(CONFIG_BLK_DEV_INITRD) && phys_initrd_size) {
		/* the generic initrd code expects virtual addresses */
		initrd_start = __phys_to_virt(phys_initrd_start);
		initrd_end = initrd_start + phys_initrd_size;
	}

	early_init_fdt_scan_reserved_mem();

	high_memory = __va(memblock_end_of_DRAM() - 1) + 1;
}

void __init bootmem_init(void)
{
	unsigned long min, max;

	min = PFN_UP(memblock_start_of_DRAM());
	max = PFN_DOWN(memblock_end_of_DRAM());

	early_memtest(min << PAGE_SHIFT, max << PAGE_SHIFT);

	max_pfn = max_low_pfn = max;
	min_low_pfn = min;

	arch_numa_init();

	/*
	 * must be done after arch_numa_init() which calls numa_init() to
	 * initialize node_online_map that gets used in hugetlb_cma_reserve()
	 * while allocating required CMA size across online nodes.
	 */
#if defined(CONFIG_HUGETLB_PAGE) && defined(CONFIG_CMA)
	arm64_hugetlb_cma_reserve();
#endif

	kvm_hyp_reserve();

	/*
	 * sparse_init() tries to allocate memory from memblock, so must be
	 * done after the fixed reservations
	 */
	sparse_init();
	zone_sizes_init();

	/*
	 * Reserve the CMA area after arm64_dma_phys_limit was initialised.
	 */
	dma_contiguous_reserve(arm64_dma_phys_limit);

	/*
	 * request_standard_resources() depends on crashkernel's memory being
	 * reserved, so do it here.
	 */
	arch_reserve_crashkernel();

	memblock_dump_all();
}

/*
 * mem_init() marks the free areas in the mem_map and tells us how much memory
 * is free. This is done after various parts of the system have claimed their
 * memory after the kernel image.
 */
void __init mem_init(void)
{
	bool swiotlb = max_pfn > PFN_DOWN(arm64_dma_phys_limit);

	if (IS_ENABLED(CONFIG_DMA_BOUNCE_UNALIGNED_KMALLOC) && !swiotlb) {
		/*
		 * If no bouncing is needed for ZONE_DMA, reduce the swiotlb
		 * buffer for kmalloc() bouncing to 1MB per 1GB of RAM.
		 */
		unsigned long size =
			DIV_ROUND_UP(memblock_phys_mem_size(), 1024);
		swiotlb_adjust_size(min(swiotlb_size_or_default(), size));
		swiotlb = true;
	}
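
	/*
	 * The 1/1024 scaling above means, for example, that a machine with
	 * 4 GiB of RAM gets at most a 4 MiB kmalloc() bounce buffer (further
	 * capped by swiotlb_size_or_default()).
	 */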

	swiotlb_init(swiotlb, SWIOTLB_VERBOSE);

	/* this will put all unused low memory onto the freelists */
	memblock_free_all();

	/*
	 * Check boundaries twice: Some fundamental inconsistencies can be
	 * detected at build time already.
	 */
#ifdef CONFIG_COMPAT
	BUILD_BUG_ON(TASK_SIZE_32 > DEFAULT_MAP_WINDOW_64);
#endif

	/*
	 * Selected page table levels should match when derived from
	 * scratch using the virtual address range and page size.
	 */
	BUILD_BUG_ON(ARM64_HW_PGTABLE_LEVELS(CONFIG_ARM64_VA_BITS) !=
		     CONFIG_PGTABLE_LEVELS);

	if (PAGE_SIZE >= 16384 && get_num_physpages() <= 128) {
		extern int sysctl_overcommit_memory;
		/*
		 * On a machine this small we won't get anywhere without
		 * overcommit, so turn it on by default.
		 */
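		/*
		 * 128 pages here means very little memory: with 16k pages
		 * that is at most 2 MiB of RAM, with 64k pages at most 8 MiB.
		 */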
		sysctl_overcommit_memory = OVERCOMMIT_ALWAYS;
	}
}

void free_initmem(void)
{
	free_reserved_area(lm_alias(__init_begin),
			   lm_alias(__init_end),
			   POISON_FREE_INITMEM, "unused kernel");
	/*
	 * Unmap the __init region but leave the VM area in place. This
	 * prevents the region from being reused for kernel modules, which
	 * is not supported by kallsyms.
	 */
	vunmap_range((u64)__init_begin, (u64)__init_end);
}

void dump_mem_limit(void)
{
	if (memory_limit != PHYS_ADDR_MAX) {
		pr_emerg("Memory Limit: %llu MB\n", memory_limit >> 20);
	} else {
		pr_emerg("Memory Limit: none\n");
	}
}
