/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1995 Linus Torvalds
 * Copyright (C) 1995 Waldorf Electronics
 * Copyright (C) 1994, 95, 96, 97, 98, 99, 2000, 01, 02, 03 Ralf Baechle
 * Copyright (C) 1996 Stoned Elipot
 * Copyright (C) 1999 Silicon Graphics, Inc.
 * Copyright (C) 2000, 2001, 2002, 2007 Maciej W. Rozycki
 */
#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/ioport.h>
#include <linux/export.h>
#include <linux/memblock.h>
#include <linux/initrd.h>
#include <linux/root_dev.h>
#include <linux/highmem.h>
#include <linux/console.h>
#include <linux/pfn.h>
#include <linux/debugfs.h>
#include <linux/kexec.h>
#include <linux/sizes.h>
#include <linux/device.h>
#include <linux/dma-map-ops.h>
#include <linux/decompress/generic.h>
#include <linux/of_fdt.h>
#include <linux/dmi.h>
#include <linux/crash_dump.h>

#include <asm/addrspace.h>
#include <asm/bootinfo.h>
#include <asm/bugs.h>
#include <asm/cache.h>
#include <asm/cdmm.h>
#include <asm/cpu.h>
#include <asm/debug.h>
#include <asm/mmzone.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/smp-ops.h>
#include <asm/prom.h>
#include <asm/fw/fw.h>

#ifdef CONFIG_MIPS_ELF_APPENDED_DTB
char __section(".appended_dtb") __appended_dtb[0x100000];
#endif /* CONFIG_MIPS_ELF_APPENDED_DTB */

struct cpuinfo_mips cpu_data[NR_CPUS] __read_mostly;

EXPORT_SYMBOL(cpu_data);

/*
 * Setup information
 *
 * These are initialized so they are in the .data section
 */
unsigned long mips_machtype __read_mostly = MACH_UNKNOWN;

EXPORT_SYMBOL(mips_machtype);

static char __initdata command_line[COMMAND_LINE_SIZE];
char __initdata arcs_cmdline[COMMAND_LINE_SIZE];

#ifdef CONFIG_CMDLINE_BOOL
static const char builtin_cmdline[] __initconst = CONFIG_CMDLINE;
#else
static const char builtin_cmdline[] __initconst = "";
#endif

/*
 * mips_io_port_base is the beginning of the address space to which x86-style
 * I/O ports are mapped.
 */
unsigned long mips_io_port_base = -1;
EXPORT_SYMBOL(mips_io_port_base);

static struct resource code_resource = { .name = "Kernel code", };
static struct resource data_resource = { .name = "Kernel data", };
static struct resource bss_resource = { .name = "Kernel bss", };

unsigned long __kaslr_offset __ro_after_init;
EXPORT_SYMBOL(__kaslr_offset);

static void *detect_magic __initdata = detect_memory_region;

#ifdef CONFIG_MIPS_AUTO_PFN_OFFSET
unsigned long ARCH_PFN_OFFSET;
EXPORT_SYMBOL(ARCH_PFN_OFFSET);
#endif
void __init detect_memory_region(phys_addr_t start, phys_addr_t sz_min, phys_addr_t sz_max)
{
	void *dm = &detect_magic;
	phys_addr_t size;

	for (size = sz_min; size < sz_max; size <<= 1) {
		if (!memcmp(dm, dm + size, sizeof(detect_magic)))
			break;
	}

	pr_debug("Memory: %lluMB of RAM detected at 0x%llx (min: %lluMB, max: %lluMB)\n",
		((unsigned long long) size) / SZ_1M,
		(unsigned long long) start,
		((unsigned long long) sz_min) / SZ_1M,
		((unsigned long long) sz_max) / SZ_1M);

	memblock_add(start, size);
}
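
/*
 * Illustrative board-code usage (hypothetical values): probe for between
 * 4MB and 256MB of RAM starting at physical address 0:
 *
 *	detect_memory_region(0, SZ_4M, SZ_256M);
 *
 * The loop above doubles the probed size until reading at dm + size
 * returns the detect_magic value again, i.e. until the address space
 * wraps around onto itself.
 */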

/*
 * Manage initrd
 */
#ifdef CONFIG_BLK_DEV_INITRD

static int __init rd_start_early(char *p)
{
	unsigned long start = memparse(p, &p);

#ifdef CONFIG_64BIT
	/* Guess if the sign extension was forgotten by the bootloader */
	if (start < XKPHYS)
		start = (int)start;
#endif
	initrd_start = start;
	initrd_end += start;
	return 0;
}
early_param("rd_start", rd_start_early);

static int __init rd_size_early(char *p)
{
	initrd_end += memparse(p, &p);
	return 0;
}
early_param("rd_size", rd_size_early);
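
/*
 * Illustrative command-line usage (hypothetical values):
 *
 *	rd_start=0xffffffff81000000 rd_size=8M
 *
 * rd_start takes a virtual address; on 64-bit kernels a value below
 * XKPHYS is assumed to be a truncated KSEG address and is sign-extended.
 */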

/* Returns the next free pfn after the initrd. */
static unsigned long __init init_initrd(void)
{
	unsigned long end;

	/*
	 * Board-specific code or the command line parser should have
	 * already set up initrd_start and initrd_end. In these cases
	 * perform sanity checks and use them if all looks good.
	 */
	if (!initrd_start || initrd_end <= initrd_start)
		goto disable;

	if (initrd_start & ~PAGE_MASK) {
		pr_err("initrd start must be page aligned\n");
		goto disable;
	}

	/*
	 * Sanitize initrd addresses. For example, firmware can't know
	 * whether it needs to pass them as 64-bit values if the kernel
	 * has been built pure 32-bit. We also need to switch from KSEG0
	 * to XKPHYS addresses now, so the code can safely use __pa().
	 */
	end = __pa(initrd_end);
	initrd_end = (unsigned long)__va(end);
	initrd_start = (unsigned long)__va(__pa(initrd_start));

	if (initrd_start < PAGE_OFFSET) {
		pr_err("initrd start < PAGE_OFFSET\n");
		goto disable;
	}

	ROOT_DEV = Root_RAM0;
	return PFN_UP(end);
disable:
	initrd_start = 0;
	initrd_end = 0;
	return 0;
}

/*
 * In some conditions (e.g. a big endian bootloader with a little endian
 * kernel), the initrd might appear byte swapped. Try to detect this and
 * byte swap it if needed.
 */
static void __init maybe_bswap_initrd(void)
{
#if defined(CONFIG_CPU_CAVIUM_OCTEON)
	u64 buf;

	/* Check for CPIO signature */
	if (!memcmp((void *)initrd_start, "070701", 6))
		return;

	/* Check for compressed initrd */
	if (decompress_method((unsigned char *)initrd_start, 8, NULL))
		return;

	/* Try again with a byte swapped header */
	buf = swab64p((u64 *)initrd_start);
	if (!memcmp(&buf, "070701", 6) ||
	    decompress_method((unsigned char *)(&buf), 8, NULL)) {
		unsigned long i;

		pr_info("Byteswapped initrd detected\n");
		for (i = initrd_start; i < ALIGN(initrd_end, 8); i += 8)
			swab64s((u64 *)i);
	}
#endif
}
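
/*
 * Note on the detection above: "070701" is the magic of the "newc" cpio
 * format used for initramfs images, so either a byte-swapped cpio magic
 * or a byte-swapped compression header in the first 8 bytes is taken as
 * evidence of a swapped initrd.
 */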

static void __init finalize_initrd(void)
{
	unsigned long size = initrd_end - initrd_start;

	if (size == 0) {
		printk(KERN_INFO "Initrd not found or empty");
		goto disable;
	}
	if (__pa(initrd_end) > PFN_PHYS(max_low_pfn)) {
		printk(KERN_ERR "Initrd extends beyond end of memory");
		goto disable;
	}

	maybe_bswap_initrd();

	memblock_reserve(__pa(initrd_start), size);
	initrd_below_start_ok = 1;

	pr_info("Initial ramdisk at: 0x%lx (%lu bytes)\n",
		initrd_start, size);
	return;
disable:
	printk(KERN_CONT " - disabling initrd\n");
	initrd_start = 0;
	initrd_end = 0;
}

#else /* !CONFIG_BLK_DEV_INITRD */

static unsigned long __init init_initrd(void)
{
	return 0;
}

#define finalize_initrd()	do {} while (0)

#endif

/*
 * Initialize the bootmem allocator. It also sets up initrd-related data
 * if needed.
 */
#if defined(CONFIG_SGI_IP27) || (defined(CONFIG_CPU_LOONGSON64) && defined(CONFIG_NUMA))

static void __init bootmem_init(void)
{
	init_initrd();
	finalize_initrd();
}

#else /* !CONFIG_SGI_IP27 */

static void __init bootmem_init(void)
{
	phys_addr_t ramstart, ramend;
	unsigned long start, end;
	int i;

	ramstart = memblock_start_of_DRAM();
	ramend = memblock_end_of_DRAM();

	/*
	 * Sanity check any INITRD first. We don't take it into account
	 * for bootmem setup initially; we rely on the end of the kernel
	 * code as our memory range starting point. Once bootmem is
	 * initialized we will reserve the area used for the initrd.
	 */
	init_initrd();

	/* Reserve memory occupied by the kernel. */
	memblock_reserve(__pa_symbol(&_text),
			 __pa_symbol(&_end) - __pa_symbol(&_text));

	/* max_low_pfn is not a number of pages but the end pfn of low mem */

#ifdef CONFIG_MIPS_AUTO_PFN_OFFSET
	ARCH_PFN_OFFSET = PFN_UP(ramstart);
#else
	/*
	 * Reserve any memory between the start of RAM and PHYS_OFFSET
	 */
	if (ramstart > PHYS_OFFSET)
		memblock_reserve(PHYS_OFFSET, ramstart - PHYS_OFFSET);

	if (PFN_UP(ramstart) > ARCH_PFN_OFFSET) {
		pr_info("Wasting %lu bytes for tracking %lu unused pages\n",
			(unsigned long)((PFN_UP(ramstart) - ARCH_PFN_OFFSET) * sizeof(struct page)),
			(unsigned long)(PFN_UP(ramstart) - ARCH_PFN_OFFSET));
	}
#endif

	min_low_pfn = ARCH_PFN_OFFSET;
	max_pfn = PFN_DOWN(ramend);
	for_each_mem_pfn_range(i, MAX_NUMNODES, &start, &end, NULL) {
		/*
		 * Skip highmem here so we get an accurate max_low_pfn if low
		 * memory stops short of high memory.
		 * If the region overlaps HIGHMEM_START, end is clipped so
		 * max_low_pfn excludes the highmem portion.
		 */
		if (start >= PFN_DOWN(HIGHMEM_START))
			continue;
		if (end > PFN_DOWN(HIGHMEM_START))
			end = PFN_DOWN(HIGHMEM_START);
		if (end > max_low_pfn)
			max_low_pfn = end;
	}

	if (min_low_pfn >= max_low_pfn)
		panic("Incorrect memory mapping !!!");

	if (max_pfn > PFN_DOWN(HIGHMEM_START)) {
#ifdef CONFIG_HIGHMEM
		highstart_pfn = PFN_DOWN(HIGHMEM_START);
		highend_pfn = max_pfn;
#else
		max_low_pfn = PFN_DOWN(HIGHMEM_START);
		max_pfn = max_low_pfn;
#endif
	}

	/*
	 * Reserve initrd memory if needed.
	 */
	finalize_initrd();
}

#endif /* CONFIG_SGI_IP27 */

static int usermem __initdata;

static int __init early_parse_mem(char *p)
{
	phys_addr_t start, size;

	if (!p) {
		pr_err("mem parameter is empty, do nothing\n");
		return -EINVAL;
	}

	/*
	 * If a user specifies memory size, we
	 * blow away any automatically generated
	 * size.
	 */
	if (usermem == 0) {
		usermem = 1;
		memblock_remove(memblock_start_of_DRAM(),
				memblock_end_of_DRAM() - memblock_start_of_DRAM());
	}
	start = 0;
	size = memparse(p, &p);
	if (*p == '@')
		start = memparse(p + 1, &p);

	if (IS_ENABLED(CONFIG_NUMA))
		memblock_add_node(start, size, pa_to_nid(start), MEMBLOCK_NONE);
	else
		memblock_add(start, size);

	return 0;
}
early_param("mem", early_parse_mem);
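
/*
 * Illustrative "mem=" usage (hypothetical values):
 *
 *	mem=256M		register 256MB of RAM at physical address 0
 *	mem=256M@0x20000000	register 256MB of RAM at 512MB
 *
 * The first "mem=" argument discards the firmware-provided memory map.
 */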

static int __init early_parse_memmap(char *p)
{
	char *oldp;
	u64 start_at, mem_size;

	if (!p)
		return -EINVAL;

	if (!strncmp(p, "exactmap", 8)) {
		pr_err("\"memmap=exactmap\" invalid on MIPS\n");
		return 0;
	}

	oldp = p;
	mem_size = memparse(p, &p);
	if (p == oldp)
		return -EINVAL;

	if (*p == '@') {
		start_at = memparse(p+1, &p);
		memblock_add(start_at, mem_size);
	} else if (*p == '#') {
		pr_err("\"memmap=nn#ss\" (force ACPI data) invalid on MIPS\n");
		return -EINVAL;
	} else if (*p == '$') {
		start_at = memparse(p+1, &p);
		memblock_add(start_at, mem_size);
		memblock_reserve(start_at, mem_size);
	} else {
		pr_err("\"memmap\" invalid format!\n");
		return -EINVAL;
	}

	if (*p == '\0') {
		usermem = 1;
		return 0;
	} else
		return -EINVAL;
}
early_param("memmap", early_parse_memmap);
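
/*
 * Illustrative "memmap=" usage (hypothetical values):
 *
 *	memmap=64M@0x10000000	add 64MB of RAM at 256MB
 *	memmap=64M$0x10000000	add the same region but keep it reserved
 *
 * The ACPI-style "memmap=nn#ss" form is rejected on MIPS.
 */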

static void __init mips_reserve_vmcore(void)
{
#ifdef CONFIG_PROC_VMCORE
	phys_addr_t start, end;
	u64 i;

	if (!elfcorehdr_size) {
		for_each_mem_range(i, &start, &end) {
			if (elfcorehdr_addr >= start && elfcorehdr_addr < end) {
				/*
				 * Reserve from the elf core header to the end of
				 * the memory segment, that should all be kdump
				 * reserved memory.
				 */
				elfcorehdr_size = end - elfcorehdr_addr;
				break;
			}
		}
	}

	pr_info("Reserving %ldKB of memory at %ldKB for kdump\n",
		(unsigned long)elfcorehdr_size >> 10, (unsigned long)elfcorehdr_addr >> 10);

	memblock_reserve(elfcorehdr_addr, elfcorehdr_size);
#endif
}

#ifdef CONFIG_KEXEC

/* 64M alignment for crash kernel regions */
#define CRASH_ALIGN	SZ_64M
#define CRASH_ADDR_MAX	SZ_512M

static void __init mips_parse_crashkernel(void)
{
	unsigned long long total_mem;
	unsigned long long crash_size, crash_base;
	int ret;

	total_mem = memblock_phys_mem_size();
	ret = parse_crashkernel(boot_command_line, total_mem,
				&crash_size, &crash_base,
				NULL, NULL);
	if (ret != 0 || crash_size <= 0)
		return;

	if (crash_base <= 0) {
		crash_base = memblock_phys_alloc_range(crash_size, CRASH_ALIGN,
						       CRASH_ALIGN,
						       CRASH_ADDR_MAX);
		if (!crash_base) {
			pr_warn("crashkernel reservation failed - No suitable area found.\n");
			return;
		}
	} else {
		unsigned long long start;

		start = memblock_phys_alloc_range(crash_size, 1,
						  crash_base,
						  crash_base + crash_size);
		if (start != crash_base) {
			pr_warn("Invalid memory region reserved for crash kernel\n");
			return;
		}
	}

	crashk_res.start = crash_base;
	crashk_res.end = crash_base + crash_size - 1;
}
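
/*
 * Illustrative "crashkernel=" usage (hypothetical values):
 * "crashkernel=64M" lets the kernel pick a CRASH_ALIGN-aligned 64MB
 * region below CRASH_ADDR_MAX, while "crashkernel=64M@0x02000000"
 * requests exactly the 64MB region at 32MB.
 */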

static void __init request_crashkernel(struct resource *res)
{
	int ret;

	if (crashk_res.start == crashk_res.end)
		return;

	ret = request_resource(res, &crashk_res);
	if (!ret)
		pr_info("Reserving %ldMB of memory at %ldMB for crashkernel\n",
			(unsigned long)(resource_size(&crashk_res) >> 20),
			(unsigned long)(crashk_res.start >> 20));
}
#else /* !defined(CONFIG_KEXEC) */
static void __init mips_parse_crashkernel(void)
{
}

static void __init request_crashkernel(struct resource *res)
{
}
#endif /* !defined(CONFIG_KEXEC) */

static void __init check_kernel_sections_mem(void)
{
	phys_addr_t start = __pa_symbol(&_text);
	phys_addr_t size = __pa_symbol(&_end) - start;

	if (!memblock_is_region_memory(start, size)) {
		pr_info("Kernel sections are not in the memory maps\n");
		memblock_add(start, size);
	}
}

static void __init bootcmdline_append(const char *s, size_t max)
{
	if (!s[0] || !max)
		return;

	if (boot_command_line[0])
		strlcat(boot_command_line, " ", COMMAND_LINE_SIZE);

	strlcat(boot_command_line, s, max);
}
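
/*
 * Note: strlcat() truncates once boot_command_line holds
 * COMMAND_LINE_SIZE - 1 characters, so an over-long command line is
 * silently clipped rather than overflowing the buffer.
 */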

#ifdef CONFIG_OF_EARLY_FLATTREE

static int __init bootcmdline_scan_chosen(unsigned long node, const char *uname,
					  int depth, void *data)
{
	bool *dt_bootargs = data;
	const char *p;
	int l;

	if (depth != 1 || !data ||
	    (strcmp(uname, "chosen") != 0 && strcmp(uname, "chosen@0") != 0))
		return 0;

	p = of_get_flat_dt_prop(node, "bootargs", &l);
	if (p != NULL && l > 0) {
		bootcmdline_append(p, min(l, COMMAND_LINE_SIZE));
		*dt_bootargs = true;
	}

	return 1;
}

#endif /* CONFIG_OF_EARLY_FLATTREE */

static void __init bootcmdline_init(void)
{
	bool dt_bootargs = false;

	/*
	 * If CMDLINE_OVERRIDE is enabled then initializing the command line is
	 * trivial - we simply use the built-in command line unconditionally &
	 * unmodified.
	 */
	if (IS_ENABLED(CONFIG_CMDLINE_OVERRIDE)) {
		strscpy(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE);
		return;
	}

	/*
	 * If the user specified a built-in command line &
	 * MIPS_CMDLINE_BUILTIN_EXTEND, then the built-in command line is
	 * prepended to arguments from the bootloader or DT so we'll copy them
	 * to the start of boot_command_line here. Otherwise, empty
	 * boot_command_line to undo anything early_init_dt_scan_chosen() did.
	 */
	if (IS_ENABLED(CONFIG_MIPS_CMDLINE_BUILTIN_EXTEND))
		strscpy(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE);
	else
		boot_command_line[0] = 0;

#ifdef CONFIG_OF_EARLY_FLATTREE
	/*
	 * If we're configured to take boot arguments from DT, look for those
	 * now.
	 */
	if (IS_ENABLED(CONFIG_MIPS_CMDLINE_FROM_DTB) ||
	    IS_ENABLED(CONFIG_MIPS_CMDLINE_DTB_EXTEND))
		of_scan_flat_dt(bootcmdline_scan_chosen, &dt_bootargs);
#endif

	/*
	 * If we didn't get any arguments from DT (regardless of whether that's
	 * because we weren't configured to look for them, or because we looked
	 * & found none) then we'll take arguments from the bootloader.
	 * plat_mem_setup() should have filled arcs_cmdline with arguments from
	 * the bootloader.
	 */
	if (IS_ENABLED(CONFIG_MIPS_CMDLINE_DTB_EXTEND) || !dt_bootargs)
		bootcmdline_append(arcs_cmdline, COMMAND_LINE_SIZE);

	/*
	 * If the user specified a built-in command line & we didn't already
	 * prepend it, we append it to boot_command_line here.
	 */
	if (IS_ENABLED(CONFIG_CMDLINE_BOOL) &&
	    !IS_ENABLED(CONFIG_MIPS_CMDLINE_BUILTIN_EXTEND))
		bootcmdline_append(builtin_cmdline, COMMAND_LINE_SIZE);
}
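
/*
 * Resulting precedence, as derived from the logic above:
 *
 *	CMDLINE_OVERRIDE:		built-in command line only
 *	MIPS_CMDLINE_BUILTIN_EXTEND:	built-in first, then DT and/or bootloader
 *	MIPS_CMDLINE_FROM_DTB:		DT bootargs, falling back to the bootloader
 *	MIPS_CMDLINE_DTB_EXTEND:	DT bootargs, then bootloader arguments
 *	CMDLINE_BOOL alone:		DT/bootloader arguments, then built-in
 */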

/*
 * arch_mem_init - initialize memory management subsystem
 *
 *  o plat_mem_setup() detects the memory configuration and will record detected
 *    memory areas using memblock_add.
 *
 * At this stage the memory configuration of the system is known to the
 * kernel but the generic memory management system is still entirely
 * uninitialized.
 *
 *  o bootmem_init()
 *  o sparse_init()
 *  o paging_init()
 *  o dma_contiguous_reserve()
 *
 * At this stage the bootmem allocator is ready to use.
 *
 * NOTE: historically plat_mem_setup did the entire platform initialization.
 *	 This was rather impractical because it meant plat_mem_setup had to
 *	 get away without any kind of memory allocator. To keep old code from
 *	 breaking, plat_setup was just renamed to plat_mem_setup and a second
 *	 platform initialization hook for anything else was introduced.
 */
static void __init arch_mem_init(char **cmdline_p)
{
	/* call board setup routine */
	plat_mem_setup();
	memblock_set_bottom_up(true);

	bootcmdline_init();
	strscpy(command_line, boot_command_line, COMMAND_LINE_SIZE);
	*cmdline_p = command_line;

	parse_early_param();

	if (usermem)
		pr_info("User-defined physical RAM map overwrite\n");

	check_kernel_sections_mem();

	early_init_fdt_reserve_self();
	early_init_fdt_scan_reserved_mem();

#ifndef CONFIG_NUMA
	memblock_set_node(0, PHYS_ADDR_MAX, &memblock.memory, 0);
#endif
	bootmem_init();

	/*
	 * Prevent memblock from allocating high memory.
	 * This cannot be done before max_low_pfn is detected, so up
	 * to this point it is only possible to reserve physical memory
	 * with memblock_reserve; memblock_alloc* can be used
	 * only after this point.
	 */
	memblock_set_current_limit(PFN_PHYS(max_low_pfn));

	mips_reserve_vmcore();

	mips_parse_crashkernel();
	device_tree_init();

	/*
	 * In order to reduce the possibility of a kernel panic when failing
	 * to get IO TLB memory under CONFIG_SWIOTLB, it is better to allocate
	 * as little low memory as possible before plat_swiotlb_setup(), so
	 * make sparse_init() use top-down allocation.
	 */
	memblock_set_bottom_up(false);
	sparse_init();
	memblock_set_bottom_up(true);

	plat_swiotlb_setup();

	dma_contiguous_reserve(PFN_PHYS(max_low_pfn));

	/* Reserve for hibernation. */
	memblock_reserve(__pa_symbol(&__nosave_begin),
			 __pa_symbol(&__nosave_end) - __pa_symbol(&__nosave_begin));

	early_memtest(PFN_PHYS(ARCH_PFN_OFFSET), PFN_PHYS(max_low_pfn));
}
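
/*
 * Note: early_memtest() above is a no-op unless the kernel is built with
 * CONFIG_MEMTEST and a "memtest" argument is given on the command line.
 */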

static void __init resource_init(void)
{
	phys_addr_t start, end;
	u64 i;

	if (UNCAC_BASE != IO_BASE)
		return;

	code_resource.start = __pa_symbol(&_text);
	code_resource.end = __pa_symbol(&_etext) - 1;
	data_resource.start = __pa_symbol(&_etext);
	data_resource.end = __pa_symbol(&_edata) - 1;
	bss_resource.start = __pa_symbol(&__bss_start);
	bss_resource.end = __pa_symbol(&__bss_stop) - 1;

	for_each_mem_range(i, &start, &end) {
		struct resource *res;

		res = memblock_alloc(sizeof(struct resource), SMP_CACHE_BYTES);
		if (!res)
			panic("%s: Failed to allocate %zu bytes\n", __func__,
			      sizeof(struct resource));

		res->start = start;
		/*
		 * In memblock, end points to the first byte after the
		 * range while in resources, end points to the last byte in
		 * the range.
		 */
		res->end = end - 1;
		res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
		res->name = "System RAM";

		request_resource(&iomem_resource, res);

		/*
		 * We don't know which RAM region contains kernel data,
		 * so we try it repeatedly and let the resource manager
		 * test it.
		 */
		request_resource(res, &code_resource);
		request_resource(res, &data_resource);
		request_resource(res, &bss_resource);
		request_crashkernel(res);
	}
}
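
/*
 * The "System RAM" regions and the kernel code/data/bss and crashkernel
 * resources registered above are what later show up in /proc/iomem.
 */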

#ifdef CONFIG_SMP
static void __init prefill_possible_map(void)
{
	int i, possible = num_possible_cpus();

	if (possible > nr_cpu_ids)
		possible = nr_cpu_ids;

	for (i = 0; i < possible; i++)
		set_cpu_possible(i, true);
	for (; i < NR_CPUS; i++)
		set_cpu_possible(i, false);

	set_nr_cpu_ids(possible);
}
#else
static inline void prefill_possible_map(void) {}
#endif

static void __init setup_rng_seed(void)
{
	char *rng_seed_hex = fw_getenv("rngseed");
	u8 rng_seed[512];
	size_t len;

	if (!rng_seed_hex)
		return;

	len = min(sizeof(rng_seed), strlen(rng_seed_hex) / 2);
	if (hex2bin(rng_seed, rng_seed_hex, len))
		return;

	add_bootloader_randomness(rng_seed, len);
	memzero_explicit(rng_seed, len);
	memzero_explicit(rng_seed_hex, len * 2);
}
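
/*
 * Illustrative firmware environment setting (hypothetical value):
 *
 *	rngseed=0123456789abcdef0123456789abcdef
 *
 * The hex string is decoded, credited to the random pool as bootloader
 * entropy, and then wiped from memory along with the original string.
 */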

void __init setup_arch(char **cmdline_p)
{
	cpu_probe();
	mips_cm_probe();
	prom_init();

	setup_early_fdc_console();
#ifdef CONFIG_EARLY_PRINTK
	setup_early_printk();
#endif
	cpu_report();
	if (IS_ENABLED(CONFIG_CPU_R4X00_BUGS64))
		check_bugs64_early();

	arch_mem_init(cmdline_p);
	dmi_setup();

	resource_init();
	plat_smp_setup();
	prefill_possible_map();

	cpu_cache_init();
	paging_init();

	memblock_dump_all();

	setup_rng_seed();
}

unsigned long kernelsp[NR_CPUS];
unsigned long fw_arg0, fw_arg1, fw_arg2, fw_arg3;

#ifdef CONFIG_DEBUG_FS
struct dentry *mips_debugfs_dir;
static int __init debugfs_mips(void)
{
	mips_debugfs_dir = debugfs_create_dir("mips", NULL);
	return 0;
}
arch_initcall(debugfs_mips);
#endif

#ifdef CONFIG_DMA_NONCOHERENT
static int __init setcoherentio(char *str)
{
	dma_default_coherent = true;
	pr_info("Hardware DMA cache coherency (command line)\n");
	return 0;
}
early_param("coherentio", setcoherentio);

static int __init setnocoherentio(char *str)
{
	dma_default_coherent = false;
	pr_info("Software DMA cache coherency (command line)\n");
	return 0;
}
early_param("nocoherentio", setnocoherentio);
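
/*
 * Note: the "coherentio" / "nocoherentio" parameters above override the
 * platform's default DMA coherency; they exist only on kernels built
 * with CONFIG_DMA_NONCOHERENT.
 */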
#endif

void __init arch_cpu_finalize_init(void)
{
	unsigned int cpu = smp_processor_id();

	cpu_data[cpu].udelay_val = loops_per_jiffy;
	check_bugs32();

	if (IS_ENABLED(CONFIG_CPU_R4X00_BUGS64))
		check_bugs64();
}