// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
 *
 * Derived from MIPS:
 * Copyright (C) 1995 Linus Torvalds
 * Copyright (C) 1995 Waldorf Electronics
 * Copyright (C) 1994, 95, 96, 97, 98, 99, 2000, 01, 02, 03 Ralf Baechle
 * Copyright (C) 1996 Stoned Elipot
 * Copyright (C) 1999 Silicon Graphics, Inc.
 * Copyright (C) 2000, 2001, 2002, 2007 Maciej W. Rozycki
 */
#include <linux/init.h>
#include <linux/acpi.h>
#include <linux/cpu.h>
#include <linux/dmi.h>
#include <linux/efi.h>
#include <linux/export.h>
#include <linux/memblock.h>
#include <linux/initrd.h>
#include <linux/ioport.h>
#include <linux/kexec.h>
#include <linux/crash_dump.h>
#include <linux/root_dev.h>
#include <linux/console.h>
#include <linux/pfn.h>
#include <linux/platform_device.h>
#include <linux/sizes.h>
#include <linux/device.h>
#include <linux/dma-map-ops.h>
#include <linux/libfdt.h>
#include <linux/of_fdt.h>
#include <linux/of_address.h>
#include <linux/suspend.h>
#include <linux/swiotlb.h>

#include <asm/addrspace.h>
#include <asm/alternative.h>
#include <asm/bootinfo.h>
#include <asm/cache.h>
#include <asm/cpu.h>
#include <asm/dma.h>
#include <asm/efi.h>
#include <asm/loongson.h>
#include <asm/numa.h>
#include <asm/pgalloc.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/time.h>
#include <asm/unwind.h>

#define SMBIOS_BIOSSIZE_OFFSET		0x09
#define SMBIOS_BIOSEXTERN_OFFSET	0x13
#define SMBIOS_FREQLOW_OFFSET		0x16
#define SMBIOS_FREQHIGH_OFFSET		0x17
#define SMBIOS_FREQLOW_MASK		0xFF
#define SMBIOS_CORE_PACKAGE_OFFSET	0x23
#define LOONGSON_EFI_ENABLE		(1 << 3)
unsigned long fw_arg0, fw_arg1, fw_arg2;
DEFINE_PER_CPU(unsigned long, kernelsp);
struct cpuinfo_loongarch cpu_data[NR_CPUS] __read_mostly;

EXPORT_SYMBOL(cpu_data);

struct loongson_board_info b_info;
static const char dmi_empty_string[] = "        ";

/*
 * Setup information
 *
 * These are initialized so they are in the .data section
 */
char init_command_line[COMMAND_LINE_SIZE] __initdata;

static int num_standard_resources;
static struct resource *standard_resources;

static struct resource code_resource = { .name = "Kernel code", };
static struct resource data_resource = { .name = "Kernel data", };
static struct resource bss_resource = { .name = "Kernel bss", };

const char *get_system_type(void)
{
	return "generic-loongson-machine";
}

void __init arch_cpu_finalize_init(void)
{
	alternative_instructions();
}

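/*
 * Return the s-th string following the formatted area of DMI structure @dm,
 * or "" if it is absent.
 */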
static const char *dmi_string_parse(const struct dmi_header *dm, u8 s)
{
	const u8 *bp = ((u8 *) dm) + dm->length;

	if (s) {
		s--;
		while (s > 0 && *bp) {
			bp += strlen(bp) + 1;
			s--;
		}

		if (*bp != 0) {
			size_t len = strlen(bp)+1;
			size_t cmp_len = len > 8 ? 8 : len;

			if (!memcmp(bp, dmi_empty_string, cmp_len))
				return dmi_empty_string;

			return bp;
		}
	}

	return "";
}

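/*
 * Parse the SMBIOS Processor Information (type 4) table: CPU clock
 * frequency, processor name and number of cores per package.
 */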
static void __init parse_cpu_table(const struct dmi_header *dm)
{
	long freq_temp = 0;
	char *dmi_data = (char *)dm;

	freq_temp = ((*(dmi_data + SMBIOS_FREQHIGH_OFFSET) << 8) +
			((*(dmi_data + SMBIOS_FREQLOW_OFFSET)) & SMBIOS_FREQLOW_MASK));
	cpu_clock_freq = freq_temp * 1000000;

	loongson_sysconf.cpuname = (void *)dmi_string_parse(dm, dmi_data[16]);
	loongson_sysconf.cores_per_package = *(dmi_data + SMBIOS_CORE_PACKAGE_OFFSET);

	pr_info("CpuClock = %llu\n", cpu_clock_freq);
}

static void __init parse_bios_table(const struct dmi_header *dm)
{
	char *dmi_data = (char *)dm;

	b_info.bios_size = (*(dmi_data + SMBIOS_BIOSSIZE_OFFSET) + 1) << 6;
}

static void __init find_tokens(const struct dmi_header *dm, void *dummy)
{
	switch (dm->type) {
	case 0x0: /* Extern BIOS */
		parse_bios_table(dm);
		break;
	case 0x4: /* Calling interface */
		parse_cpu_table(dm);
		break;
	}
}
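
/*
 * Cache the BIOS/board identification strings and walk the DMI tables
 * for the BIOS and CPU details above.
 */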
static void __init smbios_parse(void)
{
	b_info.bios_vendor = (void *)dmi_get_system_info(DMI_BIOS_VENDOR);
	b_info.bios_version = (void *)dmi_get_system_info(DMI_BIOS_VERSION);
	b_info.bios_release_date = (void *)dmi_get_system_info(DMI_BIOS_DATE);
	b_info.board_vendor = (void *)dmi_get_system_info(DMI_BOARD_VENDOR);
	b_info.board_name = (void *)dmi_get_system_info(DMI_BOARD_NAME);
	dmi_walk(find_tokens, NULL);
}

#ifdef CONFIG_ARCH_WRITECOMBINE
bool wc_enabled = true;
#else
bool wc_enabled = false;
#endif

EXPORT_SYMBOL(wc_enabled);

static int __init setup_writecombine(char *p)
{
	if (!strcmp(p, "on"))
		wc_enabled = true;
	else if (!strcmp(p, "off"))
		wc_enabled = false;
	else
		pr_warn("Unknown writecombine setting \"%s\".\n", p);

	return 0;
}
early_param("writecombine", setup_writecombine);

static int usermem __initdata;

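/* Handle "mem=size@start": override the firmware-provided memory map. */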
static int __init early_parse_mem(char *p)
{
	phys_addr_t start, size;

	if (!p) {
		pr_err("mem parameter is empty, do nothing\n");
		return -EINVAL;
	}

	/*
	 * If a user specifies memory size, we
	 * blow away any automatically generated
	 * size.
	 */
	if (usermem == 0) {
		usermem = 1;
		memblock_remove(memblock_start_of_DRAM(),
			memblock_end_of_DRAM() - memblock_start_of_DRAM());
	}
	start = 0;
	size = memparse(p, &p);
	if (*p == '@')
		start = memparse(p + 1, &p);
	else {
		pr_err("Invalid format!\n");
		return -EINVAL;
	}

	if (!IS_ENABLED(CONFIG_NUMA))
		memblock_add(start, size);
	else
		memblock_add_node(start, size, pa_to_nid(start), MEMBLOCK_NONE);

	return 0;
}
early_param("mem", early_parse_mem);

static void __init arch_reserve_vmcore(void)
{
#ifdef CONFIG_PROC_VMCORE
	u64 i;
	phys_addr_t start, end;

	if (!is_kdump_kernel())
		return;

	if (!elfcorehdr_size) {
		for_each_mem_range(i, &start, &end) {
			if (elfcorehdr_addr >= start && elfcorehdr_addr < end) {
				/*
				 * Reserve from the elf core header to the end of
				 * the memory segment, that should all be kdump
				 * reserved memory.
				 */
				elfcorehdr_size = end - elfcorehdr_addr;
				break;
			}
		}
	}

	if (memblock_is_region_reserved(elfcorehdr_addr, elfcorehdr_size)) {
		pr_warn("elfcorehdr is overlapped\n");
		return;
	}

	memblock_reserve(elfcorehdr_addr, elfcorehdr_size);

	pr_info("Reserving %llu KiB of memory at 0x%llx for elfcorehdr\n",
		elfcorehdr_size >> 10, elfcorehdr_addr);
#endif
}

static void __init arch_reserve_crashkernel(void)
{
	int ret;
	unsigned long long low_size = 0;
	unsigned long long crash_base, crash_size;
	char *cmdline = boot_command_line;
	bool high = false;

	if (!IS_ENABLED(CONFIG_CRASH_RESERVE))
		return;

	ret = parse_crashkernel(cmdline, memblock_phys_mem_size(),
				&crash_size, &crash_base, &low_size, &high);
	if (ret)
		return;

	reserve_crashkernel_generic(cmdline, crash_size, crash_base, low_size, high);
}

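/*
 * Pick a usable device tree on non-ACPI systems: prefer the built-in DTB,
 * fall back to the one passed by the firmware, then scan it.
 */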
static void __init fdt_setup(void)
{
#ifdef CONFIG_OF_EARLY_FLATTREE
	void *fdt_pointer;

	/* ACPI-based systems do not require parsing fdt */
	if (acpi_os_get_root_pointer())
		return;

	/* Prefer to use built-in dtb, checking its legality first. */
	if (!fdt_check_header(__dtb_start))
		fdt_pointer = __dtb_start;
	else
		fdt_pointer = efi_fdt_pointer(); /* Fallback to firmware dtb */

	if (!fdt_pointer || fdt_check_header(fdt_pointer))
		return;

	early_init_dt_scan(fdt_pointer);
	early_init_fdt_reserve_self();

	max_low_pfn = PFN_PHYS(memblock_end_of_DRAM());
#endif
}

static void __init bootcmdline_init(char **cmdline_p)
{
	/*
	 * If CONFIG_CMDLINE_FORCE is enabled then initializing the command line
	 * is trivial - we simply use the built-in command line unconditionally &
	 * unmodified.
	 */
	if (IS_ENABLED(CONFIG_CMDLINE_FORCE)) {
		strscpy(boot_command_line, CONFIG_CMDLINE, COMMAND_LINE_SIZE);
		goto out;
	}

#ifdef CONFIG_OF_FLATTREE
	/*
	 * If CONFIG_CMDLINE_BOOTLOADER is enabled and we are on an FDT-based
	 * system, boot_command_line will have been overwritten by
	 * early_init_dt_scan_chosen(). So we need to append init_command_line
	 * (the original copy of boot_command_line) to boot_command_line.
	 */
	if (initial_boot_params) {
		if (boot_command_line[0])
			strlcat(boot_command_line, " ", COMMAND_LINE_SIZE);

		if (!strstr(boot_command_line, init_command_line))
			strlcat(boot_command_line, init_command_line, COMMAND_LINE_SIZE);

		goto out;
	}
#endif

	/*
	 * Append built-in command line to the bootloader command line if
	 * CONFIG_CMDLINE_EXTEND is enabled.
	 */
	if (IS_ENABLED(CONFIG_CMDLINE_EXTEND) && CONFIG_CMDLINE[0]) {
		strlcat(boot_command_line, " ", COMMAND_LINE_SIZE);
		strlcat(boot_command_line, CONFIG_CMDLINE, COMMAND_LINE_SIZE);
	}

	/*
	 * Use built-in command line if the bootloader command line is empty.
	 */
	if (IS_ENABLED(CONFIG_CMDLINE_BOOTLOADER) && !boot_command_line[0])
		strscpy(boot_command_line, CONFIG_CMDLINE, COMMAND_LINE_SIZE);

out:
	*cmdline_p = boot_command_line;
}

void __init platform_init(void)
{
	arch_reserve_vmcore();
	arch_reserve_crashkernel();

#ifdef CONFIG_ACPI_TABLE_UPGRADE
	acpi_table_upgrade();
#endif
#ifdef CONFIG_ACPI
	acpi_gbl_use_default_register_widths = false;
	acpi_boot_table_init();
#endif

	early_init_fdt_scan_reserved_mem();
	unflatten_and_copy_device_tree();

#ifdef CONFIG_NUMA
	init_numa_memory();
#endif
	dmi_setup();
	smbios_parse();
	pr_info("The BIOS Version: %s\n", b_info.bios_version);

	efi_runtime_init();
}

static void __init check_kernel_sections_mem(void)
{
	phys_addr_t start = __pa_symbol(&_text);
	phys_addr_t size = __pa_symbol(&_end) - start;

	if (!memblock_is_region_memory(start, size)) {
		pr_info("Kernel sections are not in the memory maps\n");
		memblock_add(start, size);
	}
}

/*
 * arch_mem_init - initialize memory management subsystem
 */
static void __init arch_mem_init(char **cmdline_p)
{
	if (usermem)
		pr_info("User-defined physical RAM map overwrite\n");

	check_kernel_sections_mem();

	/*
	 * To reduce the possibility of a kernel panic when swiotlb_init()
	 * fails to get IO TLB memory under CONFIG_SWIOTLB, allocate as little
	 * low memory as possible before it runs, so make sparse_init() use
	 * top-down allocation.
	 */
	memblock_set_bottom_up(false);
	sparse_init();
	memblock_set_bottom_up(true);

	swiotlb_init(true, SWIOTLB_VERBOSE);

	dma_contiguous_reserve(PFN_PHYS(max_low_pfn));

	/* Reserve for hibernation. */
	register_nosave_region(PFN_DOWN(__pa_symbol(&__nosave_begin)),
			       PFN_UP(__pa_symbol(&__nosave_end)));

	memblock_dump_all();

	early_memtest(PFN_PHYS(ARCH_PFN_OFFSET), PFN_PHYS(max_low_pfn));
}

static void __init resource_init(void)
{
	long i = 0;
	size_t res_size;
	struct resource *res;
	struct memblock_region *region;

	code_resource.start = __pa_symbol(&_text);
	code_resource.end = __pa_symbol(&_etext) - 1;
	data_resource.start = __pa_symbol(&_etext);
	data_resource.end = __pa_symbol(&_edata) - 1;
	bss_resource.start = __pa_symbol(&__bss_start);
	bss_resource.end = __pa_symbol(&__bss_stop) - 1;

	num_standard_resources = memblock.memory.cnt;
	res_size = num_standard_resources * sizeof(*standard_resources);
	standard_resources = memblock_alloc(res_size, SMP_CACHE_BYTES);

	for_each_mem_region(region) {
		res = &standard_resources[i++];
		if (!memblock_is_nomap(region)) {
			res->name = "System RAM";
			res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
			res->start = __pfn_to_phys(memblock_region_memory_base_pfn(region));
			res->end = __pfn_to_phys(memblock_region_memory_end_pfn(region)) - 1;
		} else {
			res->name = "Reserved";
			res->flags = IORESOURCE_MEM;
			res->start = __pfn_to_phys(memblock_region_reserved_base_pfn(region));
			res->end = __pfn_to_phys(memblock_region_reserved_end_pfn(region)) - 1;
		}

		request_resource(&iomem_resource, res);

		/*
		 * We don't know which RAM region contains kernel data,
		 * so we try it repeatedly and let the resource manager
		 * test it.
		 */
		request_resource(res, &code_resource);
		request_resource(res, &data_resource);
		request_resource(res, &bss_resource);
	}
}

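/*
 * Register a legacy ISA I/O range with the logic PIO framework and map it
 * at the beginning of the PCI_IOBASE window.
 */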
static int __init add_legacy_isa_io(struct fwnode_handle *fwnode,
				    resource_size_t hw_start, resource_size_t size)
{
	int ret = 0;
	unsigned long vaddr;
	struct logic_pio_hwaddr *range;

	range = kzalloc(sizeof(*range), GFP_ATOMIC);
	if (!range)
		return -ENOMEM;

	range->fwnode = fwnode;
	range->size = size = round_up(size, PAGE_SIZE);
	range->hw_start = hw_start;
	range->flags = LOGIC_PIO_CPU_MMIO;

	ret = logic_pio_register_range(range);
	if (ret) {
		kfree(range);
		return ret;
	}

	/* Legacy ISA must be placed at the start of PCI_IOBASE */
	if (range->io_start != 0) {
		logic_pio_unregister_range(range);
		kfree(range);
		return -EINVAL;
	}

	vaddr = (unsigned long)(PCI_IOBASE + range->io_start);
	vmap_page_range(vaddr, vaddr + size, hw_start, pgprot_device(PAGE_KERNEL));

	return 0;
}

static __init int arch_reserve_pio_range(void)
{
	struct device_node *np;

	for_each_node_by_name(np, "isa") {
		struct of_range range;
		struct of_range_parser parser;

		pr_info("ISA Bridge: %pOF\n", np);

		if (of_range_parser_init(&parser, np)) {
			pr_info("Failed to parse resources.\n");
			of_node_put(np);
			break;
		}

		for_each_of_range(&parser, &range) {
			switch (range.flags & IORESOURCE_TYPE_BITS) {
			case IORESOURCE_IO:
				pr_info(" IO 0x%016llx..0x%016llx -> 0x%016llx\n",
					range.cpu_addr,
					range.cpu_addr + range.size - 1,
					range.bus_addr);
				if (add_legacy_isa_io(&np->fwnode, range.cpu_addr, range.size))
					pr_warn("Failed to reserve legacy IO in Logic PIO\n");
				break;
			case IORESOURCE_MEM:
				pr_info(" MEM 0x%016llx..0x%016llx -> 0x%016llx\n",
					range.cpu_addr,
					range.cpu_addr + range.size - 1,
					range.bus_addr);
				break;
			}
		}
	}

	return 0;
}
arch_initcall(arch_reserve_pio_range);

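/*
 * Split out "Reserved" child resources for memblock-reserved ranges that
 * overlap the standard "System RAM" resources.
 */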
static int __init reserve_memblock_reserved_regions(void)
{
	u64 i, j;

	for (i = 0; i < num_standard_resources; ++i) {
		struct resource *mem = &standard_resources[i];
		phys_addr_t r_start, r_end, mem_size = resource_size(mem);

		if (!memblock_is_region_reserved(mem->start, mem_size))
			continue;

		for_each_reserved_mem_range(j, &r_start, &r_end) {
			resource_size_t start, end;

			start = max(PFN_PHYS(PFN_DOWN(r_start)), mem->start);
			end = min(PFN_PHYS(PFN_UP(r_end)) - 1, mem->end);

			if (start > mem->end || end < mem->start)
				continue;

			reserve_region_with_split(mem, start, end, "Reserved");
		}
	}

	return 0;
}
arch_initcall(reserve_memblock_reserved_regions);

#ifdef CONFIG_SMP
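/* Mark hot-pluggable CPUs as possible and shrink nr_cpu_ids accordingly. */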
static void __init prefill_possible_map(void)
{
	int i, possible;

	possible = num_processors + disabled_cpus;
	if (possible > nr_cpu_ids)
		possible = nr_cpu_ids;

	pr_info("SMP: Allowing %d CPUs, %d hotplug CPUs\n",
			possible, max((possible - num_processors), 0));

	for (i = 0; i < possible; i++)
		set_cpu_possible(i, true);
	for (; i < NR_CPUS; i++)
		set_cpu_possible(i, false);

	set_nr_cpu_ids(possible);
}
#endif

void __init setup_arch(char **cmdline_p)
{
	cpu_probe();
	unwind_init();

	init_environ();
	efi_init();
	fdt_setup();
	memblock_init();
	pagetable_init();
	bootcmdline_init(cmdline_p);
	parse_early_param();
	reserve_initrd_mem();

	platform_init();
	arch_mem_init(cmdline_p);

	resource_init();
#ifdef CONFIG_SMP
	plat_smp_setup();
	prefill_possible_map();
#endif

	paging_init();

#ifdef CONFIG_KASAN
	kasan_init();
#endif
}