// SPDX-License-Identifier: GPL-2.0
/*
 * Virtual Memory Map support
 *
 * (C) 2007 sgi. Christoph Lameter.
 *
 * Virtual memory maps allow VM primitives pfn_to_page, page_to_pfn,
 * virt_to_page, page_address() to be implemented as a base offset
 * calculation without memory access.
 *
 * However, virtual mappings need a page table and TLBs. Many Linux
 * architectures already map their physical space using 1-1 mappings
 * via TLBs. For those arches the virtual memory map is essentially
 * for free if we use the same page size as the 1-1 mappings. In that
 * case the overhead consists of a few additional pages that are
 * allocated to create a view of memory for vmemmap.
 *
 * The architecture is expected to provide a vmemmap_populate() function
 * to instantiate the mapping.
 */
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/memblock.h>
#include <linux/memremap.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include <linux/sched.h>

#include <asm/dma.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>

#include "hugetlb_vmemmap.h"

/*
 * Flags for vmemmap_populate_range and friends.
 */
/* Get a ref on the head page struct page, for ZONE_DEVICE compound pages */
#define VMEMMAP_POPULATE_PAGEREF	0x0001

#include "internal.h"

/*
 * Allocate a block of memory to be used to back the virtual memory map
 * or to back the page tables that are used to create the mapping.
 * Uses the main allocators if they are available, else bootmem.
 */

static void * __ref __earlyonly_bootmem_alloc(int node,
				unsigned long size,
				unsigned long align,
				unsigned long goal)
{
	return memmap_alloc(size, align, goal, node, false);
}

void * __meminit vmemmap_alloc_block(unsigned long size, int node)
{
	/* If the main allocator is up use that, fallback to bootmem. */
	if (slab_is_available()) {
		gfp_t gfp_mask = GFP_KERNEL|__GFP_RETRY_MAYFAIL|__GFP_NOWARN;
		int order = get_order(size);
		static bool warned;
		struct page *page;

		page = alloc_pages_node(node, gfp_mask, order);
		if (page)
			return page_address(page);

		if (!warned) {
			warn_alloc(gfp_mask & ~__GFP_NOWARN, NULL,
				   "vmemmap alloc failure: order:%u", order);
			warned = true;
		}
		return NULL;
	} else
		return __earlyonly_bootmem_alloc(node, size, size,
				__pa(MAX_DMA_ADDRESS));
}

static void * __meminit altmap_alloc_block_buf(unsigned long size,
					       struct vmem_altmap *altmap);

/* Allocations must all be the same size during the early boot stage. */
void * __meminit vmemmap_alloc_block_buf(unsigned long size, int node,
					 struct vmem_altmap *altmap)
{
	void *ptr;

	if (altmap)
		return altmap_alloc_block_buf(size, altmap);

	ptr = sparse_buffer_alloc(size);
	if (!ptr)
		ptr = vmemmap_alloc_block(size, node);
	return ptr;
}

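/*
 * First pfn in the altmap that is still free: everything below it (the
 * reserved area plus prior allocations and their alignment padding) has
 * already been handed out.
 */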
static unsigned long __meminit vmem_altmap_next_pfn(struct vmem_altmap *altmap)
{
	return altmap->base_pfn + altmap->reserve + altmap->alloc
		+ altmap->align;
}

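/* Number of pfns still available for allocation from the altmap. */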
static unsigned long __meminit vmem_altmap_nr_free(struct vmem_altmap *altmap)
{
	unsigned long allocated = altmap->alloc + altmap->align;

	if (altmap->free > allocated)
		return altmap->free - allocated;
	return 0;
}

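/*
 * Allocate vmemmap backing memory from the altmap rather than from
 * regular memory. The start pfn is aligned to the largest power of two
 * that divides the number of requested pages, and the allocation is
 * accounted in altmap->alloc and altmap->align.
 */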
static void * __meminit altmap_alloc_block_buf(unsigned long size,
					       struct vmem_altmap *altmap)
{
	unsigned long pfn, nr_pfns, nr_align;

	if (size & ~PAGE_MASK) {
		pr_warn_once("%s: allocations must be multiple of PAGE_SIZE (%ld)\n",
			     __func__, size);
		return NULL;
	}

	pfn = vmem_altmap_next_pfn(altmap);
	nr_pfns = size >> PAGE_SHIFT;
	nr_align = 1UL << find_first_bit(&nr_pfns, BITS_PER_LONG);
	nr_align = ALIGN(pfn, nr_align) - pfn;
	if (nr_pfns + nr_align > vmem_altmap_nr_free(altmap))
		return NULL;

	altmap->alloc += nr_pfns;
	altmap->align += nr_align;
	pfn += nr_align;

	pr_debug("%s: pfn: %#lx alloc: %ld align: %ld nr: %#lx\n",
		 __func__, pfn, altmap->alloc, altmap->align, nr_pfns);
	return __va(__pfn_to_phys(pfn));
}

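/*
 * Warn (once) if a vmemmap page was allocated on a node that is not
 * local to the node whose pfns it describes.
 */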
void __meminit vmemmap_verify(pte_t *pte, int node,
				unsigned long start, unsigned long end)
{
	unsigned long pfn = pte_pfn(ptep_get(pte));
	int actual_node = early_pfn_to_nid(pfn);

	if (node_distance(actual_node, node) > LOCAL_DISTANCE)
		pr_warn_once("[%lx-%lx] potential offnode page_structs\n",
			start, end - 1);
}

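/*
 * Populate one vmemmap PTE. If @ptpfn is -1, a fresh backing page is
 * allocated (from @altmap when one is provided); otherwise @ptpfn is
 * mapped directly, which is how tail vmemmap pages get shared.
 */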
pte_t * __meminit vmemmap_pte_populate(pmd_t *pmd, unsigned long addr, int node,
				       struct vmem_altmap *altmap,
				       unsigned long ptpfn, unsigned long flags)
{
	pte_t *pte = pte_offset_kernel(pmd, addr);
	if (pte_none(ptep_get(pte))) {
		pte_t entry;
		void *p;

		if (ptpfn == (unsigned long)-1) {
			p = vmemmap_alloc_block_buf(PAGE_SIZE, node, altmap);
			if (!p)
				return NULL;
			ptpfn = PHYS_PFN(__pa(p));
		} else {
			/*
			 * When a PTE/PMD entry is freed from the init_mm,
			 * there is a free_pages() call on the page allocated
			 * above. Thus this get_page() is paired with the
			 * put_page_testzero() on the freeing path.
			 * This can only be called from certain ZONE_DEVICE
			 * paths, and through vmemmap_populate_compound_pages()
			 * when slab is available.
			 */
			if (flags & VMEMMAP_POPULATE_PAGEREF)
				get_page(pfn_to_page(ptpfn));
		}
		entry = pfn_pte(ptpfn, PAGE_KERNEL);
		set_pte_at(&init_mm, addr, pte, entry);
	}
	return pte;
}

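/* Allocate and zero a block used to back vmemmap page table pages. */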
static void * __meminit vmemmap_alloc_block_zero(unsigned long size, int node)
{
	void *p = vmemmap_alloc_block(size, node);

	if (!p)
		return NULL;
	memset(p, 0, size);

	return p;
}

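/*
 * The helpers below walk the kernel page tables for a vmemmap address,
 * allocating zeroed page-table pages for any level that is not yet
 * present.
 */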
pmd_t * __meminit vmemmap_pmd_populate(pud_t *pud, unsigned long addr, int node)
{
	pmd_t *pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd)) {
		void *p = vmemmap_alloc_block_zero(PAGE_SIZE, node);
		if (!p)
			return NULL;
		kernel_pte_init(p);
		pmd_populate_kernel(&init_mm, pmd, p);
	}
	return pmd;
}

pud_t * __meminit vmemmap_pud_populate(p4d_t *p4d, unsigned long addr, int node)
{
	pud_t *pud = pud_offset(p4d, addr);
	if (pud_none(*pud)) {
		void *p = vmemmap_alloc_block_zero(PAGE_SIZE, node);
		if (!p)
			return NULL;
		pmd_init(p);
		pud_populate(&init_mm, pud, p);
	}
	return pud;
}

p4d_t * __meminit vmemmap_p4d_populate(pgd_t *pgd, unsigned long addr, int node)
{
	p4d_t *p4d = p4d_offset(pgd, addr);
	if (p4d_none(*p4d)) {
		void *p = vmemmap_alloc_block_zero(PAGE_SIZE, node);
		if (!p)
			return NULL;
		pud_init(p);
		p4d_populate(&init_mm, p4d, p);
	}
	return p4d;
}

pgd_t * __meminit vmemmap_pgd_populate(unsigned long addr, int node)
{
	pgd_t *pgd = pgd_offset_k(addr);
	if (pgd_none(*pgd)) {
		void *p = vmemmap_alloc_block_zero(PAGE_SIZE, node);
		if (!p)
			return NULL;
		pgd_populate(&init_mm, pgd, p);
	}
	return pgd;
}

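/*
 * Map a single vmemmap page at @addr, instantiating any missing page
 * table levels on the way down. Returns the PTE, or NULL if an
 * allocation failed.
 */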
static pte_t * __meminit vmemmap_populate_address(unsigned long addr, int node,
					struct vmem_altmap *altmap,
					unsigned long ptpfn,
					unsigned long flags)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	pgd = vmemmap_pgd_populate(addr, node);
	if (!pgd)
		return NULL;
	p4d = vmemmap_p4d_populate(pgd, addr, node);
	if (!p4d)
		return NULL;
	pud = vmemmap_pud_populate(p4d, addr, node);
	if (!pud)
		return NULL;
	pmd = vmemmap_pmd_populate(pud, addr, node);
	if (!pmd)
		return NULL;
	pte = vmemmap_pte_populate(pmd, addr, node, altmap, ptpfn, flags);
	if (!pte)
		return NULL;
	vmemmap_verify(pte, node, addr, addr + PAGE_SIZE);

	return pte;
}

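/*
 * Populate [start, end) with base pages. With @ptpfn other than -1,
 * every PTE in the range maps that same backing page, which is how
 * tail struct pages are deduplicated.
 */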
static int __meminit vmemmap_populate_range(unsigned long start,
					    unsigned long end, int node,
					    struct vmem_altmap *altmap,
					    unsigned long ptpfn,
					    unsigned long flags)
{
	unsigned long addr = start;
	pte_t *pte;

	for (; addr < end; addr += PAGE_SIZE) {
		pte = vmemmap_populate_address(addr, node, altmap,
					       ptpfn, flags);
		if (!pte)
			return -ENOMEM;
	}

	return 0;
}

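/* Back the vmemmap for [start, end) with individual base pages. */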
int __meminit vmemmap_populate_basepages(unsigned long start, unsigned long end,
					 int node, struct vmem_altmap *altmap)
{
	return vmemmap_populate_range(start, end, node, altmap, -1, 0);
}

/*
 * Undo populate_hvo, and replace it with a normal base page mapping.
 * Used in memory init in case a HVO mapping needs to be undone.
 *
 * This can happen when it is discovered that a memblock allocated
 * hugetlb page spans multiple zones, which can only be verified
 * after zones have been initialized.
 *
 * We know that:
 * 1) The first @headsize / PAGE_SIZE vmemmap pages were individually
 *    allocated through memblock, and mapped.
 *
 * 2) The rest of the vmemmap pages are mirrors of the last head page.
 */
int __meminit vmemmap_undo_hvo(unsigned long addr, unsigned long end,
			       int node, unsigned long headsize)
{
	unsigned long maddr, pfn;
	pte_t *pte;
	int headpages;

	/*
	 * Should only be called early in boot, so nothing will
	 * be accessing these page structures.
	 */
	WARN_ON(!early_boot_irqs_disabled);

	headpages = headsize >> PAGE_SHIFT;

	/*
	 * Clear mirrored mappings for tail page structs.
	 */
	for (maddr = addr + headsize; maddr < end; maddr += PAGE_SIZE) {
		pte = virt_to_kpte(maddr);
		pte_clear(&init_mm, maddr, pte);
	}

	/*
	 * Clear and free mappings for head page and first tail page
	 * structs.
	 */
	for (maddr = addr; headpages-- > 0; maddr += PAGE_SIZE) {
		pte = virt_to_kpte(maddr);
		pfn = pte_pfn(ptep_get(pte));
		pte_clear(&init_mm, maddr, pte);
		memblock_phys_free(PFN_PHYS(pfn), PAGE_SIZE);
	}

	flush_tlb_kernel_range(addr, end);

	return vmemmap_populate(addr, end, node, NULL);
}

/*
 * Write protect the mirrored tail page structs for HVO. This will be
 * called from the hugetlb code when gathering and initializing the
 * memblock allocated gigantic pages. The write protect can't be
 * done earlier, since it can't be guaranteed that the reserved
 * page structures will not be written to during initialization,
 * even if CONFIG_DEFERRED_STRUCT_PAGE_INIT is enabled.
 *
 * The PTEs are known to exist, and nothing else should be touching
 * these pages. The caller is responsible for any TLB flushing.
 */
void vmemmap_wrprotect_hvo(unsigned long addr, unsigned long end,
			   int node, unsigned long headsize)
{
	unsigned long maddr;
	pte_t *pte;

	for (maddr = addr + headsize; maddr < end; maddr += PAGE_SIZE) {
		pte = virt_to_kpte(maddr);
		ptep_set_wrprotect(&init_mm, maddr, pte);
	}
}

/*
 * Populate vmemmap pages HVO-style. The first page contains the head
 * page and needed tail pages, the other ones are mirrors of the first
 * page.
 */
int __meminit vmemmap_populate_hvo(unsigned long addr, unsigned long end,
				   int node, unsigned long headsize)
{
	pte_t *pte;
	unsigned long maddr;

	for (maddr = addr; maddr < addr + headsize; maddr += PAGE_SIZE) {
		pte = vmemmap_populate_address(maddr, node, NULL, -1, 0);
		if (!pte)
			return -ENOMEM;
	}

	/*
	 * Reuse the last page struct page mapped above for the rest.
	 */
	return vmemmap_populate_range(maddr, end, node, NULL,
				      pte_pfn(ptep_get(pte)), 0);
}

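/*
 * Arch hooks used by vmemmap_populate_hugepages(). Architectures that
 * map the vmemmap with huge pages override these: vmemmap_set_pmd()
 * installs a PMD-sized mapping of @p, and vmemmap_check_pmd() reports
 * whether an existing PMD entry already covers the range.
 */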
void __weak __meminit vmemmap_set_pmd(pmd_t *pmd, void *p, int node,
				      unsigned long addr, unsigned long next)
{
}

int __weak __meminit vmemmap_check_pmd(pmd_t *pmd, int node,
				       unsigned long addr, unsigned long next)
{
	return 0;
}

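/*
 * Populate the vmemmap with PMD-sized mappings where possible, falling
 * back to base pages when a huge page allocation fails and no altmap is
 * in use, or when an existing PMD entry cannot be reused.
 */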
int __meminit vmemmap_populate_hugepages(unsigned long start, unsigned long end,
					 int node, struct vmem_altmap *altmap)
{
	unsigned long addr;
	unsigned long next;
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;

	for (addr = start; addr < end; addr = next) {
		next = pmd_addr_end(addr, end);

		pgd = vmemmap_pgd_populate(addr, node);
		if (!pgd)
			return -ENOMEM;

		p4d = vmemmap_p4d_populate(pgd, addr, node);
		if (!p4d)
			return -ENOMEM;

		pud = vmemmap_pud_populate(p4d, addr, node);
		if (!pud)
			return -ENOMEM;

		pmd = pmd_offset(pud, addr);
		if (pmd_none(READ_ONCE(*pmd))) {
			void *p;

			p = vmemmap_alloc_block_buf(PMD_SIZE, node, altmap);
			if (p) {
				vmemmap_set_pmd(pmd, p, node, addr, next);
				continue;
			} else if (altmap) {
				/*
				 * No fallback: In any case we care about, the
				 * altmap should be reasonably sized and aligned
				 * such that vmemmap_alloc_block_buf() will always
				 * succeed. For consistency with the PTE case,
				 * return an error here as failure could indicate
				 * a configuration issue with the size of the altmap.
				 */
				return -ENOMEM;
			}
		} else if (vmemmap_check_pmd(pmd, node, addr, next))
			continue;
		if (vmemmap_populate_basepages(addr, next, node, altmap))
			return -ENOMEM;
	}
	return 0;
}

#ifndef vmemmap_populate_compound_pages
/*
 * For compound pages bigger than section size (e.g. x86 1G compound
 * pages with 2M subsection size) fill the rest of sections as tail
 * pages.
 *
 * Note that memremap_pages() resets @nr_range value and will increment
 * it after each range is successfully onlined. Thus the value of
 * @nr_range at section memmap populate corresponds to the in-progress
 * range being onlined here.
 */
static bool __meminit reuse_compound_section(unsigned long start_pfn,
					     struct dev_pagemap *pgmap)
{
	unsigned long nr_pages = pgmap_vmemmap_nr(pgmap);
	unsigned long offset = start_pfn -
		PHYS_PFN(pgmap->ranges[pgmap->nr_range].start);

	return !IS_ALIGNED(offset, nr_pages) && nr_pages > PAGES_PER_SUBSECTION;
}

static pte_t * __meminit compound_section_tail_page(unsigned long addr)
{
	pte_t *pte;

	addr -= PAGE_SIZE;

	/*
	 * Assuming sections are populated sequentially, the previous section's
	 * page data can be reused.
	 */
	pte = pte_offset_kernel(pmd_off_k(addr), addr);
	if (!pte)
		return NULL;

	return pte;
}

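/*
 * Populate a compound (ZONE_DEVICE) memmap: map the head vmemmap page
 * and the first tail vmemmap page normally, then point the remaining
 * PTEs of each compound page at that tail page, taking a reference for
 * every reuse.
 */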
static int __meminit vmemmap_populate_compound_pages(unsigned long start_pfn,
						     unsigned long start,
						     unsigned long end, int node,
						     struct dev_pagemap *pgmap)
{
	unsigned long size, addr;
	pte_t *pte;
	int rc;

	if (reuse_compound_section(start_pfn, pgmap)) {
		pte = compound_section_tail_page(start);
		if (!pte)
			return -ENOMEM;

		/*
		 * Reuse the page that was populated in the prior iteration
		 * with just tail struct pages.
		 */
		return vmemmap_populate_range(start, end, node, NULL,
					      pte_pfn(ptep_get(pte)),
					      VMEMMAP_POPULATE_PAGEREF);
	}

	size = min(end - start, pgmap_vmemmap_nr(pgmap) * sizeof(struct page));
	for (addr = start; addr < end; addr += size) {
		unsigned long next, last = addr + size;

		/* Populate the head page vmemmap page */
		pte = vmemmap_populate_address(addr, node, NULL, -1, 0);
		if (!pte)
			return -ENOMEM;

		/* Populate the tail pages vmemmap page */
		next = addr + PAGE_SIZE;
		pte = vmemmap_populate_address(next, node, NULL, -1, 0);
		if (!pte)
			return -ENOMEM;

		/*
		 * Reuse the previous page for the rest of tail pages
		 * See layout diagram in Documentation/mm/vmemmap_dedup.rst
		 */
		next += PAGE_SIZE;
		rc = vmemmap_populate_range(next, last, node, NULL,
					    pte_pfn(ptep_get(pte)),
					    VMEMMAP_POPULATE_PAGEREF);
		if (rc)
			return -ENOMEM;
	}

	return 0;
}

#endif

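/*
 * Create the memmap for a subsection-aligned range of pfns and account
 * the pages used to back it. Returns the first struct page, or NULL on
 * failure.
 */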
struct page * __meminit __populate_section_memmap(unsigned long pfn,
		unsigned long nr_pages, int nid, struct vmem_altmap *altmap,
		struct dev_pagemap *pgmap)
{
	unsigned long start = (unsigned long) pfn_to_page(pfn);
	unsigned long end = start + nr_pages * sizeof(struct page);
	int r;

	if (WARN_ON_ONCE(!IS_ALIGNED(pfn, PAGES_PER_SUBSECTION) ||
		!IS_ALIGNED(nr_pages, PAGES_PER_SUBSECTION)))
		return NULL;

	if (vmemmap_can_optimize(altmap, pgmap))
		r = vmemmap_populate_compound_pages(pfn, start, end, nid, pgmap);
	else
		r = vmemmap_populate(start, end, nid, altmap);

	if (r < 0)
		return NULL;

	if (system_state == SYSTEM_BOOTING)
		memmap_boot_pages_add(DIV_ROUND_UP(end - start, PAGE_SIZE));
	else
		memmap_pages_add(DIV_ROUND_UP(end - start, PAGE_SIZE));

	return pfn_to_page(pfn);
}

#ifdef CONFIG_SPARSEMEM_VMEMMAP_PREINIT
/*
 * This is called just before initializing sections for a NUMA node.
 * Any special initialization that needs to be done before the
 * generic initialization can be done from here. Sections that
 * are initialized in hooks called from here will be skipped by
 * the generic initialization.
 */
void __init sparse_vmemmap_init_nid_early(int nid)
{
	hugetlb_vmemmap_init_early(nid);
}

/*
 * This is called just before the initialization of page structures
 * through memmap_init. Zones are now initialized, so any work that
 * needs zone information can be done from here.
 */
void __init sparse_vmemmap_init_nid_late(int nid)
{
	hugetlb_vmemmap_init_late(nid);
}
#endif