1 | // SPDX-License-Identifier: GPL-2.0-or-later |
2 | /* |
3 | * Procedures for maintaining information about logical memory blocks. |
4 | * |
5 | * Peter Bergner, IBM Corp. June 2001. |
6 | * Copyright (C) 2001 Peter Bergner. |
7 | */ |
8 | |
9 | #include <linux/kernel.h> |
10 | #include <linux/slab.h> |
11 | #include <linux/init.h> |
12 | #include <linux/bitops.h> |
13 | #include <linux/poison.h> |
14 | #include <linux/pfn.h> |
15 | #include <linux/debugfs.h> |
16 | #include <linux/kmemleak.h> |
17 | #include <linux/seq_file.h> |
18 | #include <linux/memblock.h> |
19 | |
20 | #include <asm/sections.h> |
21 | #include <linux/io.h> |
22 | |
23 | #include "internal.h" |
24 | |
25 | #define INIT_MEMBLOCK_REGIONS 128 |
26 | #define INIT_PHYSMEM_REGIONS 4 |
27 | |
28 | #ifndef INIT_MEMBLOCK_RESERVED_REGIONS |
29 | # define INIT_MEMBLOCK_RESERVED_REGIONS INIT_MEMBLOCK_REGIONS |
30 | #endif |
31 | |
32 | #ifndef INIT_MEMBLOCK_MEMORY_REGIONS |
33 | #define INIT_MEMBLOCK_MEMORY_REGIONS INIT_MEMBLOCK_REGIONS |
34 | #endif |
35 | |
36 | /** |
37 | * DOC: memblock overview |
38 | * |
39 | * Memblock is a method of managing memory regions during the early |
40 | * boot period when the usual kernel memory allocators are not up and |
41 | * running. |
42 | * |
43 | * Memblock views the system memory as collections of contiguous |
44 | * regions. There are several types of these collections: |
45 | * |
46 | * * ``memory`` - describes the physical memory available to the |
47 | * kernel; this may differ from the actual physical memory installed |
48 | * in the system, for instance when the memory is restricted with |
49 | * ``mem=`` command line parameter |
50 | * * ``reserved`` - describes the regions that were allocated |
51 | * * ``physmem`` - describes the actual physical memory available during |
52 | * boot regardless of the possible restrictions and memory hot(un)plug; |
53 | * the ``physmem`` type is only available on some architectures. |
54 | * |
55 | * Each region is represented by struct memblock_region that |
56 | * defines the region extents, its attributes and NUMA node id on NUMA |
57 | * systems. Every memory type is described by the struct memblock_type |
58 | * which contains an array of memory regions along with |
59 | * the allocator metadata. The "memory" and "reserved" types are nicely |
60 | * wrapped with struct memblock. This structure is statically |
61 | * initialized at build time. The region arrays are initially sized to |
62 | * %INIT_MEMBLOCK_MEMORY_REGIONS for "memory" and |
63 | * %INIT_MEMBLOCK_RESERVED_REGIONS for "reserved". The region array |
64 | * for "physmem" is initially sized to %INIT_PHYSMEM_REGIONS. |
65 | * The memblock_allow_resize() enables automatic resizing of the region |
66 | * arrays during addition of new regions. This feature should be used |
67 | * with care so that memory allocated for the region array will not |
68 | * overlap with areas that should be reserved, for example initrd. |
69 | * |
70 | * The early architecture setup should tell memblock what the physical |
71 | * memory layout is by using memblock_add() or memblock_add_node() |
72 | * functions. The first function does not assign the region to a NUMA |
73 | * node and it is appropriate for UMA systems. Yet, it is possible to |
74 | * use it on NUMA systems as well and assign the region to a NUMA node |
75 | * later in the setup process using memblock_set_node(). The |
76 | * memblock_add_node() performs such an assignment directly. |
77 | * |
78 | * Once memblock is setup the memory can be allocated using one of the |
79 | * API variants: |
80 | * |
81 | * * memblock_phys_alloc*() - these functions return the **physical** |
82 | * address of the allocated memory |
83 | * * memblock_alloc*() - these functions return the **virtual** address |
84 | * of the allocated memory. |
85 | * |
 * Note that both API variants use implicit assumptions about allowed
87 | * memory ranges and the fallback methods. Consult the documentation |
88 | * of memblock_alloc_internal() and memblock_alloc_range_nid() |
89 | * functions for more elaborate description. |
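 *
 * For example, a minimal boot-time sequence might look like this (the
 * base address and the sizes here are purely illustrative)::
 *
 *	memblock_add(0x80000000, SZ_256M);
 *	memblock_reserve(0x80000000, SZ_1M);
 *	void *ptr = memblock_alloc(SZ_4K, SMP_CACHE_BYTES);
 *
 * where memblock_add() registers a bank of RAM, memblock_reserve()
 * protects a range that must not be handed out (say, a firmware area)
 * and memblock_alloc() returns the virtual address of a fresh
 * allocation from whatever remains.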
90 | * |
91 | * As the system boot progresses, the architecture specific mem_init() |
92 | * function frees all the memory to the buddy page allocator. |
93 | * |
94 | * Unless an architecture enables %CONFIG_ARCH_KEEP_MEMBLOCK, the |
95 | * memblock data structures (except "physmem") will be discarded after the |
96 | * system initialization completes. |
97 | */ |
98 | |
99 | #ifndef CONFIG_NUMA |
100 | struct pglist_data __refdata contig_page_data; |
101 | EXPORT_SYMBOL(contig_page_data); |
102 | #endif |
103 | |
104 | unsigned long max_low_pfn; |
105 | unsigned long min_low_pfn; |
106 | unsigned long max_pfn; |
107 | unsigned long long max_possible_pfn; |
108 | |
109 | static struct memblock_region memblock_memory_init_regions[INIT_MEMBLOCK_MEMORY_REGIONS] __initdata_memblock; |
110 | static struct memblock_region memblock_reserved_init_regions[INIT_MEMBLOCK_RESERVED_REGIONS] __initdata_memblock; |
111 | #ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP |
112 | static struct memblock_region memblock_physmem_init_regions[INIT_PHYSMEM_REGIONS]; |
113 | #endif |
114 | |
115 | struct memblock memblock __initdata_memblock = { |
116 | .memory.regions = memblock_memory_init_regions, |
117 | .memory.cnt = 1, /* empty dummy entry */ |
118 | .memory.max = INIT_MEMBLOCK_MEMORY_REGIONS, |
119 | .memory.name = "memory" , |
120 | |
121 | .reserved.regions = memblock_reserved_init_regions, |
122 | .reserved.cnt = 1, /* empty dummy entry */ |
123 | .reserved.max = INIT_MEMBLOCK_RESERVED_REGIONS, |
124 | .reserved.name = "reserved" , |
125 | |
126 | .bottom_up = false, |
127 | .current_limit = MEMBLOCK_ALLOC_ANYWHERE, |
128 | }; |
129 | |
130 | #ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP |
131 | struct memblock_type physmem = { |
132 | .regions = memblock_physmem_init_regions, |
133 | .cnt = 1, /* empty dummy entry */ |
134 | .max = INIT_PHYSMEM_REGIONS, |
135 | .name = "physmem" , |
136 | }; |
137 | #endif |
138 | |
139 | /* |
140 | * keep a pointer to &memblock.memory in the text section to use it in |
141 | * __next_mem_range() and its helpers. |
142 | * For architectures that do not keep memblock data after init, this |
143 | * pointer will be reset to NULL at memblock_discard() |
144 | */ |
145 | static __refdata struct memblock_type *memblock_memory = &memblock.memory; |
146 | |
147 | #define for_each_memblock_type(i, memblock_type, rgn) \ |
148 | for (i = 0, rgn = &memblock_type->regions[0]; \ |
149 | i < memblock_type->cnt; \ |
150 | i++, rgn = &memblock_type->regions[i]) |
151 | |
152 | #define memblock_dbg(fmt, ...) \ |
153 | do { \ |
154 | if (memblock_debug) \ |
155 | pr_info(fmt, ##__VA_ARGS__); \ |
156 | } while (0) |
157 | |
158 | static int memblock_debug __initdata_memblock; |
159 | static bool system_has_some_mirror __initdata_memblock; |
160 | static int memblock_can_resize __initdata_memblock; |
161 | static int memblock_memory_in_slab __initdata_memblock; |
162 | static int memblock_reserved_in_slab __initdata_memblock; |
163 | |
164 | bool __init_memblock memblock_has_mirror(void) |
165 | { |
166 | return system_has_some_mirror; |
167 | } |
168 | |
169 | static enum memblock_flags __init_memblock choose_memblock_flags(void) |
170 | { |
171 | return system_has_some_mirror ? MEMBLOCK_MIRROR : MEMBLOCK_NONE; |
172 | } |
173 | |
174 | /* adjust *@size so that (@base + *@size) doesn't overflow, return new size */ |
175 | static inline phys_addr_t memblock_cap_size(phys_addr_t base, phys_addr_t *size) |
176 | { |
177 | return *size = min(*size, PHYS_ADDR_MAX - base); |
178 | } |
179 | |
180 | /* |
181 | * Address comparison utilities |
182 | */ |
183 | static unsigned long __init_memblock memblock_addrs_overlap(phys_addr_t base1, phys_addr_t size1, |
184 | phys_addr_t base2, phys_addr_t size2) |
185 | { |
186 | return ((base1 < (base2 + size2)) && (base2 < (base1 + size1))); |
187 | } |
188 | |
189 | bool __init_memblock memblock_overlaps_region(struct memblock_type *type, |
190 | phys_addr_t base, phys_addr_t size) |
191 | { |
192 | unsigned long i; |
193 | |
194 | memblock_cap_size(base, size: &size); |
195 | |
196 | for (i = 0; i < type->cnt; i++) |
		if (memblock_addrs_overlap(base, size, type->regions[i].base,
					   type->regions[i].size))
199 | break; |
200 | return i < type->cnt; |
201 | } |
202 | |
203 | /** |
204 | * __memblock_find_range_bottom_up - find free area utility in bottom-up |
205 | * @start: start of candidate range |
206 | * @end: end of candidate range, can be %MEMBLOCK_ALLOC_ANYWHERE or |
207 | * %MEMBLOCK_ALLOC_ACCESSIBLE |
208 | * @size: size of free area to find |
209 | * @align: alignment of free area to find |
210 | * @nid: nid of the free area to find, %NUMA_NO_NODE for any node |
211 | * @flags: pick from blocks based on memory attributes |
212 | * |
 * Utility called from memblock_find_in_range_node(); finds a free area bottom-up.
214 | * |
215 | * Return: |
216 | * Found address on success, 0 on failure. |
217 | */ |
218 | static phys_addr_t __init_memblock |
219 | __memblock_find_range_bottom_up(phys_addr_t start, phys_addr_t end, |
220 | phys_addr_t size, phys_addr_t align, int nid, |
221 | enum memblock_flags flags) |
222 | { |
223 | phys_addr_t this_start, this_end, cand; |
224 | u64 i; |
225 | |
226 | for_each_free_mem_range(i, nid, flags, &this_start, &this_end, NULL) { |
227 | this_start = clamp(this_start, start, end); |
228 | this_end = clamp(this_end, start, end); |
229 | |
230 | cand = round_up(this_start, align); |
231 | if (cand < this_end && this_end - cand >= size) |
232 | return cand; |
233 | } |
234 | |
235 | return 0; |
236 | } |
237 | |
238 | /** |
239 | * __memblock_find_range_top_down - find free area utility, in top-down |
240 | * @start: start of candidate range |
241 | * @end: end of candidate range, can be %MEMBLOCK_ALLOC_ANYWHERE or |
242 | * %MEMBLOCK_ALLOC_ACCESSIBLE |
243 | * @size: size of free area to find |
244 | * @align: alignment of free area to find |
245 | * @nid: nid of the free area to find, %NUMA_NO_NODE for any node |
246 | * @flags: pick from blocks based on memory attributes |
247 | * |
 * Utility called from memblock_find_in_range_node(); finds a free area top-down.
249 | * |
250 | * Return: |
251 | * Found address on success, 0 on failure. |
252 | */ |
253 | static phys_addr_t __init_memblock |
254 | __memblock_find_range_top_down(phys_addr_t start, phys_addr_t end, |
255 | phys_addr_t size, phys_addr_t align, int nid, |
256 | enum memblock_flags flags) |
257 | { |
258 | phys_addr_t this_start, this_end, cand; |
259 | u64 i; |
260 | |
261 | for_each_free_mem_range_reverse(i, nid, flags, &this_start, &this_end, |
262 | NULL) { |
263 | this_start = clamp(this_start, start, end); |
264 | this_end = clamp(this_end, start, end); |
265 | |
266 | if (this_end < size) |
267 | continue; |
268 | |
269 | cand = round_down(this_end - size, align); |
270 | if (cand >= this_start) |
271 | return cand; |
272 | } |
273 | |
274 | return 0; |
275 | } |
276 | |
277 | /** |
278 | * memblock_find_in_range_node - find free area in given range and node |
279 | * @size: size of free area to find |
280 | * @align: alignment of free area to find |
281 | * @start: start of candidate range |
282 | * @end: end of candidate range, can be %MEMBLOCK_ALLOC_ANYWHERE or |
283 | * %MEMBLOCK_ALLOC_ACCESSIBLE |
284 | * @nid: nid of the free area to find, %NUMA_NO_NODE for any node |
285 | * @flags: pick from blocks based on memory attributes |
286 | * |
287 | * Find @size free area aligned to @align in the specified range and node. |
288 | * |
289 | * Return: |
290 | * Found address on success, 0 on failure. |
291 | */ |
292 | static phys_addr_t __init_memblock memblock_find_in_range_node(phys_addr_t size, |
293 | phys_addr_t align, phys_addr_t start, |
294 | phys_addr_t end, int nid, |
295 | enum memblock_flags flags) |
296 | { |
297 | /* pump up @end */ |
298 | if (end == MEMBLOCK_ALLOC_ACCESSIBLE || |
299 | end == MEMBLOCK_ALLOC_NOLEAKTRACE) |
300 | end = memblock.current_limit; |
301 | |
302 | /* avoid allocating the first page */ |
303 | start = max_t(phys_addr_t, start, PAGE_SIZE); |
304 | end = max(start, end); |
305 | |
306 | if (memblock_bottom_up()) |
307 | return __memblock_find_range_bottom_up(start, end, size, align, |
308 | nid, flags); |
309 | else |
310 | return __memblock_find_range_top_down(start, end, size, align, |
311 | nid, flags); |
312 | } |
313 | |
314 | /** |
315 | * memblock_find_in_range - find free area in given range |
316 | * @start: start of candidate range |
317 | * @end: end of candidate range, can be %MEMBLOCK_ALLOC_ANYWHERE or |
318 | * %MEMBLOCK_ALLOC_ACCESSIBLE |
319 | * @size: size of free area to find |
320 | * @align: alignment of free area to find |
321 | * |
322 | * Find @size free area aligned to @align in the specified range. |
323 | * |
324 | * Return: |
325 | * Found address on success, 0 on failure. |
326 | */ |
327 | static phys_addr_t __init_memblock memblock_find_in_range(phys_addr_t start, |
328 | phys_addr_t end, phys_addr_t size, |
329 | phys_addr_t align) |
330 | { |
331 | phys_addr_t ret; |
332 | enum memblock_flags flags = choose_memblock_flags(); |
333 | |
334 | again: |
335 | ret = memblock_find_in_range_node(size, align, start, end, |
336 | NUMA_NO_NODE, flags); |
337 | |
338 | if (!ret && (flags & MEMBLOCK_MIRROR)) { |
339 | pr_warn_ratelimited("Could not allocate %pap bytes of mirrored memory\n" , |
340 | &size); |
341 | flags &= ~MEMBLOCK_MIRROR; |
342 | goto again; |
343 | } |
344 | |
345 | return ret; |
346 | } |
347 | |
348 | static void __init_memblock memblock_remove_region(struct memblock_type *type, unsigned long r) |
349 | { |
350 | type->total_size -= type->regions[r].size; |
351 | memmove(&type->regions[r], &type->regions[r + 1], |
352 | (type->cnt - (r + 1)) * sizeof(type->regions[r])); |
353 | type->cnt--; |
354 | |
355 | /* Special case for empty arrays */ |
356 | if (type->cnt == 0) { |
357 | WARN_ON(type->total_size != 0); |
358 | type->cnt = 1; |
359 | type->regions[0].base = 0; |
360 | type->regions[0].size = 0; |
361 | type->regions[0].flags = 0; |
362 | memblock_set_region_node(r: &type->regions[0], MAX_NUMNODES); |
363 | } |
364 | } |
365 | |
366 | #ifndef CONFIG_ARCH_KEEP_MEMBLOCK |
367 | /** |
368 | * memblock_discard - discard memory and reserved arrays if they were allocated |
369 | */ |
370 | void __init memblock_discard(void) |
371 | { |
372 | phys_addr_t addr, size; |
373 | |
374 | if (memblock.reserved.regions != memblock_reserved_init_regions) { |
375 | addr = __pa(memblock.reserved.regions); |
376 | size = PAGE_ALIGN(sizeof(struct memblock_region) * |
377 | memblock.reserved.max); |
378 | if (memblock_reserved_in_slab) |
			kfree(memblock.reserved.regions);
		else
			memblock_free_late(addr, size);
382 | } |
383 | |
384 | if (memblock.memory.regions != memblock_memory_init_regions) { |
385 | addr = __pa(memblock.memory.regions); |
386 | size = PAGE_ALIGN(sizeof(struct memblock_region) * |
387 | memblock.memory.max); |
388 | if (memblock_memory_in_slab) |
			kfree(memblock.memory.regions);
		else
			memblock_free_late(addr, size);
392 | } |
393 | |
394 | memblock_memory = NULL; |
395 | } |
396 | #endif |
397 | |
398 | /** |
399 | * memblock_double_array - double the size of the memblock regions array |
400 | * @type: memblock type of the regions array being doubled |
401 | * @new_area_start: starting address of memory range to avoid overlap with |
402 | * @new_area_size: size of memory range to avoid overlap with |
403 | * |
404 | * Double the size of the @type regions array. If memblock is being used to |
405 | * allocate memory for a new reserved regions array and there is a previously |
406 | * allocated memory range [@new_area_start, @new_area_start + @new_area_size] |
407 | * waiting to be reserved, ensure the memory used by the new array does |
408 | * not overlap. |
409 | * |
410 | * Return: |
411 | * 0 on success, -1 on failure. |
412 | */ |
413 | static int __init_memblock memblock_double_array(struct memblock_type *type, |
414 | phys_addr_t new_area_start, |
415 | phys_addr_t new_area_size) |
416 | { |
417 | struct memblock_region *new_array, *old_array; |
418 | phys_addr_t old_alloc_size, new_alloc_size; |
419 | phys_addr_t old_size, new_size, addr, new_end; |
420 | int use_slab = slab_is_available(); |
421 | int *in_slab; |
422 | |
423 | /* We don't allow resizing until we know about the reserved regions |
424 | * of memory that aren't suitable for allocation |
425 | */ |
426 | if (!memblock_can_resize) |
427 | panic(fmt: "memblock: cannot resize %s array\n" , type->name); |
428 | |
429 | /* Calculate new doubled size */ |
430 | old_size = type->max * sizeof(struct memblock_region); |
431 | new_size = old_size << 1; |
432 | /* |
	 * We need to allocate the new one aligned to PAGE_SIZE,
	 * so we can free it completely later.
435 | */ |
436 | old_alloc_size = PAGE_ALIGN(old_size); |
437 | new_alloc_size = PAGE_ALIGN(new_size); |
438 | |
439 | /* Retrieve the slab flag */ |
440 | if (type == &memblock.memory) |
441 | in_slab = &memblock_memory_in_slab; |
442 | else |
443 | in_slab = &memblock_reserved_in_slab; |
444 | |
445 | /* Try to find some space for it */ |
446 | if (use_slab) { |
		new_array = kmalloc(new_size, GFP_KERNEL);
448 | addr = new_array ? __pa(new_array) : 0; |
449 | } else { |
450 | /* only exclude range when trying to double reserved.regions */ |
451 | if (type != &memblock.reserved) |
452 | new_area_start = new_area_size = 0; |
453 | |
		addr = memblock_find_in_range(new_area_start + new_area_size,
						memblock.current_limit,
						new_alloc_size, PAGE_SIZE);
		if (!addr && new_area_size)
			addr = memblock_find_in_range(0,
				min(new_area_start, memblock.current_limit),
				new_alloc_size, PAGE_SIZE);
461 | |
462 | new_array = addr ? __va(addr) : NULL; |
463 | } |
464 | if (!addr) { |
465 | pr_err("memblock: Failed to double %s array from %ld to %ld entries !\n" , |
466 | type->name, type->max, type->max * 2); |
467 | return -1; |
468 | } |
469 | |
470 | new_end = addr + new_size - 1; |
471 | memblock_dbg("memblock: %s is doubled to %ld at [%pa-%pa]" , |
472 | type->name, type->max * 2, &addr, &new_end); |
473 | |
474 | /* |
475 | * Found space, we now need to move the array over before we add the |
476 | * reserved region since it may be our reserved array itself that is |
477 | * full. |
478 | */ |
479 | memcpy(new_array, type->regions, old_size); |
480 | memset(new_array + type->max, 0, old_size); |
481 | old_array = type->regions; |
482 | type->regions = new_array; |
483 | type->max <<= 1; |
484 | |
485 | /* Free old array. We needn't free it if the array is the static one */ |
486 | if (*in_slab) |
		kfree(old_array);
488 | else if (old_array != memblock_memory_init_regions && |
489 | old_array != memblock_reserved_init_regions) |
		memblock_free(old_array, old_alloc_size);
491 | |
492 | /* |
493 | * Reserve the new array if that comes from the memblock. Otherwise, we |
494 | * needn't do it |
495 | */ |
496 | if (!use_slab) |
497 | BUG_ON(memblock_reserve(addr, new_alloc_size)); |
498 | |
499 | /* Update slab flag */ |
500 | *in_slab = use_slab; |
501 | |
502 | return 0; |
503 | } |
504 | |
505 | /** |
506 | * memblock_merge_regions - merge neighboring compatible regions |
507 | * @type: memblock type to scan |
508 | * @start_rgn: start scanning from (@start_rgn - 1) |
509 | * @end_rgn: end scanning at (@end_rgn - 1) |
 *
 * Scan @type and merge neighboring compatible regions in
 * [@start_rgn - 1, @end_rgn).
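 *
 * For example, two adjacent regions [0, 4M) and [4M, 8M) with the same
 * node id and flags are collapsed into a single region [0, 8M).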
511 | */ |
512 | static void __init_memblock memblock_merge_regions(struct memblock_type *type, |
513 | unsigned long start_rgn, |
514 | unsigned long end_rgn) |
515 | { |
516 | int i = 0; |
517 | if (start_rgn) |
518 | i = start_rgn - 1; |
519 | end_rgn = min(end_rgn, type->cnt - 1); |
520 | while (i < end_rgn) { |
521 | struct memblock_region *this = &type->regions[i]; |
522 | struct memblock_region *next = &type->regions[i + 1]; |
523 | |
524 | if (this->base + this->size != next->base || |
		    memblock_get_region_node(this) !=
		    memblock_get_region_node(next) ||
527 | this->flags != next->flags) { |
528 | BUG_ON(this->base + this->size > next->base); |
529 | i++; |
530 | continue; |
531 | } |
532 | |
533 | this->size += next->size; |
534 | /* move forward from next + 1, index of which is i + 2 */ |
535 | memmove(next, next + 1, (type->cnt - (i + 2)) * sizeof(*next)); |
536 | type->cnt--; |
537 | end_rgn--; |
538 | } |
539 | } |
540 | |
541 | /** |
542 | * memblock_insert_region - insert new memblock region |
543 | * @type: memblock type to insert into |
544 | * @idx: index for the insertion point |
545 | * @base: base address of the new region |
546 | * @size: size of the new region |
547 | * @nid: node id of the new region |
548 | * @flags: flags of the new region |
549 | * |
550 | * Insert new memblock region [@base, @base + @size) into @type at @idx. |
551 | * @type must already have extra room to accommodate the new region. |
552 | */ |
553 | static void __init_memblock memblock_insert_region(struct memblock_type *type, |
554 | int idx, phys_addr_t base, |
555 | phys_addr_t size, |
556 | int nid, |
557 | enum memblock_flags flags) |
558 | { |
559 | struct memblock_region *rgn = &type->regions[idx]; |
560 | |
561 | BUG_ON(type->cnt >= type->max); |
562 | memmove(rgn + 1, rgn, (type->cnt - idx) * sizeof(*rgn)); |
563 | rgn->base = base; |
564 | rgn->size = size; |
565 | rgn->flags = flags; |
	memblock_set_region_node(rgn, nid);
567 | type->cnt++; |
568 | type->total_size += size; |
569 | } |
570 | |
571 | /** |
572 | * memblock_add_range - add new memblock region |
573 | * @type: memblock type to add new region into |
574 | * @base: base address of the new region |
575 | * @size: size of the new region |
576 | * @nid: nid of the new region |
577 | * @flags: flags of the new region |
578 | * |
579 | * Add new memblock region [@base, @base + @size) into @type. The new region |
580 | * is allowed to overlap with existing ones - overlaps don't affect already |
581 | * existing regions. @type is guaranteed to be minimal (all neighbouring |
582 | * compatible regions are merged) after the addition. |
583 | * |
584 | * Return: |
585 | * 0 on success, -errno on failure. |
586 | */ |
587 | static int __init_memblock memblock_add_range(struct memblock_type *type, |
588 | phys_addr_t base, phys_addr_t size, |
589 | int nid, enum memblock_flags flags) |
590 | { |
591 | bool insert = false; |
592 | phys_addr_t obase = base; |
593 | phys_addr_t end = base + memblock_cap_size(base, size: &size); |
594 | int idx, nr_new, start_rgn = -1, end_rgn; |
595 | struct memblock_region *rgn; |
596 | |
597 | if (!size) |
598 | return 0; |
599 | |
600 | /* special case for empty array */ |
601 | if (type->regions[0].size == 0) { |
602 | WARN_ON(type->cnt != 1 || type->total_size); |
603 | type->regions[0].base = base; |
604 | type->regions[0].size = size; |
605 | type->regions[0].flags = flags; |
606 | memblock_set_region_node(r: &type->regions[0], nid); |
607 | type->total_size = size; |
608 | return 0; |
609 | } |
610 | |
611 | /* |
612 | * The worst case is when new range overlaps all existing regions, |
613 | * then we'll need type->cnt + 1 empty regions in @type. So if |
614 | * type->cnt * 2 + 1 is less than or equal to type->max, we know |
	 * that there are enough empty regions in @type, and we can insert
616 | * regions directly. |
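	 *
	 * For example, adding [0, 50) on top of existing regions [10, 20)
	 * and [30, 40) inserts the three pieces [0, 10), [20, 30) and
	 * [40, 50), i.e. cnt + 1 new regions for cnt == 2.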
617 | */ |
618 | if (type->cnt * 2 + 1 <= type->max) |
619 | insert = true; |
620 | |
621 | repeat: |
622 | /* |
623 | * The following is executed twice. Once with %false @insert and |
624 | * then with %true. The first counts the number of regions needed |
625 | * to accommodate the new area. The second actually inserts them. |
626 | */ |
627 | base = obase; |
628 | nr_new = 0; |
629 | |
630 | for_each_memblock_type(idx, type, rgn) { |
631 | phys_addr_t rbase = rgn->base; |
632 | phys_addr_t rend = rbase + rgn->size; |
633 | |
634 | if (rbase >= end) |
635 | break; |
636 | if (rend <= base) |
637 | continue; |
638 | /* |
639 | * @rgn overlaps. If it separates the lower part of new |
640 | * area, insert that portion. |
641 | */ |
642 | if (rbase > base) { |
643 | #ifdef CONFIG_NUMA |
644 | WARN_ON(nid != memblock_get_region_node(rgn)); |
645 | #endif |
646 | WARN_ON(flags != rgn->flags); |
647 | nr_new++; |
648 | if (insert) { |
649 | if (start_rgn == -1) |
650 | start_rgn = idx; |
651 | end_rgn = idx + 1; |
				memblock_insert_region(type, idx++, base,
						       rbase - base, nid,
						       flags);
655 | } |
656 | } |
657 | /* area below @rend is dealt with, forget about it */ |
658 | base = min(rend, end); |
659 | } |
660 | |
661 | /* insert the remaining portion */ |
662 | if (base < end) { |
663 | nr_new++; |
664 | if (insert) { |
665 | if (start_rgn == -1) |
666 | start_rgn = idx; |
667 | end_rgn = idx + 1; |
			memblock_insert_region(type, idx, base, end - base,
669 | nid, flags); |
670 | } |
671 | } |
672 | |
673 | if (!nr_new) |
674 | return 0; |
675 | |
676 | /* |
677 | * If this was the first round, resize array and repeat for actual |
678 | * insertions; otherwise, merge and return. |
679 | */ |
680 | if (!insert) { |
681 | while (type->cnt + nr_new > type->max) |
			if (memblock_double_array(type, obase, size) < 0)
683 | return -ENOMEM; |
684 | insert = true; |
685 | goto repeat; |
686 | } else { |
687 | memblock_merge_regions(type, start_rgn, end_rgn); |
688 | return 0; |
689 | } |
690 | } |
691 | |
692 | /** |
693 | * memblock_add_node - add new memblock region within a NUMA node |
694 | * @base: base address of the new region |
695 | * @size: size of the new region |
696 | * @nid: nid of the new region |
697 | * @flags: flags of the new region |
698 | * |
699 | * Add new memblock region [@base, @base + @size) to the "memory" |
 * type. See memblock_add_range() description for more details.
701 | * |
702 | * Return: |
703 | * 0 on success, -errno on failure. |
704 | */ |
705 | int __init_memblock memblock_add_node(phys_addr_t base, phys_addr_t size, |
706 | int nid, enum memblock_flags flags) |
707 | { |
708 | phys_addr_t end = base + size - 1; |
709 | |
710 | memblock_dbg("%s: [%pa-%pa] nid=%d flags=%x %pS\n" , __func__, |
711 | &base, &end, nid, flags, (void *)_RET_IP_); |
712 | |
713 | return memblock_add_range(type: &memblock.memory, base, size, nid, flags); |
714 | } |
715 | |
716 | /** |
717 | * memblock_add - add new memblock region |
718 | * @base: base address of the new region |
719 | * @size: size of the new region |
720 | * |
721 | * Add new memblock region [@base, @base + @size) to the "memory" |
 * type. See memblock_add_range() description for more details.
723 | * |
724 | * Return: |
725 | * 0 on success, -errno on failure. |
726 | */ |
727 | int __init_memblock memblock_add(phys_addr_t base, phys_addr_t size) |
728 | { |
729 | phys_addr_t end = base + size - 1; |
730 | |
731 | memblock_dbg("%s: [%pa-%pa] %pS\n" , __func__, |
732 | &base, &end, (void *)_RET_IP_); |
733 | |
734 | return memblock_add_range(type: &memblock.memory, base, size, MAX_NUMNODES, flags: 0); |
735 | } |
736 | |
737 | /** |
738 | * memblock_isolate_range - isolate given range into disjoint memblocks |
739 | * @type: memblock type to isolate range for |
740 | * @base: base of range to isolate |
741 | * @size: size of range to isolate |
742 | * @start_rgn: out parameter for the start of isolated region |
743 | * @end_rgn: out parameter for the end of isolated region |
744 | * |
745 | * Walk @type and ensure that regions don't cross the boundaries defined by |
746 | * [@base, @base + @size). Crossing regions are split at the boundaries, |
747 | * which may create at most two more regions. The index of the first |
748 | * region inside the range is returned in *@start_rgn and end in *@end_rgn. |
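 *
 * For example, isolating [4M, 8M) from a @type that holds a single
 * region [0, 16M) leaves three regions [0, 4M), [4M, 8M) and
 * [8M, 16M), with *@start_rgn == 1 and *@end_rgn == 2.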
749 | * |
750 | * Return: |
751 | * 0 on success, -errno on failure. |
752 | */ |
753 | static int __init_memblock memblock_isolate_range(struct memblock_type *type, |
754 | phys_addr_t base, phys_addr_t size, |
755 | int *start_rgn, int *end_rgn) |
756 | { |
757 | phys_addr_t end = base + memblock_cap_size(base, size: &size); |
758 | int idx; |
759 | struct memblock_region *rgn; |
760 | |
761 | *start_rgn = *end_rgn = 0; |
762 | |
763 | if (!size) |
764 | return 0; |
765 | |
766 | /* we'll create at most two more regions */ |
767 | while (type->cnt + 2 > type->max) |
		if (memblock_double_array(type, base, size) < 0)
769 | return -ENOMEM; |
770 | |
771 | for_each_memblock_type(idx, type, rgn) { |
772 | phys_addr_t rbase = rgn->base; |
773 | phys_addr_t rend = rbase + rgn->size; |
774 | |
775 | if (rbase >= end) |
776 | break; |
777 | if (rend <= base) |
778 | continue; |
779 | |
780 | if (rbase < base) { |
781 | /* |
782 | * @rgn intersects from below. Split and continue |
783 | * to process the next region - the new top half. |
784 | */ |
785 | rgn->base = base; |
786 | rgn->size -= base - rbase; |
787 | type->total_size -= base - rbase; |
			memblock_insert_region(type, idx, rbase, base - rbase,
					       memblock_get_region_node(rgn),
					       rgn->flags);
791 | } else if (rend > end) { |
792 | /* |
793 | * @rgn intersects from above. Split and redo the |
794 | * current region - the new bottom half. |
795 | */ |
796 | rgn->base = end; |
797 | rgn->size -= end - rbase; |
798 | type->total_size -= end - rbase; |
			memblock_insert_region(type, idx--, rbase, end - rbase,
					       memblock_get_region_node(rgn),
					       rgn->flags);
802 | } else { |
803 | /* @rgn is fully contained, record it */ |
804 | if (!*end_rgn) |
805 | *start_rgn = idx; |
806 | *end_rgn = idx + 1; |
807 | } |
808 | } |
809 | |
810 | return 0; |
811 | } |
812 | |
813 | static int __init_memblock memblock_remove_range(struct memblock_type *type, |
814 | phys_addr_t base, phys_addr_t size) |
815 | { |
816 | int start_rgn, end_rgn; |
817 | int i, ret; |
818 | |
	ret = memblock_isolate_range(type, base, size, &start_rgn, &end_rgn);
820 | if (ret) |
821 | return ret; |
822 | |
823 | for (i = end_rgn - 1; i >= start_rgn; i--) |
		memblock_remove_region(type, i);
825 | return 0; |
826 | } |
827 | |
828 | int __init_memblock memblock_remove(phys_addr_t base, phys_addr_t size) |
829 | { |
830 | phys_addr_t end = base + size - 1; |
831 | |
832 | memblock_dbg("%s: [%pa-%pa] %pS\n" , __func__, |
833 | &base, &end, (void *)_RET_IP_); |
834 | |
835 | return memblock_remove_range(type: &memblock.memory, base, size); |
836 | } |
837 | |
838 | /** |
839 | * memblock_free - free boot memory allocation |
840 | * @ptr: starting address of the boot memory allocation |
841 | * @size: size of the boot memory block in bytes |
842 | * |
843 | * Free boot memory block previously allocated by memblock_alloc_xx() API. |
 * The freed memory will not be released to the buddy allocator.
845 | */ |
846 | void __init_memblock memblock_free(void *ptr, size_t size) |
847 | { |
848 | if (ptr) |
849 | memblock_phys_free(__pa(ptr), size); |
850 | } |
851 | |
852 | /** |
853 | * memblock_phys_free - free boot memory block |
854 | * @base: phys starting address of the boot memory block |
855 | * @size: size of the boot memory block in bytes |
856 | * |
857 | * Free boot memory block previously allocated by memblock_phys_alloc_xx() API. |
 * The freed memory will not be released to the buddy allocator.
859 | */ |
860 | int __init_memblock memblock_phys_free(phys_addr_t base, phys_addr_t size) |
861 | { |
862 | phys_addr_t end = base + size - 1; |
863 | |
864 | memblock_dbg("%s: [%pa-%pa] %pS\n" , __func__, |
865 | &base, &end, (void *)_RET_IP_); |
866 | |
867 | kmemleak_free_part_phys(phys: base, size); |
868 | return memblock_remove_range(type: &memblock.reserved, base, size); |
869 | } |
870 | |
871 | int __init_memblock memblock_reserve(phys_addr_t base, phys_addr_t size) |
872 | { |
873 | phys_addr_t end = base + size - 1; |
874 | |
875 | memblock_dbg("%s: [%pa-%pa] %pS\n" , __func__, |
876 | &base, &end, (void *)_RET_IP_); |
877 | |
878 | return memblock_add_range(type: &memblock.reserved, base, size, MAX_NUMNODES, flags: 0); |
879 | } |
880 | |
881 | #ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP |
882 | int __init_memblock memblock_physmem_add(phys_addr_t base, phys_addr_t size) |
883 | { |
884 | phys_addr_t end = base + size - 1; |
885 | |
886 | memblock_dbg("%s: [%pa-%pa] %pS\n" , __func__, |
887 | &base, &end, (void *)_RET_IP_); |
888 | |
889 | return memblock_add_range(&physmem, base, size, MAX_NUMNODES, 0); |
890 | } |
891 | #endif |
892 | |
893 | /** |
894 | * memblock_setclr_flag - set or clear flag for a memory region |
895 | * @type: memblock type to set/clear flag for |
896 | * @base: base address of the region |
897 | * @size: size of the region |
898 | * @set: set or clear the flag |
899 | * @flag: the flag to update |
900 | * |
 * This function isolates region [@base, @base + @size) and sets/clears @flag.
902 | * |
903 | * Return: 0 on success, -errno on failure. |
904 | */ |
905 | static int __init_memblock memblock_setclr_flag(struct memblock_type *type, |
906 | phys_addr_t base, phys_addr_t size, int set, int flag) |
907 | { |
908 | int i, ret, start_rgn, end_rgn; |
909 | |
	ret = memblock_isolate_range(type, base, size, &start_rgn, &end_rgn);
911 | if (ret) |
912 | return ret; |
913 | |
914 | for (i = start_rgn; i < end_rgn; i++) { |
915 | struct memblock_region *r = &type->regions[i]; |
916 | |
917 | if (set) |
918 | r->flags |= flag; |
919 | else |
920 | r->flags &= ~flag; |
921 | } |
922 | |
923 | memblock_merge_regions(type, start_rgn, end_rgn); |
924 | return 0; |
925 | } |
926 | |
927 | /** |
928 | * memblock_mark_hotplug - Mark hotpluggable memory with flag MEMBLOCK_HOTPLUG. |
929 | * @base: the base phys addr of the region |
930 | * @size: the size of the region |
931 | * |
932 | * Return: 0 on success, -errno on failure. |
933 | */ |
934 | int __init_memblock memblock_mark_hotplug(phys_addr_t base, phys_addr_t size) |
935 | { |
936 | return memblock_setclr_flag(type: &memblock.memory, base, size, set: 1, flag: MEMBLOCK_HOTPLUG); |
937 | } |
938 | |
939 | /** |
940 | * memblock_clear_hotplug - Clear flag MEMBLOCK_HOTPLUG for a specified region. |
941 | * @base: the base phys addr of the region |
942 | * @size: the size of the region |
943 | * |
944 | * Return: 0 on success, -errno on failure. |
945 | */ |
946 | int __init_memblock memblock_clear_hotplug(phys_addr_t base, phys_addr_t size) |
947 | { |
948 | return memblock_setclr_flag(type: &memblock.memory, base, size, set: 0, flag: MEMBLOCK_HOTPLUG); |
949 | } |
950 | |
951 | /** |
952 | * memblock_mark_mirror - Mark mirrored memory with flag MEMBLOCK_MIRROR. |
953 | * @base: the base phys addr of the region |
954 | * @size: the size of the region |
955 | * |
956 | * Return: 0 on success, -errno on failure. |
957 | */ |
958 | int __init_memblock memblock_mark_mirror(phys_addr_t base, phys_addr_t size) |
959 | { |
960 | if (!mirrored_kernelcore) |
961 | return 0; |
962 | |
963 | system_has_some_mirror = true; |
964 | |
965 | return memblock_setclr_flag(type: &memblock.memory, base, size, set: 1, flag: MEMBLOCK_MIRROR); |
966 | } |
967 | |
968 | /** |
969 | * memblock_mark_nomap - Mark a memory region with flag MEMBLOCK_NOMAP. |
970 | * @base: the base phys addr of the region |
971 | * @size: the size of the region |
972 | * |
973 | * The memory regions marked with %MEMBLOCK_NOMAP will not be added to the |
974 | * direct mapping of the physical memory. These regions will still be |
975 | * covered by the memory map. The struct page representing NOMAP memory |
976 | * frames in the memory map will be PageReserved() |
977 | * |
978 | * Note: if the memory being marked %MEMBLOCK_NOMAP was allocated from |
979 | * memblock, the caller must inform kmemleak to ignore that memory |
980 | * |
981 | * Return: 0 on success, -errno on failure. |
982 | */ |
983 | int __init_memblock memblock_mark_nomap(phys_addr_t base, phys_addr_t size) |
984 | { |
985 | return memblock_setclr_flag(type: &memblock.memory, base, size, set: 1, flag: MEMBLOCK_NOMAP); |
986 | } |
987 | |
988 | /** |
989 | * memblock_clear_nomap - Clear flag MEMBLOCK_NOMAP for a specified region. |
990 | * @base: the base phys addr of the region |
991 | * @size: the size of the region |
992 | * |
993 | * Return: 0 on success, -errno on failure. |
994 | */ |
995 | int __init_memblock memblock_clear_nomap(phys_addr_t base, phys_addr_t size) |
996 | { |
997 | return memblock_setclr_flag(type: &memblock.memory, base, size, set: 0, flag: MEMBLOCK_NOMAP); |
998 | } |
999 | |
1000 | /** |
1001 | * memblock_reserved_mark_noinit - Mark a reserved memory region with flag |
1002 | * MEMBLOCK_RSRV_NOINIT which results in the struct pages not being initialized |
1003 | * for this region. |
1004 | * @base: the base phys addr of the region |
1005 | * @size: the size of the region |
1006 | * |
1007 | * struct pages will not be initialized for reserved memory regions marked with |
1008 | * %MEMBLOCK_RSRV_NOINIT. |
1009 | * |
1010 | * Return: 0 on success, -errno on failure. |
1011 | */ |
1012 | int __init_memblock memblock_reserved_mark_noinit(phys_addr_t base, phys_addr_t size) |
1013 | { |
1014 | return memblock_setclr_flag(type: &memblock.reserved, base, size, set: 1, |
1015 | flag: MEMBLOCK_RSRV_NOINIT); |
1016 | } |
1017 | |
1018 | static bool should_skip_region(struct memblock_type *type, |
1019 | struct memblock_region *m, |
1020 | int nid, int flags) |
1021 | { |
	int m_nid = memblock_get_region_node(m);
1023 | |
1024 | /* we never skip regions when iterating memblock.reserved or physmem */ |
1025 | if (type != memblock_memory) |
1026 | return false; |
1027 | |
1028 | /* only memory regions are associated with nodes, check it */ |
1029 | if (nid != NUMA_NO_NODE && nid != m_nid) |
1030 | return true; |
1031 | |
1032 | /* skip hotpluggable memory regions if needed */ |
1033 | if (movable_node_is_enabled() && memblock_is_hotpluggable(m) && |
1034 | !(flags & MEMBLOCK_HOTPLUG)) |
1035 | return true; |
1036 | |
1037 | /* if we want mirror memory skip non-mirror memory regions */ |
1038 | if ((flags & MEMBLOCK_MIRROR) && !memblock_is_mirror(m)) |
1039 | return true; |
1040 | |
1041 | /* skip nomap memory unless we were asked for it explicitly */ |
1042 | if (!(flags & MEMBLOCK_NOMAP) && memblock_is_nomap(m)) |
1043 | return true; |
1044 | |
1045 | /* skip driver-managed memory unless we were asked for it explicitly */ |
1046 | if (!(flags & MEMBLOCK_DRIVER_MANAGED) && memblock_is_driver_managed(m)) |
1047 | return true; |
1048 | |
1049 | return false; |
1050 | } |
1051 | |
1052 | /** |
1053 | * __next_mem_range - next function for for_each_free_mem_range() etc. |
1054 | * @idx: pointer to u64 loop variable |
1055 | * @nid: node selector, %NUMA_NO_NODE for all nodes |
1056 | * @flags: pick from blocks based on memory attributes |
1057 | * @type_a: pointer to memblock_type from where the range is taken |
1058 | * @type_b: pointer to memblock_type which excludes memory from being taken |
1059 | * @out_start: ptr to phys_addr_t for start address of the range, can be %NULL |
1060 | * @out_end: ptr to phys_addr_t for end address of the range, can be %NULL |
1061 | * @out_nid: ptr to int for nid of the range, can be %NULL |
1062 | * |
1063 | * Find the first area from *@idx which matches @nid, fill the out |
1064 | * parameters, and update *@idx for the next iteration. The lower 32bit of |
1065 | * *@idx contains index into type_a and the upper 32bit indexes the |
1066 | * areas before each region in type_b. For example, if type_b regions |
1067 | * look like the following, |
1068 | * |
1069 | * 0:[0-16), 1:[32-48), 2:[128-130) |
1070 | * |
1071 | * The upper 32bit indexes the following regions. |
1072 | * |
1073 | * 0:[0-0), 1:[16-32), 2:[48-128), 3:[130-MAX) |
1074 | * |
1075 | * As both region arrays are sorted, the function advances the two indices |
1076 | * in lockstep and returns each intersection. |
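 *
 * For instance, *@idx == 0x0000000200000001 resumes the walk at
 * type_a->regions[1] and at the gap preceding type_b->regions[2].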
1077 | */ |
1078 | void __next_mem_range(u64 *idx, int nid, enum memblock_flags flags, |
1079 | struct memblock_type *type_a, |
1080 | struct memblock_type *type_b, phys_addr_t *out_start, |
1081 | phys_addr_t *out_end, int *out_nid) |
1082 | { |
1083 | int idx_a = *idx & 0xffffffff; |
1084 | int idx_b = *idx >> 32; |
1085 | |
1086 | if (WARN_ONCE(nid == MAX_NUMNODES, |
1087 | "Usage of MAX_NUMNODES is deprecated. Use NUMA_NO_NODE instead\n" )) |
1088 | nid = NUMA_NO_NODE; |
1089 | |
1090 | for (; idx_a < type_a->cnt; idx_a++) { |
1091 | struct memblock_region *m = &type_a->regions[idx_a]; |
1092 | |
1093 | phys_addr_t m_start = m->base; |
1094 | phys_addr_t m_end = m->base + m->size; |
		int m_nid = memblock_get_region_node(m);

		if (should_skip_region(type_a, m, nid, flags))
1098 | continue; |
1099 | |
1100 | if (!type_b) { |
1101 | if (out_start) |
1102 | *out_start = m_start; |
1103 | if (out_end) |
1104 | *out_end = m_end; |
1105 | if (out_nid) |
1106 | *out_nid = m_nid; |
1107 | idx_a++; |
1108 | *idx = (u32)idx_a | (u64)idx_b << 32; |
1109 | return; |
1110 | } |
1111 | |
1112 | /* scan areas before each reservation */ |
1113 | for (; idx_b < type_b->cnt + 1; idx_b++) { |
1114 | struct memblock_region *r; |
1115 | phys_addr_t r_start; |
1116 | phys_addr_t r_end; |
1117 | |
1118 | r = &type_b->regions[idx_b]; |
1119 | r_start = idx_b ? r[-1].base + r[-1].size : 0; |
1120 | r_end = idx_b < type_b->cnt ? |
1121 | r->base : PHYS_ADDR_MAX; |
1122 | |
1123 | /* |
1124 | * if idx_b advanced past idx_a, |
1125 | * break out to advance idx_a |
1126 | */ |
1127 | if (r_start >= m_end) |
1128 | break; |
1129 | /* if the two regions intersect, we're done */ |
1130 | if (m_start < r_end) { |
1131 | if (out_start) |
1132 | *out_start = |
1133 | max(m_start, r_start); |
1134 | if (out_end) |
1135 | *out_end = min(m_end, r_end); |
1136 | if (out_nid) |
1137 | *out_nid = m_nid; |
1138 | /* |
1139 | * The region which ends first is |
1140 | * advanced for the next iteration. |
1141 | */ |
1142 | if (m_end <= r_end) |
1143 | idx_a++; |
1144 | else |
1145 | idx_b++; |
1146 | *idx = (u32)idx_a | (u64)idx_b << 32; |
1147 | return; |
1148 | } |
1149 | } |
1150 | } |
1151 | |
1152 | /* signal end of iteration */ |
1153 | *idx = ULLONG_MAX; |
1154 | } |
1155 | |
1156 | /** |
1157 | * __next_mem_range_rev - generic next function for for_each_*_range_rev() |
1158 | * |
1159 | * @idx: pointer to u64 loop variable |
1160 | * @nid: node selector, %NUMA_NO_NODE for all nodes |
1161 | * @flags: pick from blocks based on memory attributes |
1162 | * @type_a: pointer to memblock_type from where the range is taken |
1163 | * @type_b: pointer to memblock_type which excludes memory from being taken |
1164 | * @out_start: ptr to phys_addr_t for start address of the range, can be %NULL |
1165 | * @out_end: ptr to phys_addr_t for end address of the range, can be %NULL |
1166 | * @out_nid: ptr to int for nid of the range, can be %NULL |
1167 | * |
1168 | * Finds the next range from type_a which is not marked as unsuitable |
1169 | * in type_b. |
1170 | * |
1171 | * Reverse of __next_mem_range(). |
1172 | */ |
1173 | void __init_memblock __next_mem_range_rev(u64 *idx, int nid, |
1174 | enum memblock_flags flags, |
1175 | struct memblock_type *type_a, |
1176 | struct memblock_type *type_b, |
1177 | phys_addr_t *out_start, |
1178 | phys_addr_t *out_end, int *out_nid) |
1179 | { |
1180 | int idx_a = *idx & 0xffffffff; |
1181 | int idx_b = *idx >> 32; |
1182 | |
	if (WARN_ONCE(nid == MAX_NUMNODES, "Usage of MAX_NUMNODES is deprecated. Use NUMA_NO_NODE instead\n"))
1184 | nid = NUMA_NO_NODE; |
1185 | |
1186 | if (*idx == (u64)ULLONG_MAX) { |
1187 | idx_a = type_a->cnt - 1; |
1188 | if (type_b != NULL) |
1189 | idx_b = type_b->cnt; |
1190 | else |
1191 | idx_b = 0; |
1192 | } |
1193 | |
1194 | for (; idx_a >= 0; idx_a--) { |
1195 | struct memblock_region *m = &type_a->regions[idx_a]; |
1196 | |
1197 | phys_addr_t m_start = m->base; |
1198 | phys_addr_t m_end = m->base + m->size; |
		int m_nid = memblock_get_region_node(m);

		if (should_skip_region(type_a, m, nid, flags))
1202 | continue; |
1203 | |
1204 | if (!type_b) { |
1205 | if (out_start) |
1206 | *out_start = m_start; |
1207 | if (out_end) |
1208 | *out_end = m_end; |
1209 | if (out_nid) |
1210 | *out_nid = m_nid; |
1211 | idx_a--; |
1212 | *idx = (u32)idx_a | (u64)idx_b << 32; |
1213 | return; |
1214 | } |
1215 | |
1216 | /* scan areas before each reservation */ |
1217 | for (; idx_b >= 0; idx_b--) { |
1218 | struct memblock_region *r; |
1219 | phys_addr_t r_start; |
1220 | phys_addr_t r_end; |
1221 | |
1222 | r = &type_b->regions[idx_b]; |
1223 | r_start = idx_b ? r[-1].base + r[-1].size : 0; |
1224 | r_end = idx_b < type_b->cnt ? |
1225 | r->base : PHYS_ADDR_MAX; |
1226 | /* |
1227 | * if idx_b advanced past idx_a, |
1228 | * break out to advance idx_a |
1229 | */ |
1230 | |
1231 | if (r_end <= m_start) |
1232 | break; |
1233 | /* if the two regions intersect, we're done */ |
1234 | if (m_end > r_start) { |
1235 | if (out_start) |
1236 | *out_start = max(m_start, r_start); |
1237 | if (out_end) |
1238 | *out_end = min(m_end, r_end); |
1239 | if (out_nid) |
1240 | *out_nid = m_nid; |
1241 | if (m_start >= r_start) |
1242 | idx_a--; |
1243 | else |
1244 | idx_b--; |
1245 | *idx = (u32)idx_a | (u64)idx_b << 32; |
1246 | return; |
1247 | } |
1248 | } |
1249 | } |
1250 | /* signal end of iteration */ |
1251 | *idx = ULLONG_MAX; |
1252 | } |
1253 | |
1254 | /* |
1255 | * Common iterator interface used to define for_each_mem_pfn_range(). |
1256 | */ |
1257 | void __init_memblock __next_mem_pfn_range(int *idx, int nid, |
1258 | unsigned long *out_start_pfn, |
1259 | unsigned long *out_end_pfn, int *out_nid) |
1260 | { |
1261 | struct memblock_type *type = &memblock.memory; |
1262 | struct memblock_region *r; |
1263 | int r_nid; |
1264 | |
1265 | while (++*idx < type->cnt) { |
1266 | r = &type->regions[*idx]; |
1267 | r_nid = memblock_get_region_node(r); |
1268 | |
1269 | if (PFN_UP(r->base) >= PFN_DOWN(r->base + r->size)) |
1270 | continue; |
1271 | if (nid == MAX_NUMNODES || nid == r_nid) |
1272 | break; |
1273 | } |
1274 | if (*idx >= type->cnt) { |
1275 | *idx = -1; |
1276 | return; |
1277 | } |
1278 | |
1279 | if (out_start_pfn) |
1280 | *out_start_pfn = PFN_UP(r->base); |
1281 | if (out_end_pfn) |
1282 | *out_end_pfn = PFN_DOWN(r->base + r->size); |
1283 | if (out_nid) |
1284 | *out_nid = r_nid; |
1285 | } |
1286 | |
1287 | /** |
1288 | * memblock_set_node - set node ID on memblock regions |
1289 | * @base: base of area to set node ID for |
1290 | * @size: size of area to set node ID for |
1291 | * @type: memblock type to set node ID for |
1292 | * @nid: node ID to set |
1293 | * |
1294 | * Set the nid of memblock @type regions in [@base, @base + @size) to @nid. |
1295 | * Regions which cross the area boundaries are split as necessary. |
1296 | * |
1297 | * Return: |
1298 | * 0 on success, -errno on failure. |
1299 | */ |
1300 | int __init_memblock memblock_set_node(phys_addr_t base, phys_addr_t size, |
1301 | struct memblock_type *type, int nid) |
1302 | { |
1303 | #ifdef CONFIG_NUMA |
1304 | int start_rgn, end_rgn; |
1305 | int i, ret; |
1306 | |
	ret = memblock_isolate_range(type, base, size, &start_rgn, &end_rgn);
1308 | if (ret) |
1309 | return ret; |
1310 | |
1311 | for (i = start_rgn; i < end_rgn; i++) |
1312 | memblock_set_region_node(r: &type->regions[i], nid); |
1313 | |
1314 | memblock_merge_regions(type, start_rgn, end_rgn); |
1315 | #endif |
1316 | return 0; |
1317 | } |
1318 | |
1319 | #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT |
1320 | /** |
1321 | * __next_mem_pfn_range_in_zone - iterator for for_each_*_range_in_zone() |
1322 | * |
1323 | * @idx: pointer to u64 loop variable |
1324 | * @zone: zone in which all of the memory blocks reside |
1325 | * @out_spfn: ptr to ulong for start pfn of the range, can be %NULL |
1326 | * @out_epfn: ptr to ulong for end pfn of the range, can be %NULL |
1327 | * |
1328 | * This function is meant to be a zone/pfn specific wrapper for the |
1329 | * for_each_mem_range type iterators. Specifically they are used in the |
1330 | * deferred memory init routines and as such we were duplicating much of |
1331 | * this logic throughout the code. So instead of having it in multiple |
1332 | * locations it seemed like it would make more sense to centralize this to |
1333 | * one new iterator that does everything they need. |
1334 | */ |
1335 | void __init_memblock |
1336 | __next_mem_pfn_range_in_zone(u64 *idx, struct zone *zone, |
1337 | unsigned long *out_spfn, unsigned long *out_epfn) |
1338 | { |
1339 | int zone_nid = zone_to_nid(zone); |
1340 | phys_addr_t spa, epa; |
1341 | |
	__next_mem_range(idx, zone_nid, MEMBLOCK_NONE,
			 &memblock.memory, &memblock.reserved,
			 &spa, &epa, NULL);
1345 | |
1346 | while (*idx != U64_MAX) { |
1347 | unsigned long epfn = PFN_DOWN(epa); |
1348 | unsigned long spfn = PFN_UP(spa); |
1349 | |
1350 | /* |
1351 | * Verify the end is at least past the start of the zone and |
1352 | * that we have at least one PFN to initialize. |
1353 | */ |
1354 | if (zone->zone_start_pfn < epfn && spfn < epfn) { |
1355 | /* if we went too far just stop searching */ |
1356 | if (zone_end_pfn(zone) <= spfn) { |
1357 | *idx = U64_MAX; |
1358 | break; |
1359 | } |
1360 | |
1361 | if (out_spfn) |
1362 | *out_spfn = max(zone->zone_start_pfn, spfn); |
1363 | if (out_epfn) |
1364 | *out_epfn = min(zone_end_pfn(zone), epfn); |
1365 | |
1366 | return; |
1367 | } |
1368 | |
		__next_mem_range(idx, zone_nid, MEMBLOCK_NONE,
				 &memblock.memory, &memblock.reserved,
				 &spa, &epa, NULL);
1372 | } |
1373 | |
1374 | /* signal end of iteration */ |
1375 | if (out_spfn) |
1376 | *out_spfn = ULONG_MAX; |
1377 | if (out_epfn) |
1378 | *out_epfn = 0; |
1379 | } |
1380 | |
1381 | #endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */ |
1382 | |
1383 | /** |
1384 | * memblock_alloc_range_nid - allocate boot memory block |
1385 | * @size: size of memory block to be allocated in bytes |
1386 | * @align: alignment of the region and block's size |
1387 | * @start: the lower bound of the memory region to allocate (phys address) |
1388 | * @end: the upper bound of the memory region to allocate (phys address) |
1389 | * @nid: nid of the free area to find, %NUMA_NO_NODE for any node |
1390 | * @exact_nid: control the allocation fall back to other nodes |
1391 | * |
1392 | * The allocation is performed from memory region limited by |
1393 | * memblock.current_limit if @end == %MEMBLOCK_ALLOC_ACCESSIBLE. |
1394 | * |
1395 | * If the specified node can not hold the requested memory and @exact_nid |
1396 | * is false, the allocation falls back to any node in the system. |
1397 | * |
1398 | * For systems with memory mirroring, the allocation is attempted first |
1399 | * from the regions with mirroring enabled and then retried from any |
1400 | * memory region. |
1401 | * |
 * In addition, the function uses kmemleak_alloc_phys() to register the
 * allocated boot memory block, so it is never reported as a leak.
1404 | * |
1405 | * Return: |
1406 | * Physical address of allocated memory block on success, %0 on failure. |
1407 | */ |
1408 | phys_addr_t __init memblock_alloc_range_nid(phys_addr_t size, |
1409 | phys_addr_t align, phys_addr_t start, |
1410 | phys_addr_t end, int nid, |
1411 | bool exact_nid) |
1412 | { |
1413 | enum memblock_flags flags = choose_memblock_flags(); |
1414 | phys_addr_t found; |
1415 | |
	if (WARN_ONCE(nid == MAX_NUMNODES, "Usage of MAX_NUMNODES is deprecated. Use NUMA_NO_NODE instead\n"))
1417 | nid = NUMA_NO_NODE; |
1418 | |
1419 | if (!align) { |
1420 | /* Can't use WARNs this early in boot on powerpc */ |
1421 | dump_stack(); |
1422 | align = SMP_CACHE_BYTES; |
1423 | } |
1424 | |
1425 | again: |
1426 | found = memblock_find_in_range_node(size, align, start, end, nid, |
1427 | flags); |
1428 | if (found && !memblock_reserve(base: found, size)) |
1429 | goto done; |
1430 | |
1431 | if (nid != NUMA_NO_NODE && !exact_nid) { |
1432 | found = memblock_find_in_range_node(size, align, start, |
1433 | end, NUMA_NO_NODE, |
1434 | flags); |
1435 | if (found && !memblock_reserve(base: found, size)) |
1436 | goto done; |
1437 | } |
1438 | |
1439 | if (flags & MEMBLOCK_MIRROR) { |
1440 | flags &= ~MEMBLOCK_MIRROR; |
1441 | pr_warn_ratelimited("Could not allocate %pap bytes of mirrored memory\n" , |
1442 | &size); |
1443 | goto again; |
1444 | } |
1445 | |
1446 | return 0; |
1447 | |
1448 | done: |
1449 | /* |
1450 | * Skip kmemleak for those places like kasan_init() and |
1451 | * early_pgtable_alloc() due to high volume. |
1452 | */ |
1453 | if (end != MEMBLOCK_ALLOC_NOLEAKTRACE) |
1454 | /* |
1455 | * Memblock allocated blocks are never reported as |
1456 | * leaks. This is because many of these blocks are |
1457 | * only referred via the physical address which is |
1458 | * not looked up by kmemleak. |
1459 | */ |
		kmemleak_alloc_phys(found, size, 0);
1461 | |
1462 | /* |
1463 | * Some Virtual Machine platforms, such as Intel TDX or AMD SEV-SNP, |
1464 | * require memory to be accepted before it can be used by the |
1465 | * guest. |
1466 | * |
1467 | * Accept the memory of the allocated buffer. |
1468 | */ |
	accept_memory(found, found + size);
1470 | |
1471 | return found; |
1472 | } |
1473 | |
1474 | /** |
1475 | * memblock_phys_alloc_range - allocate a memory block inside specified range |
1476 | * @size: size of memory block to be allocated in bytes |
1477 | * @align: alignment of the region and block's size |
1478 | * @start: the lower bound of the memory region to allocate (physical address) |
1479 | * @end: the upper bound of the memory region to allocate (physical address) |
1480 | * |
 * Allocate @size bytes in the range between @start and @end.
1482 | * |
1483 | * Return: physical address of the allocated memory block on success, |
1484 | * %0 on failure. |
1485 | */ |
1486 | phys_addr_t __init memblock_phys_alloc_range(phys_addr_t size, |
1487 | phys_addr_t align, |
1488 | phys_addr_t start, |
1489 | phys_addr_t end) |
1490 | { |
1491 | memblock_dbg("%s: %llu bytes align=0x%llx from=%pa max_addr=%pa %pS\n" , |
1492 | __func__, (u64)size, (u64)align, &start, &end, |
1493 | (void *)_RET_IP_); |
	return memblock_alloc_range_nid(size, align, start, end, NUMA_NO_NODE,
					false);
1496 | } |
1497 | |
1498 | /** |
1499 | * memblock_phys_alloc_try_nid - allocate a memory block from specified NUMA node |
1500 | * @size: size of memory block to be allocated in bytes |
1501 | * @align: alignment of the region and block's size |
1502 | * @nid: nid of the free area to find, %NUMA_NO_NODE for any node |
1503 | * |
1504 | * Allocates memory block from the specified NUMA node. If the node |
 * has no available memory, attempts to allocate from any node in the
1506 | * system. |
1507 | * |
1508 | * Return: physical address of the allocated memory block on success, |
1509 | * %0 on failure. |
1510 | */ |
1511 | phys_addr_t __init memblock_phys_alloc_try_nid(phys_addr_t size, phys_addr_t align, int nid) |
1512 | { |
	return memblock_alloc_range_nid(size, align, 0,
					MEMBLOCK_ALLOC_ACCESSIBLE, nid, false);
1515 | } |
1516 | |
1517 | /** |
1518 | * memblock_alloc_internal - allocate boot memory block |
1519 | * @size: size of memory block to be allocated in bytes |
1520 | * @align: alignment of the region and block's size |
1521 | * @min_addr: the lower bound of the memory region to allocate (phys address) |
1522 | * @max_addr: the upper bound of the memory region to allocate (phys address) |
1523 | * @nid: nid of the free area to find, %NUMA_NO_NODE for any node |
1524 | * @exact_nid: control the allocation fall back to other nodes |
1525 | * |
1526 | * Allocates memory block using memblock_alloc_range_nid() and |
1527 | * converts the returned physical address to virtual. |
1528 | * |
1529 | * The @min_addr limit is dropped if it can not be satisfied and the allocation |
1530 | * will fall back to memory below @min_addr. Other constraints, such |
1531 | * as node and mirrored memory will be handled again in |
1532 | * memblock_alloc_range_nid(). |
1533 | * |
1534 | * Return: |
1535 | * Virtual address of allocated memory block on success, NULL on failure. |
1536 | */ |
1537 | static void * __init memblock_alloc_internal( |
1538 | phys_addr_t size, phys_addr_t align, |
1539 | phys_addr_t min_addr, phys_addr_t max_addr, |
1540 | int nid, bool exact_nid) |
1541 | { |
1542 | phys_addr_t alloc; |
1543 | |
1544 | /* |
1545 | * Detect any accidental use of these APIs after slab is ready, as by |
1546 | * then memblock may already be deinitialized and its internal |
1547 | * data may have been destroyed (after memblock_free_all() has run). |
1548 | */ |
1549 | if (WARN_ON_ONCE(slab_is_available())) |
1550 | return kzalloc_node(size, GFP_NOWAIT, nid); |
1551 | |
1552 | if (max_addr > memblock.current_limit) |
1553 | max_addr = memblock.current_limit; |
1554 | |
1555 | alloc = memblock_alloc_range_nid(size, align, min_addr, max_addr, nid, |
1556 | exact_nid); |
1557 | |
1558 | /* retry allocation without lower limit */ |
1559 | if (!alloc && min_addr) |
1560 | alloc = memblock_alloc_range_nid(size, align, 0, max_addr, nid, |
1561 | exact_nid); |
1562 | |
1563 | if (!alloc) |
1564 | return NULL; |
1565 | |
1566 | return phys_to_virt(alloc); |
1567 | } |
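
/*
 * Illustration of the fallback above (hypothetical values): a request
 * with min_addr = SZ_16M on a machine whose only free memory lies below
 * 16 MiB fails the first memblock_alloc_range_nid() call, then succeeds
 * on the retry with start = 0, honouring every constraint except the
 * lower bound.
 */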
1568 | |
1569 | /** |
1570 | * memblock_alloc_exact_nid_raw - allocate boot memory block on the exact node |
1571 | * without zeroing memory |
1572 | * @size: size of memory block to be allocated in bytes |
1573 | * @align: alignment of the region and block's size |
1574 | * @min_addr: the lower bound of the memory region from where the allocation |
1575 | * is preferred (phys address) |
1576 | * @max_addr: the upper bound of the memory region from where the allocation |
1577 | * is preferred (phys address), or %MEMBLOCK_ALLOC_ACCESSIBLE to |
1578 | * allocate only from memory limited by memblock.current_limit value |
1579 | * @nid: nid of the free area to find, %NUMA_NO_NODE for any node |
1580 | * |
1581 | * Public function, provides additional debug information (including caller |
1582 | * info), if enabled. Does not zero allocated memory. |
1583 | * |
1584 | * Return: |
1585 | * Virtual address of allocated memory block on success, NULL on failure. |
1586 | */ |
1587 | void * __init memblock_alloc_exact_nid_raw( |
1588 | phys_addr_t size, phys_addr_t align, |
1589 | phys_addr_t min_addr, phys_addr_t max_addr, |
1590 | int nid) |
1591 | { |
1592 | memblock_dbg("%s: %llu bytes align=0x%llx nid=%d from=%pa max_addr=%pa %pS\n", |
1593 | __func__, (u64)size, (u64)align, nid, &min_addr, |
1594 | &max_addr, (void *)_RET_IP_); |
1595 | |
1596 | return memblock_alloc_internal(size, align, min_addr, max_addr, nid, |
1597 | true); |
1598 | } |
1599 | |
1600 | /** |
1601 | * memblock_alloc_try_nid_raw - allocate boot memory block without zeroing |
1602 | * memory and without panicking |
1603 | * @size: size of memory block to be allocated in bytes |
1604 | * @align: alignment of the region and block's size |
1605 | * @min_addr: the lower bound of the memory region from where the allocation |
1606 | * is preferred (phys address) |
1607 | * @max_addr: the upper bound of the memory region from where the allocation |
1608 | * is preferred (phys address), or %MEMBLOCK_ALLOC_ACCESSIBLE to |
1609 | * allocate only from memory limited by memblock.current_limit value |
1610 | * @nid: nid of the free area to find, %NUMA_NO_NODE for any node |
1611 | * |
1612 | * Public function, provides additional debug information (including caller |
1613 | * info), if enabled. Does not zero allocated memory, does not panic if the |
1614 | * request cannot be satisfied. |
1615 | * |
1616 | * Return: |
1617 | * Virtual address of allocated memory block on success, NULL on failure. |
1618 | */ |
1619 | void * __init memblock_alloc_try_nid_raw( |
1620 | phys_addr_t size, phys_addr_t align, |
1621 | phys_addr_t min_addr, phys_addr_t max_addr, |
1622 | int nid) |
1623 | { |
1624 | memblock_dbg("%s: %llu bytes align=0x%llx nid=%d from=%pa max_addr=%pa %pS\n", |
1625 | __func__, (u64)size, (u64)align, nid, &min_addr, |
1626 | &max_addr, (void *)_RET_IP_); |
1627 | |
1628 | return memblock_alloc_internal(size, align, min_addr, max_addr, nid, |
1629 | false); |
1630 | } |
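
/*
 * Usage sketch (illustrative only): with the _raw variants the caller
 * owns initialization, e.g. a hypothetical bitmap that must start as
 * all-ones:
 *
 *	map = memblock_alloc_try_nid_raw(size, SMP_CACHE_BYTES, 0,
 *					 MEMBLOCK_ALLOC_ACCESSIBLE, nid);
 *	if (map)
 *		memset(map, 0xff, size);
 */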
1631 | |
1632 | /** |
1633 | * memblock_alloc_try_nid - allocate boot memory block |
1634 | * @size: size of memory block to be allocated in bytes |
1635 | * @align: alignment of the region and block's size |
1636 | * @min_addr: the lower bound of the memory region from where the allocation |
1637 | * is preferred (phys address) |
1638 | * @max_addr: the upper bound of the memory region from where the allocation |
1639 | * is preferred (phys address), or %MEMBLOCK_ALLOC_ACCESSIBLE to |
1640 | * allocate only from memory limited by memblock.current_limit value |
1641 | * @nid: nid of the free area to find, %NUMA_NO_NODE for any node |
1642 | * |
1643 | * Public function, provides additional debug information (including caller |
1644 | * info), if enabled. This function zeroes the allocated memory. |
1645 | * |
1646 | * Return: |
1647 | * Virtual address of allocated memory block on success, NULL on failure. |
1648 | */ |
1649 | void * __init memblock_alloc_try_nid( |
1650 | phys_addr_t size, phys_addr_t align, |
1651 | phys_addr_t min_addr, phys_addr_t max_addr, |
1652 | int nid) |
1653 | { |
1654 | void *ptr; |
1655 | |
1656 | memblock_dbg("%s: %llu bytes align=0x%llx nid=%d from=%pa max_addr=%pa %pS\n", |
1657 | __func__, (u64)size, (u64)align, nid, &min_addr, |
1658 | &max_addr, (void *)_RET_IP_); |
1659 | ptr = memblock_alloc_internal(size, align, |
1660 | min_addr, max_addr, nid, false); |
1661 | if (ptr) |
1662 | memset(ptr, 0, size); |
1663 | |
1664 | return ptr; |
1665 | } |
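
/*
 * Most callers do not use this function directly: the memblock_alloc*()
 * convenience wrappers in <linux/memblock.h> invoke it with NUMA_NO_NODE
 * and/or MEMBLOCK_ALLOC_ACCESSIBLE filled in, so e.g.
 * memblock_alloc(size, align) boils down to a zeroing, any-node
 * allocation bounded by memblock.current_limit.
 */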
1666 | |
1667 | /** |
1668 | * memblock_free_late - free pages directly to buddy allocator |
1669 | * @base: phys starting address of the boot memory block |
1670 | * @size: size of the boot memory block in bytes |
1671 | * |
1672 | * This is only useful when the memblock allocator has already been torn |
1673 | * down, but we are still initializing the system. Pages are released directly |
1674 | * to the buddy allocator. |
1675 | */ |
1676 | void __init memblock_free_late(phys_addr_t base, phys_addr_t size) |
1677 | { |
1678 | phys_addr_t cursor, end; |
1679 | |
1680 | end = base + size - 1; |
1681 | memblock_dbg("%s: [%pa-%pa] %pS\n", |
1682 | __func__, &base, &end, (void *)_RET_IP_); |
1683 | kmemleak_free_part_phys(base, size); |
1684 | cursor = PFN_UP(base); |
1685 | end = PFN_DOWN(base + size); |
1686 | |
1687 | for (; cursor < end; cursor++) { |
1688 | memblock_free_pages(pfn_to_page(cursor), cursor, 0); |
1689 | totalram_pages_inc(); |
1690 | } |
1691 | } |
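
/*
 * Usage sketch (hypothetical): late in boot, after memblock_free_all(),
 * a region that was reserved early but is no longer needed can still be
 * handed to the buddy allocator:
 *
 *	memblock_free_late(__pa(unused_buf), unused_size);
 *
 * Partial head/tail pages are skipped by the PFN_UP()/PFN_DOWN()
 * rounding above, so only whole pages are released.
 */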
1692 | |
1693 | /* |
1694 | * Remaining API functions |
1695 | */ |
1696 | |
1697 | phys_addr_t __init_memblock memblock_phys_mem_size(void) |
1698 | { |
1699 | return memblock.memory.total_size; |
1700 | } |
1701 | |
1702 | phys_addr_t __init_memblock memblock_reserved_size(void) |
1703 | { |
1704 | return memblock.reserved.total_size; |
1705 | } |
1706 | |
1707 | /* lowest address */ |
1708 | phys_addr_t __init_memblock memblock_start_of_DRAM(void) |
1709 | { |
1710 | return memblock.memory.regions[0].base; |
1711 | } |
1712 | |
1713 | phys_addr_t __init_memblock memblock_end_of_DRAM(void) |
1714 | { |
1715 | int idx = memblock.memory.cnt - 1; |
1716 | |
1717 | return (memblock.memory.regions[idx].base + memblock.memory.regions[idx].size); |
1718 | } |
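
/*
 * Note: because regions are kept sorted by base address,
 * memblock_start_of_DRAM() and memblock_end_of_DRAM() bound the address
 * span, which can exceed memblock_phys_mem_size() when the memory map
 * has holes. E.g. (hypothetical) banks at [0, 2G) and [4G, 6G) give a
 * 6G end of DRAM but only 4G of physical memory.
 */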
1719 | |
1720 | static phys_addr_t __init_memblock __find_max_addr(phys_addr_t limit) |
1721 | { |
1722 | phys_addr_t max_addr = PHYS_ADDR_MAX; |
1723 | struct memblock_region *r; |
1724 | |
1725 | /* |
1726 | * Translate the memory @limit size into the max address within one of |
1727 | * the memory memblock regions. If @limit exceeds the total size of |
1728 | * those regions, max_addr keeps its original value of PHYS_ADDR_MAX. |
1729 | */ |
1730 | for_each_mem_region(r) { |
1731 | if (limit <= r->size) { |
1732 | max_addr = r->base + limit; |
1733 | break; |
1734 | } |
1735 | limit -= r->size; |
1736 | } |
1737 | |
1738 | return max_addr; |
1739 | } |
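
/*
 * Worked example (hypothetical layout): with regions [0, 512M) and
 * [1G, 1.5G) and limit = 768M, the loop consumes 512M from the first
 * region, leaving limit = 256M, which fits in the second region, so
 * max_addr = 1G + 256M.
 */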
1740 | |
1741 | void __init memblock_enforce_memory_limit(phys_addr_t limit) |
1742 | { |
1743 | phys_addr_t max_addr; |
1744 | |
1745 | if (!limit) |
1746 | return; |
1747 | |
1748 | max_addr = __find_max_addr(limit); |
1749 | |
1750 | /* @limit exceeds the total size of the memory, do nothing */ |
1751 | if (max_addr == PHYS_ADDR_MAX) |
1752 | return; |
1753 | |
1754 | /* truncate both memory and reserved regions */ |
1755 | memblock_remove_range(&memblock.memory, max_addr, |
1756 | PHYS_ADDR_MAX); |
1757 | memblock_remove_range(&memblock.reserved, max_addr, |
1758 | PHYS_ADDR_MAX); |
1759 | } |
1760 | |
1761 | void __init memblock_cap_memory_range(phys_addr_t base, phys_addr_t size) |
1762 | { |
1763 | int start_rgn, end_rgn; |
1764 | int i, ret; |
1765 | |
1766 | if (!size) |
1767 | return; |
1768 | |
1769 | if (!memblock_memory->total_size) { |
1770 | pr_warn("%s: No memory registered yet\n", __func__); |
1771 | return; |
1772 | } |
1773 | |
1774 | ret = memblock_isolate_range(&memblock.memory, base, size, |
1775 | &start_rgn, &end_rgn); |
1776 | if (ret) |
1777 | return; |
1778 | |
1779 | /* remove all the MAP regions */ |
1780 | for (i = memblock.memory.cnt - 1; i >= end_rgn; i--) |
1781 | if (!memblock_is_nomap(&memblock.memory.regions[i])) |
1782 | memblock_remove_region(&memblock.memory, i); |
1783 | |
1784 | for (i = start_rgn - 1; i >= 0; i--) |
1785 | if (!memblock_is_nomap(&memblock.memory.regions[i])) |
1786 | memblock_remove_region(&memblock.memory, i); |
1787 | |
1788 | /* truncate the reserved regions */ |
1789 | memblock_remove_range(&memblock.reserved, 0, base); |
1790 | memblock_remove_range(&memblock.reserved, |
1791 | base + size, PHYS_ADDR_MAX); |
1792 | } |
1793 | |
1794 | void __init memblock_mem_limit_remove_map(phys_addr_t limit) |
1795 | { |
1796 | phys_addr_t max_addr; |
1797 | |
1798 | if (!limit) |
1799 | return; |
1800 | |
1801 | max_addr = __find_max_addr(limit); |
1802 | |
1803 | /* @limit exceeds the total size of the memory, do nothing */ |
1804 | if (max_addr == PHYS_ADDR_MAX) |
1805 | return; |
1806 | |
1807 | memblock_cap_memory_range(0, max_addr); |
1808 | } |
1809 | |
1810 | static int __init_memblock memblock_search(struct memblock_type *type, phys_addr_t addr) |
1811 | { |
1812 | unsigned int left = 0, right = type->cnt; |
1813 | |
1814 | do { |
1815 | unsigned int mid = (right + left) / 2; |
1816 | |
1817 | if (addr < type->regions[mid].base) |
1818 | right = mid; |
1819 | else if (addr >= (type->regions[mid].base + |
1820 | type->regions[mid].size)) |
1821 | left = mid + 1; |
1822 | else |
1823 | return mid; |
1824 | } while (left < right); |
1825 | return -1; |
1826 | } |
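
/*
 * The binary search relies on the region array being sorted by base
 * address with no overlaps, an invariant the insertion paths maintain.
 * E.g. (hypothetical) for regions [0x1000, 0x2000) and [0x4000, 0x5000),
 * an @addr of 0x4800 returns index 1, while 0x3000 returns -1.
 */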
1827 | |
1828 | bool __init_memblock memblock_is_reserved(phys_addr_t addr) |
1829 | { |
1830 | return memblock_search(&memblock.reserved, addr) != -1; |
1831 | } |
1832 | |
1833 | bool __init_memblock memblock_is_memory(phys_addr_t addr) |
1834 | { |
1835 | return memblock_search(&memblock.memory, addr) != -1; |
1836 | } |
1837 | |
1838 | bool __init_memblock memblock_is_map_memory(phys_addr_t addr) |
1839 | { |
1840 | int i = memblock_search(&memblock.memory, addr); |
1841 | |
1842 | if (i == -1) |
1843 | return false; |
1844 | return !memblock_is_nomap(&memblock.memory.regions[i]); |
1845 | } |
1846 | |
1847 | int __init_memblock memblock_search_pfn_nid(unsigned long pfn, |
1848 | unsigned long *start_pfn, unsigned long *end_pfn) |
1849 | { |
1850 | struct memblock_type *type = &memblock.memory; |
1851 | int mid = memblock_search(type, PFN_PHYS(pfn)); |
1852 | |
1853 | if (mid == -1) |
1854 | return -1; |
1855 | |
1856 | *start_pfn = PFN_DOWN(type->regions[mid].base); |
1857 | *end_pfn = PFN_DOWN(type->regions[mid].base + type->regions[mid].size); |
1858 | |
1859 | return memblock_get_region_node(&type->regions[mid]); |
1860 | } |
1861 | |
1862 | /** |
1863 | * memblock_is_region_memory - check if a region is a subset of memory |
1864 | * @base: base of region to check |
1865 | * @size: size of region to check |
1866 | * |
1867 | * Check if the region [@base, @base + @size) is a subset of a memory block. |
1868 | * |
1869 | * Return: |
1870 | * True if the region is a subset of a memory block, false otherwise. |
1871 | */ |
1872 | bool __init_memblock memblock_is_region_memory(phys_addr_t base, phys_addr_t size) |
1873 | { |
1874 | int idx = memblock_search(&memblock.memory, base); |
1875 | phys_addr_t end = base + memblock_cap_size(base, &size); |
1876 | |
1877 | if (idx == -1) |
1878 | return false; |
1879 | return (memblock.memory.regions[idx].base + |
1880 | memblock.memory.regions[idx].size) >= end; |
1881 | } |
1882 | |
1883 | /** |
1884 | * memblock_is_region_reserved - check if a region intersects reserved memory |
1885 | * @base: base of region to check |
1886 | * @size: size of region to check |
1887 | * |
1888 | * Check if the region [@base, @base + @size) intersects a reserved |
1889 | * memory block. |
1890 | * |
1891 | * Return: |
1892 | * True if they intersect, false if not. |
1893 | */ |
1894 | bool __init_memblock memblock_is_region_reserved(phys_addr_t base, phys_addr_t size) |
1895 | { |
1896 | return memblock_overlaps_region(&memblock.reserved, base, size); |
1897 | } |
1898 | |
1899 | void __init_memblock memblock_trim_memory(phys_addr_t align) |
1900 | { |
1901 | phys_addr_t start, end, orig_start, orig_end; |
1902 | struct memblock_region *r; |
1903 | |
1904 | for_each_mem_region(r) { |
1905 | orig_start = r->base; |
1906 | orig_end = r->base + r->size; |
1907 | start = round_up(orig_start, align); |
1908 | end = round_down(orig_end, align); |
1909 | |
1910 | if (start == orig_start && end == orig_end) |
1911 | continue; |
1912 | |
1913 | if (start < end) { |
1914 | r->base = start; |
1915 | r->size = end - start; |
1916 | } else { |
1917 | memblock_remove_region(&memblock.memory, |
1918 | r - memblock.memory.regions); |
1919 | r--; |
1920 | } |
1921 | } |
1922 | } |
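
/*
 * Worked example (hypothetical): with align = 0x2000, a region spanning
 * [0x1200, 0x5600) is trimmed to [0x2000, 0x4000); a region smaller
 * than one aligned chunk, say [0x1200, 0x1e00), rounds to an empty
 * range and is removed outright.
 */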
1923 | |
1924 | void __init_memblock memblock_set_current_limit(phys_addr_t limit) |
1925 | { |
1926 | memblock.current_limit = limit; |
1927 | } |
1928 | |
1929 | phys_addr_t __init_memblock memblock_get_current_limit(void) |
1930 | { |
1931 | return memblock.current_limit; |
1932 | } |
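
/*
 * Illustrative use (the exact call sites are architecture-specific):
 * platforms whose early page tables map only part of RAM typically cap
 * default allocations during setup, e.g. a hypothetical
 *
 *	memblock_set_current_limit(lowmem_end);
 *
 * after which MEMBLOCK_ALLOC_ACCESSIBLE requests stay below lowmem_end
 * until the limit is raised or reset.
 */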
1933 | |
1934 | static void __init_memblock memblock_dump(struct memblock_type *type) |
1935 | { |
1936 | phys_addr_t base, end, size; |
1937 | enum memblock_flags flags; |
1938 | int idx; |
1939 | struct memblock_region *rgn; |
1940 | |
1941 | pr_info(" %s.cnt = 0x%lx\n" , type->name, type->cnt); |
1942 | |
1943 | for_each_memblock_type(idx, type, rgn) { |
1944 | char nid_buf[32] = ""; |
1945 | |
1946 | base = rgn->base; |
1947 | size = rgn->size; |
1948 | end = base + size - 1; |
1949 | flags = rgn->flags; |
1950 | #ifdef CONFIG_NUMA |
1951 | if (memblock_get_region_node(rgn) != MAX_NUMNODES) |
1952 | snprintf(nid_buf, sizeof(nid_buf), " on node %d", |
1953 | memblock_get_region_node(rgn)); |
1954 | #endif |
1955 | pr_info(" %s[%#x]\t[%pa-%pa], %pa bytes%s flags: %#x\n" , |
1956 | type->name, idx, &base, &end, &size, nid_buf, flags); |
1957 | } |
1958 | } |
1959 | |
1960 | static void __init_memblock __memblock_dump_all(void) |
1961 | { |
1962 | pr_info("MEMBLOCK configuration:\n" ); |
1963 | pr_info(" memory size = %pa reserved size = %pa\n" , |
1964 | &memblock.memory.total_size, |
1965 | &memblock.reserved.total_size); |
1966 | |
1967 | memblock_dump(&memblock.memory); |
1968 | memblock_dump(&memblock.reserved); |
1969 | #ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP |
1970 | memblock_dump(&physmem); |
1971 | #endif |
1972 | } |
1973 | |
1974 | void __init_memblock memblock_dump_all(void) |
1975 | { |
1976 | if (memblock_debug) |
1977 | __memblock_dump_all(); |
1978 | } |
1979 | |
1980 | void __init memblock_allow_resize(void) |
1981 | { |
1982 | memblock_can_resize = 1; |
1983 | } |
1984 | |
1985 | static int __init early_memblock(char *p) |
1986 | { |
1987 | if (p && strstr(p, "debug")) |
1988 | memblock_debug = 1; |
1989 | return 0; |
1990 | } |
1991 | early_param("memblock", early_memblock); |
1992 | |
1993 | static void __init free_memmap(unsigned long start_pfn, unsigned long end_pfn) |
1994 | { |
1995 | struct page *start_pg, *end_pg; |
1996 | phys_addr_t pg, pgend; |
1997 | |
1998 | /* |
1999 | * Convert start_pfn/end_pfn to a struct page pointer. |
2000 | */ |
2001 | start_pg = pfn_to_page(start_pfn - 1) + 1; |
2002 | end_pg = pfn_to_page(end_pfn - 1) + 1; |
2003 | |
2004 | /* |
2005 | * Convert to physical addresses, and round start upwards and end |
2006 | * downwards. |
2007 | */ |
2008 | pg = PAGE_ALIGN(__pa(start_pg)); |
2009 | pgend = __pa(end_pg) & PAGE_MASK; |
2010 | |
2011 | /* |
2012 | * If there are free pages between these, free the section of the |
2013 | * memmap array. |
2014 | */ |
2015 | if (pg < pgend) |
2016 | memblock_phys_free(pg, pgend - pg); |
2017 | } |
2018 | |
2019 | /* |
2020 | * The mem_map array can get very big. Free the unused area of the memory map. |
2021 | */ |
2022 | static void __init free_unused_memmap(void) |
2023 | { |
2024 | unsigned long start, end, prev_end = 0; |
2025 | int i; |
2026 | |
2027 | if (!IS_ENABLED(CONFIG_HAVE_ARCH_PFN_VALID) || |
2028 | IS_ENABLED(CONFIG_SPARSEMEM_VMEMMAP)) |
2029 | return; |
2030 | |
2031 | /* |
2032 | * This relies on each bank being in address order. |
2033 | * The banks are sorted previously in bootmem_init(). |
2034 | */ |
2035 | for_each_mem_pfn_range(i, MAX_NUMNODES, &start, &end, NULL) { |
2036 | #ifdef CONFIG_SPARSEMEM |
2037 | /* |
2038 | * Take care not to free memmap entries that don't exist |
2039 | * due to SPARSEMEM sections which aren't present. |
2040 | */ |
2041 | start = min(start, ALIGN(prev_end, PAGES_PER_SECTION)); |
2042 | #endif |
2043 | /* |
2044 | * Align down here since many operations in VM subsystem |
2045 | * presume that there are no holes in the memory map inside |
2046 | * a pageblock |
2047 | */ |
2048 | start = pageblock_start_pfn(start); |
2049 | |
2050 | /* |
2051 | * If we had a previous bank, and there is a space |
2052 | * between the current bank and the previous, free it. |
2053 | */ |
2054 | if (prev_end && prev_end < start) |
2055 | free_memmap(prev_end, start); |
2056 | |
2057 | /* |
2058 | * Align up here since many operations in VM subsystem |
2059 | * presume that there are no holes in the memory map inside |
2060 | * a pageblock |
2061 | */ |
2062 | prev_end = pageblock_align(end); |
2063 | } |
2064 | |
2065 | #ifdef CONFIG_SPARSEMEM |
2066 | if (!IS_ALIGNED(prev_end, PAGES_PER_SECTION)) { |
2067 | prev_end = pageblock_align(end); |
2068 | free_memmap(prev_end, ALIGN(prev_end, PAGES_PER_SECTION)); |
2069 | } |
2070 | #endif |
2071 | } |
2072 | |
2073 | static void __init __free_pages_memory(unsigned long start, unsigned long end) |
2074 | { |
2075 | int order; |
2076 | |
2077 | while (start < end) { |
2078 | /* |
2079 | * Free the pages in the largest chunks alignment allows. |
2080 | * |
2081 | * __ffs() behaviour is undefined for 0. start == 0 is |
2082 | * MAX_ORDER-aligned, so set order to MAX_ORDER in that case. |
2083 | */ |
2084 | if (start) |
2085 | order = min_t(int, MAX_ORDER, __ffs(start)); |
2086 | else |
2087 | order = MAX_ORDER; |
2088 | |
2089 | while (start + (1UL << order) > end) |
2090 | order--; |
2091 | |
2092 | memblock_free_pages(pfn_to_page(start), start, order); |
2093 | |
2094 | start += (1UL << order); |
2095 | } |
2096 | } |
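
/*
 * Worked example (hypothetical, assuming MAX_ORDER = 10): with
 * start = 0x2180 and end = 0x3000, __ffs(0x2180) = 7 gives an order-7
 * chunk (pfns 0x2180-0x21ff); subsequent iterations free an order-9
 * chunk at 0x2200 and order-10 chunks from 0x2400 onwards, as the
 * alignment of start improves.
 */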
2097 | |
2098 | static unsigned long __init __free_memory_core(phys_addr_t start, |
2099 | phys_addr_t end) |
2100 | { |
2101 | unsigned long start_pfn = PFN_UP(start); |
2102 | unsigned long end_pfn = min_t(unsigned long, |
2103 | PFN_DOWN(end), max_low_pfn); |
2104 | |
2105 | if (start_pfn >= end_pfn) |
2106 | return 0; |
2107 | |
2108 | __free_pages_memory(start_pfn, end_pfn); |
2109 | |
2110 | return end_pfn - start_pfn; |
2111 | } |
2112 | |
2113 | static void __init memmap_init_reserved_pages(void) |
2114 | { |
2115 | struct memblock_region *region; |
2116 | phys_addr_t start, end; |
2117 | int nid; |
2118 | |
2119 | /* |
2120 | * set nid on all reserved pages and also treat struct |
2121 | * pages for the NOMAP regions as PageReserved |
2122 | */ |
2123 | for_each_mem_region(region) { |
2124 | nid = memblock_get_region_node(region); |
2125 | start = region->base; |
2126 | end = start + region->size; |
2127 | |
2128 | if (memblock_is_nomap(region)) |
2129 | reserve_bootmem_region(start, end, nid); |
2130 | |
2131 | memblock_set_node(start, end, &memblock.reserved, nid); |
2132 | } |
2133 | |
2134 | /* |
2135 | * initialize struct pages for reserved regions that don't have |
2136 | * the MEMBLOCK_RSRV_NOINIT flag set |
2137 | */ |
2138 | for_each_reserved_mem_region(region) { |
2139 | if (!memblock_is_reserved_noinit(region)) { |
2140 | nid = memblock_get_region_node(region); |
2141 | start = region->base; |
2142 | end = start + region->size; |
2143 | |
2144 | reserve_bootmem_region(start, end, nid); |
2145 | } |
2146 | } |
2147 | } |
2148 | |
2149 | static unsigned long __init free_low_memory_core_early(void) |
2150 | { |
2151 | unsigned long count = 0; |
2152 | phys_addr_t start, end; |
2153 | u64 i; |
2154 | |
2155 | memblock_clear_hotplug(0, -1); |
2156 | |
2157 | memmap_init_reserved_pages(); |
2158 | |
2159 | /* |
2160 | * We need to use NUMA_NO_NODE instead of NODE_DATA(0)->node_id |
2161 | * because in some cases, e.g. when Node 0 has no RAM installed, |
2162 | * low memory will be on Node 1. |
2163 | */ |
2164 | for_each_free_mem_range(i, NUMA_NO_NODE, MEMBLOCK_NONE, &start, &end, |
2165 | NULL) |
2166 | count += __free_memory_core(start, end); |
2167 | |
2168 | return count; |
2169 | } |
2170 | |
2171 | static int reset_managed_pages_done __initdata; |
2172 | |
2173 | static void __init reset_node_managed_pages(pg_data_t *pgdat) |
2174 | { |
2175 | struct zone *z; |
2176 | |
2177 | for (z = pgdat->node_zones; z < pgdat->node_zones + MAX_NR_ZONES; z++) |
2178 | atomic_long_set(&z->managed_pages, 0); |
2179 | } |
2180 | |
2181 | void __init reset_all_zones_managed_pages(void) |
2182 | { |
2183 | struct pglist_data *pgdat; |
2184 | |
2185 | if (reset_managed_pages_done) |
2186 | return; |
2187 | |
2188 | for_each_online_pgdat(pgdat) |
2189 | reset_node_managed_pages(pgdat); |
2190 | |
2191 | reset_managed_pages_done = 1; |
2192 | } |
2193 | |
2194 | /** |
2195 | * memblock_free_all - release free pages to the buddy allocator |
2196 | */ |
2197 | void __init memblock_free_all(void) |
2198 | { |
2199 | unsigned long pages; |
2200 | |
2201 | free_unused_memmap(); |
2202 | reset_all_zones_managed_pages(); |
2203 | |
2204 | pages = free_low_memory_core_early(); |
2205 | totalram_pages_add(pages); |
2206 | } |
2207 | |
2208 | #if defined(CONFIG_DEBUG_FS) && defined(CONFIG_ARCH_KEEP_MEMBLOCK) |
2209 | static const char * const flagname[] = { |
2210 | [ilog2(MEMBLOCK_HOTPLUG)] = "HOTPLUG", |
2211 | [ilog2(MEMBLOCK_MIRROR)] = "MIRROR", |
2212 | [ilog2(MEMBLOCK_NOMAP)] = "NOMAP", |
2213 | [ilog2(MEMBLOCK_DRIVER_MANAGED)] = "DRV_MNG", |
2214 | }; |
2215 | |
2216 | static int memblock_debug_show(struct seq_file *m, void *private) |
2217 | { |
2218 | struct memblock_type *type = m->private; |
2219 | struct memblock_region *reg; |
2220 | int i, j, nid; |
2221 | unsigned int count = ARRAY_SIZE(flagname); |
2222 | phys_addr_t end; |
2223 | |
2224 | for (i = 0; i < type->cnt; i++) { |
2225 | reg = &type->regions[i]; |
2226 | end = reg->base + reg->size - 1; |
2227 | nid = memblock_get_region_node(reg); |
2228 | |
2229 | seq_printf(m, "%4d: " , i); |
2230 | seq_printf(m, "%pa..%pa " , ®->base, &end); |
2231 | if (nid != MAX_NUMNODES) |
2232 | seq_printf(m, "%4d " , nid); |
2233 | else |
2234 | seq_printf(m, "%4c " , 'x'); |
2235 | if (reg->flags) { |
2236 | for (j = 0; j < count; j++) { |
2237 | if (reg->flags & (1U << j)) { |
2238 | seq_printf(m, "%s\n" , flagname[j]); |
2239 | break; |
2240 | } |
2241 | } |
2242 | if (j == count) |
2243 | seq_printf(m, "%s\n" , "UNKNOWN" ); |
2244 | } else { |
2245 | seq_printf(m, "%s\n" , "NONE" ); |
2246 | } |
2247 | } |
2248 | return 0; |
2249 | } |
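
/*
 * Example of the resulting output (values hypothetical; format follows
 * the seq_printf() calls above):
 *
 *	   0: 0x0000000040000000..0x00000000bfffffff    0 NONE
 *	   1: 0x0000000100000000..0x000000013fffffff    1 HOTPLUG
 */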
2250 | DEFINE_SHOW_ATTRIBUTE(memblock_debug); |
2251 | |
2252 | static int __init memblock_init_debugfs(void) |
2253 | { |
2254 | struct dentry *root = debugfs_create_dir("memblock", NULL); |
2255 | |
2256 | debugfs_create_file("memory" , 0444, root, |
2257 | &memblock.memory, &memblock_debug_fops); |
2258 | debugfs_create_file("reserved" , 0444, root, |
2259 | &memblock.reserved, &memblock_debug_fops); |
2260 | #ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP |
2261 | debugfs_create_file("physmem" , 0444, root, &physmem, |
2262 | &memblock_debug_fops); |
2263 | #endif |
2264 | |
2265 | return 0; |
2266 | } |
2267 | __initcall(memblock_init_debugfs); |
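
/*
 * With debugfs mounted in the usual place, the tables can be inspected
 * at runtime, e.g. (illustrative shell session):
 *
 *	# cat /sys/kernel/debug/memblock/memory
 *	# cat /sys/kernel/debug/memblock/reserved
 *
 * Note these files exist only when CONFIG_ARCH_KEEP_MEMBLOCK is set,
 * since otherwise the memblock data is discarded after boot.
 */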
2268 | |
2269 | #endif /* CONFIG_DEBUG_FS && CONFIG_ARCH_KEEP_MEMBLOCK */ |
2270 | |