1 | /* SPDX-License-Identifier: GPL-2.0 */ |
2 | #ifndef _LINUX_MMZONE_H |
3 | #define _LINUX_MMZONE_H |
4 | |
5 | #ifndef __ASSEMBLY__ |
6 | #ifndef __GENERATING_BOUNDS_H |
7 | |
8 | #include <linux/spinlock.h> |
9 | #include <linux/list.h> |
10 | #include <linux/wait.h> |
11 | #include <linux/bitops.h> |
12 | #include <linux/cache.h> |
13 | #include <linux/threads.h> |
14 | #include <linux/numa.h> |
15 | #include <linux/init.h> |
16 | #include <linux/seqlock.h> |
17 | #include <linux/nodemask.h> |
18 | #include <linux/pageblock-flags.h> |
19 | #include <linux/page-flags-layout.h> |
20 | #include <linux/atomic.h> |
21 | #include <linux/mm_types.h> |
22 | #include <linux/page-flags.h> |
23 | #include <linux/local_lock.h> |
24 | #include <asm/page.h> |
25 | |
26 | /* Free memory management - zoned buddy allocator. */ |
27 | #ifndef CONFIG_FORCE_MAX_ZONEORDER |
28 | #define MAX_ORDER 11 |
29 | #else |
30 | #define MAX_ORDER CONFIG_FORCE_MAX_ZONEORDER |
31 | #endif |
32 | #define MAX_ORDER_NR_PAGES (1 << (MAX_ORDER - 1)) |
33 | |
34 | /* |
35 |  * PAGE_ALLOC_COSTLY_ORDER is the order at which allocations are deemed |
36 |  * costly to service. That is, it marks the boundary between allocation |
37 |  * orders that should coalesce naturally under reasonable reclaim |
38 |  * pressure and those that will not. |
39 | */ |
40 | #define PAGE_ALLOC_COSTLY_ORDER 3 |
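/*
 * Illustrative sketch (example_order_is_costly() is not a kernel helper; it
 * only demonstrates how the threshold above is used): an allocation is
 * treated as "costly" when its order is strictly greater than
 * PAGE_ALLOC_COSTLY_ORDER, i.e. order 4 and above with the default of 3.
 */
static inline bool example_order_is_costly(unsigned int order)
{
	return order > PAGE_ALLOC_COSTLY_ORDER;
}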
41 | |
42 | enum migratetype { |
43 | MIGRATE_UNMOVABLE, |
44 | MIGRATE_MOVABLE, |
45 | MIGRATE_RECLAIMABLE, |
46 | MIGRATE_PCPTYPES, /* the number of types on the pcp lists */ |
47 | MIGRATE_HIGHATOMIC = MIGRATE_PCPTYPES, |
48 | #ifdef CONFIG_CMA |
49 | /* |
50 | * MIGRATE_CMA migration type is designed to mimic the way |
51 | * ZONE_MOVABLE works. Only movable pages can be allocated |
52 | 	 * from MIGRATE_CMA pageblocks and the page allocator never |
53 | 	 * implicitly changes the migration type of a MIGRATE_CMA pageblock. |
54 | 	 * |
55 | 	 * The way to use it is to change the migratetype of a range of |
56 | 	 * pageblocks to MIGRATE_CMA, which can be done by the |
57 | 	 * __free_pageblock_cma() function. |
58 | */ |
59 | MIGRATE_CMA, |
60 | #endif |
61 | #ifdef CONFIG_MEMORY_ISOLATION |
62 | MIGRATE_ISOLATE, /* can't allocate from here */ |
63 | #endif |
64 | MIGRATE_TYPES |
65 | }; |
66 | |
67 | /* In mm/page_alloc.c; keep in sync also with show_migration_types() there */ |
68 | extern const char * const migratetype_names[MIGRATE_TYPES]; |
69 | |
70 | #ifdef CONFIG_CMA |
71 | # define is_migrate_cma(migratetype) unlikely((migratetype) == MIGRATE_CMA) |
72 | # define is_migrate_cma_page(_page) (get_pageblock_migratetype(_page) == MIGRATE_CMA) |
73 | #else |
74 | # define is_migrate_cma(migratetype) false |
75 | # define is_migrate_cma_page(_page) false |
76 | #endif |
77 | |
78 | static inline bool is_migrate_movable(int mt) |
79 | { |
80 | return is_migrate_cma(mt) || mt == MIGRATE_MOVABLE; |
81 | } |
82 | |
83 | /* |
84 | * Check whether a migratetype can be merged with another migratetype. |
85 | * |
86 | * It is only mergeable when it can fall back to other migratetypes for |
87 | * allocation. See fallbacks[MIGRATE_TYPES][3] in page_alloc.c. |
88 | */ |
89 | static inline bool migratetype_is_mergeable(int mt) |
90 | { |
91 | return mt < MIGRATE_PCPTYPES; |
92 | } |
93 | |
94 | #define for_each_migratetype_order(order, type) \ |
95 | for (order = 0; order < MAX_ORDER; order++) \ |
96 | for (type = 0; type < MIGRATE_TYPES; type++) |
97 | |
98 | extern int page_group_by_mobility_disabled; |
99 | |
100 | #define MIGRATETYPE_MASK ((1UL << PB_migratetype_bits) - 1) |
101 | |
102 | #define get_pageblock_migratetype(page) \ |
103 | get_pfnblock_flags_mask(page, page_to_pfn(page), MIGRATETYPE_MASK) |
104 | |
105 | struct free_area { |
106 | struct list_head free_list[MIGRATE_TYPES]; |
107 | unsigned long nr_free; |
108 | }; |
109 | |
110 | static inline struct page *get_page_from_free_area(struct free_area *area, |
111 | int migratetype) |
112 | { |
113 | return list_first_entry_or_null(&area->free_list[migratetype], |
114 | struct page, lru); |
115 | } |
116 | |
117 | static inline bool free_area_empty(struct free_area *area, int migratetype) |
118 | { |
119 | return list_empty(&area->free_list[migratetype]); |
120 | } |
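/*
 * Illustrative sketch, not used anywhere in the kernel: walk every
 * (order, migratetype) pair of an array of free areas with
 * for_each_migratetype_order() and count how many per-migratetype free
 * lists are currently populated, using free_area_empty() above.
 */
static inline unsigned int
example_nr_populated_free_lists(struct free_area *areas)
{
	unsigned int order, type, nr = 0;

	for_each_migratetype_order(order, type)
		if (!free_area_empty(&areas[order], type))
			nr++;

	return nr;
}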
121 | |
122 | struct pglist_data; |
123 | |
124 | /* |
125 |  * Add a wild amount of padding here to ensure the data falls into separate |
126 |  * cachelines. There are very few zone structures in the machine, so space |
127 |  * consumption is not a concern here. |
128 | */ |
129 | #if defined(CONFIG_SMP) |
130 | struct zone_padding { |
131 | char x[0]; |
132 | } ____cacheline_internodealigned_in_smp; |
133 | #define ZONE_PADDING(name) struct zone_padding name; |
134 | #else |
135 | #define ZONE_PADDING(name) |
136 | #endif |
137 | |
138 | #ifdef CONFIG_NUMA |
139 | enum numa_stat_item { |
140 | NUMA_HIT, /* allocated in intended node */ |
141 | 	NUMA_MISS,		/* allocated in non-intended node */ |
142 | NUMA_FOREIGN, /* was intended here, hit elsewhere */ |
143 | NUMA_INTERLEAVE_HIT, /* interleaver preferred this zone */ |
144 | NUMA_LOCAL, /* allocation from local node */ |
145 | NUMA_OTHER, /* allocation from other node */ |
146 | NR_VM_NUMA_EVENT_ITEMS |
147 | }; |
148 | #else |
149 | #define NR_VM_NUMA_EVENT_ITEMS 0 |
150 | #endif |
151 | |
152 | enum zone_stat_item { |
153 | /* First 128 byte cacheline (assuming 64 bit words) */ |
154 | NR_FREE_PAGES, |
155 | NR_ZONE_LRU_BASE, /* Used only for compaction and reclaim retry */ |
156 | NR_ZONE_INACTIVE_ANON = NR_ZONE_LRU_BASE, |
157 | NR_ZONE_ACTIVE_ANON, |
158 | NR_ZONE_INACTIVE_FILE, |
159 | NR_ZONE_ACTIVE_FILE, |
160 | NR_ZONE_UNEVICTABLE, |
161 | NR_ZONE_WRITE_PENDING, /* Count of dirty, writeback and unstable pages */ |
162 | NR_MLOCK, /* mlock()ed pages found and moved off LRU */ |
163 | /* Second 128 byte cacheline */ |
164 | NR_BOUNCE, |
165 | #if IS_ENABLED(CONFIG_ZSMALLOC) |
166 | NR_ZSPAGES, /* allocated in zsmalloc */ |
167 | #endif |
168 | NR_FREE_CMA_PAGES, |
169 | NR_VM_ZONE_STAT_ITEMS }; |
170 | |
171 | enum node_stat_item { |
172 | NR_LRU_BASE, |
173 | NR_INACTIVE_ANON = NR_LRU_BASE, /* must match order of LRU_[IN]ACTIVE */ |
174 | NR_ACTIVE_ANON, /* " " " " " */ |
175 | NR_INACTIVE_FILE, /* " " " " " */ |
176 | NR_ACTIVE_FILE, /* " " " " " */ |
177 | NR_UNEVICTABLE, /* " " " " " */ |
178 | NR_SLAB_RECLAIMABLE_B, |
179 | NR_SLAB_UNRECLAIMABLE_B, |
180 | NR_ISOLATED_ANON, /* Temporary isolated pages from anon lru */ |
181 | NR_ISOLATED_FILE, /* Temporary isolated pages from file lru */ |
182 | WORKINGSET_NODES, |
183 | WORKINGSET_REFAULT_BASE, |
184 | WORKINGSET_REFAULT_ANON = WORKINGSET_REFAULT_BASE, |
185 | WORKINGSET_REFAULT_FILE, |
186 | WORKINGSET_ACTIVATE_BASE, |
187 | WORKINGSET_ACTIVATE_ANON = WORKINGSET_ACTIVATE_BASE, |
188 | WORKINGSET_ACTIVATE_FILE, |
189 | WORKINGSET_RESTORE_BASE, |
190 | WORKINGSET_RESTORE_ANON = WORKINGSET_RESTORE_BASE, |
191 | WORKINGSET_RESTORE_FILE, |
192 | WORKINGSET_NODERECLAIM, |
193 | NR_ANON_MAPPED, /* Mapped anonymous pages */ |
194 | 	NR_FILE_MAPPED,	/* pagecache pages mapped into pagetables; |
195 | 			   only modified from process context */ |
196 | NR_FILE_PAGES, |
197 | NR_FILE_DIRTY, |
198 | NR_WRITEBACK, |
199 | NR_WRITEBACK_TEMP, /* Writeback using temporary buffers */ |
200 | 	NR_SHMEM,		/* shmem pages (includes tmpfs/GEM pages) */ |
201 | NR_SHMEM_THPS, |
202 | NR_SHMEM_PMDMAPPED, |
203 | NR_FILE_THPS, |
204 | NR_FILE_PMDMAPPED, |
205 | NR_ANON_THPS, |
206 | NR_VMSCAN_WRITE, |
207 | NR_VMSCAN_IMMEDIATE, /* Prioritise for reclaim when writeback ends */ |
208 | NR_DIRTIED, /* page dirtyings since bootup */ |
209 | NR_WRITTEN, /* page writings since bootup */ |
210 | NR_THROTTLED_WRITTEN, /* NR_WRITTEN while reclaim throttled */ |
211 | NR_KERNEL_MISC_RECLAIMABLE, /* reclaimable non-slab kernel pages */ |
212 | NR_FOLL_PIN_ACQUIRED, /* via: pin_user_page(), gup flag: FOLL_PIN */ |
213 | NR_FOLL_PIN_RELEASED, /* pages returned via unpin_user_page() */ |
214 | NR_KERNEL_STACK_KB, /* measured in KiB */ |
215 | #if IS_ENABLED(CONFIG_SHADOW_CALL_STACK) |
216 | NR_KERNEL_SCS_KB, /* measured in KiB */ |
217 | #endif |
218 | NR_PAGETABLE, /* used for pagetables */ |
219 | #ifdef CONFIG_SWAP |
220 | NR_SWAPCACHE, |
221 | #endif |
222 | #ifdef CONFIG_NUMA_BALANCING |
223 | 	PGPROMOTE_SUCCESS,	/* pages promoted successfully */ |
224 | #endif |
225 | NR_VM_NODE_STAT_ITEMS |
226 | }; |
227 | |
228 | /* |
229 |  * Returns true if the item should be printed in THPs (/proc/vmstat |
230 |  * currently prints the number of anon, file and shmem THPs, but the |
231 |  * item is charged in pages). |
232 | */ |
233 | static __always_inline bool vmstat_item_print_in_thp(enum node_stat_item item) |
234 | { |
235 | if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) |
236 | return false; |
237 | |
238 | return item == NR_ANON_THPS || |
239 | item == NR_FILE_THPS || |
240 | item == NR_SHMEM_THPS || |
241 | item == NR_SHMEM_PMDMAPPED || |
242 | item == NR_FILE_PMDMAPPED; |
243 | } |
244 | |
245 | /* |
246 | * Returns true if the value is measured in bytes (most vmstat values are |
247 | * measured in pages). This defines the API part, the internal representation |
248 | * might be different. |
249 | */ |
250 | static __always_inline bool vmstat_item_in_bytes(int idx) |
251 | { |
252 | /* |
253 | * Global and per-node slab counters track slab pages. |
254 | * It's expected that changes are multiples of PAGE_SIZE. |
255 | * Internally values are stored in pages. |
256 | * |
257 | 	 * Per-memcg and per-lruvec counters track memory consumed by |
258 | 	 * individual slab objects. These counters are actually |
259 | * byte-precise. |
260 | */ |
261 | return (idx == NR_SLAB_RECLAIMABLE_B || |
262 | idx == NR_SLAB_UNRECLAIMABLE_B); |
263 | } |
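/*
 * Illustrative sketch (example_stat_value_in_bytes() is not a kernel
 * helper): a consumer that wants every exported value in bytes can use
 * vmstat_item_in_bytes() to decide whether a value still needs to be
 * scaled by the page size.
 */
static inline unsigned long example_stat_value_in_bytes(int idx,
							unsigned long val)
{
	return vmstat_item_in_bytes(idx) ? val : val << PAGE_SHIFT;
}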
264 | |
265 | /* |
266 | * We do arithmetic on the LRU lists in various places in the code, |
267 | * so it is important to keep the active lists LRU_ACTIVE higher in |
268 | * the array than the corresponding inactive lists, and to keep |
269 | * the *_FILE lists LRU_FILE higher than the corresponding _ANON lists. |
270 | * |
271 |  * This has to be kept in sync with the statistics in node_stat_item |
272 |  * above and the descriptions in vmstat_text in mm/vmstat.c |
273 | */ |
274 | #define LRU_BASE 0 |
275 | #define LRU_ACTIVE 1 |
276 | #define LRU_FILE 2 |
277 | |
278 | enum lru_list { |
279 | LRU_INACTIVE_ANON = LRU_BASE, |
280 | LRU_ACTIVE_ANON = LRU_BASE + LRU_ACTIVE, |
281 | LRU_INACTIVE_FILE = LRU_BASE + LRU_FILE, |
282 | LRU_ACTIVE_FILE = LRU_BASE + LRU_FILE + LRU_ACTIVE, |
283 | LRU_UNEVICTABLE, |
284 | NR_LRU_LISTS |
285 | }; |
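/*
 * Illustrative sketch of the arithmetic described above (the kernel's real
 * helpers for this live in mm_inline.h): the target LRU list is simply
 * LRU_BASE plus LRU_FILE for file-backed pages plus LRU_ACTIVE for active
 * pages, so no switch statement is needed. example_lru_index() is not part
 * of the kernel API.
 */
static inline enum lru_list example_lru_index(bool file, bool active)
{
	enum lru_list lru = LRU_BASE;

	if (file)
		lru += LRU_FILE;
	if (active)
		lru += LRU_ACTIVE;

	return lru;
}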
286 | |
287 | enum vmscan_throttle_state { |
288 | VMSCAN_THROTTLE_WRITEBACK, |
289 | VMSCAN_THROTTLE_ISOLATED, |
290 | VMSCAN_THROTTLE_NOPROGRESS, |
291 | VMSCAN_THROTTLE_CONGESTED, |
292 | NR_VMSCAN_THROTTLE, |
293 | }; |
294 | |
295 | #define for_each_lru(lru) for (lru = 0; lru < NR_LRU_LISTS; lru++) |
296 | |
297 | #define for_each_evictable_lru(lru) for (lru = 0; lru <= LRU_ACTIVE_FILE; lru++) |
298 | |
299 | static inline bool is_file_lru(enum lru_list lru) |
300 | { |
301 | return (lru == LRU_INACTIVE_FILE || lru == LRU_ACTIVE_FILE); |
302 | } |
303 | |
304 | static inline bool is_active_lru(enum lru_list lru) |
305 | { |
306 | return (lru == LRU_ACTIVE_ANON || lru == LRU_ACTIVE_FILE); |
307 | } |
308 | |
309 | #define ANON_AND_FILE 2 |
310 | |
311 | enum lruvec_flags { |
312 | LRUVEC_CONGESTED, /* lruvec has many dirty pages |
313 | * backed by a congested BDI |
314 | */ |
315 | }; |
316 | |
317 | struct lruvec { |
318 | struct list_head lists[NR_LRU_LISTS]; |
319 | /* per lruvec lru_lock for memcg */ |
320 | spinlock_t lru_lock; |
321 | /* |
322 | * These track the cost of reclaiming one LRU - file or anon - |
323 | * over the other. As the observed cost of reclaiming one LRU |
324 | * increases, the reclaim scan balance tips toward the other. |
325 | */ |
326 | unsigned long anon_cost; |
327 | unsigned long file_cost; |
328 | /* Non-resident age, driven by LRU movement */ |
329 | atomic_long_t nonresident_age; |
330 | /* Refaults at the time of last reclaim cycle */ |
331 | unsigned long refaults[ANON_AND_FILE]; |
332 | /* Various lruvec state flags (enum lruvec_flags) */ |
333 | unsigned long flags; |
334 | #ifdef CONFIG_MEMCG |
335 | struct pglist_data *pgdat; |
336 | #endif |
337 | }; |
338 | |
339 | /* Isolate unmapped pages */ |
340 | #define ISOLATE_UNMAPPED ((__force isolate_mode_t)0x2) |
341 | /* Isolate for asynchronous migration */ |
342 | #define ISOLATE_ASYNC_MIGRATE ((__force isolate_mode_t)0x4) |
343 | /* Isolate unevictable pages */ |
344 | #define ISOLATE_UNEVICTABLE ((__force isolate_mode_t)0x8) |
345 | |
346 | /* LRU Isolation modes. */ |
347 | typedef unsigned __bitwise isolate_mode_t; |
348 | |
349 | enum zone_watermarks { |
350 | WMARK_MIN, |
351 | WMARK_LOW, |
352 | WMARK_HIGH, |
353 | WMARK_PROMO, |
354 | NR_WMARK |
355 | }; |
356 | |
357 | /* |
358 |  * One list per migratetype for each order up to and including |
359 |  * PAGE_ALLOC_COSTLY_ORDER, plus one additional list for THP, which will |
360 |  * usually be __GFP_MOVABLE. Even if it is another type, it should not |
361 |  * contribute to serious fragmentation causing THP allocation failures. |
362 | */ |
363 | #ifdef CONFIG_TRANSPARENT_HUGEPAGE |
364 | #define NR_PCP_THP 1 |
365 | #else |
366 | #define NR_PCP_THP 0 |
367 | #endif |
368 | #define NR_LOWORDER_PCP_LISTS (MIGRATE_PCPTYPES * (PAGE_ALLOC_COSTLY_ORDER + 1)) |
369 | #define NR_PCP_LISTS (NR_LOWORDER_PCP_LISTS + NR_PCP_THP) |
370 | |
371 | /* |
372 | * Shift to encode migratetype and order in the same integer, with order |
373 | * in the least significant bits. |
374 | */ |
375 | #define NR_PCP_ORDER_WIDTH 8 |
376 | #define NR_PCP_ORDER_MASK ((1<<NR_PCP_ORDER_WIDTH) - 1) |
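/*
 * Illustrative sketch of the encoding described above; the example_* names
 * are made up, but the layout matches the comment: the order occupies the
 * low NR_PCP_ORDER_WIDTH bits and the migratetype is stored above it.
 */
static inline unsigned long example_encode_pcp(unsigned int migratetype,
					       unsigned int order)
{
	return ((unsigned long)migratetype << NR_PCP_ORDER_WIDTH) | order;
}

static inline unsigned int example_decode_pcp_order(unsigned long val)
{
	return val & NR_PCP_ORDER_MASK;
}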
377 | |
378 | #define min_wmark_pages(z) (z->_watermark[WMARK_MIN] + z->watermark_boost) |
379 | #define low_wmark_pages(z) (z->_watermark[WMARK_LOW] + z->watermark_boost) |
380 | #define high_wmark_pages(z) (z->_watermark[WMARK_HIGH] + z->watermark_boost) |
381 | #define wmark_pages(z, i) (z->_watermark[i] + z->watermark_boost) |
382 | |
383 | /* Fields and list protected by pagesets local_lock in page_alloc.c */ |
384 | struct per_cpu_pages { |
385 | spinlock_t lock; /* Protects lists field */ |
386 | int count; /* number of pages in the list */ |
387 | int high; /* high watermark, emptying needed */ |
388 | int batch; /* chunk size for buddy add/remove */ |
389 | short free_factor; /* batch scaling factor during free */ |
390 | #ifdef CONFIG_NUMA |
391 | short expire; /* When 0, remote pagesets are drained */ |
392 | #endif |
393 | |
394 | /* Lists of pages, one per migrate type stored on the pcp-lists */ |
395 | struct list_head lists[NR_PCP_LISTS]; |
396 | } ____cacheline_aligned_in_smp; |
397 | |
398 | struct per_cpu_zonestat { |
399 | #ifdef CONFIG_SMP |
400 | s8 vm_stat_diff[NR_VM_ZONE_STAT_ITEMS]; |
401 | s8 stat_threshold; |
402 | #endif |
403 | #ifdef CONFIG_NUMA |
404 | /* |
405 | * Low priority inaccurate counters that are only folded |
406 | * on demand. Use a large type to avoid the overhead of |
407 | * folding during refresh_cpu_vm_stats. |
408 | */ |
409 | unsigned long vm_numa_event[NR_VM_NUMA_EVENT_ITEMS]; |
410 | #endif |
411 | }; |
412 | |
413 | struct per_cpu_nodestat { |
414 | s8 stat_threshold; |
415 | s8 vm_node_stat_diff[NR_VM_NODE_STAT_ITEMS]; |
416 | }; |
417 | |
418 | #endif /* !__GENERATING_BOUNDS_H */ |
419 | |
420 | enum zone_type { |
421 | /* |
422 | * ZONE_DMA and ZONE_DMA32 are used when there are peripherals not able |
423 | * to DMA to all of the addressable memory (ZONE_NORMAL). |
424 | * On architectures where this area covers the whole 32 bit address |
425 | * space ZONE_DMA32 is used. ZONE_DMA is left for the ones with smaller |
426 | * DMA addressing constraints. This distinction is important as a 32bit |
427 | * DMA mask is assumed when ZONE_DMA32 is defined. Some 64-bit |
428 | * platforms may need both zones as they support peripherals with |
429 | * different DMA addressing limitations. |
430 | */ |
431 | #ifdef CONFIG_ZONE_DMA |
432 | ZONE_DMA, |
433 | #endif |
434 | #ifdef CONFIG_ZONE_DMA32 |
435 | ZONE_DMA32, |
436 | #endif |
437 | /* |
438 | * Normal addressable memory is in ZONE_NORMAL. DMA operations can be |
439 | * performed on pages in ZONE_NORMAL if the DMA devices support |
440 | * transfers to all addressable memory. |
441 | */ |
442 | ZONE_NORMAL, |
443 | #ifdef CONFIG_HIGHMEM |
444 | /* |
445 | * A memory area that is only addressable by the kernel through |
446 | * mapping portions into its own address space. This is for example |
447 | * used by i386 to allow the kernel to address the memory beyond |
448 | * 900MB. The kernel will set up special mappings (page |
449 | * table entries on i386) for each page that the kernel needs to |
450 | * access. |
451 | */ |
452 | ZONE_HIGHMEM, |
453 | #endif |
454 | /* |
455 | * ZONE_MOVABLE is similar to ZONE_NORMAL, except that it contains |
456 | * movable pages with few exceptional cases described below. Main use |
457 | * cases for ZONE_MOVABLE are to make memory offlining/unplug more |
458 | * likely to succeed, and to locally limit unmovable allocations - e.g., |
459 | * to increase the number of THP/huge pages. Notable special cases are: |
460 | * |
461 | * 1. Pinned pages: (long-term) pinning of movable pages might |
462 | * essentially turn such pages unmovable. Therefore, we do not allow |
463 | * pinning long-term pages in ZONE_MOVABLE. When pages are pinned and |
464 | * faulted, they come from the right zone right away. However, it is |
465 |  *    still possible that the address space already has pages in |
466 |  *    ZONE_MOVABLE at the time when pages are pinned (i.e. the user has |
467 |  *    touched that memory before pinning). In such a case we migrate |
468 |  *    them to a different zone. When migration fails, pinning fails. |
469 | * 2. memblock allocations: kernelcore/movablecore setups might create |
470 | * situations where ZONE_MOVABLE contains unmovable allocations |
471 | * after boot. Memory offlining and allocations fail early. |
472 | * 3. Memory holes: kernelcore/movablecore setups might create very rare |
473 | * situations where ZONE_MOVABLE contains memory holes after boot, |
474 | * for example, if we have sections that are only partially |
475 | * populated. Memory offlining and allocations fail early. |
476 | * 4. PG_hwpoison pages: while poisoned pages can be skipped during |
477 | * memory offlining, such pages cannot be allocated. |
478 | * 5. Unmovable PG_offline pages: in paravirtualized environments, |
479 | * hotplugged memory blocks might only partially be managed by the |
480 | * buddy (e.g., via XEN-balloon, Hyper-V balloon, virtio-mem). The |
481 |  *    parts not managed by the buddy are unmovable PG_offline pages. In |
482 |  *    some cases (virtio-mem), such pages can be skipped during |
483 |  *    memory offlining; they cannot, however, be moved or allocated. These |
484 | * techniques might use alloc_contig_range() to hide previously |
485 | * exposed pages from the buddy again (e.g., to implement some sort |
486 | * of memory unplug in virtio-mem). |
487 |  * 6. ZERO_PAGE(0): kernelcore/movablecore setups might create |
488 |  *    situations where ZERO_PAGE(0), which is allocated differently |
489 |  *    on different platforms, may end up in a movable zone. ZERO_PAGE(0) |
490 |  *    cannot be migrated. |
491 | * 7. Memory-hotplug: when using memmap_on_memory and onlining the |
492 | * memory to the MOVABLE zone, the vmemmap pages are also placed in |
493 | * such zone. Such pages cannot be really moved around as they are |
494 | * self-stored in the range, but they are treated as movable when |
495 | * the range they describe is about to be offlined. |
496 | * |
497 | * In general, no unmovable allocations that degrade memory offlining |
498 | * should end up in ZONE_MOVABLE. Allocators (like alloc_contig_range()) |
499 | * have to expect that migrating pages in ZONE_MOVABLE can fail (even |
500 | * if has_unmovable_pages() states that there are no unmovable pages, |
501 | * there can be false negatives). |
502 | */ |
503 | ZONE_MOVABLE, |
504 | #ifdef CONFIG_ZONE_DEVICE |
505 | ZONE_DEVICE, |
506 | #endif |
507 | __MAX_NR_ZONES |
508 | |
509 | }; |
510 | |
511 | #ifndef __GENERATING_BOUNDS_H |
512 | |
513 | #define ASYNC_AND_SYNC 2 |
514 | |
515 | struct zone { |
516 | /* Read-mostly fields */ |
517 | |
518 | /* zone watermarks, access with *_wmark_pages(zone) macros */ |
519 | unsigned long _watermark[NR_WMARK]; |
520 | unsigned long watermark_boost; |
521 | |
522 | unsigned long nr_reserved_highatomic; |
523 | |
524 | /* |
525 | 	 * We don't know if the memory that we're going to allocate will be |
526 | 	 * freeable and/or whether it will be released eventually, so to avoid |
527 | 	 * totally wasting several GB of RAM we must reserve some of the lower |
528 | 	 * zone memory (otherwise we risk running OOM on the lower zones despite |
529 | 	 * there being tons of freeable RAM on the higher zones). This array is |
530 | 	 * recalculated at runtime if the sysctl_lowmem_reserve_ratio sysctl |
531 | 	 * changes. |
532 | */ |
533 | long lowmem_reserve[MAX_NR_ZONES]; |
534 | |
535 | #ifdef CONFIG_NUMA |
536 | int node; |
537 | #endif |
538 | struct pglist_data *zone_pgdat; |
539 | struct per_cpu_pages __percpu *per_cpu_pageset; |
540 | struct per_cpu_zonestat __percpu *per_cpu_zonestats; |
541 | /* |
542 | * the high and batch values are copied to individual pagesets for |
543 | * faster access |
544 | */ |
545 | int pageset_high; |
546 | int pageset_batch; |
547 | |
548 | #ifndef CONFIG_SPARSEMEM |
549 | /* |
550 | * Flags for a pageblock_nr_pages block. See pageblock-flags.h. |
551 | * In SPARSEMEM, this map is stored in struct mem_section |
552 | */ |
553 | unsigned long *pageblock_flags; |
554 | #endif /* CONFIG_SPARSEMEM */ |
555 | |
556 | /* zone_start_pfn == zone_start_paddr >> PAGE_SHIFT */ |
557 | unsigned long zone_start_pfn; |
558 | |
559 | /* |
560 | * spanned_pages is the total pages spanned by the zone, including |
561 | * holes, which is calculated as: |
562 | * spanned_pages = zone_end_pfn - zone_start_pfn; |
563 | * |
564 | * present_pages is physical pages existing within the zone, which |
565 | * is calculated as: |
566 | * present_pages = spanned_pages - absent_pages(pages in holes); |
567 | * |
568 | * present_early_pages is present pages existing within the zone |
569 | * located on memory available since early boot, excluding hotplugged |
570 | * memory. |
571 | * |
572 | * managed_pages is present pages managed by the buddy system, which |
573 | * is calculated as (reserved_pages includes pages allocated by the |
574 | * bootmem allocator): |
575 | * managed_pages = present_pages - reserved_pages; |
576 | * |
577 | 	 * cma_pages is the number of present pages assigned for CMA use |
578 | 	 * (MIGRATE_CMA). |
579 | * |
580 | * So present_pages may be used by memory hotplug or memory power |
581 | * management logic to figure out unmanaged pages by checking |
582 | * (present_pages - managed_pages). And managed_pages should be used |
583 | * by page allocator and vm scanner to calculate all kinds of watermarks |
584 | * and thresholds. |
585 | * |
586 | * Locking rules: |
587 | * |
588 | * zone_start_pfn and spanned_pages are protected by span_seqlock. |
589 | * It is a seqlock because it has to be read outside of zone->lock, |
590 | * and it is done in the main allocator path. But, it is written |
591 | * quite infrequently. |
592 | * |
593 | 	 * The span_seqlock is declared along with zone->lock because it is |
594 | * frequently read in proximity to zone->lock. It's good to |
595 | * give them a chance of being in the same cacheline. |
596 | * |
597 | * Write access to present_pages at runtime should be protected by |
598 | 	 * mem_hotplug_begin/done(). Any reader who can't tolerate drift of |
599 | * present_pages should use get_online_mems() to get a stable value. |
600 | */ |
601 | atomic_long_t managed_pages; |
602 | unsigned long spanned_pages; |
603 | unsigned long present_pages; |
604 | #if defined(CONFIG_MEMORY_HOTPLUG) |
605 | unsigned long present_early_pages; |
606 | #endif |
607 | #ifdef CONFIG_CMA |
608 | unsigned long cma_pages; |
609 | #endif |
610 | |
611 | const char *name; |
612 | |
613 | #ifdef CONFIG_MEMORY_ISOLATION |
614 | /* |
615 | 	 * Number of isolated pageblocks. It is used to solve an incorrect |
616 | 	 * freepage counting problem due to racy retrieval of the migratetype |
617 | 	 * of a pageblock. Protected by zone->lock. |
618 | */ |
619 | unsigned long nr_isolate_pageblock; |
620 | #endif |
621 | |
622 | #ifdef CONFIG_MEMORY_HOTPLUG |
623 | /* see spanned/present_pages for more description */ |
624 | seqlock_t span_seqlock; |
625 | #endif |
626 | |
627 | int initialized; |
628 | |
629 | /* Write-intensive fields used from the page allocator */ |
630 | ZONE_PADDING(_pad1_) |
631 | |
632 | /* free areas of different sizes */ |
633 | struct free_area free_area[MAX_ORDER]; |
634 | |
635 | /* zone flags, see below */ |
636 | unsigned long flags; |
637 | |
638 | /* Primarily protects free_area */ |
639 | spinlock_t lock; |
640 | |
641 | /* Write-intensive fields used by compaction and vmstats. */ |
642 | ZONE_PADDING(_pad2_) |
643 | |
644 | /* |
645 | * When free pages are below this point, additional steps are taken |
646 | * when reading the number of free pages to avoid per-cpu counter |
647 | * drift allowing watermarks to be breached |
648 | */ |
649 | unsigned long percpu_drift_mark; |
650 | |
651 | #if defined CONFIG_COMPACTION || defined CONFIG_CMA |
652 | /* pfn where compaction free scanner should start */ |
653 | unsigned long compact_cached_free_pfn; |
654 | /* pfn where compaction migration scanner should start */ |
655 | unsigned long compact_cached_migrate_pfn[ASYNC_AND_SYNC]; |
656 | unsigned long compact_init_migrate_pfn; |
657 | unsigned long compact_init_free_pfn; |
658 | #endif |
659 | |
660 | #ifdef CONFIG_COMPACTION |
661 | /* |
662 | * On compaction failure, 1<<compact_defer_shift compactions |
663 | * are skipped before trying again. The number attempted since |
664 | * last failure is tracked with compact_considered. |
665 | * compact_order_failed is the minimum compaction failed order. |
666 | */ |
667 | unsigned int compact_considered; |
668 | unsigned int compact_defer_shift; |
669 | int compact_order_failed; |
670 | #endif |
671 | |
672 | #if defined CONFIG_COMPACTION || defined CONFIG_CMA |
673 | /* Set to true when the PG_migrate_skip bits should be cleared */ |
674 | bool compact_blockskip_flush; |
675 | #endif |
676 | |
677 | bool contiguous; |
678 | |
679 | ZONE_PADDING(_pad3_) |
680 | /* Zone statistics */ |
681 | atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS]; |
682 | atomic_long_t vm_numa_event[NR_VM_NUMA_EVENT_ITEMS]; |
683 | } ____cacheline_internodealigned_in_smp; |
684 | |
685 | enum pgdat_flags { |
686 | PGDAT_DIRTY, /* reclaim scanning has recently found |
687 | * many dirty file pages at the tail |
688 | * of the LRU. |
689 | */ |
690 | PGDAT_WRITEBACK, /* reclaim scanning has recently found |
691 | * many pages under writeback |
692 | */ |
693 | PGDAT_RECLAIM_LOCKED, /* prevents concurrent reclaim */ |
694 | }; |
695 | |
696 | enum zone_flags { |
697 | ZONE_BOOSTED_WATERMARK, /* zone recently boosted watermarks. |
698 | * Cleared when kswapd is woken. |
699 | */ |
700 | ZONE_RECLAIM_ACTIVE, /* kswapd may be scanning the zone. */ |
701 | }; |
702 | |
703 | static inline unsigned long zone_managed_pages(struct zone *zone) |
704 | { |
705 | return (unsigned long)atomic_long_read(&zone->managed_pages); |
706 | } |
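/*
 * Illustrative sketch (example_zone_below_low_wmark() is not a kernel
 * helper): the *_wmark_pages() macros above are how the boosted watermarks
 * are meant to be read. The real zone_watermark_ok() check in page_alloc.c
 * additionally accounts for lowmem_reserve, highatomic reserves and
 * allocation flags.
 */
static inline bool example_zone_below_low_wmark(struct zone *z,
						unsigned long free_pages)
{
	return free_pages < low_wmark_pages(z);
}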
707 | |
708 | static inline unsigned long zone_cma_pages(struct zone *zone) |
709 | { |
710 | #ifdef CONFIG_CMA |
711 | return zone->cma_pages; |
712 | #else |
713 | return 0; |
714 | #endif |
715 | } |
716 | |
717 | static inline unsigned long zone_end_pfn(const struct zone *zone) |
718 | { |
719 | return zone->zone_start_pfn + zone->spanned_pages; |
720 | } |
721 | |
722 | static inline bool zone_spans_pfn(const struct zone *zone, unsigned long pfn) |
723 | { |
724 | return zone->zone_start_pfn <= pfn && pfn < zone_end_pfn(zone); |
725 | } |
726 | |
727 | static inline bool zone_is_initialized(struct zone *zone) |
728 | { |
729 | return zone->initialized; |
730 | } |
731 | |
732 | static inline bool zone_is_empty(struct zone *zone) |
733 | { |
734 | return zone->spanned_pages == 0; |
735 | } |
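/*
 * Illustrative sketch built on the span helpers above (not a kernel
 * helper): check whether an entire pfn range lies within a zone's span.
 * Assumes nr_pages is at least 1.
 */
static inline bool example_range_within_zone_span(struct zone *zone,
						  unsigned long start_pfn,
						  unsigned long nr_pages)
{
	return zone_spans_pfn(zone, start_pfn) &&
	       zone_spans_pfn(zone, start_pfn + nr_pages - 1);
}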
736 | |
737 | #ifndef BUILD_VDSO32_64 |
738 | /* |
739 | * The zone field is never updated after free_area_init_core() |
740 | * sets it, so none of the operations on it need to be atomic. |
741 | */ |
742 | |
743 | /* Page flags: | [SECTION] | [NODE] | ZONE | [LAST_CPUPID] | ... | FLAGS | */ |
744 | #define SECTIONS_PGOFF ((sizeof(unsigned long)*8) - SECTIONS_WIDTH) |
745 | #define NODES_PGOFF (SECTIONS_PGOFF - NODES_WIDTH) |
746 | #define ZONES_PGOFF (NODES_PGOFF - ZONES_WIDTH) |
747 | #define LAST_CPUPID_PGOFF (ZONES_PGOFF - LAST_CPUPID_WIDTH) |
748 | #define KASAN_TAG_PGOFF (LAST_CPUPID_PGOFF - KASAN_TAG_WIDTH) |
749 | |
750 | /* |
751 | * Define the bit shifts to access each section. For non-existent |
752 | * sections we define the shift as 0; that plus a 0 mask ensures |
753 |  * the compiler will optimise away references to them. |
754 | */ |
755 | #define SECTIONS_PGSHIFT (SECTIONS_PGOFF * (SECTIONS_WIDTH != 0)) |
756 | #define NODES_PGSHIFT (NODES_PGOFF * (NODES_WIDTH != 0)) |
757 | #define ZONES_PGSHIFT (ZONES_PGOFF * (ZONES_WIDTH != 0)) |
758 | #define LAST_CPUPID_PGSHIFT (LAST_CPUPID_PGOFF * (LAST_CPUPID_WIDTH != 0)) |
759 | #define KASAN_TAG_PGSHIFT (KASAN_TAG_PGOFF * (KASAN_TAG_WIDTH != 0)) |
760 | |
761 | /* NODE:ZONE or SECTION:ZONE is used to ID a zone for the buddy allocator */ |
762 | #ifdef NODE_NOT_IN_PAGE_FLAGS |
763 | #define ZONEID_SHIFT (SECTIONS_SHIFT + ZONES_SHIFT) |
764 | #define ZONEID_PGOFF ((SECTIONS_PGOFF < ZONES_PGOFF) ? \ |
765 | SECTIONS_PGOFF : ZONES_PGOFF) |
766 | #else |
767 | #define ZONEID_SHIFT (NODES_SHIFT + ZONES_SHIFT) |
768 | #define ZONEID_PGOFF ((NODES_PGOFF < ZONES_PGOFF) ? \ |
769 | NODES_PGOFF : ZONES_PGOFF) |
770 | #endif |
771 | |
772 | #define ZONEID_PGSHIFT (ZONEID_PGOFF * (ZONEID_SHIFT != 0)) |
773 | |
774 | #define ZONES_MASK ((1UL << ZONES_WIDTH) - 1) |
775 | #define NODES_MASK ((1UL << NODES_WIDTH) - 1) |
776 | #define SECTIONS_MASK ((1UL << SECTIONS_WIDTH) - 1) |
777 | #define LAST_CPUPID_MASK ((1UL << LAST_CPUPID_SHIFT) - 1) |
778 | #define KASAN_TAG_MASK ((1UL << KASAN_TAG_WIDTH) - 1) |
779 | #define ZONEID_MASK ((1UL << ZONEID_SHIFT) - 1) |
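/*
 * Illustrative sketch: ZONEID_PGSHIFT and ZONEID_MASK combine the zone
 * index with either the node or the section bits to form a compact zone
 * id; page_zone_id() in linux/mm.h extracts it in essentially this way.
 * example_page_zone_id() itself is not a kernel helper.
 */
static inline int example_page_zone_id(const struct page *page)
{
	return (page->flags >> ZONEID_PGSHIFT) & ZONEID_MASK;
}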
780 | |
781 | static inline enum zone_type page_zonenum(const struct page *page) |
782 | { |
783 | ASSERT_EXCLUSIVE_BITS(page->flags, ZONES_MASK << ZONES_PGSHIFT); |
784 | return (page->flags >> ZONES_PGSHIFT) & ZONES_MASK; |
785 | } |
786 | |
787 | static inline enum zone_type folio_zonenum(const struct folio *folio) |
788 | { |
789 | return page_zonenum(&folio->page); |
790 | } |
791 | |
792 | #ifdef CONFIG_ZONE_DEVICE |
793 | static inline bool is_zone_device_page(const struct page *page) |
794 | { |
795 | return page_zonenum(page) == ZONE_DEVICE; |
796 | } |
797 | extern void memmap_init_zone_device(struct zone *, unsigned long, |
798 | unsigned long, struct dev_pagemap *); |
799 | #else |
800 | static inline bool is_zone_device_page(const struct page *page) |
801 | { |
802 | return false; |
803 | } |
804 | #endif |
805 | |
806 | static inline bool folio_is_zone_device(const struct folio *folio) |
807 | { |
808 | return is_zone_device_page(&folio->page); |
809 | } |
810 | |
811 | static inline bool is_zone_movable_page(const struct page *page) |
812 | { |
813 | return page_zonenum(page) == ZONE_MOVABLE; |
814 | } |
815 | #endif |
816 | |
817 | /* |
818 | * Return true if [start_pfn, start_pfn + nr_pages) range has a non-empty |
819 | * intersection with the given zone |
820 | */ |
821 | static inline bool zone_intersects(struct zone *zone, |
822 | unsigned long start_pfn, unsigned long nr_pages) |
823 | { |
824 | if (zone_is_empty(zone)) |
825 | return false; |
826 | if (start_pfn >= zone_end_pfn(zone) || |
827 | start_pfn + nr_pages <= zone->zone_start_pfn) |
828 | return false; |
829 | |
830 | return true; |
831 | } |
832 | |
833 | /* |
834 | * The "priority" of VM scanning is how much of the queues we will scan in one |
835 | * go. A value of 12 for DEF_PRIORITY implies that we will scan 1/4096th of the |
836 | * queues ("queue_length >> 12") during an aging round. |
837 | */ |
838 | #define DEF_PRIORITY 12 |
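/*
 * Illustrative sketch of the scaling described above (not a kernel
 * helper): at DEF_PRIORITY an LRU list of about one million pages
 * contributes roughly 1000000 >> 12 == 244 pages to a single scan pass,
 * and every drop in priority doubles that share.
 */
static inline unsigned long example_scan_target(unsigned long lru_size,
						int priority)
{
	return lru_size >> priority;
}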
839 | |
840 | /* Maximum number of zones on a zonelist */ |
841 | #define MAX_ZONES_PER_ZONELIST (MAX_NUMNODES * MAX_NR_ZONES) |
842 | |
843 | enum { |
844 | ZONELIST_FALLBACK, /* zonelist with fallback */ |
845 | #ifdef CONFIG_NUMA |
846 | /* |
847 | * The NUMA zonelists are doubled because we need zonelists that |
848 | * restrict the allocations to a single node for __GFP_THISNODE. |
849 | */ |
850 | ZONELIST_NOFALLBACK, /* zonelist without fallback (__GFP_THISNODE) */ |
851 | #endif |
852 | MAX_ZONELISTS |
853 | }; |
854 | |
855 | /* |
856 | * This struct contains information about a zone in a zonelist. It is stored |
857 | * here to avoid dereferences into large structures and lookups of tables |
858 | */ |
859 | struct zoneref { |
860 | struct zone *zone; /* Pointer to actual zone */ |
861 | int zone_idx; /* zone_idx(zoneref->zone) */ |
862 | }; |
863 | |
864 | /* |
865 |  * One allocation request operates on a zonelist. A zonelist |
866 |  * is a list of zones; the first one is the 'goal' of the |
867 |  * allocation, and the other zones are fallback zones, in decreasing |
868 |  * priority. |
869 | * |
870 | * To speed the reading of the zonelist, the zonerefs contain the zone index |
871 | * of the entry being read. Helper functions to access information given |
872 | * a struct zoneref are |
873 | * |
874 | * zonelist_zone() - Return the struct zone * for an entry in _zonerefs |
875 | * zonelist_zone_idx() - Return the index of the zone for an entry |
876 | * zonelist_node_idx() - Return the index of the node for an entry |
877 | */ |
878 | struct zonelist { |
879 | struct zoneref _zonerefs[MAX_ZONES_PER_ZONELIST + 1]; |
880 | }; |
881 | |
882 | /* |
883 | * The array of struct pages for flatmem. |
884 | * It must be declared for SPARSEMEM as well because there are configurations |
885 | * that rely on that. |
886 | */ |
887 | extern struct page *mem_map; |
888 | |
889 | #ifdef CONFIG_TRANSPARENT_HUGEPAGE |
890 | struct deferred_split { |
891 | spinlock_t split_queue_lock; |
892 | struct list_head split_queue; |
893 | unsigned long split_queue_len; |
894 | }; |
895 | #endif |
896 | |
897 | /* |
898 |  * On NUMA machines, each NUMA node has a pg_data_t to describe |
899 |  * its memory layout. On UMA machines there is a single pglist_data which |
900 | * describes the whole memory. |
901 | * |
902 | * Memory statistics and page replacement data structures are maintained on a |
903 | * per-zone basis. |
904 | */ |
905 | typedef struct pglist_data { |
906 | /* |
907 | * node_zones contains just the zones for THIS node. Not all of the |
908 | * zones may be populated, but it is the full list. It is referenced by |
909 | * this node's node_zonelists as well as other node's node_zonelists. |
910 | */ |
911 | struct zone node_zones[MAX_NR_ZONES]; |
912 | |
913 | /* |
914 | * node_zonelists contains references to all zones in all nodes. |
915 | * Generally the first zones will be references to this node's |
916 | * node_zones. |
917 | */ |
918 | struct zonelist node_zonelists[MAX_ZONELISTS]; |
919 | |
920 | int nr_zones; /* number of populated zones in this node */ |
921 | #ifdef CONFIG_FLATMEM /* means !SPARSEMEM */ |
922 | struct page *node_mem_map; |
923 | #ifdef CONFIG_PAGE_EXTENSION |
924 | struct page_ext *node_page_ext; |
925 | #endif |
926 | #endif |
927 | #if defined(CONFIG_MEMORY_HOTPLUG) || defined(CONFIG_DEFERRED_STRUCT_PAGE_INIT) |
928 | /* |
929 | * Must be held any time you expect node_start_pfn, |
930 | * node_present_pages, node_spanned_pages or nr_zones to stay constant. |
931 | * Also synchronizes pgdat->first_deferred_pfn during deferred page |
932 | * init. |
933 | * |
934 | * pgdat_resize_lock() and pgdat_resize_unlock() are provided to |
935 | * manipulate node_size_lock without checking for CONFIG_MEMORY_HOTPLUG |
936 | * or CONFIG_DEFERRED_STRUCT_PAGE_INIT. |
937 | * |
938 | * Nests above zone->lock and zone->span_seqlock |
939 | */ |
940 | spinlock_t node_size_lock; |
941 | #endif |
942 | unsigned long node_start_pfn; |
943 | unsigned long node_present_pages; /* total number of physical pages */ |
944 | unsigned long node_spanned_pages; /* total size of physical page |
945 | range, including holes */ |
946 | int node_id; |
947 | wait_queue_head_t kswapd_wait; |
948 | wait_queue_head_t pfmemalloc_wait; |
949 | |
950 | /* workqueues for throttling reclaim for different reasons. */ |
951 | wait_queue_head_t reclaim_wait[NR_VMSCAN_THROTTLE]; |
952 | |
953 | atomic_t nr_writeback_throttled;/* nr of writeback-throttled tasks */ |
954 | unsigned long nr_reclaim_start; /* nr pages written while throttled |
955 | * when throttling started. */ |
956 | struct task_struct *kswapd; /* Protected by |
957 | mem_hotplug_begin/done() */ |
958 | int kswapd_order; |
959 | enum zone_type kswapd_highest_zoneidx; |
960 | |
961 | int kswapd_failures; /* Number of 'reclaimed == 0' runs */ |
962 | |
963 | #ifdef CONFIG_COMPACTION |
964 | int kcompactd_max_order; |
965 | enum zone_type kcompactd_highest_zoneidx; |
966 | wait_queue_head_t kcompactd_wait; |
967 | struct task_struct *kcompactd; |
968 | bool proactive_compact_trigger; |
969 | #endif |
970 | /* |
971 | * This is a per-node reserve of pages that are not available |
972 | * to userspace allocations. |
973 | */ |
974 | unsigned long totalreserve_pages; |
975 | |
976 | #ifdef CONFIG_NUMA |
977 | /* |
978 | 	 * Node reclaim becomes active if more than this many unmapped pages exist. |
979 | */ |
980 | unsigned long min_unmapped_pages; |
981 | unsigned long min_slab_pages; |
982 | #endif /* CONFIG_NUMA */ |
983 | |
984 | /* Write-intensive fields used by page reclaim */ |
985 | ZONE_PADDING(_pad1_) |
986 | |
987 | #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT |
988 | /* |
989 | * If memory initialisation on large machines is deferred then this |
990 | * is the first PFN that needs to be initialised. |
991 | */ |
992 | unsigned long first_deferred_pfn; |
993 | #endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */ |
994 | |
995 | #ifdef CONFIG_TRANSPARENT_HUGEPAGE |
996 | struct deferred_split deferred_split_queue; |
997 | #endif |
998 | |
999 | /* Fields commonly accessed by the page reclaim scanner */ |
1000 | |
1001 | /* |
1002 | * NOTE: THIS IS UNUSED IF MEMCG IS ENABLED. |
1003 | * |
1004 | * Use mem_cgroup_lruvec() to look up lruvecs. |
1005 | */ |
1006 | struct lruvec __lruvec; |
1007 | |
1008 | unsigned long flags; |
1009 | |
1010 | ZONE_PADDING(_pad2_) |
1011 | |
1012 | /* Per-node vmstats */ |
1013 | struct per_cpu_nodestat __percpu *per_cpu_nodestats; |
1014 | atomic_long_t vm_stat[NR_VM_NODE_STAT_ITEMS]; |
1015 | } pg_data_t; |
1016 | |
1017 | #define node_present_pages(nid) (NODE_DATA(nid)->node_present_pages) |
1018 | #define node_spanned_pages(nid) (NODE_DATA(nid)->node_spanned_pages) |
1019 | |
1020 | #define node_start_pfn(nid) (NODE_DATA(nid)->node_start_pfn) |
1021 | #define node_end_pfn(nid) pgdat_end_pfn(NODE_DATA(nid)) |
1022 | |
1023 | static inline unsigned long pgdat_end_pfn(pg_data_t *pgdat) |
1024 | { |
1025 | return pgdat->node_start_pfn + pgdat->node_spanned_pages; |
1026 | } |
1027 | |
1028 | static inline bool pgdat_is_empty(pg_data_t *pgdat) |
1029 | { |
1030 | return !pgdat->node_start_pfn && !pgdat->node_spanned_pages; |
1031 | } |
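/*
 * Illustrative sketch (not a kernel helper): a node-level analogue of
 * zone_spans_pfn(), built from pgdat_end_pfn() and pgdat_is_empty()
 * above.
 */
static inline bool example_pgdat_spans_pfn(pg_data_t *pgdat,
					   unsigned long pfn)
{
	return !pgdat_is_empty(pgdat) &&
	       pfn >= pgdat->node_start_pfn &&
	       pfn < pgdat_end_pfn(pgdat);
}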
1032 | |
1033 | #include <linux/memory_hotplug.h> |
1034 | |
1035 | void build_all_zonelists(pg_data_t *pgdat); |
1036 | void wakeup_kswapd(struct zone *zone, gfp_t gfp_mask, int order, |
1037 | enum zone_type highest_zoneidx); |
1038 | bool __zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark, |
1039 | int highest_zoneidx, unsigned int alloc_flags, |
1040 | long free_pages); |
1041 | bool zone_watermark_ok(struct zone *z, unsigned int order, |
1042 | unsigned long mark, int highest_zoneidx, |
1043 | unsigned int alloc_flags); |
1044 | bool zone_watermark_ok_safe(struct zone *z, unsigned int order, |
1045 | unsigned long mark, int highest_zoneidx); |
1046 | /* |
1047 |  * Memory initialization context, used to differentiate memory added by |
1048 |  * the platform statically or via the memory hotplug interface. |
1049 | */ |
1050 | enum meminit_context { |
1051 | MEMINIT_EARLY, |
1052 | MEMINIT_HOTPLUG, |
1053 | }; |
1054 | |
1055 | extern void init_currently_empty_zone(struct zone *zone, unsigned long start_pfn, |
1056 | unsigned long size); |
1057 | |
1058 | extern void lruvec_init(struct lruvec *lruvec); |
1059 | |
1060 | static inline struct pglist_data *lruvec_pgdat(struct lruvec *lruvec) |
1061 | { |
1062 | #ifdef CONFIG_MEMCG |
1063 | return lruvec->pgdat; |
1064 | #else |
1065 | return container_of(lruvec, struct pglist_data, __lruvec); |
1066 | #endif |
1067 | } |
1068 | |
1069 | #ifdef CONFIG_HAVE_MEMORYLESS_NODES |
1070 | int local_memory_node(int node_id); |
1071 | #else |
1072 | static inline int local_memory_node(int node_id) { return node_id; }; |
1073 | #endif |
1074 | |
1075 | /* |
1076 | * zone_idx() returns 0 for the ZONE_DMA zone, 1 for the ZONE_NORMAL zone, etc. |
1077 | */ |
1078 | #define zone_idx(zone) ((zone) - (zone)->zone_pgdat->node_zones) |
1079 | |
1080 | #ifdef CONFIG_ZONE_DEVICE |
1081 | static inline bool zone_is_zone_device(struct zone *zone) |
1082 | { |
1083 | return zone_idx(zone) == ZONE_DEVICE; |
1084 | } |
1085 | #else |
1086 | static inline bool zone_is_zone_device(struct zone *zone) |
1087 | { |
1088 | return false; |
1089 | } |
1090 | #endif |
1091 | |
1092 | /* |
1093 | * Returns true if a zone has pages managed by the buddy allocator. |
1094 | * All the reclaim decisions have to use this function rather than |
1095 | * populated_zone(). If the whole zone is reserved then we can easily |
1096 | * end up with populated_zone() && !managed_zone(). |
1097 | */ |
1098 | static inline bool managed_zone(struct zone *zone) |
1099 | { |
1100 | return zone_managed_pages(zone); |
1101 | } |
1102 | |
1103 | /* Returns true if a zone has memory */ |
1104 | static inline bool populated_zone(struct zone *zone) |
1105 | { |
1106 | return zone->present_pages; |
1107 | } |
1108 | |
1109 | #ifdef CONFIG_NUMA |
1110 | static inline int zone_to_nid(struct zone *zone) |
1111 | { |
1112 | return zone->node; |
1113 | } |
1114 | |
1115 | static inline void zone_set_nid(struct zone *zone, int nid) |
1116 | { |
1117 | zone->node = nid; |
1118 | } |
1119 | #else |
1120 | static inline int zone_to_nid(struct zone *zone) |
1121 | { |
1122 | return 0; |
1123 | } |
1124 | |
1125 | static inline void zone_set_nid(struct zone *zone, int nid) {} |
1126 | #endif |
1127 | |
1128 | extern int movable_zone; |
1129 | |
1130 | static inline int is_highmem_idx(enum zone_type idx) |
1131 | { |
1132 | #ifdef CONFIG_HIGHMEM |
1133 | return (idx == ZONE_HIGHMEM || |
1134 | (idx == ZONE_MOVABLE && movable_zone == ZONE_HIGHMEM)); |
1135 | #else |
1136 | return 0; |
1137 | #endif |
1138 | } |
1139 | |
1140 | /** |
1141 | * is_highmem - helper function to quickly check if a struct zone is a |
1142 | * highmem zone or not. This is an attempt to keep references |
1143 | * to ZONE_{DMA/NORMAL/HIGHMEM/etc} in general code to a minimum. |
1144 | * @zone: pointer to struct zone variable |
1145 | * Return: 1 for a highmem zone, 0 otherwise |
1146 | */ |
1147 | static inline int is_highmem(struct zone *zone) |
1148 | { |
1149 | return is_highmem_idx(zone_idx(zone)); |
1150 | } |
1151 | |
1152 | #ifdef CONFIG_ZONE_DMA |
1153 | bool has_managed_dma(void); |
1154 | #else |
1155 | static inline bool has_managed_dma(void) |
1156 | { |
1157 | return false; |
1158 | } |
1159 | #endif |
1160 | |
1161 | /* These two sysctl handlers are used to set up the per-zone min page values */ |
1162 | struct ctl_table; |
1163 | |
1164 | int min_free_kbytes_sysctl_handler(struct ctl_table *, int, void *, size_t *, |
1165 | loff_t *); |
1166 | int watermark_scale_factor_sysctl_handler(struct ctl_table *, int, void *, |
1167 | size_t *, loff_t *); |
1168 | extern int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES]; |
1169 | int lowmem_reserve_ratio_sysctl_handler(struct ctl_table *, int, void *, |
1170 | size_t *, loff_t *); |
1171 | int percpu_pagelist_high_fraction_sysctl_handler(struct ctl_table *, int, |
1172 | void *, size_t *, loff_t *); |
1173 | int sysctl_min_unmapped_ratio_sysctl_handler(struct ctl_table *, int, |
1174 | void *, size_t *, loff_t *); |
1175 | int sysctl_min_slab_ratio_sysctl_handler(struct ctl_table *, int, |
1176 | void *, size_t *, loff_t *); |
1177 | int numa_zonelist_order_handler(struct ctl_table *, int, |
1178 | void *, size_t *, loff_t *); |
1179 | extern int percpu_pagelist_high_fraction; |
1180 | extern char numa_zonelist_order[]; |
1181 | #define NUMA_ZONELIST_ORDER_LEN 16 |
1182 | |
1183 | #ifndef CONFIG_NUMA |
1184 | |
1185 | extern struct pglist_data contig_page_data; |
1186 | static inline struct pglist_data *NODE_DATA(int nid) |
1187 | { |
1188 | return &contig_page_data; |
1189 | } |
1190 | |
1191 | #else /* CONFIG_NUMA */ |
1192 | |
1193 | #include <asm/mmzone.h> |
1194 | |
1195 | #endif /* !CONFIG_NUMA */ |
1196 | |
1197 | extern struct pglist_data *first_online_pgdat(void); |
1198 | extern struct pglist_data *next_online_pgdat(struct pglist_data *pgdat); |
1199 | extern struct zone *next_zone(struct zone *zone); |
1200 | |
1201 | /** |
1202 | * for_each_online_pgdat - helper macro to iterate over all online nodes |
1203 | * @pgdat: pointer to a pg_data_t variable |
1204 | */ |
1205 | #define for_each_online_pgdat(pgdat) \ |
1206 | for (pgdat = first_online_pgdat(); \ |
1207 | pgdat; \ |
1208 | pgdat = next_online_pgdat(pgdat)) |
1209 | /** |
1210 | * for_each_zone - helper macro to iterate over all memory zones |
1211 | * @zone: pointer to struct zone variable |
1212 | * |
1213 | * The user only needs to declare the zone variable, for_each_zone |
1214 | * fills it in. |
1215 | */ |
1216 | #define for_each_zone(zone) \ |
1217 | for (zone = (first_online_pgdat())->node_zones; \ |
1218 | zone; \ |
1219 | zone = next_zone(zone)) |
1220 | |
1221 | #define for_each_populated_zone(zone) \ |
1222 | for (zone = (first_online_pgdat())->node_zones; \ |
1223 | zone; \ |
1224 | zone = next_zone(zone)) \ |
1225 | if (!populated_zone(zone)) \ |
1226 | ; /* do nothing */ \ |
1227 | else |
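/*
 * Illustrative sketch (not a kernel helper): a typical consumer of the
 * iterators above, summing buddy-managed pages over all populated zones
 * with zone_managed_pages(), which is defined earlier in this header.
 */
static inline unsigned long example_total_managed_pages(void)
{
	struct zone *zone;
	unsigned long total = 0;

	for_each_populated_zone(zone)
		total += zone_managed_pages(zone);

	return total;
}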
1228 | |
1229 | static inline struct zone *zonelist_zone(struct zoneref *zoneref) |
1230 | { |
1231 | return zoneref->zone; |
1232 | } |
1233 | |
1234 | static inline int zonelist_zone_idx(struct zoneref *zoneref) |
1235 | { |
1236 | return zoneref->zone_idx; |
1237 | } |
1238 | |
1239 | static inline int zonelist_node_idx(struct zoneref *zoneref) |
1240 | { |
1241 | return zone_to_nid(zoneref->zone); |
1242 | } |
1243 | |
1244 | struct zoneref *__next_zones_zonelist(struct zoneref *z, |
1245 | enum zone_type highest_zoneidx, |
1246 | nodemask_t *nodes); |
1247 | |
1248 | /** |
1249 | * next_zones_zonelist - Returns the next zone at or below highest_zoneidx within the allowed nodemask using a cursor within a zonelist as a starting point |
1250 | * @z: The cursor used as a starting point for the search |
1251 | * @highest_zoneidx: The zone index of the highest zone to return |
1252 | * @nodes: An optional nodemask to filter the zonelist with |
1253 | * |
1254 | * This function returns the next zone at or below a given zone index that is |
1255 | * within the allowed nodemask using a cursor as the starting point for the |
1256 | * search. The zoneref returned is a cursor that represents the current zone |
1257 | * being examined. It should be advanced by one before calling |
1258 | * next_zones_zonelist again. |
1259 | * |
1260 | * Return: the next zone at or below highest_zoneidx within the allowed |
1261 | * nodemask using a cursor within a zonelist as a starting point |
1262 | */ |
1263 | static __always_inline struct zoneref *next_zones_zonelist(struct zoneref *z, |
1264 | enum zone_type highest_zoneidx, |
1265 | nodemask_t *nodes) |
1266 | { |
1267 | if (likely(!nodes && zonelist_zone_idx(z) <= highest_zoneidx)) |
1268 | return z; |
1269 | return __next_zones_zonelist(z, highest_zoneidx, nodes); |
1270 | } |
1271 | |
1272 | /** |
1273 | * first_zones_zonelist - Returns the first zone at or below highest_zoneidx within the allowed nodemask in a zonelist |
1274 | * @zonelist: The zonelist to search for a suitable zone |
1275 | * @highest_zoneidx: The zone index of the highest zone to return |
1276 | * @nodes: An optional nodemask to filter the zonelist with |
1277 | * |
1278 | * This function returns the first zone at or below a given zone index that is |
1279 | * within the allowed nodemask. The zoneref returned is a cursor that can be |
1280 | * used to iterate the zonelist with next_zones_zonelist by advancing it by |
1281 | * one before calling. |
1282 | * |
1283 | * When no eligible zone is found, zoneref->zone is NULL (zoneref itself is |
1284 |  * never NULL). This may happen either genuinely, or due to a concurrent |
1285 |  * nodemask update caused by a cpuset modification. |
1286 | * |
1287 | * Return: Zoneref pointer for the first suitable zone found |
1288 | */ |
1289 | static inline struct zoneref *first_zones_zonelist(struct zonelist *zonelist, |
1290 | enum zone_type highest_zoneidx, |
1291 | nodemask_t *nodes) |
1292 | { |
1293 | return next_zones_zonelist(zonelist->_zonerefs, |
1294 | highest_zoneidx, nodes); |
1295 | } |
1296 | |
1297 | /** |
1298 | * for_each_zone_zonelist_nodemask - helper macro to iterate over valid zones in a zonelist at or below a given zone index and within a nodemask |
1299 | * @zone: The current zone in the iterator |
1300 | * @z: The current pointer within zonelist->_zonerefs being iterated |
1301 | * @zlist: The zonelist being iterated |
1302 | * @highidx: The zone index of the highest zone to return |
1303 | * @nodemask: Nodemask allowed by the allocator |
1304 | * |
1305 |  * This iterator iterates through all zones at or below a given zone index and |
1306 |  * within a given nodemask. |
1307 | */ |
1308 | #define for_each_zone_zonelist_nodemask(zone, z, zlist, highidx, nodemask) \ |
1309 | for (z = first_zones_zonelist(zlist, highidx, nodemask), zone = zonelist_zone(z); \ |
1310 | zone; \ |
1311 | z = next_zones_zonelist(++z, highidx, nodemask), \ |
1312 | zone = zonelist_zone(z)) |
1313 | |
1314 | #define for_next_zone_zonelist_nodemask(zone, z, highidx, nodemask) \ |
1315 | for (zone = z->zone; \ |
1316 | zone; \ |
1317 | z = next_zones_zonelist(++z, highidx, nodemask), \ |
1318 | zone = zonelist_zone(z)) |
1319 | |
1320 | |
1321 | /** |
1322 | * for_each_zone_zonelist - helper macro to iterate over valid zones in a zonelist at or below a given zone index |
1323 | * @zone: The current zone in the iterator |
1324 | * @z: The current pointer within zonelist->zones being iterated |
1325 | * @zlist: The zonelist being iterated |
1326 | * @highidx: The zone index of the highest zone to return |
1327 | * |
1328 |  * This iterator iterates through all zones at or below a given zone index. |
1329 | */ |
1330 | #define for_each_zone_zonelist(zone, z, zlist, highidx) \ |
1331 | for_each_zone_zonelist_nodemask(zone, z, zlist, highidx, NULL) |
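/*
 * Illustrative sketch (not a kernel helper): walk a node's fallback
 * zonelist the way the page allocator does and return the first zone at
 * or below @highidx that has buddy-managed pages.
 */
static inline struct zone *example_first_managed_zone(int nid,
						      enum zone_type highidx)
{
	struct zonelist *zonelist =
		&NODE_DATA(nid)->node_zonelists[ZONELIST_FALLBACK];
	struct zoneref *z;
	struct zone *zone;

	for_each_zone_zonelist(zone, z, zonelist, highidx)
		if (managed_zone(zone))
			return zone;

	return NULL;
}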
1332 | |
1333 | /* Whether the 'nodes' are all movable nodes */ |
1334 | static inline bool movable_only_nodes(nodemask_t *nodes) |
1335 | { |
1336 | struct zonelist *zonelist; |
1337 | struct zoneref *z; |
1338 | int nid; |
1339 | |
1340 | if (nodes_empty(*nodes)) |
1341 | return false; |
1342 | |
1343 | /* |
1344 | 	 * We can choose an arbitrary node from the nodemask to get a |
1345 | * zonelist as they are interlinked. We just need to find |
1346 | * at least one zone that can satisfy kernel allocations. |
1347 | */ |
1348 | nid = first_node(*nodes); |
1349 | zonelist = &NODE_DATA(nid)->node_zonelists[ZONELIST_FALLBACK]; |
1350 | z = first_zones_zonelist(zonelist, ZONE_NORMAL, nodes); |
1351 | 	return !z->zone; |
1352 | } |
1353 | |
1354 | |
1355 | #ifdef CONFIG_SPARSEMEM |
1356 | #include <asm/sparsemem.h> |
1357 | #endif |
1358 | |
1359 | #ifdef CONFIG_FLATMEM |
1360 | #define pfn_to_nid(pfn) (0) |
1361 | #endif |
1362 | |
1363 | #ifdef CONFIG_SPARSEMEM |
1364 | |
1365 | /* |
1366 | * PA_SECTION_SHIFT physical address to/from section number |
1367 | * PFN_SECTION_SHIFT pfn to/from section number |
1368 | */ |
1369 | #define PA_SECTION_SHIFT (SECTION_SIZE_BITS) |
1370 | #define PFN_SECTION_SHIFT (SECTION_SIZE_BITS - PAGE_SHIFT) |
1371 | |
1372 | #define NR_MEM_SECTIONS (1UL << SECTIONS_SHIFT) |
1373 | |
1374 | #define PAGES_PER_SECTION (1UL << PFN_SECTION_SHIFT) |
1375 | #define PAGE_SECTION_MASK (~(PAGES_PER_SECTION-1)) |
1376 | |
1377 | #define SECTION_BLOCKFLAGS_BITS \ |
1378 | ((1UL << (PFN_SECTION_SHIFT - pageblock_order)) * NR_PAGEBLOCK_BITS) |
1379 | |
1380 | #if (MAX_ORDER - 1 + PAGE_SHIFT) > SECTION_SIZE_BITS |
1381 | #error Allocator MAX_ORDER exceeds SECTION_SIZE |
1382 | #endif |
1383 | |
1384 | static inline unsigned long pfn_to_section_nr(unsigned long pfn) |
1385 | { |
1386 | return pfn >> PFN_SECTION_SHIFT; |
1387 | } |
1388 | static inline unsigned long section_nr_to_pfn(unsigned long sec) |
1389 | { |
1390 | return sec << PFN_SECTION_SHIFT; |
1391 | } |
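/*
 * Illustrative sketch (not a kernel helper): the two converters above
 * round-trip exactly when a pfn is section-aligned. For example, with
 * 128MB sections and 4K pages (PFN_SECTION_SHIFT == 15), pfn 0x48000 is
 * in section 9 and that section starts at pfn 9 << 15 == 0x48000.
 */
static inline bool example_pfn_is_section_aligned(unsigned long pfn)
{
	return pfn == section_nr_to_pfn(pfn_to_section_nr(pfn));
}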
1392 | |
1393 | #define SECTION_ALIGN_UP(pfn) (((pfn) + PAGES_PER_SECTION - 1) & PAGE_SECTION_MASK) |
1394 | #define SECTION_ALIGN_DOWN(pfn) ((pfn) & PAGE_SECTION_MASK) |
1395 | |
1396 | #define SUBSECTION_SHIFT 21 |
1397 | #define SUBSECTION_SIZE (1UL << SUBSECTION_SHIFT) |
1398 | |
1399 | #define PFN_SUBSECTION_SHIFT (SUBSECTION_SHIFT - PAGE_SHIFT) |
1400 | #define PAGES_PER_SUBSECTION (1UL << PFN_SUBSECTION_SHIFT) |
1401 | #define PAGE_SUBSECTION_MASK (~(PAGES_PER_SUBSECTION-1)) |
1402 | |
1403 | #if SUBSECTION_SHIFT > SECTION_SIZE_BITS |
1404 | #error Subsection size exceeds section size |
1405 | #else |
1406 | #define SUBSECTIONS_PER_SECTION (1UL << (SECTION_SIZE_BITS - SUBSECTION_SHIFT)) |
1407 | #endif |
1408 | |
1409 | #define SUBSECTION_ALIGN_UP(pfn) ALIGN((pfn), PAGES_PER_SUBSECTION) |
1410 | #define SUBSECTION_ALIGN_DOWN(pfn) ((pfn) & PAGE_SUBSECTION_MASK) |
1411 | |
1412 | struct mem_section_usage { |
1413 | #ifdef CONFIG_SPARSEMEM_VMEMMAP |
1414 | DECLARE_BITMAP(subsection_map, SUBSECTIONS_PER_SECTION); |
1415 | #endif |
1416 | /* See declaration of similar field in struct zone */ |
1417 | unsigned long pageblock_flags[0]; |
1418 | }; |
1419 | |
1420 | void subsection_map_init(unsigned long pfn, unsigned long nr_pages); |
1421 | |
1422 | struct page; |
1423 | struct page_ext; |
1424 | struct mem_section { |
1425 | /* |
1426 | * This is, logically, a pointer to an array of struct |
1427 | * pages. However, it is stored with some other magic. |
1428 | * (see sparse.c::sparse_init_one_section()) |
1429 | * |
1430 | * Additionally during early boot we encode node id of |
1431 | * the location of the section here to guide allocation. |
1432 | * (see sparse.c::memory_present()) |
1433 | * |
1434 | * Making it a UL at least makes someone do a cast |
1435 | * before using it wrong. |
1436 | */ |
1437 | unsigned long section_mem_map; |
1438 | |
1439 | struct mem_section_usage *usage; |
1440 | #ifdef CONFIG_PAGE_EXTENSION |
1441 | /* |
1442 | 	 * With SPARSEMEM, pgdat doesn't have a page_ext pointer; the one in |
1443 | 	 * struct mem_section is used instead. (see page_ext.h about this.) |
	 */
	struct page_ext *page_ext;
	unsigned long pad;
#endif
	/*
	 * WARNING: mem_section must be a power-of-2 in size for the
	 * calculation and use of SECTION_ROOT_MASK to make sense.
	 */
};

#ifdef CONFIG_SPARSEMEM_EXTREME
#define SECTIONS_PER_ROOT	(PAGE_SIZE / sizeof(struct mem_section))
#else
#define SECTIONS_PER_ROOT	1
#endif

#define SECTION_NR_TO_ROOT(sec)	((sec) / SECTIONS_PER_ROOT)
#define NR_SECTION_ROOTS	DIV_ROUND_UP(NR_MEM_SECTIONS, SECTIONS_PER_ROOT)
#define SECTION_ROOT_MASK	(SECTIONS_PER_ROOT - 1)

#ifdef CONFIG_SPARSEMEM_EXTREME
extern struct mem_section **mem_section;
#else
extern struct mem_section mem_section[NR_SECTION_ROOTS][SECTIONS_PER_ROOT];
#endif

static inline unsigned long *section_to_usemap(struct mem_section *ms)
{
	return ms->usage->pageblock_flags;
}

static inline struct mem_section *__nr_to_section(unsigned long nr)
{
	unsigned long root = SECTION_NR_TO_ROOT(nr);

	if (unlikely(root >= NR_SECTION_ROOTS))
		return NULL;

#ifdef CONFIG_SPARSEMEM_EXTREME
	if (!mem_section || !mem_section[root])
		return NULL;
#endif
	return &mem_section[root][nr & SECTION_ROOT_MASK];
}
extern size_t mem_section_usage_size(void);
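
/*
 * Illustrative sketch, not part of the original header: the trailing
 * pageblock_flags[] storage is not sized by struct mem_section_usage itself;
 * the allocation code in mm/sparse.c sizes it with mem_section_usage_size(),
 * and the flags are then reached through section_to_usemap(). The helper
 * below is hypothetical and only shows that relationship.
 */
static inline unsigned long *__example_usemap_of(unsigned long section_nr)
{
	struct mem_section *ms = __nr_to_section(section_nr);

	return ms ? section_to_usemap(ms) : NULL;
}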

/*
 * We use the lower bits of the mem_map pointer to store a little bit of
 * information. The pointer is calculated as mem_map - section_nr_to_pfn(pnum),
 * so the result is aligned to the smaller of the two alignments:
 * 1. All mem_map arrays are page-aligned.
 * 2. section_nr_to_pfn() always clears the PFN_SECTION_SHIFT lowest bits.
 *    PFN_SECTION_SHIFT is arch-specific (it equals
 *    SECTION_SIZE_BITS - PAGE_SHIFT); the worst case is powerpc with 256k
 *    pages, where PFN_SECTION_SHIFT is 6.
 * In short, at least 6 bits are available on all architectures. More bits
 * can be used on other architectures (e.g. 15 bits on x86_64, or 13 bits in
 * the worst case of 64K pages on arm64), as long as any bit beyond the
 * sixth is never relied upon on powerpc.
 */
enum {
	SECTION_MARKED_PRESENT_BIT,
	SECTION_HAS_MEM_MAP_BIT,
	SECTION_IS_ONLINE_BIT,
	SECTION_IS_EARLY_BIT,
#ifdef CONFIG_ZONE_DEVICE
	SECTION_TAINT_ZONE_DEVICE_BIT,
#endif
	SECTION_MAP_LAST_BIT,
};

#define SECTION_MARKED_PRESENT		BIT(SECTION_MARKED_PRESENT_BIT)
#define SECTION_HAS_MEM_MAP		BIT(SECTION_HAS_MEM_MAP_BIT)
#define SECTION_IS_ONLINE		BIT(SECTION_IS_ONLINE_BIT)
#define SECTION_IS_EARLY		BIT(SECTION_IS_EARLY_BIT)
#ifdef CONFIG_ZONE_DEVICE
#define SECTION_TAINT_ZONE_DEVICE	BIT(SECTION_TAINT_ZONE_DEVICE_BIT)
#endif
#define SECTION_MAP_MASK		(~(BIT(SECTION_MAP_LAST_BIT) - 1))
#define SECTION_NID_SHIFT		SECTION_MAP_LAST_BIT
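
/*
 * Illustrative sketch, not part of the original header: during early boot
 * the node id is kept in the bits above SECTION_NID_SHIFT, mirroring the
 * encode/decode done in mm/sparse.c. The helper names below are
 * hypothetical.
 */
static inline unsigned long __example_encode_early_nid(int nid)
{
	return (unsigned long)nid << SECTION_NID_SHIFT;
}

static inline int __example_decode_early_nid(struct mem_section *section)
{
	return (int)(section->section_mem_map >> SECTION_NID_SHIFT);
}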

static inline struct page *__section_mem_map_addr(struct mem_section *section)
{
	unsigned long map = section->section_mem_map;
	map &= SECTION_MAP_MASK;
	return (struct page *)map;
}
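
/*
 * Illustrative sketch, not part of the original header: because the stored
 * pointer is biased by -section_nr_to_pfn(pnum), indexing it with the full
 * pfn yields the right struct page. This mirrors what the classic-SPARSEMEM
 * __pfn_to_page() in asm-generic/memory_model.h does; the helper name below
 * is hypothetical.
 */
static inline struct page *__example_sparse_pfn_to_page(unsigned long pfn)
{
	struct mem_section *ms = __nr_to_section(pfn_to_section_nr(pfn));

	return __section_mem_map_addr(ms) + pfn;
}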

static inline int present_section(struct mem_section *section)
{
	return (section && (section->section_mem_map & SECTION_MARKED_PRESENT));
}

static inline int present_section_nr(unsigned long nr)
{
	return present_section(__nr_to_section(nr));
}

static inline int valid_section(struct mem_section *section)
{
	return (section && (section->section_mem_map & SECTION_HAS_MEM_MAP));
}

static inline int early_section(struct mem_section *section)
{
	return (section && (section->section_mem_map & SECTION_IS_EARLY));
}

static inline int valid_section_nr(unsigned long nr)
{
	return valid_section(__nr_to_section(nr));
}

static inline int online_section(struct mem_section *section)
{
	return (section && (section->section_mem_map & SECTION_IS_ONLINE));
}

#ifdef CONFIG_ZONE_DEVICE
static inline int online_device_section(struct mem_section *section)
{
	unsigned long flags = SECTION_IS_ONLINE | SECTION_TAINT_ZONE_DEVICE;

	return section && ((section->section_mem_map & flags) == flags);
}
#else
static inline int online_device_section(struct mem_section *section)
{
	return 0;
}
#endif

static inline int online_section_nr(unsigned long nr)
{
	return online_section(__nr_to_section(nr));
}

#ifdef CONFIG_MEMORY_HOTPLUG
void online_mem_sections(unsigned long start_pfn, unsigned long end_pfn);
void offline_mem_sections(unsigned long start_pfn, unsigned long end_pfn);
#endif

static inline struct mem_section *__pfn_to_section(unsigned long pfn)
{
	return __nr_to_section(pfn_to_section_nr(pfn));
}

extern unsigned long __highest_present_section_nr;

static inline int subsection_map_index(unsigned long pfn)
{
	return (pfn & ~(PAGE_SECTION_MASK)) / PAGES_PER_SUBSECTION;
}
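
/*
 * Illustrative sketch, not part of the original header: with 4K pages on
 * x86_64 (PAGES_PER_SECTION == 32768, PAGES_PER_SUBSECTION == 512), a pfn
 * whose offset within its section is 1536 lands in subsection index 3. The
 * helper name below is hypothetical.
 */
static inline int __example_subsection_index_check(void)
{
	unsigned long pfn = PAGES_PER_SECTION + 1536;	/* offset 1536 in section 1 */

	return subsection_map_index(pfn);		/* 3 */
}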

#ifdef CONFIG_SPARSEMEM_VMEMMAP
static inline int pfn_section_valid(struct mem_section *ms, unsigned long pfn)
{
	int idx = subsection_map_index(pfn);

	return test_bit(idx, ms->usage->subsection_map);
}
#else
static inline int pfn_section_valid(struct mem_section *ms, unsigned long pfn)
{
	return 1;
}
#endif

#ifndef CONFIG_HAVE_ARCH_PFN_VALID
/**
 * pfn_valid - check if there is a valid memory map entry for a PFN
 * @pfn: the page frame number to check
 *
 * Check if there is a valid memory map entry, i.e. a struct page, for @pfn.
 * Note that the availability of a memory map entry does not imply that
 * there is actual usable memory at that @pfn. The struct page may
 * represent a hole or an unusable page frame.
 *
 * Return: 1 for PFNs that have memory map entries and 0 otherwise
 */
static inline int pfn_valid(unsigned long pfn)
{
	struct mem_section *ms;

	/*
	 * Ensure the upper PAGE_SHIFT bits are clear in the pfn;
	 * otherwise a pfn with some upper bits set but lower bits
	 * matching a valid pfn would be a false positive.
	 */
	if (PHYS_PFN(PFN_PHYS(pfn)) != pfn)
		return 0;

	if (pfn_to_section_nr(pfn) >= NR_MEM_SECTIONS)
		return 0;
	ms = __pfn_to_section(pfn);
	if (!valid_section(ms))
		return 0;
	/*
	 * Traditionally early sections always returned pfn_valid() for
	 * the entire section-sized span.
	 */
	return early_section(ms) || pfn_section_valid(ms, pfn);
}
#endif
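
/*
 * Illustrative sketch, not part of the original header: pfn_valid() only
 * guarantees that a struct page exists for the pfn, so it is the check that
 * must pass before pfn_to_page() is used; it says nothing about whether the
 * frame is usable RAM. The helper name below is hypothetical.
 */
static inline struct page *__example_pfn_to_page_checked(unsigned long pfn)
{
	if (!pfn_valid(pfn))
		return NULL;

	return pfn_to_page(pfn);
}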

static inline int pfn_in_present_section(unsigned long pfn)
{
	if (pfn_to_section_nr(pfn) >= NR_MEM_SECTIONS)
		return 0;
	return present_section(__pfn_to_section(pfn));
}

static inline unsigned long next_present_section_nr(unsigned long section_nr)
{
	while (++section_nr <= __highest_present_section_nr) {
		if (present_section_nr(section_nr))
			return section_nr;
	}

	return -1;
}
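
/*
 * Illustrative sketch, not part of the original header: walking every
 * present section with next_present_section_nr(). Passing (unsigned long)-1
 * makes the first increment wrap to 0, and the walk ends when the helper
 * returns -1 (i.e. ULONG_MAX). The helper name below is hypothetical.
 */
static inline unsigned long __example_count_present_sections(void)
{
	unsigned long nr, count = 0;

	for (nr = next_present_section_nr((unsigned long)-1);
	     nr != (unsigned long)-1;
	     nr = next_present_section_nr(nr))
		count++;

	return count;
}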

/*
 * These are _only_ used during initialisation, therefore they
 * can use __initdata ... They could have names to indicate
 * this restriction.
 */
#ifdef CONFIG_NUMA
#define pfn_to_nid(pfn) \
({ \
	unsigned long __pfn_to_nid_pfn = (pfn); \
	page_to_nid(pfn_to_page(__pfn_to_nid_pfn)); \
})
#else
#define pfn_to_nid(pfn) (0)
#endif

void sparse_init(void);
#else
#define sparse_init() do {} while (0)
#define sparse_index_init(_sec, _nid) do {} while (0)
#define pfn_in_present_section pfn_valid
#define subsection_map_init(_pfn, _nr_pages) do {} while (0)
#endif /* CONFIG_SPARSEMEM */

#endif /* !__GENERATING_BOUNDS_H */
#endif /* !__ASSEMBLY__ */
#endif /* _LINUX_MMZONE_H */