/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_MMZONE_H
#define _LINUX_MMZONE_H

#ifndef __ASSEMBLY__
#ifndef __GENERATING_BOUNDS_H

#include <linux/android_kabi.h>
#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/list_nulls.h>
#include <linux/wait.h>
#include <linux/bitops.h>
#include <linux/cache.h>
#include <linux/threads.h>
#include <linux/numa.h>
#include <linux/init.h>
#include <linux/seqlock.h>
#include <linux/nodemask.h>
#include <linux/pageblock-flags.h>
#include <linux/page-flags-layout.h>
#include <linux/atomic.h>
#include <linux/mm_types.h>
#include <linux/page-flags.h>
#include <linux/local_lock.h>
#include <linux/zswap.h>
#include <asm/page.h>

/* Free memory management - zoned buddy allocator. */
#ifndef CONFIG_ARCH_FORCE_MAX_ORDER
#define MAX_PAGE_ORDER 10
#else
#define MAX_PAGE_ORDER CONFIG_ARCH_FORCE_MAX_ORDER
#endif
#define MAX_ORDER_NR_PAGES (1 << MAX_PAGE_ORDER)

#define IS_MAX_ORDER_ALIGNED(pfn) IS_ALIGNED(pfn, MAX_ORDER_NR_PAGES)

#define NR_PAGE_ORDERS (MAX_PAGE_ORDER + 1)
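
/*
 * For example, with the default MAX_PAGE_ORDER of 10 and 4 KiB pages, the
 * largest block the buddy allocator hands out is MAX_ORDER_NR_PAGES =
 * 1 << 10 = 1024 pages, i.e. 4 MiB, and per-order arrays such as
 * zone->free_area[] below have NR_PAGE_ORDERS = 11 entries (orders 0..10).
 */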

/* Defines the order for the number of pages that have a migrate type. */
#ifndef CONFIG_PAGE_BLOCK_ORDER
#define PAGE_BLOCK_ORDER MAX_PAGE_ORDER
#else
#define PAGE_BLOCK_ORDER CONFIG_PAGE_BLOCK_ORDER
#endif /* CONFIG_PAGE_BLOCK_ORDER */

/*
 * MAX_PAGE_ORDER, which defines the maximum order of pages allocated by the
 * buddy allocator, must be greater than or equal to PAGE_BLOCK_ORDER, which
 * defines the order for the number of pages that can have a migrate type.
 */
#if (PAGE_BLOCK_ORDER > MAX_PAGE_ORDER)
#error MAX_PAGE_ORDER must be >= PAGE_BLOCK_ORDER
#endif

/*
 * PAGE_ALLOC_COSTLY_ORDER is the order at which allocations are deemed
 * costly to service. That is between allocation orders which should
 * coalesce naturally under reasonable reclaim pressure and those which
 * will not.
 */
#define PAGE_ALLOC_COSTLY_ORDER 3

enum migratetype {
	MIGRATE_UNMOVABLE,
	MIGRATE_MOVABLE,
	MIGRATE_RECLAIMABLE,
	/* the number of types that have fallbacks */
	MIGRATE_FALLBACKS,
#ifdef CONFIG_CMA
	/*
	 * The MIGRATE_CMA migration type is designed to mimic the way
	 * ZONE_MOVABLE works. Only movable pages can be allocated from
	 * MIGRATE_CMA pageblocks, and the page allocator never implicitly
	 * changes the migration type of a MIGRATE_CMA pageblock.
	 *
	 * The way to use it is to change the migratetype of a range of
	 * pageblocks to MIGRATE_CMA, which can be done with the
	 * __free_pageblock_cma() function.
	 */
	MIGRATE_CMA = MIGRATE_FALLBACKS,
	MIGRATE_PCPTYPES,
#else
	/* the number of types on the pcp lists */
	MIGRATE_PCPTYPES = MIGRATE_FALLBACKS,
#endif
	MIGRATE_HIGHATOMIC = MIGRATE_PCPTYPES,
#ifdef CONFIG_MEMORY_ISOLATION
	MIGRATE_ISOLATE,	/* can't allocate from here */
#endif
	MIGRATE_TYPES
};

/* In mm/page_alloc.c; keep in sync also with show_migration_types() there */
extern const char * const migratetype_names[MIGRATE_TYPES];

#ifdef CONFIG_CMA
#  define is_migrate_cma(migratetype) unlikely((migratetype) == MIGRATE_CMA)
#  define is_migrate_cma_page(_page) (get_pageblock_migratetype(_page) == MIGRATE_CMA)
#  define is_migrate_cma_folio(folio, pfn)	(MIGRATE_CMA ==		\
	get_pfnblock_flags_mask(&folio->page, pfn, MIGRATETYPE_MASK))
#  define get_cma_migrate_type() MIGRATE_CMA
#else
#  define is_migrate_cma(migratetype) false
#  define is_migrate_cma_page(_page) false
#  define is_migrate_cma_folio(folio, pfn) false
#  define get_cma_migrate_type() MIGRATE_MOVABLE
#endif

static inline bool is_migrate_movable(int mt)
{
	return is_migrate_cma(mt) || mt == MIGRATE_MOVABLE;
}

/*
 * Check whether a migratetype can be merged with another migratetype.
 *
 * It is only mergeable when it can fall back to other migratetypes for
 * allocation. See fallbacks[MIGRATE_TYPES][3] in page_alloc.c.
 */
static inline bool migratetype_is_mergeable(int mt)
{
	return mt < MIGRATE_FALLBACKS;
}

#define for_each_migratetype_order(order, type) \
	for (order = 0; order < NR_PAGE_ORDERS; order++) \
		for (type = 0; type < MIGRATE_TYPES; type++)

extern int page_group_by_mobility_disabled;

#define MIGRATETYPE_MASK ((1UL << PB_migratetype_bits) - 1)

#define get_pageblock_migratetype(page) \
	get_pfnblock_flags_mask(page, page_to_pfn(page), MIGRATETYPE_MASK)

#define folio_migratetype(folio) \
	get_pfnblock_flags_mask(&folio->page, folio_pfn(folio), \
			MIGRATETYPE_MASK)
struct free_area {
	struct list_head free_list[MIGRATE_TYPES];
	unsigned long nr_free;
};
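
/*
 * Usage sketch (illustrative only): for_each_migratetype_order() visits
 * every (order, migratetype) pair, e.g. to inspect the free lists of a
 * zone. The caller is assumed to hold zone->lock, which protects the lists.
 *
 *	unsigned int order;
 *	int type;
 *
 *	for_each_migratetype_order(order, type)
 *		pr_info("order %u, %s: %s\n", order, migratetype_names[type],
 *			list_empty(&zone->free_area[order].free_list[type]) ?
 *			"empty" : "has free blocks");
 */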

struct pglist_data;

#ifdef CONFIG_NUMA
enum numa_stat_item {
	NUMA_HIT,		/* allocated in intended node */
	NUMA_MISS,		/* allocated in non intended node */
	NUMA_FOREIGN,		/* was intended here, hit elsewhere */
	NUMA_INTERLEAVE_HIT,	/* interleaver preferred this zone */
	NUMA_LOCAL,		/* allocation from local node */
	NUMA_OTHER,		/* allocation from other node */
	NR_VM_NUMA_EVENT_ITEMS
};
#else
#define NR_VM_NUMA_EVENT_ITEMS 0
#endif

enum zone_stat_item {
	/* First 128 byte cacheline (assuming 64 bit words) */
	NR_FREE_PAGES,
	NR_ZONE_LRU_BASE,	/* Used only for compaction and reclaim retry */
	NR_ZONE_INACTIVE_ANON = NR_ZONE_LRU_BASE,
	NR_ZONE_ACTIVE_ANON,
	NR_ZONE_INACTIVE_FILE,
	NR_ZONE_ACTIVE_FILE,
	NR_ZONE_UNEVICTABLE,
	NR_ZONE_WRITE_PENDING,	/* Count of dirty, writeback and unstable pages */
	NR_MLOCK,		/* mlock()ed pages found and moved off LRU */
	/* Second 128 byte cacheline */
	NR_BOUNCE,
	NR_ZSPAGES,		/* allocated in zsmalloc */
	NR_FREE_CMA_PAGES,
#ifdef CONFIG_UNACCEPTED_MEMORY
	NR_UNACCEPTED,
#endif
	NR_VM_ZONE_STAT_ITEMS };

enum node_stat_item {
	NR_LRU_BASE,
	NR_INACTIVE_ANON = NR_LRU_BASE, /* must match order of LRU_[IN]ACTIVE */
	NR_ACTIVE_ANON,		/*  "     "     "   "       "         */
	NR_INACTIVE_FILE,	/*  "     "     "   "       "         */
	NR_ACTIVE_FILE,		/*  "     "     "   "       "         */
	NR_UNEVICTABLE,		/*  "     "     "   "       "         */
	NR_SLAB_RECLAIMABLE_B,
	NR_SLAB_UNRECLAIMABLE_B,
	NR_ISOLATED_ANON,	/* Temporary isolated pages from anon lru */
	NR_ISOLATED_FILE,	/* Temporary isolated pages from file lru */
	WORKINGSET_NODES,
	WORKINGSET_REFAULT_BASE,
	WORKINGSET_REFAULT_ANON = WORKINGSET_REFAULT_BASE,
	WORKINGSET_REFAULT_FILE,
	WORKINGSET_ACTIVATE_BASE,
	WORKINGSET_ACTIVATE_ANON = WORKINGSET_ACTIVATE_BASE,
	WORKINGSET_ACTIVATE_FILE,
	WORKINGSET_RESTORE_BASE,
	WORKINGSET_RESTORE_ANON = WORKINGSET_RESTORE_BASE,
	WORKINGSET_RESTORE_FILE,
	WORKINGSET_NODERECLAIM,
	NR_ANON_MAPPED,		/* Mapped anonymous pages */
	NR_FILE_MAPPED,		/* pagecache pages mapped into pagetables.
				   only modified from process context */
	NR_FILE_PAGES,
	NR_FILE_DIRTY,
	NR_WRITEBACK,
	NR_WRITEBACK_TEMP,	/* Writeback using temporary buffers */
	NR_SHMEM,		/* shmem pages (includes tmpfs/GEM pages) */
	NR_SHMEM_THPS,
	NR_SHMEM_PMDMAPPED,
	NR_FILE_THPS,
	NR_FILE_PMDMAPPED,
	NR_ANON_THPS,
	NR_VMSCAN_WRITE,
	NR_VMSCAN_IMMEDIATE,	/* Prioritise for reclaim when writeback ends */
	NR_DIRTIED,		/* page dirtyings since bootup */
	NR_WRITTEN,		/* page writings since bootup */
	NR_THROTTLED_WRITTEN,	/* NR_WRITTEN while reclaim throttled */
	NR_KERNEL_MISC_RECLAIMABLE,	/* reclaimable non-slab kernel pages */
	NR_FOLL_PIN_ACQUIRED,	/* via: pin_user_page(), gup flag: FOLL_PIN */
	NR_FOLL_PIN_RELEASED,	/* pages returned via unpin_user_page() */
	NR_KERNEL_STACK_KB,	/* measured in KiB */
#if IS_ENABLED(CONFIG_SHADOW_CALL_STACK)
	NR_KERNEL_SCS_KB,	/* measured in KiB */
#endif
	NR_PAGETABLE,		/* used for pagetables */
	NR_SECONDARY_PAGETABLE,	/* secondary pagetables, KVM & IOMMU */
#ifdef CONFIG_IOMMU_SUPPORT
	NR_IOMMU_PAGES,		/* # of pages allocated by IOMMU */
#endif
#ifdef CONFIG_SWAP
	NR_SWAPCACHE,
#endif
#ifdef CONFIG_NUMA_BALANCING
	PGPROMOTE_SUCCESS,	/* pages promoted successfully */
	PGPROMOTE_CANDIDATE,	/* candidate pages to promote */
#endif
	/* PGDEMOTE_*: pages demoted */
	PGDEMOTE_KSWAPD,
	PGDEMOTE_DIRECT,
	PGDEMOTE_KHUGEPAGED,
	NR_VM_NODE_STAT_ITEMS
};

/*
 * Returns true if the item should be printed in THPs (/proc/vmstat
 * currently prints the number of anon, file and shmem THPs, but the
 * item is charged in pages).
 */
static __always_inline bool vmstat_item_print_in_thp(enum node_stat_item item)
{
	if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE))
		return false;

	return item == NR_ANON_THPS ||
	       item == NR_FILE_THPS ||
	       item == NR_SHMEM_THPS ||
	       item == NR_SHMEM_PMDMAPPED ||
	       item == NR_FILE_PMDMAPPED;
}

/*
 * Returns true if the value is measured in bytes (most vmstat values are
 * measured in pages). This defines the API part, the internal representation
 * might be different.
 */
static __always_inline bool vmstat_item_in_bytes(int idx)
{
	/*
	 * Global and per-node slab counters track slab pages.
	 * It's expected that changes are multiples of PAGE_SIZE.
	 * Internally values are stored in pages.
	 *
	 * Per-memcg and per-lruvec counters track memory, consumed
	 * by individual slab objects. These counters are actually
	 * byte-precise.
	 */
	return (idx == NR_SLAB_RECLAIMABLE_B ||
		idx == NR_SLAB_UNRECLAIMABLE_B);
}
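
/*
 * For example, with 4 KiB pages a byte-precise reading of
 * NR_SLAB_RECLAIMABLE_B such as 8388608 (8 MiB) corresponds to
 * 8388608 >> PAGE_SHIFT = 2048 slab pages; callers that want pages from a
 * byte counter are expected to do this conversion themselves.
 */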

/*
 * We do arithmetic on the LRU lists in various places in the code,
 * so it is important to keep the active lists LRU_ACTIVE higher in
 * the array than the corresponding inactive lists, and to keep
 * the *_FILE lists LRU_FILE higher than the corresponding _ANON lists.
 *
 * This has to be kept in sync with the statistics in zone_stat_item
 * above and the descriptions in vmstat_text in mm/vmstat.c
 */
#define LRU_BASE 0
#define LRU_ACTIVE 1
#define LRU_FILE 2

enum lru_list {
	LRU_INACTIVE_ANON = LRU_BASE,
	LRU_ACTIVE_ANON = LRU_BASE + LRU_ACTIVE,
	LRU_INACTIVE_FILE = LRU_BASE + LRU_FILE,
	LRU_ACTIVE_FILE = LRU_BASE + LRU_FILE + LRU_ACTIVE,
	LRU_UNEVICTABLE,
	NR_LRU_LISTS
};
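
/*
 * The encoding above lets an LRU index be computed rather than looked up,
 * e.g. (a sketch of the arithmetic, not a helper defined here):
 *
 *	enum lru_list lru = LRU_BASE + is_file * LRU_FILE +
 *			    is_active * LRU_ACTIVE;
 *
 * so (is_file=1, is_active=0) yields 0 + 2 + 0 = LRU_INACTIVE_FILE.
 */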

enum vmscan_throttle_state {
	VMSCAN_THROTTLE_WRITEBACK,
	VMSCAN_THROTTLE_ISOLATED,
	VMSCAN_THROTTLE_NOPROGRESS,
	VMSCAN_THROTTLE_CONGESTED,
	NR_VMSCAN_THROTTLE,
};

#define for_each_lru(lru) for (lru = 0; lru < NR_LRU_LISTS; lru++)

#define for_each_evictable_lru(lru) for (lru = 0; lru <= LRU_ACTIVE_FILE; lru++)

static inline bool is_file_lru(enum lru_list lru)
{
	return (lru == LRU_INACTIVE_FILE || lru == LRU_ACTIVE_FILE);
}

static inline bool is_active_lru(enum lru_list lru)
{
	return (lru == LRU_ACTIVE_ANON || lru == LRU_ACTIVE_FILE);
}

#define WORKINGSET_ANON 0
#define WORKINGSET_FILE 1
#define ANON_AND_FILE 2

enum lruvec_flags {
	/*
	 * An lruvec has many dirty pages backed by a congested BDI:
	 * 1. LRUVEC_CGROUP_CONGESTED is set by cgroup-level reclaim.
	 *    It can be cleared by cgroup reclaim or kswapd.
	 * 2. LRUVEC_NODE_CONGESTED is set by kswapd node-level reclaim.
	 *    It can only be cleared by kswapd.
	 *
	 * Essentially, kswapd can unthrottle an lruvec throttled by cgroup
	 * reclaim, but not vice versa. This only applies to the root cgroup.
	 * The goal is to prevent cgroup reclaim on the root cgroup (e.g.
	 * memory.reclaim) from unthrottling an unbalanced node (that was
	 * throttled by kswapd).
	 */
	LRUVEC_CGROUP_CONGESTED,
	LRUVEC_NODE_CONGESTED,
};

#endif /* !__GENERATING_BOUNDS_H */

/*
 * Evictable folios are divided into multiple generations. The youngest and the
 * oldest generation numbers, max_seq and min_seq, are monotonically increasing.
 * They form a sliding window of a variable size [MIN_NR_GENS, MAX_NR_GENS]. An
 * offset within MAX_NR_GENS, i.e., gen, indexes the LRU list of the
 * corresponding generation. The gen counter in folio->flags stores gen+1 while
 * a folio is on one of lrugen->folios[]. Otherwise it stores 0.
 *
 * After a folio is faulted in, the aging needs to check the accessed bit at
 * least twice before handing this folio over to the eviction. The first check
 * clears the accessed bit from the initial fault; the second check makes sure
 * this folio hasn't been used since then. This process, AKA second chance,
 * requires a minimum of two generations, hence MIN_NR_GENS. And to maintain ABI
 * compatibility with the active/inactive LRU, e.g., /proc/vmstat, these two
 * generations are considered active; the rest of the generations, if they
 * exist, are considered inactive. See lru_gen_is_active().
 *
 * PG_active is always cleared while a folio is on one of lrugen->folios[] so
 * that the sliding window need not worry about it. And it's set again when a
 * folio considered active is isolated for non-reclaiming purposes, e.g.,
 * migration. See lru_gen_add_folio() and lru_gen_del_folio().
 *
 * MAX_NR_GENS is set to 4 so that the multi-gen LRU can support twice the
 * number of categories of the active/inactive LRU when keeping track of
 * accesses through page tables. This requires order_base_2(MAX_NR_GENS+1) bits
 * in folio->flags, masked by LRU_GEN_MASK.
 */
#define MIN_NR_GENS 2U
#define MAX_NR_GENS 4U
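
/*
 * Because the window slides, a generation number seq maps onto a fixed LRU
 * slot as gen = seq % MAX_NR_GENS (cf. lru_gen_from_seq()). E.g., with
 * max_seq = 9 and min_seq = 7, the live generations 7, 8 and 9 occupy slots
 * 3, 0 and 1, and the two youngest (8 and 9) are the ones reported as
 * active.
 */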

/*
 * Each generation is divided into multiple tiers. A folio accessed N times
 * through file descriptors is in tier order_base_2(N). A folio in the first
 * tier (N=0,1) is marked by PG_referenced unless it was faulted in through page
 * tables or read ahead. A folio in the last tier (MAX_NR_TIERS-1) is marked by
 * PG_workingset. A folio in any other tier (1<N<5) between the first and last
 * is marked by additional bits of LRU_REFS_WIDTH in folio->flags.
 *
 * In contrast to moving across generations which requires the LRU lock, moving
 * across tiers only involves atomic operations on folio->flags and therefore
 * has a negligible cost in the buffered access path. In the eviction path,
 * comparisons of refaulted/(evicted+protected) from the first tier and the rest
 * infer whether folios accessed multiple times through file descriptors are
 * statistically hot and thus worth protecting.
 *
 * MAX_NR_TIERS is set to 4 so that the multi-gen LRU can support twice the
 * number of categories of the active/inactive LRU when keeping track of
 * accesses through file descriptors. This uses MAX_NR_TIERS-2 spare bits in
 * folio->flags, masked by LRU_REFS_MASK.
 */
#define MAX_NR_TIERS 4U
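
/*
 * For instance, a folio read via file descriptors once or twice (N=1,2)
 * sits in tier order_base_2(1)=0 or order_base_2(2)=1; from the fifth
 * access on (order_base_2(5)=3, the last tier) it is marked by
 * PG_workingset and protected accordingly.
 */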

#ifndef __GENERATING_BOUNDS_H

#define LRU_GEN_MASK ((BIT(LRU_GEN_WIDTH) - 1) << LRU_GEN_PGOFF)
#define LRU_REFS_MASK ((BIT(LRU_REFS_WIDTH) - 1) << LRU_REFS_PGOFF)

/*
 * For folios accessed multiple times through file descriptors,
 * lru_gen_inc_refs() sets additional bits of LRU_REFS_WIDTH in folio->flags
 * after PG_referenced, then PG_workingset after LRU_REFS_WIDTH. After all its
 * bits are set, i.e., LRU_REFS_FLAGS|BIT(PG_workingset), a folio is lazily
 * promoted into the second oldest generation in the eviction path. And when
 * folio_inc_gen() does that, it clears LRU_REFS_FLAGS so that
 * lru_gen_inc_refs() can start over. Note that for this case, LRU_REFS_MASK is
 * only valid when PG_referenced is set.
 *
 * For folios accessed multiple times through page tables, folio_update_gen()
 * from a page table walk or lru_gen_set_refs() from a rmap walk sets
 * PG_referenced after the accessed bit is cleared for the first time.
 * Thereafter, those two paths set PG_workingset and promote folios to the
 * youngest generation. Like folio_inc_gen(), folio_update_gen() also clears
 * PG_referenced. Note that for this case, LRU_REFS_MASK is not used.
 *
 * For both cases above, after PG_workingset is set on a folio, it remains until
 * this folio is either reclaimed, or "deactivated" by lru_gen_clear_refs(). It
 * can be set again if lru_gen_test_recent() returns true upon a refault.
 */
#define LRU_REFS_FLAGS (LRU_REFS_MASK | BIT(PG_referenced))

struct lruvec;
struct page_vma_mapped_walk;

#ifdef CONFIG_LRU_GEN

enum {
	LRU_GEN_ANON,
	LRU_GEN_FILE,
};

enum {
	LRU_GEN_CORE,
	LRU_GEN_MM_WALK,
	LRU_GEN_NONLEAF_YOUNG,
	NR_LRU_GEN_CAPS
};

#define MIN_LRU_BATCH BITS_PER_LONG
#define MAX_LRU_BATCH (MIN_LRU_BATCH * 64)

/* whether to keep historical stats from evicted generations */
#ifdef CONFIG_LRU_GEN_STATS
#define NR_HIST_GENS MAX_NR_GENS
#else
#define NR_HIST_GENS 1U
#endif

/*
 * The youngest generation number is stored in max_seq for both anon and file
 * types as they are aged on an equal footing. The oldest generation numbers are
 * stored in min_seq[] separately for anon and file types so that they can be
 * incremented independently. Ideally min_seq[] are kept in sync when both anon
 * and file types are evictable. However, to adapt to situations like extreme
 * swappiness, they are allowed to be out of sync by at most
 * MAX_NR_GENS-MIN_NR_GENS-1.
 *
 * The number of pages in each generation is eventually consistent and therefore
 * can be transiently negative when reset_batch_size() is pending.
 */
struct lru_gen_folio {
	/* the aging increments the youngest generation number */
	unsigned long max_seq;
	/* the eviction increments the oldest generation numbers */
	unsigned long min_seq[ANON_AND_FILE];
	/* the birth time of each generation in jiffies */
	unsigned long timestamps[MAX_NR_GENS];
	/* the multi-gen LRU lists, lazily sorted on eviction */
	struct list_head folios[MAX_NR_GENS][ANON_AND_FILE][MAX_NR_ZONES];
	/* the multi-gen LRU sizes, eventually consistent */
	long nr_pages[MAX_NR_GENS][ANON_AND_FILE][MAX_NR_ZONES];
	/* the exponential moving average of refaulted */
	unsigned long avg_refaulted[ANON_AND_FILE][MAX_NR_TIERS];
	/* the exponential moving average of evicted+protected */
	unsigned long avg_total[ANON_AND_FILE][MAX_NR_TIERS];
	/* can only be modified under the LRU lock */
	unsigned long protected[NR_HIST_GENS][ANON_AND_FILE][MAX_NR_TIERS];
	/* can be modified without holding the LRU lock */
	atomic_long_t evicted[NR_HIST_GENS][ANON_AND_FILE][MAX_NR_TIERS];
	atomic_long_t refaulted[NR_HIST_GENS][ANON_AND_FILE][MAX_NR_TIERS];
	/* whether the multi-gen LRU is enabled */
	bool enabled;
	/* the memcg generation this lru_gen_folio belongs to */
	u8 gen;
	/* the list segment this lru_gen_folio belongs to */
	u8 seg;
	/* per-node lru_gen_folio list for global reclaim */
	struct hlist_nulls_node list;

	ANDROID_KABI_RESERVE(1);
	ANDROID_KABI_RESERVE(2);
	ANDROID_OEM_DATA_ARRAY(1, 6);
};
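
/*
 * A sketch of how the arrays above are typically indexed (illustrative
 * only; the reclaim code in mm/vmscan.c is the authoritative user):
 * summing the eventually consistent size of the youngest file generation
 * across zones.
 *
 *	int gen = lru_gen_from_seq(READ_ONCE(lrugen->max_seq));
 *	long nr = 0;
 *	int zone;
 *
 *	for (zone = 0; zone < MAX_NR_ZONES; zone++)
 *		nr += max(READ_ONCE(lrugen->nr_pages[gen][LRU_GEN_FILE][zone]), 0L);
 */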

enum {
	MM_LEAF_TOTAL,		/* total leaf entries */
	MM_LEAF_YOUNG,		/* young leaf entries */
	MM_NONLEAF_FOUND,	/* non-leaf entries found in Bloom filters */
	MM_NONLEAF_ADDED,	/* non-leaf entries added to Bloom filters */
	NR_MM_STATS
};

/* double-buffering Bloom filters */
#define NR_BLOOM_FILTERS 2

struct lru_gen_mm_state {
	/* synced with max_seq after each iteration */
	unsigned long seq;
	/* where the current iteration continues after */
	struct list_head *head;
	/* where the last iteration ended before */
	struct list_head *tail;
	/* Bloom filters flip after each iteration */
	unsigned long *filters[NR_BLOOM_FILTERS];
	/* the mm stats for debugging */
	unsigned long stats[NR_HIST_GENS][NR_MM_STATS];

	ANDROID_KABI_RESERVE(1);
};

struct lru_gen_mm_walk {
	/* the lruvec under reclaim */
	struct lruvec *lruvec;
	/* max_seq from lru_gen_folio: can be out of date */
	unsigned long seq;
	/* the next address within an mm to scan */
	unsigned long next_addr;
	/* to batch promoted pages */
	int nr_pages[MAX_NR_GENS][ANON_AND_FILE][MAX_NR_ZONES];
	/* to batch the mm stats */
	int mm_stats[NR_MM_STATS];
	/* total batched items */
	int batched;
	int swappiness;
	bool force_scan;

	ANDROID_KABI_RESERVE(1);
};

/*
 * For each node, memcgs are divided into two generations: the old and the
 * young. For each generation, memcgs are randomly sharded into multiple bins
 * to improve scalability. For each bin, the hlist_nulls is virtually divided
 * into three segments: the head, the tail and the default.
 *
 * An onlining memcg is added to the tail of a random bin in the old generation.
 * The eviction starts at the head of a random bin in the old generation. The
 * per-node memcg generation counter, whose remainder (mod MEMCG_NR_GENS)
 * indexes the old generation, is incremented when all its bins become empty.
 *
 * There are four operations:
 * 1. MEMCG_LRU_HEAD, which moves a memcg to the head of a random bin in its
 *    current generation (old or young) and updates its "seg" to "head";
 * 2. MEMCG_LRU_TAIL, which moves a memcg to the tail of a random bin in its
 *    current generation (old or young) and updates its "seg" to "tail";
 * 3. MEMCG_LRU_OLD, which moves a memcg to the head of a random bin in the old
 *    generation, updates its "gen" to "old" and resets its "seg" to "default";
 * 4. MEMCG_LRU_YOUNG, which moves a memcg to the tail of a random bin in the
 *    young generation, updates its "gen" to "young" and resets its "seg" to
 *    "default".
 *
 * The events that trigger the above operations are:
 * 1. Exceeding the soft limit, which triggers MEMCG_LRU_HEAD;
 * 2. The first attempt to reclaim a memcg below low, which triggers
 *    MEMCG_LRU_TAIL;
 * 3. The first attempt to reclaim a memcg offlined or below reclaimable size
 *    threshold, which triggers MEMCG_LRU_TAIL;
 * 4. The second attempt to reclaim a memcg offlined or below reclaimable size
 *    threshold, which triggers MEMCG_LRU_YOUNG;
 * 5. Attempting to reclaim a memcg below min, which triggers MEMCG_LRU_YOUNG;
 * 6. Finishing the aging on the eviction path, which triggers MEMCG_LRU_YOUNG;
 * 7. Offlining a memcg, which triggers MEMCG_LRU_OLD.
 *
 * Notes:
 * 1. Memcg LRU only applies to global reclaim, and the round-robin incrementing
 *    of their max_seq counters ensures the eventual fairness to all eligible
 *    memcgs. For memcg reclaim, it still relies on mem_cgroup_iter().
 * 2. There are only two valid generations: old (seq) and young (seq+1).
 *    MEMCG_NR_GENS is set to three so that when reading the generation counter
 *    locklessly, a stale value (seq-1) does not wrap around to young.
 */
#define MEMCG_NR_GENS	3
#define MEMCG_NR_BINS	8
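
/*
 * Concretely: with MEMCG_NR_GENS = 3, the slots (seq-1) % 3, seq % 3 and
 * (seq+1) % 3 are pairwise distinct, so a lockless reader racing with an
 * increment that still sees the stale seq-1 lands on an empty third slot
 * instead of being misdirected to the young generation; with only two
 * generations, seq-1 and seq+1 would alias.
 */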

struct lru_gen_memcg {
	/* the per-node memcg generation counter */
	unsigned long seq;
	/* each memcg has one lru_gen_folio per node */
	unsigned long nr_memcgs[MEMCG_NR_GENS];
	/* per-node lru_gen_folio list for global reclaim */
	struct hlist_nulls_head fifo[MEMCG_NR_GENS][MEMCG_NR_BINS];
	/* protects the above */
	spinlock_t lock;
};

void lru_gen_init_pgdat(struct pglist_data *pgdat);
void lru_gen_init_lruvec(struct lruvec *lruvec);
bool lru_gen_look_around(struct page_vma_mapped_walk *pvmw);

void lru_gen_init_memcg(struct mem_cgroup *memcg);
void lru_gen_exit_memcg(struct mem_cgroup *memcg);
void lru_gen_online_memcg(struct mem_cgroup *memcg);
void lru_gen_offline_memcg(struct mem_cgroup *memcg);
void lru_gen_release_memcg(struct mem_cgroup *memcg);
void lru_gen_soft_reclaim(struct mem_cgroup *memcg, int nid);

#else /* !CONFIG_LRU_GEN */

static inline void lru_gen_init_pgdat(struct pglist_data *pgdat)
{
}

static inline void lru_gen_init_lruvec(struct lruvec *lruvec)
{
}

static inline bool lru_gen_look_around(struct page_vma_mapped_walk *pvmw)
{
	return false;
}

static inline void lru_gen_init_memcg(struct mem_cgroup *memcg)
{
}

static inline void lru_gen_exit_memcg(struct mem_cgroup *memcg)
{
}

static inline void lru_gen_online_memcg(struct mem_cgroup *memcg)
{
}

static inline void lru_gen_offline_memcg(struct mem_cgroup *memcg)
{
}

static inline void lru_gen_release_memcg(struct mem_cgroup *memcg)
{
}

static inline void lru_gen_soft_reclaim(struct mem_cgroup *memcg, int nid)
{
}

#endif /* CONFIG_LRU_GEN */

struct lruvec {
	struct list_head		lists[NR_LRU_LISTS];
	/* per lruvec lru_lock for memcg */
	spinlock_t			lru_lock;
	/*
	 * These track the cost of reclaiming one LRU - file or anon -
	 * over the other. As the observed cost of reclaiming one LRU
	 * increases, the reclaim scan balance tips toward the other.
	 */
	unsigned long			anon_cost;
	unsigned long			file_cost;
	/* Non-resident age, driven by LRU movement */
	atomic_long_t			nonresident_age;
	/* Refaults at the time of last reclaim cycle */
	unsigned long			refaults[ANON_AND_FILE];
	/* Various lruvec state flags (enum lruvec_flags) */
	unsigned long			flags;
#ifdef CONFIG_LRU_GEN
	/* evictable pages divided into generations */
	struct lru_gen_folio		lrugen;
#ifdef CONFIG_LRU_GEN_WALKS_MMU
	/* to concurrently iterate lru_gen_mm_list */
	struct lru_gen_mm_state		mm_state;
#endif
#endif /* CONFIG_LRU_GEN */
#ifdef CONFIG_MEMCG
	struct pglist_data		*pgdat;
#endif
	struct zswap_lruvec_state	zswap_lruvec_state;

	ANDROID_BACKPORT_RESERVE(1);
};

/* Isolate for asynchronous migration */
#define ISOLATE_ASYNC_MIGRATE	((__force isolate_mode_t)0x4)
/* Isolate unevictable pages */
#define ISOLATE_UNEVICTABLE	((__force isolate_mode_t)0x8)

/* LRU Isolation modes. */
typedef unsigned __bitwise isolate_mode_t;

enum zone_watermarks {
	WMARK_MIN,
	WMARK_LOW,
	WMARK_HIGH,
	WMARK_PROMO,
	NR_WMARK
};

/*
 * One list per migratetype for each order up to PAGE_ALLOC_COSTLY_ORDER.
 * Two additional lists are added for THP: one PCP list is used by
 * GFP_MOVABLE, and the other PCP list is used by GFP_UNMOVABLE and
 * GFP_RECLAIMABLE.
 */
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define NR_PCP_THP 2
#else
#define NR_PCP_THP 0
#endif
#define NR_LOWORDER_PCP_LISTS (MIGRATE_PCPTYPES * (PAGE_ALLOC_COSTLY_ORDER + 1))
#define NR_PCP_LISTS (NR_LOWORDER_PCP_LISTS + NR_PCP_THP)
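
/*
 * Worked example, assuming CONFIG_CMA is disabled and
 * CONFIG_TRANSPARENT_HUGEPAGE is enabled: MIGRATE_PCPTYPES = 3 and
 * PAGE_ALLOC_COSTLY_ORDER = 3, so NR_LOWORDER_PCP_LISTS = 3 * (3 + 1) = 12
 * lists covering orders 0..3, plus NR_PCP_THP = 2 lists for PMD-sized
 * pages, giving NR_PCP_LISTS = 14.
 */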

/*
 * Flags used in pcp->flags field.
 *
 * PCPF_PREV_FREE_HIGH_ORDER: the previously freed page was a high-order
 * page. Used to avoid draining the PCP for a one-off high-order page
 * freeing.
 *
 * PCPF_FREE_HIGH_BATCH: preserve "pcp->batch" pages in the PCP before
 * draining it when consecutive high-order pages are freed without
 * allocation, provided the CPU's data cache slice is large enough. This
 * reduces zone lock contention and keeps cache-hot pages in reuse.
 */
#define PCPF_PREV_FREE_HIGH_ORDER	BIT(0)
#define PCPF_FREE_HIGH_BATCH		BIT(1)

struct per_cpu_pages {
	spinlock_t lock;	/* Protects lists field */
	int count;		/* number of pages in the list */
	int high;		/* high watermark, emptying needed */
	int high_min;		/* min high watermark */
	int high_max;		/* max high watermark */
	int batch;		/* chunk size for buddy add/remove */
	u8 flags;		/* protected by pcp->lock */
	u8 alloc_factor;	/* batch scaling factor during allocate */
#ifdef CONFIG_NUMA
	u8 expire;		/* When 0, remote pagesets are drained */
#endif
	short free_count;	/* consecutive free count */

	/* Lists of pages, one per migrate type stored on the pcp-lists */
	struct list_head lists[NR_PCP_LISTS];
} ____cacheline_aligned_in_smp;

struct per_cpu_zonestat {
#ifdef CONFIG_SMP
	s8 vm_stat_diff[NR_VM_ZONE_STAT_ITEMS];
	s8 stat_threshold;
#endif
#ifdef CONFIG_NUMA
	/*
	 * Low priority inaccurate counters that are only folded
	 * on demand. Use a large type to avoid the overhead of
	 * folding during refresh_cpu_vm_stats.
	 */
	unsigned long vm_numa_event[NR_VM_NUMA_EVENT_ITEMS];
#endif
};

struct per_cpu_nodestat {
	s8 stat_threshold;
	s8 vm_node_stat_diff[NR_VM_NODE_STAT_ITEMS];
};

#endif /* !__GENERATING_BOUNDS_H */

enum zone_type {
	/*
	 * ZONE_DMA and ZONE_DMA32 are used when there are peripherals not able
	 * to DMA to all of the addressable memory (ZONE_NORMAL).
	 * On architectures where this area covers the whole 32 bit address
	 * space ZONE_DMA32 is used. ZONE_DMA is left for the ones with smaller
	 * DMA addressing constraints. This distinction is important as a 32bit
	 * DMA mask is assumed when ZONE_DMA32 is defined. Some 64-bit
	 * platforms may need both zones as they support peripherals with
	 * different DMA addressing limitations.
	 */
#ifdef CONFIG_ZONE_DMA
	ZONE_DMA,
#endif
#ifdef CONFIG_ZONE_DMA32
	ZONE_DMA32,
#endif
	/*
	 * Normal addressable memory is in ZONE_NORMAL. DMA operations can be
	 * performed on pages in ZONE_NORMAL if the DMA devices support
	 * transfers to all addressable memory.
	 */
	ZONE_NORMAL,
#ifdef CONFIG_HIGHMEM
	/*
	 * A memory area that is only addressable by the kernel through
	 * mapping portions into its own address space. This is for example
	 * used by i386 to allow the kernel to address the memory beyond
	 * 900MB. The kernel will set up special mappings (page
	 * table entries on i386) for each page that the kernel needs to
	 * access.
	 */
	ZONE_HIGHMEM,
#endif
	/*
	 * ZONE_MOVABLE is similar to ZONE_NORMAL, except that it contains
	 * movable pages with few exceptional cases described below. Main use
	 * cases for ZONE_MOVABLE are to make memory offlining/unplug more
	 * likely to succeed, and to locally limit unmovable allocations - e.g.,
	 * to increase the number of THP/huge pages. Notable special cases are:
	 *
	 * 1. Pinned pages: (long-term) pinning of movable pages might
	 *    essentially turn such pages unmovable. Therefore, we do not allow
	 *    pinning long-term pages in ZONE_MOVABLE. When pages are pinned and
	 *    faulted, they come from the right zone right away. However, it is
	 *    still possible that the address space already has pages in
	 *    ZONE_MOVABLE at the time when pages are pinned (i.e. the user has
	 *    touched that memory before pinning). In such a case we migrate
	 *    them to a different zone. When migration fails - pinning fails.
	 * 2. memblock allocations: kernelcore/movablecore setups might create
	 *    situations where ZONE_MOVABLE contains unmovable allocations
	 *    after boot. Memory offlining and allocations fail early.
	 * 3. Memory holes: kernelcore/movablecore setups might create very rare
	 *    situations where ZONE_MOVABLE contains memory holes after boot,
	 *    for example, if we have sections that are only partially
	 *    populated. Memory offlining and allocations fail early.
	 * 4. PG_hwpoison pages: while poisoned pages can be skipped during
	 *    memory offlining, such pages cannot be allocated.
	 * 5. Unmovable PG_offline pages: in paravirtualized environments,
	 *    hotplugged memory blocks might only partially be managed by the
	 *    buddy (e.g., via XEN-balloon, Hyper-V balloon, virtio-mem). The
	 *    parts not managed by the buddy are unmovable PG_offline pages. In
	 *    some cases (virtio-mem), such pages can be skipped during
	 *    memory offlining, however, cannot be moved/allocated. These
	 *    techniques might use alloc_contig_range() to hide previously
	 *    exposed pages from the buddy again (e.g., to implement some sort
	 *    of memory unplug in virtio-mem).
	 * 6. ZERO_PAGE(0): kernelcore/movablecore setups might create
	 *    situations where ZERO_PAGE(0), which is allocated differently
	 *    on different platforms, may end up in a movable zone. ZERO_PAGE(0)
	 *    cannot be migrated.
	 * 7. Memory-hotplug: when using memmap_on_memory and onlining the
	 *    memory to the MOVABLE zone, the vmemmap pages are also placed in
	 *    such zone. Such pages cannot be really moved around as they are
	 *    self-stored in the range, but they are treated as movable when
	 *    the range they describe is about to be offlined.
	 *
	 * In general, no unmovable allocations that degrade memory offlining
	 * should end up in ZONE_MOVABLE. Allocators (like alloc_contig_range())
	 * have to expect that migrating pages in ZONE_MOVABLE can fail (even
	 * if has_unmovable_pages() states that there are no unmovable pages,
	 * there can be false negatives).
	 */
	ZONE_MOVABLE,
#ifdef CONFIG_ZONE_DEVICE
	ZONE_DEVICE,
#endif
	__MAX_NR_ZONES

};

#ifndef __GENERATING_BOUNDS_H

#define ASYNC_AND_SYNC 2

struct zone {
	/* Read-mostly fields */

	/* zone watermarks, access with *_wmark_pages(zone) macros */
	unsigned long _watermark[NR_WMARK];
	unsigned long watermark_boost;

	unsigned long nr_reserved_highatomic;
	unsigned long nr_free_highatomic;

	/*
	 * We don't know if the memory that we're going to allocate will be
	 * freeable and/or released eventually, so to avoid totally wasting
	 * several GB of ram we must reserve some of the lower zone memory
	 * (otherwise we risk running OOM on the lower zones despite there
	 * being tons of freeable ram on the higher zones). This array is
	 * recalculated at runtime if the sysctl_lowmem_reserve_ratio sysctl
	 * changes.
	 */
	long lowmem_reserve[MAX_NR_ZONES];

#ifdef CONFIG_NUMA
	int node;
#endif
	struct pglist_data	*zone_pgdat;
	struct per_cpu_pages	__percpu *per_cpu_pageset;
	struct per_cpu_zonestat	__percpu *per_cpu_zonestats;
	/*
	 * the high and batch values are copied to individual pagesets for
	 * faster access
	 */
	int pageset_high_min;
	int pageset_high_max;
	int pageset_batch;

#ifndef CONFIG_SPARSEMEM
	/*
	 * Flags for a pageblock_nr_pages block. See pageblock-flags.h.
	 * In SPARSEMEM, this map is stored in struct mem_section
	 */
	unsigned long		*pageblock_flags;
#endif /* CONFIG_SPARSEMEM */

	/* zone_start_pfn == zone_start_paddr >> PAGE_SHIFT */
	unsigned long		zone_start_pfn;

	/*
	 * spanned_pages is the total pages spanned by the zone, including
	 * holes, which is calculated as:
	 *	spanned_pages = zone_end_pfn - zone_start_pfn;
	 *
	 * present_pages is physical pages existing within the zone, which
	 * is calculated as:
	 *	present_pages = spanned_pages - absent_pages(pages in holes);
	 *
	 * present_early_pages is present pages existing within the zone
	 * located on memory available since early boot, excluding hotplugged
	 * memory.
	 *
	 * managed_pages is present pages managed by the buddy system, which
	 * is calculated as (reserved_pages includes pages allocated by the
	 * bootmem allocator):
	 *	managed_pages = present_pages - reserved_pages;
	 *
	 * cma pages is present pages that are assigned for CMA use
	 * (MIGRATE_CMA).
	 *
	 * So present_pages may be used by memory hotplug or memory power
	 * management logic to figure out unmanaged pages by checking
	 * (present_pages - managed_pages). And managed_pages should be used
	 * by page allocator and vm scanner to calculate all kinds of watermarks
	 * and thresholds.
	 *
	 * Locking rules:
	 *
	 * zone_start_pfn and spanned_pages are protected by span_seqlock.
	 * It is a seqlock because it has to be read outside of zone->lock,
	 * and it is done in the main allocator path. But, it is written
	 * quite infrequently.
	 *
	 * The span_seq lock is declared along with zone->lock because it is
	 * frequently read in proximity to zone->lock. It's good to
	 * give them a chance of being in the same cacheline.
	 *
	 * Write access to present_pages at runtime should be protected by
	 * mem_hotplug_begin/done(). Any reader who can't tolerate drift of
	 * present_pages should use get_online_mems() to get a stable value.
	 */
	atomic_long_t		managed_pages;
	unsigned long		spanned_pages;
	unsigned long		present_pages;
#if defined(CONFIG_MEMORY_HOTPLUG)
	unsigned long		present_early_pages;
#endif
#ifdef CONFIG_CMA
	unsigned long		cma_pages;
#endif

	const char		*name;

#ifdef CONFIG_MEMORY_ISOLATION
	/*
	 * Number of isolated pageblocks. It is used to solve an incorrect
	 * freepage counting problem caused by racily retrieving the
	 * migratetype of a pageblock. Protected by zone->lock.
	 */
	unsigned long		nr_isolate_pageblock;
#endif

#ifdef CONFIG_MEMORY_HOTPLUG
	/* see spanned/present_pages for more description */
	seqlock_t		span_seqlock;
#endif

	int initialized;

	/* Write-intensive fields used from the page allocator */
	CACHELINE_PADDING(_pad1_);

	/* free areas of different sizes */
	struct free_area	free_area[NR_PAGE_ORDERS];

#ifdef CONFIG_UNACCEPTED_MEMORY
	/* Pages to be accepted. All pages on the list are MAX_PAGE_ORDER */
	struct list_head	unaccepted_pages;
#endif

	/* zone flags, see below */
	unsigned long		flags;

	/* Primarily protects free_area */
	spinlock_t		lock;

	/* Write-intensive fields used by compaction and vmstats. */
	CACHELINE_PADDING(_pad2_);

	/*
	 * When free pages are below this point, additional steps are taken
	 * when reading the number of free pages to avoid per-cpu counter
	 * drift allowing watermarks to be breached
	 */
	unsigned long percpu_drift_mark;

#if defined CONFIG_COMPACTION || defined CONFIG_CMA
	/* pfn where compaction free scanner should start */
	unsigned long		compact_cached_free_pfn;
	/* pfn where compaction migration scanner should start */
	unsigned long		compact_cached_migrate_pfn[ASYNC_AND_SYNC];
	unsigned long		compact_init_migrate_pfn;
	unsigned long		compact_init_free_pfn;
#endif

#ifdef CONFIG_COMPACTION
	/*
	 * On compaction failure, 1<<compact_defer_shift compactions
	 * are skipped before trying again. The number attempted since
	 * last failure is tracked with compact_considered.
	 * compact_order_failed is the minimum compaction failed order.
	 */
	unsigned int		compact_considered;
	unsigned int		compact_defer_shift;
	int			compact_order_failed;
#endif

#if defined CONFIG_COMPACTION || defined CONFIG_CMA
	/* Set to true when the PG_migrate_skip bits should be cleared */
	bool			compact_blockskip_flush;
#endif

	bool			contiguous;

	CACHELINE_PADDING(_pad3_);
	/* Zone statistics */
	atomic_long_t		vm_stat[NR_VM_ZONE_STAT_ITEMS];
	atomic_long_t		vm_numa_event[NR_VM_NUMA_EVENT_ITEMS];
} ____cacheline_internodealigned_in_smp;

enum pgdat_flags {
	PGDAT_DIRTY,			/* reclaim scanning has recently found
					 * many dirty file pages at the tail
					 * of the LRU.
					 */
	PGDAT_WRITEBACK,		/* reclaim scanning has recently found
					 * many pages under writeback
					 */
	PGDAT_RECLAIM_LOCKED,		/* prevents concurrent reclaim */
};

enum zone_flags {
	ZONE_BOOSTED_WATERMARK,		/* zone recently boosted watermarks.
					 * Cleared when kswapd is woken.
					 */
	ZONE_RECLAIM_ACTIVE,		/* kswapd may be scanning the zone. */
	ZONE_BELOW_HIGH,		/* zone is below high watermark. */
};

static inline unsigned long wmark_pages(const struct zone *z,
					enum zone_watermarks w)
{
	return z->_watermark[w] + z->watermark_boost;
}

static inline unsigned long min_wmark_pages(const struct zone *z)
{
	return wmark_pages(z, WMARK_MIN);
}

static inline unsigned long low_wmark_pages(const struct zone *z)
{
	return wmark_pages(z, WMARK_LOW);
}

static inline unsigned long high_wmark_pages(const struct zone *z)
{
	return wmark_pages(z, WMARK_HIGH);
}

static inline unsigned long promo_wmark_pages(const struct zone *z)
{
	return wmark_pages(z, WMARK_PROMO);
}
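
/*
 * Usage sketch (illustrative only): a zone is deemed under pressure when
 * its free-page count drops below a watermark; this is the pattern behind
 * the zone_watermark_ok() family declared later in this header.
 *
 *	if (zone_page_state(zone, NR_FREE_PAGES) < low_wmark_pages(zone))
 *		wakeup_kswapd(zone, gfp_mask, order, highest_zoneidx);
 */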

static inline unsigned long zone_managed_pages(struct zone *zone)
{
	return (unsigned long)atomic_long_read(&zone->managed_pages);
}

static inline unsigned long zone_cma_pages(struct zone *zone)
{
#ifdef CONFIG_CMA
	return zone->cma_pages;
#else
	return 0;
#endif
}

static inline unsigned long zone_end_pfn(const struct zone *zone)
{
	return zone->zone_start_pfn + zone->spanned_pages;
}

static inline bool zone_spans_pfn(const struct zone *zone, unsigned long pfn)
{
	return zone->zone_start_pfn <= pfn && pfn < zone_end_pfn(zone);
}

static inline bool zone_is_initialized(struct zone *zone)
{
	return zone->initialized;
}

static inline bool zone_is_empty(struct zone *zone)
{
	return zone->spanned_pages == 0;
}

#ifndef BUILD_VDSO32_64
/*
 * The zone field is never updated after free_area_init_core()
 * sets it, so none of the operations on it need to be atomic.
 */

/* Page flags: | [SECTION] | [NODE] | ZONE | [LAST_CPUPID] | ... | FLAGS | */
#define SECTIONS_PGOFF		((sizeof(unsigned long)*8) - SECTIONS_WIDTH)
#define NODES_PGOFF		(SECTIONS_PGOFF - NODES_WIDTH)
#define ZONES_PGOFF		(NODES_PGOFF - ZONES_WIDTH)
#define LAST_CPUPID_PGOFF	(ZONES_PGOFF - LAST_CPUPID_WIDTH)
#define KASAN_TAG_PGOFF		(LAST_CPUPID_PGOFF - KASAN_TAG_WIDTH)
#define LRU_GEN_PGOFF		(KASAN_TAG_PGOFF - LRU_GEN_WIDTH)
#define LRU_REFS_PGOFF		(LRU_GEN_PGOFF - LRU_REFS_WIDTH)

/*
 * Define the bit shifts to access each section. For non-existent
 * sections we define the shift as 0; that plus a 0 mask ensures
 * the compiler will optimise away reference to them.
 */
#define SECTIONS_PGSHIFT	(SECTIONS_PGOFF * (SECTIONS_WIDTH != 0))
#define NODES_PGSHIFT		(NODES_PGOFF * (NODES_WIDTH != 0))
#define ZONES_PGSHIFT		(ZONES_PGOFF * (ZONES_WIDTH != 0))
#define LAST_CPUPID_PGSHIFT	(LAST_CPUPID_PGOFF * (LAST_CPUPID_WIDTH != 0))
#define KASAN_TAG_PGSHIFT	(KASAN_TAG_PGOFF * (KASAN_TAG_WIDTH != 0))

/* NODE:ZONE or SECTION:ZONE is used to ID a zone for the buddy allocator */
#ifdef NODE_NOT_IN_PAGE_FLAGS
#define ZONEID_SHIFT		(SECTIONS_SHIFT + ZONES_SHIFT)
#define ZONEID_PGOFF		((SECTIONS_PGOFF < ZONES_PGOFF) ? \
						SECTIONS_PGOFF : ZONES_PGOFF)
#else
#define ZONEID_SHIFT		(NODES_SHIFT + ZONES_SHIFT)
#define ZONEID_PGOFF		((NODES_PGOFF < ZONES_PGOFF) ? \
						NODES_PGOFF : ZONES_PGOFF)
#endif

#define ZONEID_PGSHIFT		(ZONEID_PGOFF * (ZONEID_SHIFT != 0))

#define ZONES_MASK		((1UL << ZONES_WIDTH) - 1)
#define NODES_MASK		((1UL << NODES_WIDTH) - 1)
#define SECTIONS_MASK		((1UL << SECTIONS_WIDTH) - 1)
#define LAST_CPUPID_MASK	((1UL << LAST_CPUPID_SHIFT) - 1)
#define KASAN_TAG_MASK		((1UL << KASAN_TAG_WIDTH) - 1)
#define ZONEID_MASK		((1UL << ZONEID_SHIFT) - 1)

static inline enum zone_type page_zonenum(const struct page *page)
{
	ASSERT_EXCLUSIVE_BITS(page->flags, ZONES_MASK << ZONES_PGSHIFT);
	return (page->flags >> ZONES_PGSHIFT) & ZONES_MASK;
}

static inline enum zone_type folio_zonenum(const struct folio *folio)
{
	return page_zonenum(&folio->page);
}

#ifdef CONFIG_ZONE_DEVICE
static inline bool is_zone_device_page(const struct page *page)
{
	return page_zonenum(page) == ZONE_DEVICE;
}

/*
 * Consecutive zone device pages should not be merged into the same sgl
 * or bvec segment with other types of pages or if they belong to different
 * pgmaps. Otherwise getting the pgmap of a given segment is not possible
 * without scanning the entire segment. This helper returns true either if
 * both pages are not zone device pages or both pages are zone device pages
 * with the same pgmap.
 */
static inline bool zone_device_pages_have_same_pgmap(const struct page *a,
						     const struct page *b)
{
	if (is_zone_device_page(a) != is_zone_device_page(b))
		return false;
	if (!is_zone_device_page(a))
		return true;
	return a->pgmap == b->pgmap;
}

extern void memmap_init_zone_device(struct zone *, unsigned long,
				    unsigned long, struct dev_pagemap *);
#else
static inline bool is_zone_device_page(const struct page *page)
{
	return false;
}
static inline bool zone_device_pages_have_same_pgmap(const struct page *a,
						     const struct page *b)
{
	return true;
}
#endif

static inline bool folio_is_zone_device(const struct folio *folio)
{
	return is_zone_device_page(&folio->page);
}

static inline bool is_zone_movable_page(const struct page *page)
{
	return page_zonenum(page) == ZONE_MOVABLE;
}

static inline bool folio_is_zone_movable(const struct folio *folio)
{
	return folio_zonenum(folio) == ZONE_MOVABLE;
}
#endif

/*
 * Return true if [start_pfn, start_pfn + nr_pages) range has a non-empty
 * intersection with the given zone
 */
static inline bool zone_intersects(struct zone *zone,
		unsigned long start_pfn, unsigned long nr_pages)
{
	if (zone_is_empty(zone))
		return false;
	if (start_pfn >= zone_end_pfn(zone) ||
	    start_pfn + nr_pages <= zone->zone_start_pfn)
		return false;

	return true;
}

/*
 * The "priority" of VM scanning is how much of the queues we will scan in one
 * go. A value of 12 for DEF_PRIORITY implies that we will scan 1/4096th of the
 * queues ("queue_length >> 12") during an aging round.
 */
#define DEF_PRIORITY 12

/* Maximum number of zones on a zonelist */
#define MAX_ZONES_PER_ZONELIST (MAX_NUMNODES * MAX_NR_ZONES)

enum {
	ZONELIST_FALLBACK,	/* zonelist with fallback */
#ifdef CONFIG_NUMA
	/*
	 * The NUMA zonelists are doubled because we need zonelists that
	 * restrict the allocations to a single node for __GFP_THISNODE.
	 */
	ZONELIST_NOFALLBACK,	/* zonelist without fallback (__GFP_THISNODE) */
#endif
	MAX_ZONELISTS
};

/*
 * This struct contains information about a zone in a zonelist. It is stored
 * here to avoid dereferences into large structures and lookups of tables
 */
struct zoneref {
	struct zone *zone;	/* Pointer to actual zone */
	int zone_idx;		/* zone_idx(zoneref->zone) */
};

/*
 * One allocation request operates on a zonelist. A zonelist
 * is a list of zones, the first one is the 'goal' of the
 * allocation, the other zones are fallback zones, in decreasing
 * priority.
 *
 * To speed the reading of the zonelist, the zonerefs contain the zone index
 * of the entry being read. Helper functions to access information given
 * a struct zoneref are
 *
 * zonelist_zone()	- Return the struct zone * for an entry in _zonerefs
 * zonelist_zone_idx()	- Return the index of the zone for an entry
 * zonelist_node_idx()	- Return the index of the node for an entry
 */
struct zonelist {
	struct zoneref _zonerefs[MAX_ZONES_PER_ZONELIST + 1];
};
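
/*
 * Usage sketch (illustrative only): a zonelist can be walked by hand since
 * its terminating zoneref has a NULL zone pointer; the allocator wraps this
 * pattern in its own iteration macros.
 *
 *	struct zoneref *z;
 *
 *	for (z = zonelist->_zonerefs; zonelist_zone(z); z++)
 *		pr_info("candidate zone %s\n", zonelist_zone(z)->name);
 */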

/*
 * The array of struct pages for flatmem.
 * It must be declared for SPARSEMEM as well because there are configurations
 * that rely on that.
 */
extern struct page *mem_map;

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
struct deferred_split {
	spinlock_t split_queue_lock;
	struct list_head split_queue;
	unsigned long split_queue_len;
};
#endif

#ifdef CONFIG_MEMORY_FAILURE
/*
 * Per NUMA node memory failure handling statistics.
 */
struct memory_failure_stats {
	/*
	 * Number of raw pages poisoned.
	 * Cases not accounted: memory outside kernel control, offline page,
	 * arch-specific memory_failure (SGX), hwpoison_filter() filtered
	 * error events, and unpoison actions from hwpoison_unpoison.
	 */
	unsigned long total;
	/*
	 * Recovery results of poisoned raw pages handled by memory_failure,
	 * in sync with mf_result.
	 * total = ignored + failed + delayed + recovered.
	 * total * PAGE_SIZE * #nodes = /proc/meminfo/HardwareCorrupted.
	 */
	unsigned long ignored;
	unsigned long failed;
	unsigned long delayed;
	unsigned long recovered;
};
#endif

/*
 * On NUMA machines, each NUMA node would have a pg_data_t to describe
 * its memory layout. On UMA machines there is a single pglist_data which
 * describes the whole memory.
 *
 * Memory statistics and page replacement data structures are maintained on a
 * per-zone basis.
 */
typedef struct pglist_data {
	/*
	 * node_zones contains just the zones for THIS node. Not all of the
	 * zones may be populated, but it is the full list. It is referenced by
	 * this node's node_zonelists as well as other node's node_zonelists.
	 */
	struct zone node_zones[MAX_NR_ZONES];

	/*
	 * node_zonelists contains references to all zones in all nodes.
	 * Generally the first zones will be references to this node's
	 * node_zones.
	 */
	struct zonelist node_zonelists[MAX_ZONELISTS];

	int nr_zones;			/* number of populated zones in this node */
#ifdef CONFIG_FLATMEM			/* means !SPARSEMEM */
	struct page *node_mem_map;
#ifdef CONFIG_PAGE_EXTENSION
	struct page_ext *node_page_ext;
#endif
#endif
#if defined(CONFIG_MEMORY_HOTPLUG) || defined(CONFIG_DEFERRED_STRUCT_PAGE_INIT)
	/*
	 * Must be held any time you expect node_start_pfn,
	 * node_present_pages, node_spanned_pages or nr_zones to stay constant.
	 * Also synchronizes pgdat->first_deferred_pfn during deferred page
	 * init.
	 *
	 * pgdat_resize_lock() and pgdat_resize_unlock() are provided to
	 * manipulate node_size_lock without checking for CONFIG_MEMORY_HOTPLUG
	 * or CONFIG_DEFERRED_STRUCT_PAGE_INIT.
	 *
	 * Nests above zone->lock and zone->span_seqlock
	 */
	spinlock_t node_size_lock;
#endif
	unsigned long node_start_pfn;
	unsigned long node_present_pages; /* total number of physical pages */
	unsigned long node_spanned_pages; /* total size of physical page
					     range, including holes */
	int node_id;
	wait_queue_head_t kswapd_wait;
	wait_queue_head_t pfmemalloc_wait;

	/* workqueues for throttling reclaim for different reasons. */
	wait_queue_head_t reclaim_wait[NR_VMSCAN_THROTTLE];

	atomic_t nr_writeback_throttled;/* nr of writeback-throttled tasks */
	unsigned long nr_reclaim_start;	/* nr pages written while throttled
					 * when throttling started. */
#ifdef CONFIG_MEMORY_HOTPLUG
	struct mutex kswapd_lock;
#endif
	struct task_struct *kswapd;	/* Protected by kswapd_lock */
	int kswapd_order;
	enum zone_type kswapd_highest_zoneidx;

	int kswapd_failures;		/* Number of 'reclaimed == 0' runs */

#ifdef CONFIG_COMPACTION
	int kcompactd_max_order;
	enum zone_type kcompactd_highest_zoneidx;
	wait_queue_head_t kcompactd_wait;
	struct task_struct *kcompactd;
	bool proactive_compact_trigger;
#endif
	/*
	 * This is a per-node reserve of pages that are not available
	 * to userspace allocations.
	 */
	unsigned long		totalreserve_pages;

#ifdef CONFIG_NUMA
	/*
	 * node reclaim becomes active if more unmapped pages exist.
	 */
	unsigned long		min_unmapped_pages;
	unsigned long		min_slab_pages;
#endif /* CONFIG_NUMA */

	/* Write-intensive fields used by page reclaim */
	CACHELINE_PADDING(_pad1_);

#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
	/*
	 * If memory initialisation on large machines is deferred then this
	 * is the first PFN that needs to be initialised.
	 */
	unsigned long first_deferred_pfn;
#endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	struct deferred_split deferred_split_queue;
#endif

#ifdef CONFIG_NUMA_BALANCING
	/* start time in ms of current promote rate limit period */
	unsigned int nbp_rl_start;
	/* number of promote candidate pages at start time of current rate limit period */
	unsigned long nbp_rl_nr_cand;
	/* promote threshold in ms */
	unsigned int nbp_threshold;
	/* start time in ms of current promote threshold adjustment period */
	unsigned int nbp_th_start;
	/*
	 * number of promote candidate pages at start time of current promote
	 * threshold adjustment period
	 */
	unsigned long nbp_th_nr_cand;
#endif
	/* Fields commonly accessed by the page reclaim scanner */

	/*
	 * NOTE: THIS IS UNUSED IF MEMCG IS ENABLED.
	 *
	 * Use mem_cgroup_lruvec() to look up lruvecs.
	 */
	struct lruvec		__lruvec;

	unsigned long		flags;

#ifdef CONFIG_LRU_GEN
	/* kswap mm walk data */
	struct lru_gen_mm_walk mm_walk;
	/* lru_gen_folio list */
	struct lru_gen_memcg memcg_lru;
#endif

	CACHELINE_PADDING(_pad2_);

	/* Per-node vmstats */
	struct per_cpu_nodestat __percpu *per_cpu_nodestats;
	atomic_long_t		vm_stat[NR_VM_NODE_STAT_ITEMS];
#ifdef CONFIG_NUMA
	struct memory_tier __rcu *memtier;
#endif
#ifdef CONFIG_MEMORY_FAILURE
	struct memory_failure_stats mf_stats;
#endif

	ANDROID_KABI_RESERVE(1);
	ANDROID_BACKPORT_RESERVE(1);
	ANDROID_OEM_DATA(1);
} pg_data_t;
1497
1498 #define node_present_pages(nid) (NODE_DATA(nid)->node_present_pages)
1499 #define node_spanned_pages(nid) (NODE_DATA(nid)->node_spanned_pages)
1500
1501 #define node_start_pfn(nid) (NODE_DATA(nid)->node_start_pfn)
1502 #define node_end_pfn(nid) pgdat_end_pfn(NODE_DATA(nid))
1503
pgdat_end_pfn(pg_data_t * pgdat)1504 static inline unsigned long pgdat_end_pfn(pg_data_t *pgdat)
1505 {
1506 return pgdat->node_start_pfn + pgdat->node_spanned_pages;
1507 }

#include <linux/memory_hotplug.h>

void build_all_zonelists(pg_data_t *pgdat);
void wakeup_kswapd(struct zone *zone, gfp_t gfp_mask, int order,
		   enum zone_type highest_zoneidx);
bool __zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark,
			 int highest_zoneidx, unsigned int alloc_flags,
			 long free_pages);
bool zone_watermark_ok(struct zone *z, unsigned int order,
		unsigned long mark, int highest_zoneidx,
		unsigned int alloc_flags);
bool zone_watermark_ok_safe(struct zone *z, unsigned int order,
		unsigned long mark, int highest_zoneidx);
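
/*
 * Sketch of a typical caller: check a watermark and, if it cannot be
 * met, wake kswapd for the affected zone (GFP_KERNEL and alloc_flags
 * == 0 are assumptions of this example):
 *
 *	if (!zone_watermark_ok(zone, order, low_wmark_pages(zone),
 *			       gfp_zone(GFP_KERNEL), 0))
 *		wakeup_kswapd(zone, GFP_KERNEL, order,
 *			      gfp_zone(GFP_KERNEL));
 */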
/*
 * Memory initialization context, used to differentiate memory added by
 * the platform statically or via the memory hotplug interface.
 */
enum meminit_context {
	MEMINIT_EARLY,
	MEMINIT_HOTPLUG,
};

extern void init_currently_empty_zone(struct zone *zone, unsigned long start_pfn,
				     unsigned long size);

extern void lruvec_init(struct lruvec *lruvec);

static inline struct pglist_data *lruvec_pgdat(struct lruvec *lruvec)
{
#ifdef CONFIG_MEMCG
	return lruvec->pgdat;
#else
	return container_of(lruvec, struct pglist_data, __lruvec);
#endif
}

#ifdef CONFIG_HAVE_MEMORYLESS_NODES
int local_memory_node(int node_id);
#else
static inline int local_memory_node(int node_id) { return node_id; }
#endif

/*
 * zone_idx() returns 0 for the ZONE_DMA zone, 1 for the ZONE_NORMAL zone, etc.
 */
#define zone_idx(zone)		((zone) - (zone)->zone_pgdat->node_zones)

#ifdef CONFIG_ZONE_DEVICE
static inline bool zone_is_zone_device(struct zone *zone)
{
	return zone_idx(zone) == ZONE_DEVICE;
}
#else
static inline bool zone_is_zone_device(struct zone *zone)
{
	return false;
}
#endif

/*
 * Returns true if a zone has pages managed by the buddy allocator.
 * All the reclaim decisions have to use this function rather than
 * populated_zone(). If the whole zone is reserved then we can easily
 * end up with populated_zone() && !managed_zone().
 */
static inline bool managed_zone(struct zone *zone)
{
	return zone_managed_pages(zone);
}

/* Returns true if a zone has memory */
static inline bool populated_zone(struct zone *zone)
{
	return zone->present_pages;
}
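
/*
 * Example: a zone whose pages are all reserved is populated but not
 * managed, so a reclaim-style walk skips it (sketch):
 *
 *	for_each_zone(zone) {
 *		if (!managed_zone(zone))
 *			continue;
 *		... reclaim decisions go here ...
 *	}
 */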

#ifdef CONFIG_NUMA
static inline int zone_to_nid(struct zone *zone)
{
	return zone->node;
}

static inline void zone_set_nid(struct zone *zone, int nid)
{
	zone->node = nid;
}
#else
static inline int zone_to_nid(struct zone *zone)
{
	return 0;
}

static inline void zone_set_nid(struct zone *zone, int nid) {}
#endif

extern int movable_zone;

static inline int is_highmem_idx(enum zone_type idx)
{
#ifdef CONFIG_HIGHMEM
	return (idx == ZONE_HIGHMEM ||
		(idx == ZONE_MOVABLE && movable_zone == ZONE_HIGHMEM));
#else
	return 0;
#endif
}

/**
 * is_highmem - helper function to quickly check if a struct zone is a
 *              highmem zone or not.  This is an attempt to keep references
 *              to ZONE_{DMA/NORMAL/HIGHMEM/etc} in general code to a minimum.
 * @zone: pointer to struct zone variable
 * Return: 1 for a highmem zone, 0 otherwise
 */
static inline int is_highmem(struct zone *zone)
{
	return is_highmem_idx(zone_idx(zone));
}

#ifdef CONFIG_ZONE_DMA
bool has_managed_dma(void);
#else
static inline bool has_managed_dma(void)
{
	return false;
}
#endif

#ifndef CONFIG_NUMA

extern struct pglist_data contig_page_data;
static inline struct pglist_data *NODE_DATA(int nid)
{
	return &contig_page_data;
}

#else /* CONFIG_NUMA */

#include <asm/mmzone.h>

#endif /* !CONFIG_NUMA */

extern struct pglist_data *first_online_pgdat(void);
extern struct pglist_data *next_online_pgdat(struct pglist_data *pgdat);
extern struct zone *next_zone(struct zone *zone);
extern int isolate_anon_lru_page(struct page *page);

#ifdef CONFIG_COMPACTION
extern unsigned long isolate_and_split_free_page(struct page *page,
				struct list_head *list);
#else
static inline unsigned long isolate_and_split_free_page(struct page *page,
				struct list_head *list)
{
	return 0;
}
#endif /* CONFIG_COMPACTION */

/**
 * for_each_online_pgdat - helper macro to iterate over all online nodes
 * @pgdat: pointer to a pg_data_t variable
 */
#define for_each_online_pgdat(pgdat)			\
	for (pgdat = first_online_pgdat();		\
	     pgdat;					\
	     pgdat = next_online_pgdat(pgdat))
/**
 * for_each_zone - helper macro to iterate over all memory zones
 * @zone: pointer to struct zone variable
 *
 * The user only needs to declare the zone variable, for_each_zone
 * fills it in.
 */
#define for_each_zone(zone)				\
	for (zone = (first_online_pgdat())->node_zones; \
	     zone;					\
	     zone = next_zone(zone))

#define for_each_populated_zone(zone)			\
	for (zone = (first_online_pgdat())->node_zones; \
	     zone;					\
	     zone = next_zone(zone))			\
		if (!populated_zone(zone))		\
			; /* do nothing */		\
		else
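
/*
 * Example: visiting only zones that actually have memory (sketch):
 *
 *	struct zone *zone;
 *
 *	for_each_populated_zone(zone)
 *		pr_info("%s: %lu managed pages\n", zone->name,
 *			zone_managed_pages(zone));
 */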

static inline struct zone *zonelist_zone(struct zoneref *zoneref)
{
	return zoneref->zone;
}

static inline int zonelist_zone_idx(struct zoneref *zoneref)
{
	return zoneref->zone_idx;
}

static inline int zonelist_node_idx(struct zoneref *zoneref)
{
	return zone_to_nid(zoneref->zone);
}

struct zoneref *__next_zones_zonelist(struct zoneref *z,
					enum zone_type highest_zoneidx,
					nodemask_t *nodes);

/**
 * next_zones_zonelist - Returns the next zone at or below highest_zoneidx within the allowed nodemask using a cursor within a zonelist as a starting point
 * @z: The cursor used as a starting point for the search
 * @highest_zoneidx: The zone index of the highest zone to return
 * @nodes: An optional nodemask to filter the zonelist with
 *
 * This function returns the next zone at or below a given zone index that is
 * within the allowed nodemask using a cursor as the starting point for the
 * search. The zoneref returned is a cursor that represents the current zone
 * being examined. It should be advanced by one before calling
 * next_zones_zonelist again.
 *
 * Return: the next zone at or below highest_zoneidx within the allowed
 * nodemask using a cursor within a zonelist as a starting point
 */
static __always_inline struct zoneref *next_zones_zonelist(struct zoneref *z,
					enum zone_type highest_zoneidx,
					nodemask_t *nodes)
{
	if (likely(!nodes && zonelist_zone_idx(z) <= highest_zoneidx))
		return z;
	return __next_zones_zonelist(z, highest_zoneidx, nodes);
}

/**
 * first_zones_zonelist - Returns the first zone at or below highest_zoneidx within the allowed nodemask in a zonelist
 * @zonelist: The zonelist to search for a suitable zone
 * @highest_zoneidx: The zone index of the highest zone to return
 * @nodes: An optional nodemask to filter the zonelist with
 *
 * This function returns the first zone at or below a given zone index that is
 * within the allowed nodemask. The zoneref returned is a cursor that can be
 * used to iterate the zonelist with next_zones_zonelist by advancing it by
 * one before calling.
 *
 * When no eligible zone is found, zoneref->zone is NULL (zoneref itself is
 * never NULL). This may happen either genuinely, or due to a concurrent
 * cpuset modification updating the nodemask.
 *
 * Return: Zoneref pointer for the first suitable zone found
 */
static inline struct zoneref *first_zones_zonelist(struct zonelist *zonelist,
					enum zone_type highest_zoneidx,
					nodemask_t *nodes)
{
	return next_zones_zonelist(zonelist->_zonerefs,
					highest_zoneidx, nodes);
}

/**
 * for_each_zone_zonelist_nodemask - helper macro to iterate over valid zones in a zonelist at or below a given zone index and within a nodemask
 * @zone: The current zone in the iterator
 * @z: The current pointer within zonelist->_zonerefs being iterated
 * @zlist: The zonelist being iterated
 * @highidx: The zone index of the highest zone to return
 * @nodemask: Nodemask allowed by the allocator
 *
 * This iterator iterates through all zones at or below a given zone index and
 * within a given nodemask
 */
#define for_each_zone_zonelist_nodemask(zone, z, zlist, highidx, nodemask) \
	for (z = first_zones_zonelist(zlist, highidx, nodemask), zone = zonelist_zone(z);	\
		zone;							\
		z = next_zones_zonelist(++z, highidx, nodemask),	\
			zone = zonelist_zone(z))

#define for_next_zone_zonelist_nodemask(zone, z, highidx, nodemask) \
	for (zone = zonelist_zone(z);					\
		zone;							\
		z = next_zones_zonelist(++z, highidx, nodemask),	\
			zone = zonelist_zone(z))

/**
 * for_each_zone_zonelist - helper macro to iterate over valid zones in a zonelist at or below a given zone index
 * @zone: The current zone in the iterator
 * @z: The current pointer within zonelist->zones being iterated
 * @zlist: The zonelist being iterated
 * @highidx: The zone index of the highest zone to return
 *
 * This iterator iterates through all zones at or below a given zone index.
 */
#define for_each_zone_zonelist(zone, z, zlist, highidx) \
	for_each_zone_zonelist_nodemask(zone, z, zlist, highidx, NULL)
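
/*
 * Example: an allocator-style scan for the first usable zone at or
 * below the index allowed by the gfp mask (sketch; the zonelist is
 * assumed to come from a node's node_zonelists[]):
 *
 *	struct zoneref *z;
 *	struct zone *zone;
 *
 *	for_each_zone_zonelist(zone, z, zonelist, gfp_zone(GFP_KERNEL))
 *		if (managed_zone(zone))
 *			break;
 */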

/* Whether the 'nodes' are all movable nodes */
static inline bool movable_only_nodes(nodemask_t *nodes)
{
	struct zonelist *zonelist;
	struct zoneref *z;
	int nid;

	if (nodes_empty(*nodes))
		return false;

	/*
	 * We can choose an arbitrary node from the nodemask to get a
	 * zonelist as they are interlinked. We just need to find
	 * at least one zone that can satisfy kernel allocations.
	 */
	nid = first_node(*nodes);
	zonelist = &NODE_DATA(nid)->node_zonelists[ZONELIST_FALLBACK];
	z = first_zones_zonelist(zonelist, ZONE_NORMAL, nodes);
	return !zonelist_zone(z);
}

#ifdef CONFIG_SPARSEMEM
#include <asm/sparsemem.h>
#endif

#ifdef CONFIG_FLATMEM
#define pfn_to_nid(pfn)		(0)
#endif

#ifdef CONFIG_SPARSEMEM

/*
 * PA_SECTION_SHIFT		physical address to/from section number
 * PFN_SECTION_SHIFT		pfn to/from section number
 */
#define PA_SECTION_SHIFT	(SECTION_SIZE_BITS)
#define PFN_SECTION_SHIFT	(SECTION_SIZE_BITS - PAGE_SHIFT)

#define NR_MEM_SECTIONS		(1UL << SECTIONS_SHIFT)

#define PAGES_PER_SECTION	(1UL << PFN_SECTION_SHIFT)
#define PAGE_SECTION_MASK	(~(PAGES_PER_SECTION-1))

#define SECTION_BLOCKFLAGS_BITS \
	((1UL << (PFN_SECTION_SHIFT - pageblock_order)) * NR_PAGEBLOCK_BITS)

#if (MAX_PAGE_ORDER + PAGE_SHIFT) > SECTION_SIZE_BITS
#error Allocator MAX_PAGE_ORDER exceeds SECTION_SIZE
#endif

static inline unsigned long pfn_to_section_nr(unsigned long pfn)
{
	return pfn >> PFN_SECTION_SHIFT;
}
static inline unsigned long section_nr_to_pfn(unsigned long sec)
{
	return sec << PFN_SECTION_SHIFT;
}
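
/*
 * Worked example, assuming SECTION_SIZE_BITS == 27 and PAGE_SHIFT == 12
 * (4K pages): PFN_SECTION_SHIFT == 15, a section covers 2^15 == 32768
 * pages == 128M, and pfn_to_section_nr(0x12345) == 0x12345 >> 15 == 2.
 */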

#define SECTION_ALIGN_UP(pfn)	(((pfn) + PAGES_PER_SECTION - 1) & PAGE_SECTION_MASK)
#define SECTION_ALIGN_DOWN(pfn)	((pfn) & PAGE_SECTION_MASK)

#define SUBSECTION_SHIFT 21
#define SUBSECTION_SIZE (1UL << SUBSECTION_SHIFT)

#define PFN_SUBSECTION_SHIFT (SUBSECTION_SHIFT - PAGE_SHIFT)
#define PAGES_PER_SUBSECTION (1UL << PFN_SUBSECTION_SHIFT)
#define PAGE_SUBSECTION_MASK (~(PAGES_PER_SUBSECTION-1))

#if SUBSECTION_SHIFT > SECTION_SIZE_BITS
#error Subsection size exceeds section size
#else
#define SUBSECTIONS_PER_SECTION (1UL << (SECTION_SIZE_BITS - SUBSECTION_SHIFT))
#endif

#define SUBSECTION_ALIGN_UP(pfn) ALIGN((pfn), PAGES_PER_SUBSECTION)
#define SUBSECTION_ALIGN_DOWN(pfn) ((pfn) & PAGE_SUBSECTION_MASK)
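
/*
 * With SUBSECTION_SHIFT == 21 a subsection spans 2M; assuming 4K pages
 * that is PAGES_PER_SUBSECTION == 512, and with SECTION_SIZE_BITS == 27
 * each section holds SUBSECTIONS_PER_SECTION == 64 subsections.
 */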

struct mem_section_usage {
	struct rcu_head rcu;
#ifdef CONFIG_SPARSEMEM_VMEMMAP
	DECLARE_BITMAP(subsection_map, SUBSECTIONS_PER_SECTION);
#endif
	/* See declaration of similar field in struct zone */
	unsigned long pageblock_flags[0];
};

void subsection_map_init(unsigned long pfn, unsigned long nr_pages);

struct page;
struct page_ext;
struct mem_section {
	/*
	 * This is, logically, a pointer to an array of struct
	 * pages.  However, it is stored with some other magic.
	 * (see sparse.c::sparse_init_one_section())
	 *
	 * Additionally during early boot we encode node id of
	 * the location of the section here to guide allocation.
	 * (see sparse.c::memory_present())
	 *
	 * Making it a UL at least makes someone do a cast
	 * before using it wrong.
	 */
	unsigned long section_mem_map;

	struct mem_section_usage *usage;
#ifdef CONFIG_PAGE_EXTENSION
	/*
	 * If SPARSEMEM, pgdat doesn't have page_ext pointer. We use
	 * section. (see page_ext.h about this.)
	 */
	struct page_ext *page_ext;
	unsigned long pad;
#endif
	/*
	 * WARNING: mem_section must be a power-of-2 in size for the
	 * calculation and use of SECTION_ROOT_MASK to make sense.
	 */
};

#ifdef CONFIG_SPARSEMEM_EXTREME
#define SECTIONS_PER_ROOT	(PAGE_SIZE / sizeof (struct mem_section))
#else
#define SECTIONS_PER_ROOT	1
#endif

#define SECTION_NR_TO_ROOT(sec)	((sec) / SECTIONS_PER_ROOT)
#define NR_SECTION_ROOTS	DIV_ROUND_UP(NR_MEM_SECTIONS, SECTIONS_PER_ROOT)
#define SECTION_ROOT_MASK	(SECTIONS_PER_ROOT - 1)

#ifdef CONFIG_SPARSEMEM_EXTREME
extern struct mem_section **mem_section;
#else
extern struct mem_section mem_section[NR_SECTION_ROOTS][SECTIONS_PER_ROOT];
#endif

static inline unsigned long *section_to_usemap(struct mem_section *ms)
{
	return ms->usage->pageblock_flags;
}

static inline struct mem_section *__nr_to_section(unsigned long nr)
{
	unsigned long root = SECTION_NR_TO_ROOT(nr);

	if (unlikely(root >= NR_SECTION_ROOTS))
		return NULL;

#ifdef CONFIG_SPARSEMEM_EXTREME
	if (!mem_section || !mem_section[root])
		return NULL;
#endif
	return &mem_section[root][nr & SECTION_ROOT_MASK];
}
extern size_t mem_section_usage_size(void);

/*
 * We use the lower bits of the mem_map pointer to store
 * a little bit of information.  The pointer is calculated
 * as mem_map - section_nr_to_pfn(pnum).  The result is
 * aligned to the minimum alignment of the two values:
 *   1. All mem_map arrays are page-aligned.
 *   2. section_nr_to_pfn() always clears PFN_SECTION_SHIFT
 *      lowest bits.  PFN_SECTION_SHIFT is arch-specific
 *      (equals SECTION_SIZE_BITS - PAGE_SHIFT), and the
 *      worst combination is powerpc with 256k pages,
 *      which results in PFN_SECTION_SHIFT equal 6.
 * To sum it up, at least 6 bits are available on all architectures.
 * Architectures other than powerpc may have more bits to spare
 * (e.g. 15 bits on x86_64, and 13 bits in the worst case of 64K
 * pages on arm64), so a bit beyond the first 6 may be used as long
 * as it is never needed on powerpc.
 */
enum {
	SECTION_MARKED_PRESENT_BIT,
	SECTION_HAS_MEM_MAP_BIT,
	SECTION_IS_ONLINE_BIT,
	SECTION_IS_EARLY_BIT,
#ifdef CONFIG_ZONE_DEVICE
	SECTION_TAINT_ZONE_DEVICE_BIT,
#endif
	SECTION_MAP_LAST_BIT,
};

#define SECTION_MARKED_PRESENT		BIT(SECTION_MARKED_PRESENT_BIT)
#define SECTION_HAS_MEM_MAP		BIT(SECTION_HAS_MEM_MAP_BIT)
#define SECTION_IS_ONLINE		BIT(SECTION_IS_ONLINE_BIT)
#define SECTION_IS_EARLY		BIT(SECTION_IS_EARLY_BIT)
#ifdef CONFIG_ZONE_DEVICE
#define SECTION_TAINT_ZONE_DEVICE	BIT(SECTION_TAINT_ZONE_DEVICE_BIT)
#endif
#define SECTION_MAP_MASK		(~(BIT(SECTION_MAP_LAST_BIT) - 1))
#define SECTION_NID_SHIFT		SECTION_MAP_LAST_BIT
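
/*
 * During early boot the node id is stashed above the flag bits (see
 * the struct mem_section comment); sketch of the encoding and of the
 * decode used until the real mem_map pointer is installed:
 *
 *	section->section_mem_map = (unsigned long)nid << SECTION_NID_SHIFT;
 *	...
 *	nid = section->section_mem_map >> SECTION_NID_SHIFT;
 */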

static inline struct page *__section_mem_map_addr(struct mem_section *section)
{
	unsigned long map = section->section_mem_map;
	map &= SECTION_MAP_MASK;
	return (struct page *)map;
}
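
/*
 * Because the stored value is mem_map - section_nr_to_pfn(pnum), the
 * struct page for a valid pfn falls out directly; this is how the
 * sparsemem __pfn_to_page() is built:
 *
 *	struct page *page =
 *		__section_mem_map_addr(__pfn_to_section(pfn)) + pfn;
 */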

static inline int present_section(struct mem_section *section)
{
	return (section && (section->section_mem_map & SECTION_MARKED_PRESENT));
}

static inline int present_section_nr(unsigned long nr)
{
	return present_section(__nr_to_section(nr));
}

static inline int valid_section(struct mem_section *section)
{
	return (section && (section->section_mem_map & SECTION_HAS_MEM_MAP));
}

static inline int early_section(struct mem_section *section)
{
	return (section && (section->section_mem_map & SECTION_IS_EARLY));
}

static inline int valid_section_nr(unsigned long nr)
{
	return valid_section(__nr_to_section(nr));
}

static inline int online_section(struct mem_section *section)
{
	return (section && (section->section_mem_map & SECTION_IS_ONLINE));
}

#ifdef CONFIG_ZONE_DEVICE
static inline int online_device_section(struct mem_section *section)
{
	unsigned long flags = SECTION_IS_ONLINE | SECTION_TAINT_ZONE_DEVICE;

	return section && ((section->section_mem_map & flags) == flags);
}
#else
static inline int online_device_section(struct mem_section *section)
{
	return 0;
}
#endif

static inline int online_section_nr(unsigned long nr)
{
	return online_section(__nr_to_section(nr));
}

#ifdef CONFIG_MEMORY_HOTPLUG
void online_mem_sections(unsigned long start_pfn, unsigned long end_pfn);
void offline_mem_sections(unsigned long start_pfn, unsigned long end_pfn);
#endif

static inline struct mem_section *__pfn_to_section(unsigned long pfn)
{
	return __nr_to_section(pfn_to_section_nr(pfn));
}

extern unsigned long __highest_present_section_nr;

static inline int subsection_map_index(unsigned long pfn)
{
	return (pfn & ~(PAGE_SECTION_MASK)) / PAGES_PER_SUBSECTION;
}

#ifdef CONFIG_SPARSEMEM_VMEMMAP
static inline int pfn_section_valid(struct mem_section *ms, unsigned long pfn)
{
	int idx = subsection_map_index(pfn);
	struct mem_section_usage *usage = READ_ONCE(ms->usage);

	return usage ? test_bit(idx, usage->subsection_map) : 0;
}
#else
static inline int pfn_section_valid(struct mem_section *ms, unsigned long pfn)
{
	return 1;
}
#endif

#ifndef CONFIG_HAVE_ARCH_PFN_VALID
/**
 * pfn_valid - check if there is a valid memory map entry for a PFN
 * @pfn: the page frame number to check
 *
 * Check if there is a valid memory map entry aka struct page for the @pfn.
 * Note, that availability of the memory map entry does not imply that
 * there is actual usable memory at that @pfn. The struct page may
 * represent a hole or an unusable page frame.
 *
 * Return: 1 for PFNs that have memory map entries and 0 otherwise
 */
static inline int pfn_valid(unsigned long pfn)
{
	struct mem_section *ms;
	int ret;

	/*
	 * Ensure the upper PAGE_SHIFT bits are clear in the
	 * pfn. Else it might lead to false positives when
	 * some of the upper bits are set, but the lower bits
	 * match a valid pfn.
	 */
	if (PHYS_PFN(PFN_PHYS(pfn)) != pfn)
		return 0;

	if (pfn_to_section_nr(pfn) >= NR_MEM_SECTIONS)
		return 0;
	ms = __pfn_to_section(pfn);
	rcu_read_lock_sched();
	if (!valid_section(ms)) {
		rcu_read_unlock_sched();
		return 0;
	}
	/*
	 * Traditionally early sections always returned pfn_valid() for
	 * the entire section-sized span.
	 */
	ret = early_section(ms) || pfn_section_valid(ms, pfn);
	rcu_read_unlock_sched();

	return ret;
}
#endif
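
/*
 * Example: any pfn_to_page() on an arbitrary pfn must be gated by
 * pfn_valid() (sketch):
 *
 *	if (!pfn_valid(pfn))
 *		return -ENXIO;
 *	page = pfn_to_page(pfn);
 */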

static inline int pfn_in_present_section(unsigned long pfn)
{
	if (pfn_to_section_nr(pfn) >= NR_MEM_SECTIONS)
		return 0;
	return present_section(__pfn_to_section(pfn));
}

static inline unsigned long next_present_section_nr(unsigned long section_nr)
{
	while (++section_nr <= __highest_present_section_nr) {
		if (present_section_nr(section_nr))
			return section_nr;
	}

	return -1;
}
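
/*
 * Example: walking all present sections, mirroring the
 * for_each_present_section_nr() pattern in mm/sparse.c (sketch):
 *
 *	unsigned long nr;
 *
 *	for (nr = next_present_section_nr(-1); nr != -1UL;
 *	     nr = next_present_section_nr(nr))
 *		... __nr_to_section(nr) is present here ...
 */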

/*
 * These are _only_ used during initialisation, therefore they
 * can use __initdata ...  They could have names to indicate
 * this restriction.
 */
#ifdef CONFIG_NUMA
#define pfn_to_nid(pfn)							\
({									\
	unsigned long __pfn_to_nid_pfn = (pfn);				\
	page_to_nid(pfn_to_page(__pfn_to_nid_pfn));			\
})
#else
#define pfn_to_nid(pfn)		(0)
#endif

void sparse_init(void);
#else
#define sparse_init()	do {} while (0)
#define sparse_index_init(_sec, _nid)	do {} while (0)
#define pfn_in_present_section pfn_valid
#define subsection_map_init(_pfn, _nr_pages) do {} while (0)
#endif /* CONFIG_SPARSEMEM */

#endif /* !__GENERATING_BOUNDS_H */
#endif /* !__ASSEMBLY__ */
#endif /* _LINUX_MMZONE_H */