/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SWAP_H
#define _LINUX_SWAP_H

#include <linux/spinlock.h>
#include <linux/linkage.h>
#include <linux/mmzone.h>
#include <linux/list.h>
#include <linux/memcontrol.h>
#include <linux/sched.h>
#include <linux/node.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/atomic.h>
#include <linux/page-flags.h>
#include <uapi/linux/mempolicy.h>
#include <asm/page.h>

struct notifier_block;

struct bio;

struct pagevec;

#define SWAP_FLAG_PREFER	0x8000	/* set if swap priority specified */
#define SWAP_FLAG_PRIO_MASK	0x7fff
#define SWAP_FLAG_PRIO_SHIFT	0
#define SWAP_FLAG_DISCARD	0x10000 /* enable discard for swap */
#define SWAP_FLAG_DISCARD_ONCE	0x20000 /* discard swap area at swapon-time */
#define SWAP_FLAG_DISCARD_PAGES 0x40000 /* discard page-clusters after use */

#define SWAP_FLAGS_VALID	(SWAP_FLAG_PRIO_MASK | SWAP_FLAG_PREFER | \
				 SWAP_FLAG_DISCARD | SWAP_FLAG_DISCARD_ONCE | \
				 SWAP_FLAG_DISCARD_PAGES)
#define SWAP_BATCH 64

static inline int current_is_kswapd(void)
{
	return current->flags & PF_KSWAPD;
}

/*
 * MAX_SWAPFILES defines the maximum number of swap types: things which can
 * be swapped to.  The swap type and the offset into that swap type are
 * encoded into pte's and into pgoff_t's in the swapcache.  Using five bits
 * for the type means that the maximum number of swapcache pages is 27 bits
 * on 32-bit-pgoff_t architectures.  And that assumes that the architecture
 * packs the type/offset into the pte as 5/27 as well.
 */
#define MAX_SWAPFILES_SHIFT	5
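
/*
 * Illustrative example (not part of this header): the generic helpers in
 * <linux/swapops.h> pack and unpack entries along these lines:
 *
 *	swp_entry_t entry = swp_entry(type, offset);
 *	unsigned int type = swp_type(entry);
 *	pgoff_t offset = swp_offset(entry);
 *
 * swp_type() returns the topmost MAX_SWAPFILES_SHIFT bits and swp_offset()
 * the rest; with 5 type bits and a 32-bit pgoff_t, 27 bits remain for the
 * offset, which is where the 2^27 swapcache-page limit above comes from.
 */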

/*
 * Use some of the swap file numbers for other purposes. This
 * is a convenient way to hook into the VM to trigger special
 * actions on faults.
 */

/*
 * PTE markers are used to persist information onto PTEs that otherwise
 * should be a none pte.  As its name "PTE" hints, it should only be
 * applied to the leaves of pgtables.
 */
#define SWP_PTE_MARKER_NUM 1
#define SWP_PTE_MARKER     (MAX_SWAPFILES + SWP_HWPOISON_NUM + \
			    SWP_MIGRATION_NUM + SWP_DEVICE_NUM)

/*
 * Unaddressable device memory support. See include/linux/hmm.h and
 * Documentation/mm/hmm.rst. Short description is we need struct pages for
 * device memory that is unaddressable (inaccessible) by CPU, so that we can
 * migrate part of a process memory to device memory.
 *
 * When a page is migrated from CPU to device, we set the CPU page table entry
 * to a special SWP_DEVICE_{READ|WRITE} entry.
 *
 * When a page is mapped by the device for exclusive access we set the CPU page
 * table entries to special SWP_DEVICE_EXCLUSIVE_* entries.
 */
#ifdef CONFIG_DEVICE_PRIVATE
#define SWP_DEVICE_NUM 4
#define SWP_DEVICE_WRITE (MAX_SWAPFILES+SWP_HWPOISON_NUM+SWP_MIGRATION_NUM)
#define SWP_DEVICE_READ (MAX_SWAPFILES+SWP_HWPOISON_NUM+SWP_MIGRATION_NUM+1)
#define SWP_DEVICE_EXCLUSIVE_WRITE (MAX_SWAPFILES+SWP_HWPOISON_NUM+SWP_MIGRATION_NUM+2)
#define SWP_DEVICE_EXCLUSIVE_READ (MAX_SWAPFILES+SWP_HWPOISON_NUM+SWP_MIGRATION_NUM+3)
#else
#define SWP_DEVICE_NUM 0
#endif

/*
 * Page migration support.
 *
 * SWP_MIGRATION_READ_EXCLUSIVE is only applicable to anonymous pages and
 * indicates that the referenced (part of an) anonymous page is exclusive
 * to a single process. For SWP_MIGRATION_WRITE, that information is
 * implicit: (parts of) anonymous pages that are mapped writable are
 * exclusive to a single process.
 */
#ifdef CONFIG_MIGRATION
#define SWP_MIGRATION_NUM 3
#define SWP_MIGRATION_READ (MAX_SWAPFILES + SWP_HWPOISON_NUM)
#define SWP_MIGRATION_READ_EXCLUSIVE (MAX_SWAPFILES + SWP_HWPOISON_NUM + 1)
#define SWP_MIGRATION_WRITE (MAX_SWAPFILES + SWP_HWPOISON_NUM + 2)
#else
#define SWP_MIGRATION_NUM 0
#endif

/*
 * Handling of hardware poisoned pages with memory corruption.
 */
#ifdef CONFIG_MEMORY_FAILURE
#define SWP_HWPOISON_NUM 1
#define SWP_HWPOISON		MAX_SWAPFILES
#else
#define SWP_HWPOISON_NUM 0
#endif

#define MAX_SWAPFILES \
	((1 << MAX_SWAPFILES_SHIFT) - SWP_DEVICE_NUM - \
	SWP_MIGRATION_NUM - SWP_HWPOISON_NUM - \
	SWP_PTE_MARKER_NUM)

/*
 * Magic header for a swap area. The first part of the union is
 * what the swap magic looks like for the old (limited to 128MB)
 * swap area format, the second part of the union adds - in the
 * old reserved area - some extra information. Note that the first
 * kilobyte is reserved for boot loader or disk label stuff...
 *
 * Having the magic at the end of the PAGE_SIZE makes detecting swap
 * areas somewhat tricky on machines that support multiple page sizes.
 * For 2.5 we'll probably want to move the magic to just beyond the
 * bootbits...
 */
union swap_header {
	struct {
		char reserved[PAGE_SIZE - 10];
		char magic[10];			/* SWAP-SPACE or SWAPSPACE2 */
	} magic;
	struct {
		char		bootbits[1024];	/* Space for disklabel etc. */
		__u32		version;
		__u32		last_page;
		__u32		nr_badpages;
		unsigned char	sws_uuid[16];
		unsigned char	sws_volume[16];
		__u32		padding[117];
		__u32		badpages[1];
	} info;
};
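
/*
 * Illustrative sketch, mirroring (as an assumption) the swapon-time check
 * in mm/swapfile.c: a candidate area is validated by the signature mkswap
 * wrote into the last ten bytes of the first page:
 *
 *	union swap_header *swap_header = kmap_local_page(page);
 *
 *	if (memcmp("SWAPSPACE2", swap_header->magic.magic, 10))
 *		return 0;
 *
 * On a signature mismatch the area is rejected; otherwise the info view
 * of the union is used to read version, last_page and the bad-page list
 * out of the old reserved area.
 */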

/*
 * current->reclaim_state points to one of these when a task is running
 * memory reclaim
 */
struct reclaim_state {
	/* pages reclaimed outside of LRU-based reclaim */
	unsigned long reclaimed;
#ifdef CONFIG_LRU_GEN
	/* per-thread mm walk data */
	struct lru_gen_mm_walk *mm_walk;
#endif
};

/*
 * mm_account_reclaimed_pages(): account reclaimed pages outside of LRU-based
 * reclaim
 * @pages: number of pages reclaimed
 *
 * If the current process is undergoing a reclaim operation, increment the
 * number of reclaimed pages by @pages.
 */
static inline void mm_account_reclaimed_pages(unsigned long pages)
{
	if (current->reclaim_state)
		current->reclaim_state->reclaimed += pages;
}
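
/*
 * Usage sketch (illustrative): a shrinker or driver that frees pages
 * outside the LRU path can credit them to the ongoing reclaim pass:
 *
 *	free_pages((unsigned long)addr, order);
 *	mm_account_reclaimed_pages(1UL << order);
 *
 * The accounting is a no-op unless current->reclaim_state is set, i.e.
 * unless the caller runs inside direct reclaim or kswapd.
 */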

#ifdef __KERNEL__

struct address_space;
struct sysinfo;
struct writeback_control;
struct zone;

/*
 * A swap extent maps a range of a swapfile's PAGE_SIZE pages onto a range of
 * disk blocks.  An rbtree of swap extents maps the entire swapfile (where the
 * term `swapfile' refers to either a blockdevice or an IS_REG file). Apart
 * from setup, they're handled identically.
 *
 * We always assume that blocks are of size PAGE_SIZE.
 */
struct swap_extent {
	struct rb_node rb_node;
	pgoff_t start_page;
	pgoff_t nr_pages;
	sector_t start_block;
};

/*
 * Max bad pages in the new format..
 */
#define MAX_SWAP_BADPAGES \
	((offsetof(union swap_header, magic.magic) - \
	  offsetof(union swap_header, info.badpages)) / sizeof(int))
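
/*
 * Worked example, assuming 4 KiB pages: magic.magic starts at
 * PAGE_SIZE - 10 = 4086 and info.badpages at 1024 + 12 + 32 + 117 * 4 =
 * 1536, so (4086 - 1536) / sizeof(int) = 637 bad-page slots fit before
 * the list would run into the signature.
 */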

enum {
	SWP_USED	= (1 << 0),	/* is slot in swap_info[] used? */
	SWP_WRITEOK	= (1 << 1),	/* ok to write to this swap?	*/
	SWP_DISCARDABLE = (1 << 2),	/* blkdev supports discard */
	SWP_DISCARDING	= (1 << 3),	/* now discarding a free cluster */
	SWP_SOLIDSTATE	= (1 << 4),	/* blkdev seeks are cheap */
	SWP_CONTINUED	= (1 << 5),	/* swap_map has count continuation */
	SWP_BLKDEV	= (1 << 6),	/* it's a block device */
	SWP_ACTIVATED	= (1 << 7),	/* set after swap_activate success */
	SWP_FS_OPS	= (1 << 8),	/* swapfile operations go through fs */
	SWP_AREA_DISCARD = (1 << 9),	/* single-time swap area discards */
	SWP_PAGE_DISCARD = (1 << 10),	/* freed swap page-cluster discards */
	SWP_STABLE_WRITES = (1 << 11),	/* no overwrite PG_writeback pages */
	__SWP_READ_SYNCHRONOUS_IO = (1 << 12),	/* synchronous read IO is efficient */
	__SWP_WRITE_SYNCHRONOUS_IO = (1 << 13),	/* synchronous write IO is efficient */
	SWP_SYNCHRONOUS_IO = (__SWP_READ_SYNCHRONOUS_IO | __SWP_WRITE_SYNCHRONOUS_IO),
					/* add others here before... */
	SWP_SCANNING	= (1 << 14),	/* refcount in scan_swap_map */
};

#define SWAP_CLUSTER_MAX 32UL
#define COMPACT_CLUSTER_MAX SWAP_CLUSTER_MAX

/* Bit flags in swap_map */
#define SWAP_HAS_CACHE	0x40	/* Flag page is cached, in first swap_map */
#define COUNT_CONTINUED	0x80	/* Flag swap_map continuation for full count */

/* Special values in first swap_map */
#define SWAP_MAP_MAX	0x3e	/* Max count */
#define SWAP_MAP_BAD	0x3f	/* Note page is bad */
#define SWAP_MAP_SHMEM	0xbf	/* Owned by shmem/tmpfs */

/* Special value in each swap_map continuation */
#define SWAP_CONT_MAX	0x7f	/* Max count */
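
/*
 * Example (illustrative): decoding one first-level swap_map byte under
 * the rules above, leaving aside the SWAP_MAP_BAD/SWAP_MAP_SHMEM special
 * values:
 *
 *	unsigned char ent = si->swap_map[offset];
 *	bool has_cache = ent & SWAP_HAS_CACHE;
 *	int count = ent & ~(SWAP_HAS_CACHE | COUNT_CONTINUED);
 *
 * So 0x41 means one pte reference plus a swapcache page; counts beyond
 * SWAP_MAP_MAX set COUNT_CONTINUED and spill into continuation pages,
 * each byte of which contributes up to SWAP_CONT_MAX.
 */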

/*
 * We use this to track usage of a cluster. A cluster is a block of swap disk
 * space SWAPFILE_CLUSTER pages long, naturally aligned on disk. All
 * free clusters are organized into a list. We fetch an entry from the list to
 * get a free cluster.
 *
 * The flags field determines if a cluster is free. This is
 * protected by the cluster lock.
 */
struct swap_cluster_info {
	spinlock_t lock;	/*
				 * Protect swap_cluster_info fields
				 * other than list, and swap_info_struct->swap_map
				 * elements corresponding to the swap cluster.
				 */
	u16 count;
	u8 flags;
	u8 order;
	struct list_head list;
};
#define CLUSTER_FLAG_FREE 1 /* This cluster is free */
#define CLUSTER_FLAG_NONFULL 2 /* This cluster is on nonfull list */
#define CLUSTER_FLAG_FRAG 4 /* This cluster is on frag list */
#define CLUSTER_FLAG_FULL 8 /* This cluster is on full list */

/*
 * The first page in the swap file is the swap header, which is always marked
 * bad to prevent it from being allocated as an entry. This also prevents the
 * cluster to which it belongs from being marked free. Therefore 0 is safe to
 * use as a sentinel to indicate next is not valid in percpu_cluster.
 */
#define SWAP_NEXT_INVALID	0

#ifdef CONFIG_THP_SWAP
#define SWAP_NR_ORDERS		(PMD_ORDER + 1)
#else
#define SWAP_NR_ORDERS		1
#endif

/*
 * We assign a cluster to each CPU, so each CPU can allocate swap entries from
 * its own cluster and swap out sequentially. The purpose is to optimize swapout
 * throughput.
 */
struct percpu_cluster {
	unsigned int next[SWAP_NR_ORDERS]; /* Likely next allocation offset */
};
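
/*
 * Allocation sketch (a simplified assumption, not the exact mm/swapfile.c
 * code): a CPU first retries the offset it remembered for the requested
 * order before touching the shared cluster lists:
 *
 *	struct percpu_cluster *pcp = this_cpu_ptr(si->percpu_cluster);
 *	unsigned int offset = pcp->next[order];
 *
 *	if (offset == SWAP_NEXT_INVALID)
 *		take a fresh cluster from free_clusters/nonfull_clusters
 *
 * and it stores the follow-on offset (or SWAP_NEXT_INVALID) back into
 * pcp->next[order] once the allocation is done.
 */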

/*
 * The in-memory structure used to track swap areas.
 */
struct swap_info_struct {
	struct percpu_ref users;	/* indicate and keep swap device valid. */
	unsigned long	flags;		/* SWP_USED etc: see above */
	signed short	prio;		/* swap priority of this type */
	struct plist_node list;		/* entry in swap_active_head */
	signed char	type;		/* strange name for an index */
	unsigned int	max;		/* extent of the swap_map */
	unsigned char *swap_map;	/* vmalloc'ed array of usage counts */
	unsigned long *zeromap;		/* kvmalloc'ed bitmap to track zero pages */
	struct swap_cluster_info *cluster_info; /* cluster info. Only for SSD */
	struct list_head free_clusters; /* free clusters list */
	struct list_head full_clusters; /* full clusters list */
	struct list_head nonfull_clusters[SWAP_NR_ORDERS];
					/* list of clusters that contain at least one free slot */
	struct list_head frag_clusters[SWAP_NR_ORDERS];
					/* list of clusters that are fragmented or contended */
	unsigned int frag_cluster_nr[SWAP_NR_ORDERS];
	unsigned int lowest_bit;	/* index of first free in swap_map */
	unsigned int highest_bit;	/* index of last free in swap_map */
	unsigned int pages;		/* total of usable pages of swap */
	unsigned int inuse_pages;	/* number of those currently in use */
	unsigned int cluster_next;	/* likely index for next allocation */
	unsigned int cluster_nr;	/* countdown to next cluster search */
	unsigned int __percpu *cluster_next_cpu; /* percpu index for next allocation */
	struct percpu_cluster __percpu *percpu_cluster; /* per cpu's swap location */
	struct rb_root swap_extent_root;/* root of the swap extent rbtree */
	struct block_device *bdev;	/* swap device or bdev of swap file */
	struct file *swap_file;		/* seldom referenced */
	struct completion comp;		/* seldom referenced */
	spinlock_t lock;		/*
					 * protect map scan related fields like
					 * swap_map, lowest_bit, highest_bit,
					 * inuse_pages, cluster_next,
					 * cluster_nr, lowest_alloc,
					 * highest_alloc, free/discard cluster
					 * list. other fields are only changed
					 * at swapon/swapoff, so are protected
					 * by swap_lock. changing flags needs to
					 * hold this lock and swap_lock. If
					 * both locks need to be held, hold
					 * swap_lock first.
					 */
	spinlock_t cont_lock;		/*
					 * protect swap count continuation page
					 * list.
					 */
	struct work_struct discard_work; /* discard worker */
	struct work_struct reclaim_work; /* reclaim worker */
	struct list_head discard_clusters; /* discard clusters list */
	struct plist_node avail_lists[]; /*
					   * entries in swap_avail_heads, one
					   * entry per node.
					   * Must be last as the size of the
					   * array is nr_node_ids, which is not
					   * a fixed value, so it has to be
					   * allocated dynamically.
					   * And it has to be an array so that
					   * plist_for_each_* can work.
					   */
};

static inline swp_entry_t page_swap_entry(struct page *page)
{
	struct folio *folio = page_folio(page);
	swp_entry_t entry = folio->swap;

	entry.val += folio_page_idx(folio, page);
	return entry;
}
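
/*
 * For example, for the page at index 3 of a swapped-out large folio this
 * returns folio->swap.val + 3: the helper relies on a large folio's swap
 * slots being allocated contiguously.
 */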

/* linux/mm/workingset.c */
bool workingset_test_recent(void *shadow, bool file, bool *workingset,
				bool flush);
void workingset_age_nonresident(struct lruvec *lruvec, unsigned long nr_pages);
void *workingset_eviction(struct folio *folio, struct mem_cgroup *target_memcg);
void workingset_refault(struct folio *folio, void *shadow);
void workingset_activation(struct folio *folio);

/* linux/mm/page_alloc.c */
extern unsigned long totalreserve_pages;

/* Definition of global_zone_page_state not available yet */
#define nr_free_pages() global_zone_page_state(NR_FREE_PAGES)


/* linux/mm/swap.c */
void lru_note_cost(struct lruvec *lruvec, bool file,
		   unsigned int nr_io, unsigned int nr_rotated);
void lru_note_cost_refault(struct folio *);
void folio_add_lru(struct folio *);
void folio_add_lru_vma(struct folio *, struct vm_area_struct *);
void mark_page_accessed(struct page *);
void folio_mark_accessed(struct folio *);

static inline bool folio_may_be_lru_cached(struct folio *folio)
{
	/*
	 * Holding PMD-sized folios in per-CPU LRU cache unbalances accounting.
	 * Holding small numbers of low-order mTHP folios in per-CPU LRU cache
	 * will be sensible, but nobody has implemented and tested that yet.
	 */
	return !folio_test_large(folio);
}

extern atomic_t lru_disable_count;

static inline bool lru_cache_disabled(void)
{
	return atomic_read(&lru_disable_count);
}

static inline void lru_cache_enable(void)
{
	atomic_dec(&lru_disable_count);
}

extern void lru_cache_disable(void);
extern void lru_add_drain(void);
extern void lru_add_drain_cpu(int cpu);
extern void lru_add_drain_cpu_zone(struct zone *zone);
extern void lru_add_drain_all(void);
void folio_deactivate(struct folio *folio);
void folio_mark_lazyfree(struct folio *folio);
extern void swap_setup(void);

/* linux/mm/vmscan.c */
extern unsigned long zone_reclaimable_pages(struct zone *zone);
extern unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
					gfp_t gfp_mask, nodemask_t *mask);

#define MEMCG_RECLAIM_MAY_SWAP (1 << 1)
#define MEMCG_RECLAIM_PROACTIVE (1 << 2)
#define MIN_SWAPPINESS 0
#define MAX_SWAPPINESS 200
extern unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *memcg,
						  unsigned long nr_pages,
						  gfp_t gfp_mask,
						  unsigned int reclaim_options,
						  int *swappiness);
extern unsigned long mem_cgroup_shrink_node(struct mem_cgroup *mem,
						gfp_t gfp_mask, bool noswap,
						pg_data_t *pgdat,
						unsigned long *nr_scanned);
extern unsigned long shrink_all_memory(unsigned long nr_pages);
extern int vm_swappiness;
long remove_mapping(struct address_space *mapping, struct folio *folio);

extern unsigned long reclaim_pages(struct list_head *folio_list);
extern unsigned long __reclaim_pages(struct list_head *folio_list,
				     void *private);
#ifdef CONFIG_NUMA
extern int node_reclaim_mode;
extern int sysctl_min_unmapped_ratio;
extern int sysctl_min_slab_ratio;
#else
#define node_reclaim_mode 0
#endif

static inline bool node_reclaim_enabled(void)
{
	/* Is any node_reclaim_mode bit set? */
	return node_reclaim_mode & (RECLAIM_ZONE|RECLAIM_WRITE|RECLAIM_UNMAP);
}

void check_move_unevictable_folios(struct folio_batch *fbatch);

extern void __meminit kswapd_run(int nid);
extern void __meminit kswapd_stop(int nid);

#ifdef CONFIG_SWAP

int add_swap_extent(struct swap_info_struct *sis, unsigned long start_page,
		unsigned long nr_pages, sector_t start_block);
int generic_swapfile_activate(struct swap_info_struct *, struct file *,
		sector_t *);

static inline unsigned long total_swapcache_pages(void)
{
	return global_node_page_state(NR_SWAPCACHE);
}

void delete_from_swap_cache(struct folio *folio);
void free_swap_cache(struct folio *folio);
void free_page_and_swap_cache(struct page *);
void free_pages_and_swap_cache(struct encoded_page **, int);
/* linux/mm/swapfile.c */
extern atomic_long_t nr_swap_pages;
extern long total_swap_pages;
extern atomic_t nr_rotate_swap;
extern bool has_usable_swap(void);

/* Swap 50% full? Release swapcache more aggressively.. */
static inline bool vm_swap_full(void)
{
	return atomic_long_read(&nr_swap_pages) * 2 < total_swap_pages;
}
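
/*
 * With 1 GiB of total swap, for instance, this starts returning true
 * once free swap drops below 512 MiB, at which point folio_free_swap()
 * and friends drop swapcache copies more eagerly.
 */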

static inline long get_nr_swap_pages(void)
{
	return atomic_long_read(&nr_swap_pages);
}

extern void si_swapinfo(struct sysinfo *);
swp_entry_t folio_alloc_swap(struct folio *folio);
bool folio_free_swap(struct folio *folio);
void put_swap_folio(struct folio *folio, swp_entry_t entry);
extern swp_entry_t get_swap_page_of_type(int);
extern int get_swap_pages(int n, swp_entry_t swp_entries[], int order);
extern int add_swap_count_continuation(swp_entry_t, gfp_t);
extern void swap_shmem_alloc(swp_entry_t, int);
extern int swap_duplicate(swp_entry_t);
extern int swapcache_prepare(swp_entry_t entry, int nr);
extern void swap_free_nr(swp_entry_t entry, int nr_pages);
extern void swapcache_free_entries(swp_entry_t *entries, int n);
extern void free_swap_and_cache_nr(swp_entry_t entry, int nr);
int swap_type_of(dev_t device, sector_t offset);
int find_first_swap(dev_t *device);
extern unsigned int count_swap_pages(int, int);
extern sector_t swapdev_block(int, pgoff_t);
extern int __swap_count(swp_entry_t entry);
extern int swap_swapcount(struct swap_info_struct *si, swp_entry_t entry);
extern int swp_swapcount(swp_entry_t entry);
struct swap_info_struct *swp_swap_info(swp_entry_t entry);
struct backing_dev_info;
extern int init_swap_address_space(unsigned int type, unsigned long nr_pages);
extern void exit_swap_address_space(unsigned int type);
extern struct swap_info_struct *get_swap_device(swp_entry_t entry);
sector_t swap_folio_sector(struct folio *folio);
extern sector_t alloc_swapdev_block(int swap);

static inline void put_swap_device(struct swap_info_struct *si)
{
	percpu_ref_put(&si->users);
}
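
/*
 * Usage sketch (illustrative): pair with get_swap_device(), which pins
 * the device against a concurrent swapoff:
 *
 *	struct swap_info_struct *si = get_swap_device(entry);
 *
 *	if (si) {
 *		use si and entry here
 *		put_swap_device(si);
 *	}
 *
 * Pointers derived from si must not be used once the reference is
 * dropped, as swapoff may then free the device's structures.
 */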

#else /* CONFIG_SWAP */
static inline struct swap_info_struct *swp_swap_info(swp_entry_t entry)
{
	return NULL;
}

static inline struct swap_info_struct *get_swap_device(swp_entry_t entry)
{
	return NULL;
}

static inline void put_swap_device(struct swap_info_struct *si)
{
}

#define get_nr_swap_pages()			0L
#define total_swap_pages			0L
#define total_swapcache_pages()			0UL
#define vm_swap_full()				0

#define si_swapinfo(val) \
	do { (val)->freeswap = (val)->totalswap = 0; } while (0)
/* only sparc cannot include linux/pagemap.h in this file
 * so leave put_page and release_pages undeclared... */
#define free_page_and_swap_cache(page) \
	put_page(page)
#define free_pages_and_swap_cache(pages, nr) \
	release_pages((pages), (nr));

static inline void free_swap_and_cache_nr(swp_entry_t entry, int nr)
{
}

static inline void free_swap_cache(struct folio *folio)
{
}

static inline int add_swap_count_continuation(swp_entry_t swp, gfp_t gfp_mask)
{
	return 0;
}

static inline void swap_shmem_alloc(swp_entry_t swp, int nr)
{
}

static inline int swap_duplicate(swp_entry_t swp)
{
	return 0;
}

static inline int swapcache_prepare(swp_entry_t swp, int nr)
{
	return 0;
}

static inline void swap_free_nr(swp_entry_t entry, int nr_pages)
{
}

static inline void put_swap_folio(struct folio *folio, swp_entry_t swp)
{
}

static inline int __swap_count(swp_entry_t entry)
{
	return 0;
}

static inline int swap_swapcount(struct swap_info_struct *si, swp_entry_t entry)
{
	return 0;
}

static inline int swp_swapcount(swp_entry_t entry)
{
	return 0;
}

static inline swp_entry_t folio_alloc_swap(struct folio *folio)
{
	swp_entry_t entry;
	entry.val = 0;
	return entry;
}

static inline bool folio_free_swap(struct folio *folio)
{
	return false;
}

static inline int add_swap_extent(struct swap_info_struct *sis,
				  unsigned long start_page,
				  unsigned long nr_pages, sector_t start_block)
{
	return -EINVAL;
}
#endif /* CONFIG_SWAP */

static inline void free_swap_and_cache(swp_entry_t entry)
{
	free_swap_and_cache_nr(entry, 1);
}

static inline void swap_free(swp_entry_t entry)
{
	swap_free_nr(entry, 1);
}

#ifdef CONFIG_MEMCG
extern void _trace_android_vh_use_vm_swappiness(bool *use_vm_swappiness);

static inline int mem_cgroup_swappiness(struct mem_cgroup *memcg)
{
	bool use_vm_swappiness = false;

	_trace_android_vh_use_vm_swappiness(&use_vm_swappiness);
	if (use_vm_swappiness)
		return READ_ONCE(vm_swappiness);

	/* Cgroup2 doesn't have per-cgroup swappiness */
	if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
		return READ_ONCE(vm_swappiness);

	/* root ? */
	if (mem_cgroup_disabled() || mem_cgroup_is_root(memcg))
		return READ_ONCE(vm_swappiness);

	return READ_ONCE(memcg->swappiness);
}
#else
static inline int mem_cgroup_swappiness(struct mem_cgroup *mem)
{
	return READ_ONCE(vm_swappiness);
}
#endif

#if defined(CONFIG_SWAP) && defined(CONFIG_MEMCG) && defined(CONFIG_BLK_CGROUP)
void __folio_throttle_swaprate(struct folio *folio, gfp_t gfp);
static inline void folio_throttle_swaprate(struct folio *folio, gfp_t gfp)
{
	if (mem_cgroup_disabled())
		return;
	__folio_throttle_swaprate(folio, gfp);
}
#else
static inline void folio_throttle_swaprate(struct folio *folio, gfp_t gfp)
{
}
#endif

#if defined(CONFIG_MEMCG) && defined(CONFIG_SWAP)
void mem_cgroup_swapout(struct folio *folio, swp_entry_t entry);
int __mem_cgroup_try_charge_swap(struct folio *folio, swp_entry_t entry);
static inline int mem_cgroup_try_charge_swap(struct folio *folio,
		swp_entry_t entry)
{
	if (mem_cgroup_disabled())
		return 0;
	return __mem_cgroup_try_charge_swap(folio, entry);
}

extern void __mem_cgroup_uncharge_swap(swp_entry_t entry, unsigned int nr_pages);
static inline void mem_cgroup_uncharge_swap(swp_entry_t entry, unsigned int nr_pages)
{
	if (mem_cgroup_disabled())
		return;
	__mem_cgroup_uncharge_swap(entry, nr_pages);
}

extern long mem_cgroup_get_nr_swap_pages(struct mem_cgroup *memcg);
extern bool mem_cgroup_swap_full(struct folio *folio);
#else
static inline void mem_cgroup_swapout(struct folio *folio, swp_entry_t entry)
{
}

static inline int mem_cgroup_try_charge_swap(struct folio *folio,
					     swp_entry_t entry)
{
	return 0;
}

static inline void mem_cgroup_uncharge_swap(swp_entry_t entry,
					    unsigned int nr_pages)
{
}

static inline long mem_cgroup_get_nr_swap_pages(struct mem_cgroup *memcg)
{
	return get_nr_swap_pages();
}

static inline bool mem_cgroup_swap_full(struct folio *folio)
{
	return vm_swap_full();
}
#endif

#endif /* __KERNEL__ */
#endif /* _LINUX_SWAP_H */