1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  *  linux/mm/swapfile.c
4  *
5  *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
6  *  Swap reorganised 29.12.95, Stephen Tweedie
7  */
8 
9 #include <linux/mm.h>
10 #include <linux/sched/mm.h>
11 #include <linux/sched/task.h>
12 #include <linux/hugetlb.h>
13 #include <linux/mman.h>
14 #include <linux/slab.h>
15 #include <linux/kernel_stat.h>
16 #include <linux/swap.h>
17 #include <linux/vmalloc.h>
18 #include <linux/pagemap.h>
19 #include <linux/namei.h>
20 #include <linux/shmem_fs.h>
21 #include <linux/blkdev.h>
22 #include <linux/random.h>
23 #include <linux/writeback.h>
24 #include <linux/proc_fs.h>
25 #include <linux/seq_file.h>
26 #include <linux/init.h>
27 #include <linux/ksm.h>
28 #include <linux/rmap.h>
29 #include <linux/security.h>
30 #include <linux/backing-dev.h>
31 #include <linux/mutex.h>
32 #include <linux/capability.h>
33 #include <linux/syscalls.h>
34 #include <linux/memcontrol.h>
35 #include <linux/poll.h>
36 #include <linux/oom.h>
37 #include <linux/frontswap.h>
38 #include <linux/swapfile.h>
39 #include <linux/export.h>
40 #include <linux/swap_slots.h>
41 #include <linux/sort.h>
42 
43 #include <asm/pgtable.h>
44 #include <asm/tlbflush.h>
45 #include <linux/swapops.h>
46 #include <linux/swap_cgroup.h>
47 
48 static bool swap_count_continued(struct swap_info_struct *, pgoff_t,
49 				 unsigned char);
50 static void free_swap_count_continuations(struct swap_info_struct *);
51 static sector_t map_swap_entry(swp_entry_t, struct block_device**);
52 
53 DEFINE_SPINLOCK(swap_lock);
54 static unsigned int nr_swapfiles;
55 atomic_long_t nr_swap_pages;
56 /*
57  * Some modules use swappable objects and may try to swap them out under
58  * memory pressure (via the shrinker). Before doing so, they may wish to
59  * check to see if any swap space is available.
60  */
61 EXPORT_SYMBOL_GPL(nr_swap_pages);
62 /* protected with swap_lock. reading in vm_swap_full() doesn't need lock */
63 long total_swap_pages;
64 static int least_priority = -1;
65 
66 static const char Bad_file[] = "Bad swap file entry ";
67 static const char Unused_file[] = "Unused swap file entry ";
68 static const char Bad_offset[] = "Bad swap offset entry ";
69 static const char Unused_offset[] = "Unused swap offset entry ";
70 
71 /*
72  * all active swap_info_structs
73  * protected with swap_lock, and ordered by priority.
74  */
75 PLIST_HEAD(swap_active_head);
76 
77 /*
78  * all available (active, not full) swap_info_structs
79  * protected with swap_avail_lock, ordered by priority.
80  * This is used by get_swap_page() instead of swap_active_head
81  * because swap_active_head includes all swap_info_structs,
82  * but get_swap_page() doesn't need to look at full ones.
83  * This uses its own lock instead of swap_lock because when a
84  * swap_info_struct changes between not-full/full, it needs to
85  * add/remove itself to/from this list, but the swap_info_struct->lock
86  * is held and the locking order requires swap_lock to be taken
87  * before any swap_info_struct->lock.
88  */
89 static struct plist_head *swap_avail_heads;
90 static DEFINE_SPINLOCK(swap_avail_lock);
91 
92 struct swap_info_struct *swap_info[MAX_SWAPFILES];
93 
94 static DEFINE_MUTEX(swapon_mutex);
95 
96 static DECLARE_WAIT_QUEUE_HEAD(proc_poll_wait);
97 /* Activity counter to indicate that a swapon or swapoff has occurred */
98 static atomic_t proc_poll_event = ATOMIC_INIT(0);
99 
100 atomic_t nr_rotate_swap = ATOMIC_INIT(0);
101 
102 static struct swap_info_struct *swap_type_to_swap_info(int type)
103 {
104 	if (type >= READ_ONCE(nr_swapfiles))
105 		return NULL;
106 
107 	smp_rmb();	/* Pairs with smp_wmb in alloc_swap_info. */
108 	return READ_ONCE(swap_info[type]);
109 }
110 
111 static inline unsigned char swap_count(unsigned char ent)
112 {
113 	return ent & ~SWAP_HAS_CACHE;	/* may include COUNT_CONTINUED flag */
114 }
115 
116 /* Reclaim the swap entry anyway if possible */
117 #define TTRS_ANYWAY		0x1
118 /*
119  * Reclaim the swap entry if there are no more mappings of the
120  * corresponding page
121  */
122 #define TTRS_UNMAPPED		0x2
124 /* Reclaim the swap entry if swap is getting full */
124 #define TTRS_FULL		0x4
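/*
 * A brief usage sketch, drawn from this file: free_swap_and_cache() below
 * passes TTRS_UNMAPPED | TTRS_FULL, so __try_to_reclaim_swap() frees the
 * entry either when the page is no longer mapped or when swap space is
 * getting full, while scan_swap_map_slots() passes TTRS_ANYWAY to reclaim
 * a cache-only entry unconditionally.
 */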
125 
126 /* returns 1 if swap entry is freed */
127 static int __try_to_reclaim_swap(struct swap_info_struct *si,
128 				 unsigned long offset, unsigned long flags)
129 {
130 	swp_entry_t entry = swp_entry(si->type, offset);
131 	struct page *page;
132 	int ret = 0;
133 
134 	page = find_get_page(swap_address_space(entry), offset);
135 	if (!page)
136 		return 0;
137 	/*
138 	 * This function is called from scan_swap_map_slots(), which in turn is
139 	 * called by vmscan.c while reclaiming pages, so a lock on a page is
140 	 * already held here. We have to use trylock to avoid deadlock. This is a
141 	 * special case; in usual operations, use try_to_free_swap() with an
142 	 * explicit lock_page().
143 	 */
144 	if (trylock_page(page)) {
145 		if ((flags & TTRS_ANYWAY) ||
146 		    ((flags & TTRS_UNMAPPED) && !page_mapped(page)) ||
147 		    ((flags & TTRS_FULL) && mem_cgroup_swap_full(page)))
148 			ret = try_to_free_swap(page);
149 		unlock_page(page);
150 	}
151 	put_page(page);
152 	return ret;
153 }
154 
155 static inline struct swap_extent *first_se(struct swap_info_struct *sis)
156 {
157 	struct rb_node *rb = rb_first(&sis->swap_extent_root);
158 	return rb_entry(rb, struct swap_extent, rb_node);
159 }
160 
161 static inline struct swap_extent *next_se(struct swap_extent *se)
162 {
163 	struct rb_node *rb = rb_next(&se->rb_node);
164 	return rb ? rb_entry(rb, struct swap_extent, rb_node) : NULL;
165 }
166 
167 /*
168  * swapon tells the device that all the old swap contents can be discarded,
169  * to allow the swap device to optimize its wear-levelling.
170  */
171 static int discard_swap(struct swap_info_struct *si)
172 {
173 	struct swap_extent *se;
174 	sector_t start_block;
175 	sector_t nr_blocks;
176 	int err = 0;
177 
178 	/* Do not discard the swap header page! */
179 	se = first_se(si);
180 	start_block = (se->start_block + 1) << (PAGE_SHIFT - 9);
181 	nr_blocks = ((sector_t)se->nr_pages - 1) << (PAGE_SHIFT - 9);
182 	if (nr_blocks) {
183 		err = blkdev_issue_discard(si->bdev, start_block,
184 				nr_blocks, GFP_KERNEL, 0);
185 		if (err)
186 			return err;
187 		cond_resched();
188 	}
189 
190 	for (se = next_se(se); se; se = next_se(se)) {
191 		start_block = se->start_block << (PAGE_SHIFT - 9);
192 		nr_blocks = (sector_t)se->nr_pages << (PAGE_SHIFT - 9);
193 
194 		err = blkdev_issue_discard(si->bdev, start_block,
195 				nr_blocks, GFP_KERNEL, 0);
196 		if (err)
197 			break;
198 
199 		cond_resched();
200 	}
201 	return err;		/* That will often be -EOPNOTSUPP */
202 }
203 
204 static struct swap_extent *
205 offset_to_swap_extent(struct swap_info_struct *sis, unsigned long offset)
206 {
207 	struct swap_extent *se;
208 	struct rb_node *rb;
209 
210 	rb = sis->swap_extent_root.rb_node;
211 	while (rb) {
212 		se = rb_entry(rb, struct swap_extent, rb_node);
213 		if (offset < se->start_page)
214 			rb = rb->rb_left;
215 		else if (offset >= se->start_page + se->nr_pages)
216 			rb = rb->rb_right;
217 		else
218 			return se;
219 	}
220 	/* It *must* be present */
221 	BUG();
222 }
223 
224 sector_t swap_page_sector(struct page *page)
225 {
226 	struct swap_info_struct *sis = page_swap_info(page);
227 	struct swap_extent *se;
228 	sector_t sector;
229 	pgoff_t offset;
230 
231 	offset = __page_file_index(page);
232 	se = offset_to_swap_extent(sis, offset);
233 	sector = se->start_block + (offset - se->start_page);
234 	return sector << (PAGE_SHIFT - 9);
235 }
236 
237 /*
238  * swap allocation tells the device that a cluster of swap can now be discarded,
239  * to allow the swap device to optimize its wear-levelling.
240  */
241 static void discard_swap_cluster(struct swap_info_struct *si,
242 				 pgoff_t start_page, pgoff_t nr_pages)
243 {
244 	struct swap_extent *se = offset_to_swap_extent(si, start_page);
245 
246 	while (nr_pages) {
247 		pgoff_t offset = start_page - se->start_page;
248 		sector_t start_block = se->start_block + offset;
249 		sector_t nr_blocks = se->nr_pages - offset;
250 
251 		if (nr_blocks > nr_pages)
252 			nr_blocks = nr_pages;
253 		start_page += nr_blocks;
254 		nr_pages -= nr_blocks;
255 
256 		start_block <<= PAGE_SHIFT - 9;
257 		nr_blocks <<= PAGE_SHIFT - 9;
258 		if (blkdev_issue_discard(si->bdev, start_block,
259 					nr_blocks, GFP_NOIO, 0))
260 			break;
261 
262 		se = next_se(se);
263 	}
264 }
265 
266 #ifdef CONFIG_THP_SWAP
267 #define SWAPFILE_CLUSTER	HPAGE_PMD_NR
268 
269 #define swap_entry_size(size)	(size)
270 #else
271 #define SWAPFILE_CLUSTER	256
272 
273 /*
274  * Define swap_entry_size() as a constant to let the compiler optimize
275  * out some code if !CONFIG_THP_SWAP
276  */
277 #define swap_entry_size(size)	1
278 #endif
279 #define LATENCY_LIMIT		256
280 
281 static inline void cluster_set_flag(struct swap_cluster_info *info,
282 	unsigned int flag)
283 {
284 	info->flags = flag;
285 }
286 
287 static inline unsigned int cluster_count(struct swap_cluster_info *info)
288 {
289 	return info->data;
290 }
291 
292 static inline void cluster_set_count(struct swap_cluster_info *info,
293 				     unsigned int c)
294 {
295 	info->data = c;
296 }
297 
298 static inline void cluster_set_count_flag(struct swap_cluster_info *info,
299 					 unsigned int c, unsigned int f)
300 {
301 	info->flags = f;
302 	info->data = c;
303 }
304 
305 static inline unsigned int cluster_next(struct swap_cluster_info *info)
306 {
307 	return info->data;
308 }
309 
310 static inline void cluster_set_next(struct swap_cluster_info *info,
311 				    unsigned int n)
312 {
313 	info->data = n;
314 }
315 
316 static inline void cluster_set_next_flag(struct swap_cluster_info *info,
317 					 unsigned int n, unsigned int f)
318 {
319 	info->flags = f;
320 	info->data = n;
321 }
322 
323 static inline bool cluster_is_free(struct swap_cluster_info *info)
324 {
325 	return info->flags & CLUSTER_FLAG_FREE;
326 }
327 
328 static inline bool cluster_is_null(struct swap_cluster_info *info)
329 {
330 	return info->flags & CLUSTER_FLAG_NEXT_NULL;
331 }
332 
333 static inline void cluster_set_null(struct swap_cluster_info *info)
334 {
335 	info->flags = CLUSTER_FLAG_NEXT_NULL;
336 	info->data = 0;
337 }
338 
339 static inline bool cluster_is_huge(struct swap_cluster_info *info)
340 {
341 	if (IS_ENABLED(CONFIG_THP_SWAP))
342 		return info->flags & CLUSTER_FLAG_HUGE;
343 	return false;
344 }
345 
346 static inline void cluster_clear_huge(struct swap_cluster_info *info)
347 {
348 	info->flags &= ~CLUSTER_FLAG_HUGE;
349 }
350 
351 static inline struct swap_cluster_info *lock_cluster(struct swap_info_struct *si,
352 						     unsigned long offset)
353 {
354 	struct swap_cluster_info *ci;
355 
356 	ci = si->cluster_info;
357 	if (ci) {
358 		ci += offset / SWAPFILE_CLUSTER;
359 		spin_lock(&ci->lock);
360 	}
361 	return ci;
362 }
363 
364 static inline void unlock_cluster(struct swap_cluster_info *ci)
365 {
366 	if (ci)
367 		spin_unlock(&ci->lock);
368 }
369 
370 /*
371  * Determine the locking method in use for this device.  Return
372  * swap_cluster_info if SSD-style cluster-based locking is in place.
373  */
374 static inline struct swap_cluster_info *lock_cluster_or_swap_info(
375 		struct swap_info_struct *si, unsigned long offset)
376 {
377 	struct swap_cluster_info *ci;
378 
379 	/* Try to use fine-grained SSD-style locking if available: */
380 	ci = lock_cluster(si, offset);
381 	/* Otherwise, fall back to traditional, coarse locking: */
382 	if (!ci)
383 		spin_lock(&si->lock);
384 
385 	return ci;
386 }
387 
388 static inline void unlock_cluster_or_swap_info(struct swap_info_struct *si,
389 					       struct swap_cluster_info *ci)
390 {
391 	if (ci)
392 		unlock_cluster(ci);
393 	else
394 		spin_unlock(&si->lock);
395 }
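/*
 * A minimal usage sketch of the pair above, mirroring page_swapcount()
 * further below: the lock/unlock calls bracket the swap_map access, taking
 * only the per-cluster lock on SSD-style devices and falling back to the
 * coarse si->lock otherwise.
 *
 *	ci = lock_cluster_or_swap_info(si, offset);
 *	count = swap_count(si->swap_map[offset]);
 *	unlock_cluster_or_swap_info(si, ci);
 */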
396 
397 static inline bool cluster_list_empty(struct swap_cluster_list *list)
398 {
399 	return cluster_is_null(&list->head);
400 }
401 
402 static inline unsigned int cluster_list_first(struct swap_cluster_list *list)
403 {
404 	return cluster_next(&list->head);
405 }
406 
407 static void cluster_list_init(struct swap_cluster_list *list)
408 {
409 	cluster_set_null(&list->head);
410 	cluster_set_null(&list->tail);
411 }
412 
413 static void cluster_list_add_tail(struct swap_cluster_list *list,
414 				  struct swap_cluster_info *ci,
415 				  unsigned int idx)
416 {
417 	if (cluster_list_empty(list)) {
418 		cluster_set_next_flag(&list->head, idx, 0);
419 		cluster_set_next_flag(&list->tail, idx, 0);
420 	} else {
421 		struct swap_cluster_info *ci_tail;
422 		unsigned int tail = cluster_next(&list->tail);
423 
424 		/*
425 		 * Nested cluster lock, but both cluster locks are
426 		 * only acquired while we hold swap_info_struct->lock
427 		 */
428 		ci_tail = ci + tail;
429 		spin_lock_nested(&ci_tail->lock, SINGLE_DEPTH_NESTING);
430 		cluster_set_next(ci_tail, idx);
431 		spin_unlock(&ci_tail->lock);
432 		cluster_set_next_flag(&list->tail, idx, 0);
433 	}
434 }
435 
436 static unsigned int cluster_list_del_first(struct swap_cluster_list *list,
437 					   struct swap_cluster_info *ci)
438 {
439 	unsigned int idx;
440 
441 	idx = cluster_next(&list->head);
442 	if (cluster_next(&list->tail) == idx) {
443 		cluster_set_null(&list->head);
444 		cluster_set_null(&list->tail);
445 	} else
446 		cluster_set_next_flag(&list->head,
447 				      cluster_next(&ci[idx]), 0);
448 
449 	return idx;
450 }
451 
452 /* Add a cluster to discard list and schedule it to do discard */
453 static void swap_cluster_schedule_discard(struct swap_info_struct *si,
454 		unsigned int idx)
455 {
456 	/*
457 	 * If scan_swap_map() can't find a free cluster, it will check
458 	 * si->swap_map directly. To make sure a cluster being discarded isn't
459 	 * taken by scan_swap_map(), mark its swap entries bad (occupied). They
460 	 * will be cleared after the discard.
461 	 */
462 	memset(si->swap_map + idx * SWAPFILE_CLUSTER,
463 			SWAP_MAP_BAD, SWAPFILE_CLUSTER);
464 
465 	cluster_list_add_tail(&si->discard_clusters, si->cluster_info, idx);
466 
467 	schedule_work(&si->discard_work);
468 }
469 
470 static void __free_cluster(struct swap_info_struct *si, unsigned long idx)
471 {
472 	struct swap_cluster_info *ci = si->cluster_info;
473 
474 	cluster_set_flag(ci + idx, CLUSTER_FLAG_FREE);
475 	cluster_list_add_tail(&si->free_clusters, ci, idx);
476 }
477 
478 /*
479  * Actually do the discards. After a cluster discard is finished, the cluster
480  * will be added to the free cluster list. The caller should hold si->lock.
481  */
482 static void swap_do_scheduled_discard(struct swap_info_struct *si)
483 {
484 	struct swap_cluster_info *info, *ci;
485 	unsigned int idx;
486 
487 	info = si->cluster_info;
488 
489 	while (!cluster_list_empty(&si->discard_clusters)) {
490 		idx = cluster_list_del_first(&si->discard_clusters, info);
491 		spin_unlock(&si->lock);
492 
493 		discard_swap_cluster(si, idx * SWAPFILE_CLUSTER,
494 				SWAPFILE_CLUSTER);
495 
496 		spin_lock(&si->lock);
497 		ci = lock_cluster(si, idx * SWAPFILE_CLUSTER);
498 		__free_cluster(si, idx);
499 		memset(si->swap_map + idx * SWAPFILE_CLUSTER,
500 				0, SWAPFILE_CLUSTER);
501 		unlock_cluster(ci);
502 	}
503 }
504 
505 static void swap_discard_work(struct work_struct *work)
506 {
507 	struct swap_info_struct *si;
508 
509 	si = container_of(work, struct swap_info_struct, discard_work);
510 
511 	spin_lock(&si->lock);
512 	swap_do_scheduled_discard(si);
513 	spin_unlock(&si->lock);
514 }
515 
516 static void alloc_cluster(struct swap_info_struct *si, unsigned long idx)
517 {
518 	struct swap_cluster_info *ci = si->cluster_info;
519 
520 	VM_BUG_ON(cluster_list_first(&si->free_clusters) != idx);
521 	cluster_list_del_first(&si->free_clusters, ci);
522 	cluster_set_count_flag(ci + idx, 0, 0);
523 }
524 
525 static void free_cluster(struct swap_info_struct *si, unsigned long idx)
526 {
527 	struct swap_cluster_info *ci = si->cluster_info + idx;
528 
529 	VM_BUG_ON(cluster_count(ci) != 0);
530 	/*
531 	 * If the swap is discardable, prepare to discard the cluster
532 	 * instead of freeing it immediately. The cluster will be freed
533 	 * after the discard.
534 	 */
535 	if ((si->flags & (SWP_WRITEOK | SWP_PAGE_DISCARD)) ==
536 	    (SWP_WRITEOK | SWP_PAGE_DISCARD)) {
537 		swap_cluster_schedule_discard(si, idx);
538 		return;
539 	}
540 
541 	__free_cluster(si, idx);
542 }
543 
544 /*
545  * The cluster corresponding to page_nr will be used. The cluster will be
546  * removed from the free cluster list and its usage counter will be increased.
547  */
548 static void inc_cluster_info_page(struct swap_info_struct *p,
549 	struct swap_cluster_info *cluster_info, unsigned long page_nr)
550 {
551 	unsigned long idx = page_nr / SWAPFILE_CLUSTER;
552 
553 	if (!cluster_info)
554 		return;
555 	if (cluster_is_free(&cluster_info[idx]))
556 		alloc_cluster(p, idx);
557 
558 	VM_BUG_ON(cluster_count(&cluster_info[idx]) >= SWAPFILE_CLUSTER);
559 	cluster_set_count(&cluster_info[idx],
560 		cluster_count(&cluster_info[idx]) + 1);
561 }
562 
563 /*
564  * The usage count of the cluster corresponding to page_nr is decreased by one.
565  * If the usage counter becomes 0, meaning no page in the cluster is in use, we
566  * can optionally discard the cluster and add it to the free cluster list.
567  */
568 static void dec_cluster_info_page(struct swap_info_struct *p,
569 	struct swap_cluster_info *cluster_info, unsigned long page_nr)
570 {
571 	unsigned long idx = page_nr / SWAPFILE_CLUSTER;
572 
573 	if (!cluster_info)
574 		return;
575 
576 	VM_BUG_ON(cluster_count(&cluster_info[idx]) == 0);
577 	cluster_set_count(&cluster_info[idx],
578 		cluster_count(&cluster_info[idx]) - 1);
579 
580 	if (cluster_count(&cluster_info[idx]) == 0)
581 		free_cluster(p, idx);
582 }
583 
584 /*
585  * It's possible for scan_swap_map() to use a free cluster in the middle of the
586  * free cluster list. Avoid such abuse to prevent list corruption.
587  */
588 static bool
589 scan_swap_map_ssd_cluster_conflict(struct swap_info_struct *si,
590 	unsigned long offset)
591 {
592 	struct percpu_cluster *percpu_cluster;
593 	bool conflict;
594 
595 	offset /= SWAPFILE_CLUSTER;
596 	conflict = !cluster_list_empty(&si->free_clusters) &&
597 		offset != cluster_list_first(&si->free_clusters) &&
598 		cluster_is_free(&si->cluster_info[offset]);
599 
600 	if (!conflict)
601 		return false;
602 
603 	percpu_cluster = this_cpu_ptr(si->percpu_cluster);
604 	cluster_set_null(&percpu_cluster->index);
605 	return true;
606 }
607 
608 /*
609  * Try to get a swap entry from current cpu's swap entry pool (a cluster). This
610  * might involve allocating a new cluster for current CPU too.
611  */
612 static bool scan_swap_map_try_ssd_cluster(struct swap_info_struct *si,
613 	unsigned long *offset, unsigned long *scan_base)
614 {
615 	struct percpu_cluster *cluster;
616 	struct swap_cluster_info *ci;
617 	bool found_free;
618 	unsigned long tmp, max;
619 
620 new_cluster:
621 	cluster = this_cpu_ptr(si->percpu_cluster);
622 	if (cluster_is_null(&cluster->index)) {
623 		if (!cluster_list_empty(&si->free_clusters)) {
624 			cluster->index = si->free_clusters.head;
625 			cluster->next = cluster_next(&cluster->index) *
626 					SWAPFILE_CLUSTER;
627 		} else if (!cluster_list_empty(&si->discard_clusters)) {
628 			/*
629 			 * We don't have a free cluster, but some clusters are
630 			 * being discarded; do the discard now and reclaim them.
631 			 */
632 			swap_do_scheduled_discard(si);
633 			*scan_base = *offset = si->cluster_next;
634 			goto new_cluster;
635 		} else
636 			return false;
637 	}
638 
639 	found_free = false;
640 
641 	/*
642 	 * Other CPUs can use our cluster if they can't find a free cluster;
643 	 * check whether there is still a free entry in the cluster.
644 	 */
645 	tmp = cluster->next;
646 	max = min_t(unsigned long, si->max,
647 		    (cluster_next(&cluster->index) + 1) * SWAPFILE_CLUSTER);
648 	if (tmp >= max) {
649 		cluster_set_null(&cluster->index);
650 		goto new_cluster;
651 	}
652 	ci = lock_cluster(si, tmp);
653 	while (tmp < max) {
654 		if (!si->swap_map[tmp]) {
655 			found_free = true;
656 			break;
657 		}
658 		tmp++;
659 	}
660 	unlock_cluster(ci);
661 	if (!found_free) {
662 		cluster_set_null(&cluster->index);
663 		goto new_cluster;
664 	}
665 	cluster->next = tmp + 1;
666 	*offset = tmp;
667 	*scan_base = tmp;
668 	return found_free;
669 }
670 
671 static void __del_from_avail_list(struct swap_info_struct *p)
672 {
673 	int nid;
674 
675 	assert_spin_locked(&p->lock);
676 	for_each_node(nid)
677 		plist_del(&p->avail_lists[nid], &swap_avail_heads[nid]);
678 }
679 
680 static void del_from_avail_list(struct swap_info_struct *p)
681 {
682 	spin_lock(&swap_avail_lock);
683 	__del_from_avail_list(p);
684 	spin_unlock(&swap_avail_lock);
685 }
686 
687 static void swap_range_alloc(struct swap_info_struct *si, unsigned long offset,
688 			     unsigned int nr_entries)
689 {
690 	unsigned int end = offset + nr_entries - 1;
691 
692 	if (offset == si->lowest_bit)
693 		si->lowest_bit += nr_entries;
694 	if (end == si->highest_bit)
695 		si->highest_bit -= nr_entries;
696 	si->inuse_pages += nr_entries;
697 	if (si->inuse_pages == si->pages) {
698 		si->lowest_bit = si->max;
699 		si->highest_bit = 0;
700 		del_from_avail_list(si);
701 	}
702 }
703 
704 static void add_to_avail_list(struct swap_info_struct *p)
705 {
706 	int nid;
707 
708 	spin_lock(&swap_avail_lock);
709 	for_each_node(nid) {
710 		WARN_ON(!plist_node_empty(&p->avail_lists[nid]));
711 		plist_add(&p->avail_lists[nid], &swap_avail_heads[nid]);
712 	}
713 	spin_unlock(&swap_avail_lock);
714 }
715 
716 static void swap_range_free(struct swap_info_struct *si, unsigned long offset,
717 			    unsigned int nr_entries)
718 {
719 	unsigned long end = offset + nr_entries - 1;
720 	void (*swap_slot_free_notify)(struct block_device *, unsigned long);
721 
722 	if (offset < si->lowest_bit)
723 		si->lowest_bit = offset;
724 	if (end > si->highest_bit) {
725 		bool was_full = !si->highest_bit;
726 
727 		si->highest_bit = end;
728 		if (was_full && (si->flags & SWP_WRITEOK))
729 			add_to_avail_list(si);
730 	}
731 	atomic_long_add(nr_entries, &nr_swap_pages);
732 	si->inuse_pages -= nr_entries;
733 	if (si->flags & SWP_BLKDEV)
734 		swap_slot_free_notify =
735 			si->bdev->bd_disk->fops->swap_slot_free_notify;
736 	else
737 		swap_slot_free_notify = NULL;
738 	while (offset <= end) {
739 		frontswap_invalidate_page(si->type, offset);
740 		if (swap_slot_free_notify)
741 			swap_slot_free_notify(si->bdev, offset);
742 		offset++;
743 	}
744 }
745 
746 static int scan_swap_map_slots(struct swap_info_struct *si,
747 			       unsigned char usage, int nr,
748 			       swp_entry_t slots[])
749 {
750 	struct swap_cluster_info *ci;
751 	unsigned long offset;
752 	unsigned long scan_base;
753 	unsigned long last_in_cluster = 0;
754 	int latency_ration = LATENCY_LIMIT;
755 	int n_ret = 0;
756 
757 	if (nr > SWAP_BATCH)
758 		nr = SWAP_BATCH;
759 
760 	/*
761 	 * We try to cluster swap pages by allocating them sequentially
762 	 * in swap.  Once we've allocated SWAPFILE_CLUSTER pages this
763 	 * way, however, we resort to first-free allocation, starting
764 	 * a new cluster.  This prevents us from scattering swap pages
765 	 * all over the entire swap partition, so that we reduce
766 	 * overall disk seek times between swap pages.  -- sct
767 	 * But we do now try to find an empty cluster.  -Andrea
768 	 * And we let swap pages go all over an SSD partition.  Hugh
769 	 */
770 
771 	si->flags += SWP_SCANNING;
772 	scan_base = offset = si->cluster_next;
773 
774 	/* SSD algorithm */
775 	if (si->cluster_info) {
776 		if (scan_swap_map_try_ssd_cluster(si, &offset, &scan_base))
777 			goto checks;
778 		else
779 			goto scan;
780 	}
781 
782 	if (unlikely(!si->cluster_nr--)) {
783 		if (si->pages - si->inuse_pages < SWAPFILE_CLUSTER) {
784 			si->cluster_nr = SWAPFILE_CLUSTER - 1;
785 			goto checks;
786 		}
787 
788 		spin_unlock(&si->lock);
789 
790 		/*
791 		 * If seek is expensive, start searching for new cluster from
792 		 * start of partition, to minimize the span of allocated swap.
793 		 * If seek is cheap, that is the SWP_SOLIDSTATE si->cluster_info
794 		 * case, just handled by scan_swap_map_try_ssd_cluster() above.
795 		 */
796 		scan_base = offset = si->lowest_bit;
797 		last_in_cluster = offset + SWAPFILE_CLUSTER - 1;
798 
799 		/* Locate the first empty (unaligned) cluster */
800 		for (; last_in_cluster <= si->highest_bit; offset++) {
801 			if (si->swap_map[offset])
802 				last_in_cluster = offset + SWAPFILE_CLUSTER;
803 			else if (offset == last_in_cluster) {
804 				spin_lock(&si->lock);
805 				offset -= SWAPFILE_CLUSTER - 1;
806 				si->cluster_next = offset;
807 				si->cluster_nr = SWAPFILE_CLUSTER - 1;
808 				goto checks;
809 			}
810 			if (unlikely(--latency_ration < 0)) {
811 				cond_resched();
812 				latency_ration = LATENCY_LIMIT;
813 			}
814 		}
815 
816 		offset = scan_base;
817 		spin_lock(&si->lock);
818 		si->cluster_nr = SWAPFILE_CLUSTER - 1;
819 	}
820 
821 checks:
822 	if (si->cluster_info) {
823 		while (scan_swap_map_ssd_cluster_conflict(si, offset)) {
824 		/* take a break if we already got some slots */
825 			if (n_ret)
826 				goto done;
827 			if (!scan_swap_map_try_ssd_cluster(si, &offset,
828 							&scan_base))
829 				goto scan;
830 		}
831 	}
832 	if (!(si->flags & SWP_WRITEOK))
833 		goto no_page;
834 	if (!si->highest_bit)
835 		goto no_page;
836 	if (offset > si->highest_bit)
837 		scan_base = offset = si->lowest_bit;
838 
839 	ci = lock_cluster(si, offset);
840 	/* reuse swap entry of cache-only swap if not busy. */
841 	if (vm_swap_full() && si->swap_map[offset] == SWAP_HAS_CACHE) {
842 		int swap_was_freed;
843 		unlock_cluster(ci);
844 		spin_unlock(&si->lock);
845 		swap_was_freed = __try_to_reclaim_swap(si, offset, TTRS_ANYWAY);
846 		spin_lock(&si->lock);
847 		/* entry was freed successfully, try to use this again */
848 		if (swap_was_freed)
849 			goto checks;
850 		goto scan; /* check next one */
851 	}
852 
853 	if (si->swap_map[offset]) {
854 		unlock_cluster(ci);
855 		if (!n_ret)
856 			goto scan;
857 		else
858 			goto done;
859 	}
860 	si->swap_map[offset] = usage;
861 	inc_cluster_info_page(si, si->cluster_info, offset);
862 	unlock_cluster(ci);
863 
864 	swap_range_alloc(si, offset, 1);
865 	si->cluster_next = offset + 1;
866 	slots[n_ret++] = swp_entry(si->type, offset);
867 
868 	/* got enough slots or reach max slots? */
869 	if ((n_ret == nr) || (offset >= si->highest_bit))
870 		goto done;
871 
872 	/* search for next available slot */
873 
874 	/* time to take a break? */
875 	if (unlikely(--latency_ration < 0)) {
876 		if (n_ret)
877 			goto done;
878 		spin_unlock(&si->lock);
879 		cond_resched();
880 		spin_lock(&si->lock);
881 		latency_ration = LATENCY_LIMIT;
882 	}
883 
884 	/* try to get more slots in cluster */
885 	if (si->cluster_info) {
886 		if (scan_swap_map_try_ssd_cluster(si, &offset, &scan_base))
887 			goto checks;
888 		else
889 			goto done;
890 	}
891 	/* non-ssd case */
892 	++offset;
893 
894 	/* non-ssd case, still more slots in cluster? */
895 	if (si->cluster_nr && !si->swap_map[offset]) {
896 		--si->cluster_nr;
897 		goto checks;
898 	}
899 
900 done:
901 	si->flags -= SWP_SCANNING;
902 	return n_ret;
903 
904 scan:
905 	spin_unlock(&si->lock);
906 	while (++offset <= si->highest_bit) {
907 		if (!si->swap_map[offset]) {
908 			spin_lock(&si->lock);
909 			goto checks;
910 		}
911 		if (vm_swap_full() && si->swap_map[offset] == SWAP_HAS_CACHE) {
912 			spin_lock(&si->lock);
913 			goto checks;
914 		}
915 		if (unlikely(--latency_ration < 0)) {
916 			cond_resched();
917 			latency_ration = LATENCY_LIMIT;
918 		}
919 	}
920 	offset = si->lowest_bit;
921 	while (offset < scan_base) {
922 		if (!si->swap_map[offset]) {
923 			spin_lock(&si->lock);
924 			goto checks;
925 		}
926 		if (vm_swap_full() && si->swap_map[offset] == SWAP_HAS_CACHE) {
927 			spin_lock(&si->lock);
928 			goto checks;
929 		}
930 		if (unlikely(--latency_ration < 0)) {
931 			cond_resched();
932 			latency_ration = LATENCY_LIMIT;
933 		}
934 		offset++;
935 	}
936 	spin_lock(&si->lock);
937 
938 no_page:
939 	si->flags -= SWP_SCANNING;
940 	return n_ret;
941 }
942 
943 static int swap_alloc_cluster(struct swap_info_struct *si, swp_entry_t *slot)
944 {
945 	unsigned long idx;
946 	struct swap_cluster_info *ci;
947 	unsigned long offset, i;
948 	unsigned char *map;
949 
950 	/*
951 	 * Should not even be attempting cluster allocations when huge
952 	 * page swap is disabled.  Warn and fail the allocation.
953 	 */
954 	if (!IS_ENABLED(CONFIG_THP_SWAP)) {
955 		VM_WARN_ON_ONCE(1);
956 		return 0;
957 	}
958 
959 	if (cluster_list_empty(&si->free_clusters))
960 		return 0;
961 
962 	idx = cluster_list_first(&si->free_clusters);
963 	offset = idx * SWAPFILE_CLUSTER;
964 	ci = lock_cluster(si, offset);
965 	alloc_cluster(si, idx);
966 	cluster_set_count_flag(ci, SWAPFILE_CLUSTER, CLUSTER_FLAG_HUGE);
967 
968 	map = si->swap_map + offset;
969 	for (i = 0; i < SWAPFILE_CLUSTER; i++)
970 		map[i] = SWAP_HAS_CACHE;
971 	unlock_cluster(ci);
972 	swap_range_alloc(si, offset, SWAPFILE_CLUSTER);
973 	*slot = swp_entry(si->type, offset);
974 
975 	return 1;
976 }
977 
978 static void swap_free_cluster(struct swap_info_struct *si, unsigned long idx)
979 {
980 	unsigned long offset = idx * SWAPFILE_CLUSTER;
981 	struct swap_cluster_info *ci;
982 
983 	ci = lock_cluster(si, offset);
984 	memset(si->swap_map + offset, 0, SWAPFILE_CLUSTER);
985 	cluster_set_count_flag(ci, 0, 0);
986 	free_cluster(si, idx);
987 	unlock_cluster(ci);
988 	swap_range_free(si, offset, SWAPFILE_CLUSTER);
989 }
990 
991 static unsigned long scan_swap_map(struct swap_info_struct *si,
992 				   unsigned char usage)
993 {
994 	swp_entry_t entry;
995 	int n_ret;
996 
997 	n_ret = scan_swap_map_slots(si, usage, 1, &entry);
998 
999 	if (n_ret)
1000 		return swp_offset(entry);
1001 	else
1002 		return 0;
1003 
1004 }
1005 
1006 int get_swap_pages(int n_goal, swp_entry_t swp_entries[], int entry_size)
1007 {
1008 	unsigned long size = swap_entry_size(entry_size);
1009 	struct swap_info_struct *si, *next;
1010 	long avail_pgs;
1011 	int n_ret = 0;
1012 	int node;
1013 
1014 	/* Only single cluster request supported */
1015 	WARN_ON_ONCE(n_goal > 1 && size == SWAPFILE_CLUSTER);
1016 
1017 	avail_pgs = atomic_long_read(&nr_swap_pages) / size;
1018 	if (avail_pgs <= 0)
1019 		goto noswap;
1020 
1021 	if (n_goal > SWAP_BATCH)
1022 		n_goal = SWAP_BATCH;
1023 
1024 	if (n_goal > avail_pgs)
1025 		n_goal = avail_pgs;
1026 
1027 	atomic_long_sub(n_goal * size, &nr_swap_pages);
1028 
1029 	spin_lock(&swap_avail_lock);
1030 
1031 start_over:
1032 	node = numa_node_id();
1033 	plist_for_each_entry_safe(si, next, &swap_avail_heads[node], avail_lists[node]) {
1034 		/* requeue si to after same-priority siblings */
1035 		plist_requeue(&si->avail_lists[node], &swap_avail_heads[node]);
1036 		spin_unlock(&swap_avail_lock);
1037 		spin_lock(&si->lock);
1038 		if (!si->highest_bit || !(si->flags & SWP_WRITEOK)) {
1039 			spin_lock(&swap_avail_lock);
1040 			if (plist_node_empty(&si->avail_lists[node])) {
1041 				spin_unlock(&si->lock);
1042 				goto nextsi;
1043 			}
1044 			WARN(!si->highest_bit,
1045 			     "swap_info %d in list but !highest_bit\n",
1046 			     si->type);
1047 			WARN(!(si->flags & SWP_WRITEOK),
1048 			     "swap_info %d in list but !SWP_WRITEOK\n",
1049 			     si->type);
1050 			__del_from_avail_list(si);
1051 			spin_unlock(&si->lock);
1052 			goto nextsi;
1053 		}
1054 		if (size == SWAPFILE_CLUSTER) {
1055 			if (si->flags & SWP_BLKDEV)
1056 				n_ret = swap_alloc_cluster(si, swp_entries);
1057 		} else
1058 			n_ret = scan_swap_map_slots(si, SWAP_HAS_CACHE,
1059 						    n_goal, swp_entries);
1060 		spin_unlock(&si->lock);
1061 		if (n_ret || size == SWAPFILE_CLUSTER)
1062 			goto check_out;
1063 		pr_debug("scan_swap_map of si %d failed to find offset\n",
1064 			si->type);
1065 		cond_resched();
1066 
1067 		spin_lock(&swap_avail_lock);
1068 nextsi:
1069 		/*
1070 		 * if we got here, it's likely that si was almost full before,
1071 		 * and since scan_swap_map() can drop the si->lock, multiple
1072 		 * callers probably all tried to get a page from the same si
1073 		 * and it filled up before we could get one; or, the si filled
1074 		 * up between us dropping swap_avail_lock and taking si->lock.
1075 		 * Since we dropped the swap_avail_lock, the swap_avail_head
1076 		 * list may have been modified; so if next is still in the
1077 		 * swap_avail_head list then try it, otherwise start over
1078 		 * if we have not gotten any slots.
1079 		 */
1080 		if (plist_node_empty(&next->avail_lists[node]))
1081 			goto start_over;
1082 	}
1083 
1084 	spin_unlock(&swap_avail_lock);
1085 
1086 check_out:
1087 	if (n_ret < n_goal)
1088 		atomic_long_add((long)(n_goal - n_ret) * size,
1089 				&nr_swap_pages);
1090 noswap:
1091 	return n_ret;
1092 }
1093 
1094 /* The only caller of this function is now the suspend routine */
1095 swp_entry_t get_swap_page_of_type(int type)
1096 {
1097 	struct swap_info_struct *si = swap_type_to_swap_info(type);
1098 	pgoff_t offset;
1099 
1100 	if (!si)
1101 		goto fail;
1102 
1103 	spin_lock(&si->lock);
1104 	if (si->flags & SWP_WRITEOK) {
1105 		atomic_long_dec(&nr_swap_pages);
1106 		/* This is called for allocating swap entry, not cache */
1107 		offset = scan_swap_map(si, 1);
1108 		if (offset) {
1109 			spin_unlock(&si->lock);
1110 			return swp_entry(type, offset);
1111 		}
1112 		atomic_long_inc(&nr_swap_pages);
1113 	}
1114 	spin_unlock(&si->lock);
1115 fail:
1116 	return (swp_entry_t) {0};
1117 }
1118 
1119 static struct swap_info_struct *__swap_info_get(swp_entry_t entry)
1120 {
1121 	struct swap_info_struct *p;
1122 	unsigned long offset;
1123 
1124 	if (!entry.val)
1125 		goto out;
1126 	p = swp_swap_info(entry);
1127 	if (!p)
1128 		goto bad_nofile;
1129 	if (!(p->flags & SWP_USED))
1130 		goto bad_device;
1131 	offset = swp_offset(entry);
1132 	if (offset >= p->max)
1133 		goto bad_offset;
1134 	return p;
1135 
1136 bad_offset:
1137 	pr_err("swap_info_get: %s%08lx\n", Bad_offset, entry.val);
1138 	goto out;
1139 bad_device:
1140 	pr_err("swap_info_get: %s%08lx\n", Unused_file, entry.val);
1141 	goto out;
1142 bad_nofile:
1143 	pr_err("swap_info_get: %s%08lx\n", Bad_file, entry.val);
1144 out:
1145 	return NULL;
1146 }
1147 
1148 static struct swap_info_struct *_swap_info_get(swp_entry_t entry)
1149 {
1150 	struct swap_info_struct *p;
1151 
1152 	p = __swap_info_get(entry);
1153 	if (!p)
1154 		goto out;
1155 	if (!p->swap_map[swp_offset(entry)])
1156 		goto bad_free;
1157 	return p;
1158 
1159 bad_free:
1160 	pr_err("swap_info_get: %s%08lx\n", Unused_offset, entry.val);
1161 	goto out;
1162 out:
1163 	return NULL;
1164 }
1165 
1166 static struct swap_info_struct *swap_info_get(swp_entry_t entry)
1167 {
1168 	struct swap_info_struct *p;
1169 
1170 	p = _swap_info_get(entry);
1171 	if (p)
1172 		spin_lock(&p->lock);
1173 	return p;
1174 }
1175 
1176 static struct swap_info_struct *swap_info_get_cont(swp_entry_t entry,
1177 					struct swap_info_struct *q)
1178 {
1179 	struct swap_info_struct *p;
1180 
1181 	p = _swap_info_get(entry);
1182 
1183 	if (p != q) {
1184 		if (q != NULL)
1185 			spin_unlock(&q->lock);
1186 		if (p != NULL)
1187 			spin_lock(&p->lock);
1188 	}
1189 	return p;
1190 }
1191 
1192 static unsigned char __swap_entry_free_locked(struct swap_info_struct *p,
1193 					      unsigned long offset,
1194 					      unsigned char usage)
1195 {
1196 	unsigned char count;
1197 	unsigned char has_cache;
1198 
1199 	count = p->swap_map[offset];
1200 
1201 	has_cache = count & SWAP_HAS_CACHE;
1202 	count &= ~SWAP_HAS_CACHE;
1203 
1204 	if (usage == SWAP_HAS_CACHE) {
1205 		VM_BUG_ON(!has_cache);
1206 		has_cache = 0;
1207 	} else if (count == SWAP_MAP_SHMEM) {
1208 		/*
1209 		 * Or we could insist on shmem.c using a special
1210 		 * swap_shmem_free() and free_shmem_swap_and_cache()...
1211 		 */
1212 		count = 0;
1213 	} else if ((count & ~COUNT_CONTINUED) <= SWAP_MAP_MAX) {
1214 		if (count == COUNT_CONTINUED) {
1215 			if (swap_count_continued(p, offset, count))
1216 				count = SWAP_MAP_MAX | COUNT_CONTINUED;
1217 			else
1218 				count = SWAP_MAP_MAX;
1219 		} else
1220 			count--;
1221 	}
1222 
1223 	usage = count | has_cache;
1224 	p->swap_map[offset] = usage ? : SWAP_HAS_CACHE;
1225 
1226 	return usage;
1227 }
1228 
1229 /*
1230  * Check whether swap entry is valid in the swap device.  If so,
1231  * return pointer to swap_info_struct, and keep the swap entry valid
1232  * by preventing the swap device from being swapped off, until
1233  * put_swap_device() is called.  Otherwise return NULL.
1234  *
1235  * The entirety of the RCU read critical section must come before the
1236  * return from or after the call to synchronize_rcu() in
1237  * enable_swap_info() or swapoff().  So if "si->flags & SWP_VALID" is
1238  * true, the si->map, si->cluster_info, etc. must be valid in the
1239  * critical section.
1240  *
1241  * Notice that swapoff or swapoff+swapon can still happen before the
1242  * rcu_read_lock() in get_swap_device() or after the rcu_read_unlock()
1243  * in put_swap_device() if there isn't any other way to prevent
1244  * swapoff, such as page lock, page table lock, etc.  The caller must
1245  * be prepared for that.  For example, the following situation is
1246  * possible.
1247  *
1248  *   CPU1				CPU2
1249  *   do_swap_page()
1250  *     ...				swapoff+swapon
1251  *     __read_swap_cache_async()
1252  *       swapcache_prepare()
1253  *         __swap_duplicate()
1254  *           // check swap_map
1255  *     // verify PTE not changed
1256  *
1257  * In __swap_duplicate(), the swap_map needs to be checked before being
1258  * changed, partly because the specified swap entry may be for another
1259  * swap device which has been swapped off.  And in do_swap_page(), after
1260  * the page is read from the swap device, the PTE is verified to be
1261  * unchanged, with the page table locked, to check whether the swap device
1262  * has been swapped off or swapped off and on again.
1263  */
1264 struct swap_info_struct *get_swap_device(swp_entry_t entry)
1265 {
1266 	struct swap_info_struct *si;
1267 	unsigned long offset;
1268 
1269 	if (!entry.val)
1270 		goto out;
1271 	si = swp_swap_info(entry);
1272 	if (!si)
1273 		goto bad_nofile;
1274 
1275 	rcu_read_lock();
1276 	if (!(si->flags & SWP_VALID))
1277 		goto unlock_out;
1278 	offset = swp_offset(entry);
1279 	if (offset >= si->max)
1280 		goto unlock_out;
1281 
1282 	return si;
1283 bad_nofile:
1284 	pr_err("%s: %s%08lx\n", __func__, Bad_file, entry.val);
1285 out:
1286 	return NULL;
1287 unlock_out:
1288 	rcu_read_unlock();
1289 	return NULL;
1290 }
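/*
 * A minimal usage sketch, mirroring __swap_count() further below: the
 * device returned here stays valid (cannot be swapped off) until
 * put_swap_device() is called.
 *
 *	si = get_swap_device(entry);
 *	if (si) {
 *		count = swap_count(si->swap_map[swp_offset(entry)]);
 *		put_swap_device(si);
 *	}
 */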
1291 
1292 static unsigned char __swap_entry_free(struct swap_info_struct *p,
1293 				       swp_entry_t entry, unsigned char usage)
1294 {
1295 	struct swap_cluster_info *ci;
1296 	unsigned long offset = swp_offset(entry);
1297 
1298 	ci = lock_cluster_or_swap_info(p, offset);
1299 	usage = __swap_entry_free_locked(p, offset, usage);
1300 	unlock_cluster_or_swap_info(p, ci);
1301 	if (!usage)
1302 		free_swap_slot(entry);
1303 
1304 	return usage;
1305 }
1306 
1307 static void swap_entry_free(struct swap_info_struct *p, swp_entry_t entry)
1308 {
1309 	struct swap_cluster_info *ci;
1310 	unsigned long offset = swp_offset(entry);
1311 	unsigned char count;
1312 
1313 	ci = lock_cluster(p, offset);
1314 	count = p->swap_map[offset];
1315 	VM_BUG_ON(count != SWAP_HAS_CACHE);
1316 	p->swap_map[offset] = 0;
1317 	dec_cluster_info_page(p, p->cluster_info, offset);
1318 	unlock_cluster(ci);
1319 
1320 	mem_cgroup_uncharge_swap(entry, 1);
1321 	swap_range_free(p, offset, 1);
1322 }
1323 
1324 /*
1325  * Caller has made sure that the swap device corresponding to entry
1326  * is still around or has not been recycled.
1327  */
1328 void swap_free(swp_entry_t entry)
1329 {
1330 	struct swap_info_struct *p;
1331 
1332 	p = _swap_info_get(entry);
1333 	if (p)
1334 		__swap_entry_free(p, entry, 1);
1335 }
1336 
1337 /*
1338  * Called after dropping swapcache to decrease refcnt to swap entries.
1339  */
1340 void put_swap_page(struct page *page, swp_entry_t entry)
1341 {
1342 	unsigned long offset = swp_offset(entry);
1343 	unsigned long idx = offset / SWAPFILE_CLUSTER;
1344 	struct swap_cluster_info *ci;
1345 	struct swap_info_struct *si;
1346 	unsigned char *map;
1347 	unsigned int i, free_entries = 0;
1348 	unsigned char val;
1349 	int size = swap_entry_size(hpage_nr_pages(page));
1350 
1351 	si = _swap_info_get(entry);
1352 	if (!si)
1353 		return;
1354 
1355 	ci = lock_cluster_or_swap_info(si, offset);
1356 	if (size == SWAPFILE_CLUSTER) {
1357 		VM_BUG_ON(!cluster_is_huge(ci));
1358 		map = si->swap_map + offset;
1359 		for (i = 0; i < SWAPFILE_CLUSTER; i++) {
1360 			val = map[i];
1361 			VM_BUG_ON(!(val & SWAP_HAS_CACHE));
1362 			if (val == SWAP_HAS_CACHE)
1363 				free_entries++;
1364 		}
1365 		cluster_clear_huge(ci);
1366 		if (free_entries == SWAPFILE_CLUSTER) {
1367 			unlock_cluster_or_swap_info(si, ci);
1368 			spin_lock(&si->lock);
1369 			mem_cgroup_uncharge_swap(entry, SWAPFILE_CLUSTER);
1370 			swap_free_cluster(si, idx);
1371 			spin_unlock(&si->lock);
1372 			return;
1373 		}
1374 	}
1375 	for (i = 0; i < size; i++, entry.val++) {
1376 		if (!__swap_entry_free_locked(si, offset + i, SWAP_HAS_CACHE)) {
1377 			unlock_cluster_or_swap_info(si, ci);
1378 			free_swap_slot(entry);
1379 			if (i == size - 1)
1380 				return;
1381 			lock_cluster_or_swap_info(si, offset);
1382 		}
1383 	}
1384 	unlock_cluster_or_swap_info(si, ci);
1385 }
1386 
1387 #ifdef CONFIG_THP_SWAP
1388 int split_swap_cluster(swp_entry_t entry)
1389 {
1390 	struct swap_info_struct *si;
1391 	struct swap_cluster_info *ci;
1392 	unsigned long offset = swp_offset(entry);
1393 
1394 	si = _swap_info_get(entry);
1395 	if (!si)
1396 		return -EBUSY;
1397 	ci = lock_cluster(si, offset);
1398 	cluster_clear_huge(ci);
1399 	unlock_cluster(ci);
1400 	return 0;
1401 }
1402 #endif
1403 
1404 static int swp_entry_cmp(const void *ent1, const void *ent2)
1405 {
1406 	const swp_entry_t *e1 = ent1, *e2 = ent2;
1407 
1408 	return (int)swp_type(*e1) - (int)swp_type(*e2);
1409 }
1410 
1411 void swapcache_free_entries(swp_entry_t *entries, int n)
1412 {
1413 	struct swap_info_struct *p, *prev;
1414 	int i;
1415 
1416 	if (n <= 0)
1417 		return;
1418 
1419 	prev = NULL;
1420 	p = NULL;
1421 
1422 	/*
1423 	 * Sort swap entries by swap device, so each lock is only taken once.
1424 	 * nr_swapfiles isn't absolutely correct, but the overhead of sort() is
1425 	 * so low that it isn't necessary to optimize further.
1426 	 */
1427 	if (nr_swapfiles > 1)
1428 		sort(entries, n, sizeof(entries[0]), swp_entry_cmp, NULL);
1429 	for (i = 0; i < n; ++i) {
1430 		p = swap_info_get_cont(entries[i], prev);
1431 		if (p)
1432 			swap_entry_free(p, entries[i]);
1433 		prev = p;
1434 	}
1435 	if (p)
1436 		spin_unlock(&p->lock);
1437 }
1438 
1439 /*
1440  * How many references to page are currently swapped out?
1441  * This does not give an exact answer when swap count is continued,
1442  * but does include the high COUNT_CONTINUED flag to allow for that.
1443  */
1444 int page_swapcount(struct page *page)
1445 {
1446 	int count = 0;
1447 	struct swap_info_struct *p;
1448 	struct swap_cluster_info *ci;
1449 	swp_entry_t entry;
1450 	unsigned long offset;
1451 
1452 	entry.val = page_private(page);
1453 	p = _swap_info_get(entry);
1454 	if (p) {
1455 		offset = swp_offset(entry);
1456 		ci = lock_cluster_or_swap_info(p, offset);
1457 		count = swap_count(p->swap_map[offset]);
1458 		unlock_cluster_or_swap_info(p, ci);
1459 	}
1460 	return count;
1461 }
1462 
1463 int __swap_count(swp_entry_t entry)
1464 {
1465 	struct swap_info_struct *si;
1466 	pgoff_t offset = swp_offset(entry);
1467 	int count = 0;
1468 
1469 	si = get_swap_device(entry);
1470 	if (si) {
1471 		count = swap_count(si->swap_map[offset]);
1472 		put_swap_device(si);
1473 	}
1474 	return count;
1475 }
1476 
1477 static int swap_swapcount(struct swap_info_struct *si, swp_entry_t entry)
1478 {
1479 	int count = 0;
1480 	pgoff_t offset = swp_offset(entry);
1481 	struct swap_cluster_info *ci;
1482 
1483 	ci = lock_cluster_or_swap_info(si, offset);
1484 	count = swap_count(si->swap_map[offset]);
1485 	unlock_cluster_or_swap_info(si, ci);
1486 	return count;
1487 }
1488 
1489 /*
1490  * How many references to @entry are currently swapped out?
1491  * This does not give an exact answer when swap count is continued,
1492  * but does include the high COUNT_CONTINUED flag to allow for that.
1493  */
1494 int __swp_swapcount(swp_entry_t entry)
1495 {
1496 	int count = 0;
1497 	struct swap_info_struct *si;
1498 
1499 	si = get_swap_device(entry);
1500 	if (si) {
1501 		count = swap_swapcount(si, entry);
1502 		put_swap_device(si);
1503 	}
1504 	return count;
1505 }
1506 
1507 /*
1508  * How many references to @entry are currently swapped out?
1509  * This considers COUNT_CONTINUED so it returns exact answer.
1510  */
1511 int swp_swapcount(swp_entry_t entry)
1512 {
1513 	int count, tmp_count, n;
1514 	struct swap_info_struct *p;
1515 	struct swap_cluster_info *ci;
1516 	struct page *page;
1517 	pgoff_t offset;
1518 	unsigned char *map;
1519 
1520 	p = _swap_info_get(entry);
1521 	if (!p)
1522 		return 0;
1523 
1524 	offset = swp_offset(entry);
1525 
1526 	ci = lock_cluster_or_swap_info(p, offset);
1527 
1528 	count = swap_count(p->swap_map[offset]);
1529 	if (!(count & COUNT_CONTINUED))
1530 		goto out;
1531 
1532 	count &= ~COUNT_CONTINUED;
1533 	n = SWAP_MAP_MAX + 1;
1534 
1535 	page = vmalloc_to_page(p->swap_map + offset);
1536 	offset &= ~PAGE_MASK;
1537 	VM_BUG_ON(page_private(page) != SWP_CONTINUED);
1538 
1539 	do {
1540 		page = list_next_entry(page, lru);
1541 		map = kmap_atomic(page);
1542 		tmp_count = map[offset];
1543 		kunmap_atomic(map);
1544 
1545 		count += (tmp_count & ~COUNT_CONTINUED) * n;
1546 		n *= (SWAP_CONT_MAX + 1);
1547 	} while (tmp_count & COUNT_CONTINUED);
1548 out:
1549 	unlock_cluster_or_swap_info(p, ci);
1550 	return count;
1551 }
1552 
1553 static bool swap_page_trans_huge_swapped(struct swap_info_struct *si,
1554 					 swp_entry_t entry)
1555 {
1556 	struct swap_cluster_info *ci;
1557 	unsigned char *map = si->swap_map;
1558 	unsigned long roffset = swp_offset(entry);
1559 	unsigned long offset = round_down(roffset, SWAPFILE_CLUSTER);
1560 	int i;
1561 	bool ret = false;
1562 
1563 	ci = lock_cluster_or_swap_info(si, offset);
1564 	if (!ci || !cluster_is_huge(ci)) {
1565 		if (swap_count(map[roffset]))
1566 			ret = true;
1567 		goto unlock_out;
1568 	}
1569 	for (i = 0; i < SWAPFILE_CLUSTER; i++) {
1570 		if (swap_count(map[offset + i])) {
1571 			ret = true;
1572 			break;
1573 		}
1574 	}
1575 unlock_out:
1576 	unlock_cluster_or_swap_info(si, ci);
1577 	return ret;
1578 }
1579 
1580 static bool page_swapped(struct page *page)
1581 {
1582 	swp_entry_t entry;
1583 	struct swap_info_struct *si;
1584 
1585 	if (!IS_ENABLED(CONFIG_THP_SWAP) || likely(!PageTransCompound(page)))
1586 		return page_swapcount(page) != 0;
1587 
1588 	page = compound_head(page);
1589 	entry.val = page_private(page);
1590 	si = _swap_info_get(entry);
1591 	if (si)
1592 		return swap_page_trans_huge_swapped(si, entry);
1593 	return false;
1594 }
1595 
1596 static int page_trans_huge_map_swapcount(struct page *page, int *total_mapcount,
1597 					 int *total_swapcount)
1598 {
1599 	int i, map_swapcount, _total_mapcount, _total_swapcount;
1600 	unsigned long offset = 0;
1601 	struct swap_info_struct *si;
1602 	struct swap_cluster_info *ci = NULL;
1603 	unsigned char *map = NULL;
1604 	int mapcount, swapcount = 0;
1605 
1606 	/* hugetlbfs shouldn't call it */
1607 	VM_BUG_ON_PAGE(PageHuge(page), page);
1608 
1609 	if (!IS_ENABLED(CONFIG_THP_SWAP) || likely(!PageTransCompound(page))) {
1610 		mapcount = page_trans_huge_mapcount(page, total_mapcount);
1611 		if (PageSwapCache(page))
1612 			swapcount = page_swapcount(page);
1613 		if (total_swapcount)
1614 			*total_swapcount = swapcount;
1615 		return mapcount + swapcount;
1616 	}
1617 
1618 	page = compound_head(page);
1619 
1620 	_total_mapcount = _total_swapcount = map_swapcount = 0;
1621 	if (PageSwapCache(page)) {
1622 		swp_entry_t entry;
1623 
1624 		entry.val = page_private(page);
1625 		si = _swap_info_get(entry);
1626 		if (si) {
1627 			map = si->swap_map;
1628 			offset = swp_offset(entry);
1629 		}
1630 	}
1631 	if (map)
1632 		ci = lock_cluster(si, offset);
1633 	for (i = 0; i < HPAGE_PMD_NR; i++) {
1634 		mapcount = atomic_read(&page[i]._mapcount) + 1;
1635 		_total_mapcount += mapcount;
1636 		if (map) {
1637 			swapcount = swap_count(map[offset + i]);
1638 			_total_swapcount += swapcount;
1639 		}
1640 		map_swapcount = max(map_swapcount, mapcount + swapcount);
1641 	}
1642 	unlock_cluster(ci);
1643 	if (PageDoubleMap(page)) {
1644 		map_swapcount -= 1;
1645 		_total_mapcount -= HPAGE_PMD_NR;
1646 	}
1647 	mapcount = compound_mapcount(page);
1648 	map_swapcount += mapcount;
1649 	_total_mapcount += mapcount;
1650 	if (total_mapcount)
1651 		*total_mapcount = _total_mapcount;
1652 	if (total_swapcount)
1653 		*total_swapcount = _total_swapcount;
1654 
1655 	return map_swapcount;
1656 }
1657 
1658 /*
1659  * We can write to an anon page without COW if there are no other references
1660  * to it.  And as a side-effect, free up its swap: because the old content
1661  * on disk will never be read, and seeking back there to write new content
1662  * later would only waste time away from clustering.
1663  *
1664  * NOTE: total_map_swapcount should not be relied upon by the caller if
1665  * reuse_swap_page() returns false, but it may be always overwritten
1666  * (see the other implementation for CONFIG_SWAP=n).
1667  */
1668 bool reuse_swap_page(struct page *page, int *total_map_swapcount)
1669 {
1670 	int count, total_mapcount, total_swapcount;
1671 
1672 	VM_BUG_ON_PAGE(!PageLocked(page), page);
1673 	if (unlikely(PageKsm(page)))
1674 		return false;
1675 	count = page_trans_huge_map_swapcount(page, &total_mapcount,
1676 					      &total_swapcount);
1677 	if (total_map_swapcount)
1678 		*total_map_swapcount = total_mapcount + total_swapcount;
1679 	if (count == 1 && PageSwapCache(page) &&
1680 	    (likely(!PageTransCompound(page)) ||
1681 	     /* The remaining swap count will be freed soon */
1682 	     total_swapcount == page_swapcount(page))) {
1683 		if (!PageWriteback(page)) {
1684 			page = compound_head(page);
1685 			delete_from_swap_cache(page);
1686 			SetPageDirty(page);
1687 		} else {
1688 			swp_entry_t entry;
1689 			struct swap_info_struct *p;
1690 
1691 			entry.val = page_private(page);
1692 			p = swap_info_get(entry);
1693 			if (p->flags & SWP_STABLE_WRITES) {
1694 				spin_unlock(&p->lock);
1695 				return false;
1696 			}
1697 			spin_unlock(&p->lock);
1698 		}
1699 	}
1700 
1701 	return count <= 1;
1702 }
1703 
1704 /*
1705  * If swap is getting full, or if there are no more mappings of this page,
1706  * then try_to_free_swap is called to free its swap space.
1707  */
1708 int try_to_free_swap(struct page *page)
1709 {
1710 	VM_BUG_ON_PAGE(!PageLocked(page), page);
1711 
1712 	if (!PageSwapCache(page))
1713 		return 0;
1714 	if (PageWriteback(page))
1715 		return 0;
1716 	if (page_swapped(page))
1717 		return 0;
1718 
1719 	/*
1720 	 * Once hibernation has begun to create its image of memory,
1721 	 * there's a danger that one of the calls to try_to_free_swap()
1722 	 * - most probably a call from __try_to_reclaim_swap() while
1723 	 * hibernation is allocating its own swap pages for the image,
1724 	 * but conceivably even a call from memory reclaim - will free
1725 	 * the swap from a page which has already been recorded in the
1726 	 * image as a clean swapcache page, and then reuse its swap for
1727 	 * another page of the image.  On waking from hibernation, the
1728 	 * original page might be freed under memory pressure, then
1729 	 * later read back in from swap, now with the wrong data.
1730 	 *
1731 	 * Hibernation suspends storage while it is writing the image
1732 	 * to disk so check that here.
1733 	 */
1734 	if (pm_suspended_storage())
1735 		return 0;
1736 
1737 	page = compound_head(page);
1738 	delete_from_swap_cache(page);
1739 	SetPageDirty(page);
1740 	return 1;
1741 }
1742 
1743 /*
1744  * Free the swap entry like above, but also try to
1745  * free the page cache entry if it is the last user.
1746  */
1747 int free_swap_and_cache(swp_entry_t entry)
1748 {
1749 	struct swap_info_struct *p;
1750 	unsigned char count;
1751 
1752 	if (non_swap_entry(entry))
1753 		return 1;
1754 
1755 	p = _swap_info_get(entry);
1756 	if (p) {
1757 		count = __swap_entry_free(p, entry, 1);
1758 		if (count == SWAP_HAS_CACHE &&
1759 		    !swap_page_trans_huge_swapped(p, entry))
1760 			__try_to_reclaim_swap(p, swp_offset(entry),
1761 					      TTRS_UNMAPPED | TTRS_FULL);
1762 	}
1763 	return p != NULL;
1764 }
1765 
1766 #ifdef CONFIG_HIBERNATION
1767 /*
1768  * Find the swap type that corresponds to given device (if any).
1769  *
1770  * @offset - number of the PAGE_SIZE-sized block of the device, starting
1771  * from 0, in which the swap header is expected to be located.
1772  *
1773  * This is needed for the suspend to disk (aka swsusp).
1774  */
1775 int swap_type_of(dev_t device, sector_t offset, struct block_device **bdev_p)
1776 {
1777 	struct block_device *bdev = NULL;
1778 	int type;
1779 
1780 	if (device)
1781 		bdev = bdget(device);
1782 
1783 	spin_lock(&swap_lock);
1784 	for (type = 0; type < nr_swapfiles; type++) {
1785 		struct swap_info_struct *sis = swap_info[type];
1786 
1787 		if (!(sis->flags & SWP_WRITEOK))
1788 			continue;
1789 
1790 		if (!bdev) {
1791 			if (bdev_p)
1792 				*bdev_p = bdgrab(sis->bdev);
1793 
1794 			spin_unlock(&swap_lock);
1795 			return type;
1796 		}
1797 		if (bdev == sis->bdev) {
1798 			struct swap_extent *se = first_se(sis);
1799 
1800 			if (se->start_block == offset) {
1801 				if (bdev_p)
1802 					*bdev_p = bdgrab(sis->bdev);
1803 
1804 				spin_unlock(&swap_lock);
1805 				bdput(bdev);
1806 				return type;
1807 			}
1808 		}
1809 	}
1810 	spin_unlock(&swap_lock);
1811 	if (bdev)
1812 		bdput(bdev);
1813 
1814 	return -ENODEV;
1815 }
1816 
1817 /*
1818  * Get the (PAGE_SIZE) block corresponding to given offset on the swapdev
1819  * corresponding to given index in swap_info (swap type).
1820  */
1821 sector_t swapdev_block(int type, pgoff_t offset)
1822 {
1823 	struct block_device *bdev;
1824 	struct swap_info_struct *si = swap_type_to_swap_info(type);
1825 
1826 	if (!si || !(si->flags & SWP_WRITEOK))
1827 		return 0;
1828 	return map_swap_entry(swp_entry(type, offset), &bdev);
1829 }
1830 
1831 /*
1832  * Return either the total number of swap pages of given type, or the number
1833  * of free pages of that type (depending on @free)
1834  *
1835  * This is needed for software suspend
1836  */
1837 unsigned int count_swap_pages(int type, int free)
1838 {
1839 	unsigned int n = 0;
1840 
1841 	spin_lock(&swap_lock);
1842 	if ((unsigned int)type < nr_swapfiles) {
1843 		struct swap_info_struct *sis = swap_info[type];
1844 
1845 		spin_lock(&sis->lock);
1846 		if (sis->flags & SWP_WRITEOK) {
1847 			n = sis->pages;
1848 			if (free)
1849 				n -= sis->inuse_pages;
1850 		}
1851 		spin_unlock(&sis->lock);
1852 	}
1853 	spin_unlock(&swap_lock);
1854 	return n;
1855 }
1856 #endif /* CONFIG_HIBERNATION */
1857 
1858 static inline int pte_same_as_swp(pte_t pte, pte_t swp_pte)
1859 {
1860 	return pte_same(pte_swp_clear_soft_dirty(pte), swp_pte);
1861 }
1862 
1863 /*
1864  * No need to decide whether this PTE shares the swap entry with others,
1865  * just let do_wp_page work it out if a write is requested later - to
1866  * force COW, vm_page_prot omits write permission from any private vma.
1867  */
1868 static int unuse_pte(struct vm_area_struct *vma, pmd_t *pmd,
1869 		unsigned long addr, swp_entry_t entry, struct page *page)
1870 {
1871 	struct page *swapcache;
1872 	struct mem_cgroup *memcg;
1873 	spinlock_t *ptl;
1874 	pte_t *pte;
1875 	int ret = 1;
1876 
1877 	swapcache = page;
1878 	page = ksm_might_need_to_copy(page, vma, addr);
1879 	if (unlikely(!page))
1880 		return -ENOMEM;
1881 
1882 	if (mem_cgroup_try_charge(page, vma->vm_mm, GFP_KERNEL,
1883 				&memcg, false)) {
1884 		ret = -ENOMEM;
1885 		goto out_nolock;
1886 	}
1887 
1888 	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
1889 	if (unlikely(!pte_same_as_swp(*pte, swp_entry_to_pte(entry)))) {
1890 		mem_cgroup_cancel_charge(page, memcg, false);
1891 		ret = 0;
1892 		goto out;
1893 	}
1894 
1895 	dec_mm_counter(vma->vm_mm, MM_SWAPENTS);
1896 	inc_mm_counter(vma->vm_mm, MM_ANONPAGES);
1897 	get_page(page);
1898 	set_pte_at(vma->vm_mm, addr, pte,
1899 		   pte_mkold(mk_pte(page, vma->vm_page_prot)));
1900 	if (page == swapcache) {
1901 		page_add_anon_rmap(page, vma, addr, false);
1902 		mem_cgroup_commit_charge(page, memcg, true, false);
1903 	} else { /* ksm created a completely new copy */
1904 		page_add_new_anon_rmap(page, vma, addr, false);
1905 		mem_cgroup_commit_charge(page, memcg, false, false);
1906 		lru_cache_add_active_or_unevictable(page, vma);
1907 	}
1908 	swap_free(entry);
1909 	/*
1910 	 * Move the page to the active list so it is not
1911 	 * immediately swapped out again after swapon.
1912 	 */
1913 	activate_page(page);
1914 out:
1915 	pte_unmap_unlock(pte, ptl);
1916 out_nolock:
1917 	if (page != swapcache) {
1918 		unlock_page(page);
1919 		put_page(page);
1920 	}
1921 	return ret;
1922 }
1923 
1924 static int unuse_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
1925 			unsigned long addr, unsigned long end,
1926 			unsigned int type, bool frontswap,
1927 			unsigned long *fs_pages_to_unuse)
1928 {
1929 	struct page *page;
1930 	swp_entry_t entry;
1931 	pte_t *pte;
1932 	struct swap_info_struct *si;
1933 	unsigned long offset;
1934 	int ret = 0;
1935 	volatile unsigned char *swap_map;
1936 
1937 	si = swap_info[type];
1938 	pte = pte_offset_map(pmd, addr);
1939 	do {
1940 		struct vm_fault vmf;
1941 
1942 		if (!is_swap_pte(*pte))
1943 			continue;
1944 
1945 		entry = pte_to_swp_entry(*pte);
1946 		if (swp_type(entry) != type)
1947 			continue;
1948 
1949 		offset = swp_offset(entry);
1950 		if (frontswap && !frontswap_test(si, offset))
1951 			continue;
1952 
1953 		pte_unmap(pte);
1954 		swap_map = &si->swap_map[offset];
1955 		page = lookup_swap_cache(entry, vma, addr);
1956 		if (!page) {
1957 			vmf.vma = vma;
1958 			vmf.address = addr;
1959 			vmf.pmd = pmd;
1960 			page = swapin_readahead(entry, GFP_HIGHUSER_MOVABLE,
1961 						&vmf);
1962 		}
1963 		if (!page) {
1964 			if (*swap_map == 0 || *swap_map == SWAP_MAP_BAD)
1965 				goto try_next;
1966 			return -ENOMEM;
1967 		}
1968 
1969 		lock_page(page);
1970 		wait_on_page_writeback(page);
1971 		ret = unuse_pte(vma, pmd, addr, entry, page);
1972 		if (ret < 0) {
1973 			unlock_page(page);
1974 			put_page(page);
1975 			goto out;
1976 		}
1977 
1978 		try_to_free_swap(page);
1979 		unlock_page(page);
1980 		put_page(page);
1981 
1982 		if (*fs_pages_to_unuse && !--(*fs_pages_to_unuse)) {
1983 			ret = FRONTSWAP_PAGES_UNUSED;
1984 			goto out;
1985 		}
1986 try_next:
1987 		pte = pte_offset_map(pmd, addr);
1988 	} while (pte++, addr += PAGE_SIZE, addr != end);
1989 	pte_unmap(pte - 1);
1990 
1991 	ret = 0;
1992 out:
1993 	return ret;
1994 }
1995 
1996 static inline int unuse_pmd_range(struct vm_area_struct *vma, pud_t *pud,
1997 				unsigned long addr, unsigned long end,
1998 				unsigned int type, bool frontswap,
1999 				unsigned long *fs_pages_to_unuse)
2000 {
2001 	pmd_t *pmd;
2002 	unsigned long next;
2003 	int ret;
2004 
2005 	pmd = pmd_offset(pud, addr);
2006 	do {
2007 		cond_resched();
2008 		next = pmd_addr_end(addr, end);
2009 		if (pmd_none_or_trans_huge_or_clear_bad(pmd))
2010 			continue;
2011 		ret = unuse_pte_range(vma, pmd, addr, next, type,
2012 				      frontswap, fs_pages_to_unuse);
2013 		if (ret)
2014 			return ret;
2015 	} while (pmd++, addr = next, addr != end);
2016 	return 0;
2017 }
2018 
2019 static inline int unuse_pud_range(struct vm_area_struct *vma, p4d_t *p4d,
2020 				unsigned long addr, unsigned long end,
2021 				unsigned int type, bool frontswap,
2022 				unsigned long *fs_pages_to_unuse)
2023 {
2024 	pud_t *pud;
2025 	unsigned long next;
2026 	int ret;
2027 
2028 	pud = pud_offset(p4d, addr);
2029 	do {
2030 		next = pud_addr_end(addr, end);
2031 		if (pud_none_or_clear_bad(pud))
2032 			continue;
2033 		ret = unuse_pmd_range(vma, pud, addr, next, type,
2034 				      frontswap, fs_pages_to_unuse);
2035 		if (ret)
2036 			return ret;
2037 	} while (pud++, addr = next, addr != end);
2038 	return 0;
2039 }
2040 
2041 static inline int unuse_p4d_range(struct vm_area_struct *vma, pgd_t *pgd,
2042 				unsigned long addr, unsigned long end,
2043 				unsigned int type, bool frontswap,
2044 				unsigned long *fs_pages_to_unuse)
2045 {
2046 	p4d_t *p4d;
2047 	unsigned long next;
2048 	int ret;
2049 
2050 	p4d = p4d_offset(pgd, addr);
2051 	do {
2052 		next = p4d_addr_end(addr, end);
2053 		if (p4d_none_or_clear_bad(p4d))
2054 			continue;
2055 		ret = unuse_pud_range(vma, p4d, addr, next, type,
2056 				      frontswap, fs_pages_to_unuse);
2057 		if (ret)
2058 			return ret;
2059 	} while (p4d++, addr = next, addr != end);
2060 	return 0;
2061 }
2062 
2063 static int unuse_vma(struct vm_area_struct *vma, unsigned int type,
2064 		     bool frontswap, unsigned long *fs_pages_to_unuse)
2065 {
2066 	pgd_t *pgd;
2067 	unsigned long addr, end, next;
2068 	int ret;
2069 
2070 	addr = vma->vm_start;
2071 	end = vma->vm_end;
2072 
2073 	pgd = pgd_offset(vma->vm_mm, addr);
2074 	do {
2075 		next = pgd_addr_end(addr, end);
2076 		if (pgd_none_or_clear_bad(pgd))
2077 			continue;
2078 		ret = unuse_p4d_range(vma, pgd, addr, next, type,
2079 				      frontswap, fs_pages_to_unuse);
2080 		if (ret)
2081 			return ret;
2082 	} while (pgd++, addr = next, addr != end);
2083 	return 0;
2084 }
2085 
2086 static int unuse_mm(struct mm_struct *mm, unsigned int type,
2087 		    bool frontswap, unsigned long *fs_pages_to_unuse)
2088 {
2089 	struct vm_area_struct *vma;
2090 	int ret = 0;
2091 
2092 	down_read(&mm->mmap_sem);
2093 	for (vma = mm->mmap; vma; vma = vma->vm_next) {
2094 		if (vma->anon_vma) {
2095 			ret = unuse_vma(vma, type, frontswap,
2096 					fs_pages_to_unuse);
2097 			if (ret)
2098 				break;
2099 		}
2100 		cond_resched();
2101 	}
2102 	up_read(&mm->mmap_sem);
2103 	return ret;
2104 }
2105 
2106 /*
2107  * Scan swap_map (or frontswap_map if frontswap parameter is true)
2108  * from current position to next entry still in use. Return 0
2109  * if there are no in-use entries after prev till the end of the map.
2110  */
2111 static unsigned int find_next_to_unuse(struct swap_info_struct *si,
2112 					unsigned int prev, bool frontswap)
2113 {
2114 	unsigned int i;
2115 	unsigned char count;
2116 
2117 	/*
2118 	 * No need for swap_lock here: we're just looking
2119 	 * for whether an entry is in use, not modifying it; false
2120 	 * hits are okay, and sys_swapoff() has already prevented new
2121 	 * allocations from this area (while holding swap_lock).
2122 	 */
2123 	for (i = prev + 1; i < si->max; i++) {
2124 		count = READ_ONCE(si->swap_map[i]);
2125 		if (count && swap_count(count) != SWAP_MAP_BAD)
2126 			if (!frontswap || frontswap_test(si, i))
2127 				break;
2128 		if ((i % LATENCY_LIMIT) == 0)
2129 			cond_resched();
2130 	}
2131 
2132 	if (i == si->max)
2133 		i = 0;
2134 
2135 	return i;
2136 }
2137 
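/*
 * Overview of try_to_unuse() below: it first asks shmem to swap its entries
 * of this type back in, then walks every mm on init_mm.mmlist and faults the
 * remaining entries in via unuse_mm(), and finally sweeps what is left of
 * the swap cache with find_next_to_unuse()/try_to_free_swap().  It keeps
 * retrying until si->inuse_pages reaches zero or the caller is signalled.
 */
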
2138 /*
2139  * If the boolean frontswap is true, only unuse pages_to_unuse pages;
2140  * pages_to_unuse==0 means all pages; ignored if frontswap is false
2141  */
2142 int try_to_unuse(unsigned int type, bool frontswap,
2143 		 unsigned long pages_to_unuse)
2144 {
2145 	struct mm_struct *prev_mm;
2146 	struct mm_struct *mm;
2147 	struct list_head *p;
2148 	int retval = 0;
2149 	struct swap_info_struct *si = swap_info[type];
2150 	struct page *page;
2151 	swp_entry_t entry;
2152 	unsigned int i;
2153 
2154 	if (!READ_ONCE(si->inuse_pages))
2155 		return 0;
2156 
2157 	if (!frontswap)
2158 		pages_to_unuse = 0;
2159 
2160 retry:
2161 	retval = shmem_unuse(type, frontswap, &pages_to_unuse);
2162 	if (retval)
2163 		goto out;
2164 
2165 	prev_mm = &init_mm;
2166 	mmget(prev_mm);
2167 
2168 	spin_lock(&mmlist_lock);
2169 	p = &init_mm.mmlist;
2170 	while (READ_ONCE(si->inuse_pages) &&
2171 	       !signal_pending(current) &&
2172 	       (p = p->next) != &init_mm.mmlist) {
2173 
2174 		mm = list_entry(p, struct mm_struct, mmlist);
2175 		if (!mmget_not_zero(mm))
2176 			continue;
2177 		spin_unlock(&mmlist_lock);
2178 		mmput(prev_mm);
2179 		prev_mm = mm;
2180 		retval = unuse_mm(mm, type, frontswap, &pages_to_unuse);
2181 
2182 		if (retval) {
2183 			mmput(prev_mm);
2184 			goto out;
2185 		}
2186 
2187 		/*
2188 		 * Make sure that we aren't completely killing
2189 		 * interactive performance.
2190 		 */
2191 		cond_resched();
2192 		spin_lock(&mmlist_lock);
2193 	}
2194 	spin_unlock(&mmlist_lock);
2195 
2196 	mmput(prev_mm);
2197 
2198 	i = 0;
2199 	while (READ_ONCE(si->inuse_pages) &&
2200 	       !signal_pending(current) &&
2201 	       (i = find_next_to_unuse(si, i, frontswap)) != 0) {
2202 
2203 		entry = swp_entry(type, i);
2204 		page = find_get_page(swap_address_space(entry), i);
2205 		if (!page)
2206 			continue;
2207 
2208 		/*
2209 		 * It is conceivable that a racing task removed this page from
2210 		 * swap cache just before we acquired the page lock. The page
2211 		 * might even be back in swap cache on another swap area. But
2212 		 * that is okay, try_to_free_swap() only removes stale pages.
2213 		 */
2214 		lock_page(page);
2215 		wait_on_page_writeback(page);
2216 		try_to_free_swap(page);
2217 		unlock_page(page);
2218 		put_page(page);
2219 
2220 		/*
2221 		 * For frontswap, we just need to unuse pages_to_unuse, if
2222 		 * it was specified. Need not check frontswap again here as
2223 		 * we already zeroed out pages_to_unuse if not frontswap.
2224 		 */
2225 		if (pages_to_unuse && --pages_to_unuse == 0)
2226 			goto out;
2227 	}
2228 
2229 	/*
2230 	 * Let's check again to see if there are still swap entries in the map.
2231 	 * If yes, we need to retry the unuse logic.
2232 	 * Under global memory pressure, swap entries can be reinserted back
2233 	 * into process space after the mmlist loop above passes over them.
2234 	 *
2235 	 * Limit the number of retries? No: when mmget_not_zero() above fails,
2236 	 * that mm is likely to be freeing swap from exit_mmap(), which proceeds
2237 	 * at its own independent pace; and even shmem_writepage() could have
2238 	 * been preempted after get_swap_page(), temporarily hiding that swap.
2239 	 * It's easy and robust (though cpu-intensive) just to keep retrying.
2240 	 */
2241 	if (READ_ONCE(si->inuse_pages)) {
2242 		if (!signal_pending(current))
2243 			goto retry;
2244 		retval = -EINTR;
2245 	}
2246 out:
2247 	return (retval == FRONTSWAP_PAGES_UNUSED) ? 0 : retval;
2248 }
2249 
2250 /*
2251  * After a successful try_to_unuse, if no swap is now in use, we know
2252  * we can empty the mmlist.  swap_lock must be held on entry and exit.
2253  * Note that mmlist_lock nests inside swap_lock, and an mm must be
2254  * added to the mmlist just after swap_duplicate() - before would be racy.
2255  */
2256 static void drain_mmlist(void)
2257 {
2258 	struct list_head *p, *next;
2259 	unsigned int type;
2260 
2261 	for (type = 0; type < nr_swapfiles; type++)
2262 		if (swap_info[type]->inuse_pages)
2263 			return;
2264 	spin_lock(&mmlist_lock);
2265 	list_for_each_safe(p, next, &init_mm.mmlist)
2266 		list_del_init(p);
2267 	spin_unlock(&mmlist_lock);
2268 }
2269 
2270 /*
2271  * Use this swapdev's extent info to locate the (PAGE_SIZE) block which
2272  * corresponds to page offset for the specified swap entry.
2273  * Note that the return type of this function is sector_t, but it returns
2274  * the page offset into the bdev, not the sector offset.
2275  */
2276 static sector_t map_swap_entry(swp_entry_t entry, struct block_device **bdev)
2277 {
2278 	struct swap_info_struct *sis;
2279 	struct swap_extent *se;
2280 	pgoff_t offset;
2281 
2282 	sis = swp_swap_info(entry);
2283 	*bdev = sis->bdev;
2284 
2285 	offset = swp_offset(entry);
2286 	se = offset_to_swap_extent(sis, offset);
2287 	return se->start_block + (offset - se->start_page);
2288 }
2289 
2290 /*
2291  * Returns the page offset into bdev for the specified page's swap entry.
2292  */
2293 sector_t map_swap_page(struct page *page, struct block_device **bdev)
2294 {
2295 	swp_entry_t entry;
2296 	entry.val = page_private(page);
2297 	return map_swap_entry(entry, bdev);
2298 }
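
/*
 * Illustrative example of the extent arithmetic above (numbers are made up):
 * suppose an S_ISREG swapfile produced an extent { start_page = 256,
 * nr_pages = 1024, start_block = 90112 }.  A swap entry with page offset 300
 * falls inside that extent, so map_swap_entry() returns
 * 90112 + (300 - 256) = 90156, i.e. the 90156th PAGE_SIZE block of the
 * backing device.
 */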
2299 
2300 /*
2301  * Free all of a swapdev's extent information
2302  */
2303 static void destroy_swap_extents(struct swap_info_struct *sis)
2304 {
2305 	while (!RB_EMPTY_ROOT(&sis->swap_extent_root)) {
2306 		struct rb_node *rb = sis->swap_extent_root.rb_node;
2307 		struct swap_extent *se = rb_entry(rb, struct swap_extent, rb_node);
2308 
2309 		rb_erase(rb, &sis->swap_extent_root);
2310 		kfree(se);
2311 	}
2312 
2313 	if (sis->flags & SWP_ACTIVATED) {
2314 		struct file *swap_file = sis->swap_file;
2315 		struct address_space *mapping = swap_file->f_mapping;
2316 
2317 		sis->flags &= ~SWP_ACTIVATED;
2318 		if (mapping->a_ops->swap_deactivate)
2319 			mapping->a_ops->swap_deactivate(swap_file);
2320 	}
2321 }
2322 
2323 /*
2324  * Add a block range (and the corresponding page range) into this swapdev's
2325  * extent tree.
2326  *
2327  * This function rather assumes that it is called in ascending page order.
2328  */
2329 int
2330 add_swap_extent(struct swap_info_struct *sis, unsigned long start_page,
2331 		unsigned long nr_pages, sector_t start_block)
2332 {
2333 	struct rb_node **link = &sis->swap_extent_root.rb_node, *parent = NULL;
2334 	struct swap_extent *se;
2335 	struct swap_extent *new_se;
2336 
2337 	/*
2338 	 * place the new node at the rightmost position since the
2339 	 * function is called in ascending page order.
2340 	 */
2341 	while (*link) {
2342 		parent = *link;
2343 		link = &parent->rb_right;
2344 	}
2345 
2346 	if (parent) {
2347 		se = rb_entry(parent, struct swap_extent, rb_node);
2348 		BUG_ON(se->start_page + se->nr_pages != start_page);
2349 		if (se->start_block + se->nr_pages == start_block) {
2350 			/* Merge it */
2351 			se->nr_pages += nr_pages;
2352 			return 0;
2353 		}
2354 	}
2355 
2356 	/* No merge, insert a new extent. */
2357 	new_se = kmalloc(sizeof(*se), GFP_KERNEL);
2358 	if (new_se == NULL)
2359 		return -ENOMEM;
2360 	new_se->start_page = start_page;
2361 	new_se->nr_pages = nr_pages;
2362 	new_se->start_block = start_block;
2363 
2364 	rb_link_node(&new_se->rb_node, parent, link);
2365 	rb_insert_color(&new_se->rb_node, &sis->swap_extent_root);
2366 	return 1;
2367 }
2368 EXPORT_SYMBOL_GPL(add_swap_extent);
2369 
2370 /*
2371  * A `swap extent' is a simple thing which maps a contiguous range of pages
2372  * onto a contiguous range of disk blocks.  An ordered list of swap extents
2373  * is built at swapon time and is then used at swap_writepage/swap_readpage
2374  * time for locating where on disk a page belongs.
2375  *
2376  * If the swapfile is an S_ISBLK block device, a single extent is installed.
2377  * This is done so that the main operating code can treat S_ISBLK and S_ISREG
2378  * swap files identically.
2379  *
2380  * Whether the swapdev is an S_ISREG file or an S_ISBLK blockdev, the swap
2381  * extent list operates in PAGE_SIZE disk blocks.  Both S_ISREG and S_ISBLK
2382  * swapfiles are handled *identically* after swapon time.
2383  *
2384  * For S_ISREG swapfiles, setup_swap_extents() will walk all the file's blocks
2385  * and will parse them into an ordered extent list, in PAGE_SIZE chunks.  If
2386  * some stray blocks are found which do not fall within the PAGE_SIZE alignment
2387  * requirements, they are simply tossed out - we will never use those blocks
2388  * for swapping.
2389  *
2390  * For all swap devices we set S_SWAPFILE across the life of the swapon.  This
2391  * prevents users from writing to the swap device, which will corrupt memory.
2392  *
2393  * The amount of disk space which a single swap extent represents varies.
2394  * Typically it is in the 1-4 megabyte range.  So we can have hundreds of
2395  * extents per swap area.  The extents are kept in an rbtree rooted at
2396  * swap_extent_root, so offset_to_swap_extent() can find the extent for a
2397  * given page offset in O(log n) time rather than walking a list; see
2398  * map_swap_entry() above for the lookup.
2399  */
2400 static int setup_swap_extents(struct swap_info_struct *sis, sector_t *span)
2401 {
2402 	struct file *swap_file = sis->swap_file;
2403 	struct address_space *mapping = swap_file->f_mapping;
2404 	struct inode *inode = mapping->host;
2405 	int ret;
2406 
2407 	if (S_ISBLK(inode->i_mode)) {
2408 		ret = add_swap_extent(sis, 0, sis->max, 0);
2409 		*span = sis->pages;
2410 		return ret;
2411 	}
2412 
2413 	if (mapping->a_ops->swap_activate) {
2414 		ret = mapping->a_ops->swap_activate(sis, swap_file, span);
2415 		if (ret >= 0)
2416 			sis->flags |= SWP_ACTIVATED;
2417 		if (!ret) {
2418 			sis->flags |= SWP_FS;
2419 			ret = add_swap_extent(sis, 0, sis->max, 0);
2420 			*span = sis->pages;
2421 		}
2422 		return ret;
2423 	}
2424 
2425 	return generic_swapfile_activate(sis, swap_file, span);
2426 }
2427 
2428 static int swap_node(struct swap_info_struct *p)
2429 {
2430 	struct block_device *bdev;
2431 
2432 	if (p->bdev)
2433 		bdev = p->bdev;
2434 	else
2435 		bdev = p->swap_file->f_inode->i_sb->s_bdev;
2436 
2437 	return bdev ? bdev->bd_disk->node_id : NUMA_NO_NODE;
2438 }
2439 
2440 static void setup_swap_info(struct swap_info_struct *p, int prio,
2441 			    unsigned char *swap_map,
2442 			    struct swap_cluster_info *cluster_info)
2443 {
2444 	int i;
2445 
2446 	if (prio >= 0)
2447 		p->prio = prio;
2448 	else
2449 		p->prio = --least_priority;
2450 	/*
2451 	 * the plist prio is negated because plist ordering is
2452 	 * low-to-high, while swap ordering is high-to-low
2453 	 */
2454 	p->list.prio = -p->prio;
2455 	for_each_node(i) {
2456 		if (p->prio >= 0)
2457 			p->avail_lists[i].prio = -p->prio;
2458 		else {
2459 			if (swap_node(p) == i)
2460 				p->avail_lists[i].prio = 1;
2461 			else
2462 				p->avail_lists[i].prio = -p->prio;
2463 		}
2464 	}
2465 	p->swap_map = swap_map;
2466 	p->cluster_info = cluster_info;
2467 }
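
/*
 * Numeric illustration of the negation above: an area given prio 5 stores
 * list.prio == -5 and therefore sorts ahead of an area with prio 1
 * (list.prio == -1), since plists order low-to-high while swap allocation
 * wants the highest-priority area first.
 */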
2468 
2469 static void _enable_swap_info(struct swap_info_struct *p)
2470 {
2471 	p->flags |= SWP_WRITEOK | SWP_VALID;
2472 	atomic_long_add(p->pages, &nr_swap_pages);
2473 	total_swap_pages += p->pages;
2474 
2475 	assert_spin_locked(&swap_lock);
2476 	/*
2477 	 * both lists are plists, and thus priority ordered.
2478 	 * swap_active_head needs to be priority ordered for swapoff(),
2479 	 * which on removal of any swap_info_struct with an auto-assigned
2480 	 * (i.e. negative) priority increments the auto-assigned priority
2481 	 * of any lower-priority swap_info_structs.
2482 	 * swap_avail_head needs to be priority ordered for get_swap_page(),
2483 	 * which allocates swap pages from the highest available priority
2484 	 * swap_info_struct.
2485 	 */
2486 	plist_add(&p->list, &swap_active_head);
2487 	add_to_avail_list(p);
2488 }
2489 
2490 static void enable_swap_info(struct swap_info_struct *p, int prio,
2491 				unsigned char *swap_map,
2492 				struct swap_cluster_info *cluster_info,
2493 				unsigned long *frontswap_map)
2494 {
2495 	frontswap_init(p->type, frontswap_map);
2496 	spin_lock(&swap_lock);
2497 	spin_lock(&p->lock);
2498 	setup_swap_info(p, prio, swap_map, cluster_info);
2499 	spin_unlock(&p->lock);
2500 	spin_unlock(&swap_lock);
2501 	/*
2502 	 * Guarantee swap_map, cluster_info, etc. fields are valid
2503 	 * between get/put_swap_device() if SWP_VALID bit is set
2504 	 */
2505 	synchronize_rcu();
2506 	spin_lock(&swap_lock);
2507 	spin_lock(&p->lock);
2508 	_enable_swap_info(p);
2509 	spin_unlock(&p->lock);
2510 	spin_unlock(&swap_lock);
2511 }
2512 
2513 static void reinsert_swap_info(struct swap_info_struct *p)
2514 {
2515 	spin_lock(&swap_lock);
2516 	spin_lock(&p->lock);
2517 	setup_swap_info(p, p->prio, p->swap_map, p->cluster_info);
2518 	_enable_swap_info(p);
2519 	spin_unlock(&p->lock);
2520 	spin_unlock(&swap_lock);
2521 }
2522 
2523 bool has_usable_swap(void)
2524 {
2525 	bool ret = true;
2526 
2527 	spin_lock(&swap_lock);
2528 	if (plist_head_empty(&swap_active_head))
2529 		ret = false;
2530 	spin_unlock(&swap_lock);
2531 	return ret;
2532 }
2533 
2534 SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
2535 {
2536 	struct swap_info_struct *p = NULL;
2537 	unsigned char *swap_map;
2538 	struct swap_cluster_info *cluster_info;
2539 	unsigned long *frontswap_map;
2540 	struct file *swap_file, *victim;
2541 	struct address_space *mapping;
2542 	struct inode *inode;
2543 	struct filename *pathname;
2544 	int err, found = 0;
2545 	unsigned int old_block_size;
2546 
2547 	if (!capable(CAP_SYS_ADMIN))
2548 		return -EPERM;
2549 
2550 	BUG_ON(!current->mm);
2551 
2552 	pathname = getname(specialfile);
2553 	if (IS_ERR(pathname))
2554 		return PTR_ERR(pathname);
2555 
2556 	victim = file_open_name(pathname, O_RDWR|O_LARGEFILE, 0);
2557 	err = PTR_ERR(victim);
2558 	if (IS_ERR(victim))
2559 		goto out;
2560 
2561 	mapping = victim->f_mapping;
2562 	spin_lock(&swap_lock);
2563 	plist_for_each_entry(p, &swap_active_head, list) {
2564 		if (p->flags & SWP_WRITEOK) {
2565 			if (p->swap_file->f_mapping == mapping) {
2566 				found = 1;
2567 				break;
2568 			}
2569 		}
2570 	}
2571 	if (!found) {
2572 		err = -EINVAL;
2573 		spin_unlock(&swap_lock);
2574 		goto out_dput;
2575 	}
2576 	if (!security_vm_enough_memory_mm(current->mm, p->pages))
2577 		vm_unacct_memory(p->pages);
2578 	else {
2579 		err = -ENOMEM;
2580 		spin_unlock(&swap_lock);
2581 		goto out_dput;
2582 	}
2583 	spin_lock(&p->lock);
2584 	del_from_avail_list(p);
2585 	if (p->prio < 0) {
2586 		struct swap_info_struct *si = p;
2587 		int nid;
2588 
2589 		plist_for_each_entry_continue(si, &swap_active_head, list) {
2590 			si->prio++;
2591 			si->list.prio--;
2592 			for_each_node(nid) {
2593 				if (si->avail_lists[nid].prio != 1)
2594 					si->avail_lists[nid].prio--;
2595 			}
2596 		}
2597 		least_priority++;
2598 	}
2599 	plist_del(&p->list, &swap_active_head);
2600 	atomic_long_sub(p->pages, &nr_swap_pages);
2601 	total_swap_pages -= p->pages;
2602 	p->flags &= ~SWP_WRITEOK;
2603 	spin_unlock(&p->lock);
2604 	spin_unlock(&swap_lock);
2605 
2606 	disable_swap_slots_cache_lock();
2607 
2608 	set_current_oom_origin();
2609 	err = try_to_unuse(p->type, false, 0); /* force unuse all pages */
2610 	clear_current_oom_origin();
2611 
2612 	if (err) {
2613 		/* re-insert swap space back into swap_list */
2614 		reinsert_swap_info(p);
2615 		reenable_swap_slots_cache_unlock();
2616 		goto out_dput;
2617 	}
2618 
2619 	reenable_swap_slots_cache_unlock();
2620 
2621 	spin_lock(&swap_lock);
2622 	spin_lock(&p->lock);
2623 	p->flags &= ~SWP_VALID;		/* mark swap device as invalid */
2624 	spin_unlock(&p->lock);
2625 	spin_unlock(&swap_lock);
2626 	/*
2627 	 * wait for swap operations protected by get/put_swap_device()
2628 	 * to complete
2629 	 */
2630 	synchronize_rcu();
2631 
2632 	flush_work(&p->discard_work);
2633 
2634 	destroy_swap_extents(p);
2635 	if (p->flags & SWP_CONTINUED)
2636 		free_swap_count_continuations(p);
2637 
2638 	if (!p->bdev || !blk_queue_nonrot(bdev_get_queue(p->bdev)))
2639 		atomic_dec(&nr_rotate_swap);
2640 
2641 	mutex_lock(&swapon_mutex);
2642 	spin_lock(&swap_lock);
2643 	spin_lock(&p->lock);
2644 	drain_mmlist();
2645 
2646 	/* wait for anyone still in scan_swap_map */
2647 	p->highest_bit = 0;		/* cuts scans short */
2648 	while (p->flags >= SWP_SCANNING) {
2649 		spin_unlock(&p->lock);
2650 		spin_unlock(&swap_lock);
2651 		schedule_timeout_uninterruptible(1);
2652 		spin_lock(&swap_lock);
2653 		spin_lock(&p->lock);
2654 	}
2655 
2656 	swap_file = p->swap_file;
2657 	old_block_size = p->old_block_size;
2658 	p->swap_file = NULL;
2659 	p->max = 0;
2660 	swap_map = p->swap_map;
2661 	p->swap_map = NULL;
2662 	cluster_info = p->cluster_info;
2663 	p->cluster_info = NULL;
2664 	frontswap_map = frontswap_map_get(p);
2665 	spin_unlock(&p->lock);
2666 	spin_unlock(&swap_lock);
2667 	frontswap_invalidate_area(p->type);
2668 	frontswap_map_set(p, NULL);
2669 	mutex_unlock(&swapon_mutex);
2670 	free_percpu(p->percpu_cluster);
2671 	p->percpu_cluster = NULL;
2672 	vfree(swap_map);
2673 	kvfree(cluster_info);
2674 	kvfree(frontswap_map);
2675 	/* Destroy swap account information */
2676 	swap_cgroup_swapoff(p->type);
2677 	exit_swap_address_space(p->type);
2678 
2679 	inode = mapping->host;
2680 	if (S_ISBLK(inode->i_mode)) {
2681 		struct block_device *bdev = I_BDEV(inode);
2682 
2683 		set_blocksize(bdev, old_block_size);
2684 		blkdev_put(bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
2685 	}
2686 
2687 	inode_lock(inode);
2688 	inode->i_flags &= ~S_SWAPFILE;
2689 	inode_unlock(inode);
2690 	filp_close(swap_file, NULL);
2691 
2692 	/*
2693 	 * Clear the SWP_USED flag after all resources are freed so that swapon
2694 	 * can reuse this swap_info in alloc_swap_info() safely.  It is ok to
2695 	 * not hold p->lock after we cleared its SWP_WRITEOK.
2696 	 */
2697 	spin_lock(&swap_lock);
2698 	p->flags = 0;
2699 	spin_unlock(&swap_lock);
2700 
2701 	err = 0;
2702 	atomic_inc(&proc_poll_event);
2703 	wake_up_interruptible(&proc_poll_wait);
2704 
2705 out_dput:
2706 	filp_close(victim, NULL);
2707 out:
2708 	putname(pathname);
2709 	return err;
2710 }
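
/*
 * Userspace view (illustrative sketch only): the syscall above is normally
 * reached through the libc wrapper declared in <sys/swap.h>.  A minimal
 * caller, using a hypothetical device path, might look like:
 *
 *	#include <sys/swap.h>
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		if (swapoff("/dev/sdb1") != 0)
 *			perror("swapoff");
 *		return 0;
 *	}
 *
 * CAP_SYS_ADMIN is required, matching the capable() check at the top of the
 * syscall.
 */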
2711 
2712 #ifdef CONFIG_PROC_FS
2713 static __poll_t swaps_poll(struct file *file, poll_table *wait)
2714 {
2715 	struct seq_file *seq = file->private_data;
2716 
2717 	poll_wait(file, &proc_poll_wait, wait);
2718 
2719 	if (seq->poll_event != atomic_read(&proc_poll_event)) {
2720 		seq->poll_event = atomic_read(&proc_poll_event);
2721 		return EPOLLIN | EPOLLRDNORM | EPOLLERR | EPOLLPRI;
2722 	}
2723 
2724 	return EPOLLIN | EPOLLRDNORM;
2725 }
2726 
2727 /* iterator */
2728 static void *swap_start(struct seq_file *swap, loff_t *pos)
2729 {
2730 	struct swap_info_struct *si;
2731 	int type;
2732 	loff_t l = *pos;
2733 
2734 	mutex_lock(&swapon_mutex);
2735 
2736 	if (!l)
2737 		return SEQ_START_TOKEN;
2738 
2739 	for (type = 0; (si = swap_type_to_swap_info(type)); type++) {
2740 		if (!(si->flags & SWP_USED) || !si->swap_map)
2741 			continue;
2742 		if (!--l)
2743 			return si;
2744 	}
2745 
2746 	return NULL;
2747 }
2748 
2749 static void *swap_next(struct seq_file *swap, void *v, loff_t *pos)
2750 {
2751 	struct swap_info_struct *si = v;
2752 	int type;
2753 
2754 	if (v == SEQ_START_TOKEN)
2755 		type = 0;
2756 	else
2757 		type = si->type + 1;
2758 
2759 	++(*pos);
2760 	for (; (si = swap_type_to_swap_info(type)); type++) {
2761 		if (!(si->flags & SWP_USED) || !si->swap_map)
2762 			continue;
2763 		return si;
2764 	}
2765 
2766 	return NULL;
2767 }
2768 
2769 static void swap_stop(struct seq_file *swap, void *v)
2770 {
2771 	mutex_unlock(&swapon_mutex);
2772 }
2773 
2774 static int swap_show(struct seq_file *swap, void *v)
2775 {
2776 	struct swap_info_struct *si = v;
2777 	struct file *file;
2778 	int len;
2779 
2780 	if (si == SEQ_START_TOKEN) {
2781 		seq_puts(swap,"Filename\t\t\t\tType\t\tSize\tUsed\tPriority\n");
2782 		return 0;
2783 	}
2784 
2785 	file = si->swap_file;
2786 	len = seq_file_path(swap, file, " \t\n\\");
2787 	seq_printf(swap, "%*s%s\t%u\t%u\t%d\n",
2788 			len < 40 ? 40 - len : 1, " ",
2789 			S_ISBLK(file_inode(file)->i_mode) ?
2790 				"partition" : "file\t",
2791 			si->pages << (PAGE_SHIFT - 10),
2792 			si->inuse_pages << (PAGE_SHIFT - 10),
2793 			si->prio);
2794 	return 0;
2795 }
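
/*
 * For reference, the /proc/swaps output swap_show() produces looks roughly
 * like this (values are illustrative):
 *
 *	Filename                                Type            Size    Used    Priority
 *	/dev/sda2                               partition       8388604 0       -2
 *	/swapfile                               file            2097148 0       -3
 *
 * Sizes are in KiB (pages << (PAGE_SHIFT - 10)); the priorities shown here
 * are the auto-assigned negative values from setup_swap_info().
 */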
2796 
2797 static const struct seq_operations swaps_op = {
2798 	.start =	swap_start,
2799 	.next =		swap_next,
2800 	.stop =		swap_stop,
2801 	.show =		swap_show
2802 };
2803 
2804 static int swaps_open(struct inode *inode, struct file *file)
2805 {
2806 	struct seq_file *seq;
2807 	int ret;
2808 
2809 	ret = seq_open(file, &swaps_op);
2810 	if (ret)
2811 		return ret;
2812 
2813 	seq = file->private_data;
2814 	seq->poll_event = atomic_read(&proc_poll_event);
2815 	return 0;
2816 }
2817 
2818 static const struct file_operations proc_swaps_operations = {
2819 	.open		= swaps_open,
2820 	.read		= seq_read,
2821 	.llseek		= seq_lseek,
2822 	.release	= seq_release,
2823 	.poll		= swaps_poll,
2824 };
2825 
2826 static int __init procswaps_init(void)
2827 {
2828 	proc_create("swaps", 0, NULL, &proc_swaps_operations);
2829 	return 0;
2830 }
2831 __initcall(procswaps_init);
2832 #endif /* CONFIG_PROC_FS */
2833 
2834 #ifdef MAX_SWAPFILES_CHECK
2835 static int __init max_swapfiles_check(void)
2836 {
2837 	MAX_SWAPFILES_CHECK();
2838 	return 0;
2839 }
2840 late_initcall(max_swapfiles_check);
2841 #endif
2842 
2843 static struct swap_info_struct *alloc_swap_info(void)
2844 {
2845 	struct swap_info_struct *p;
2846 	struct swap_info_struct *defer = NULL;
2847 	unsigned int type;
2848 	int i;
2849 
2850 	p = kvzalloc(struct_size(p, avail_lists, nr_node_ids), GFP_KERNEL);
2851 	if (!p)
2852 		return ERR_PTR(-ENOMEM);
2853 
2854 	spin_lock(&swap_lock);
2855 	for (type = 0; type < nr_swapfiles; type++) {
2856 		if (!(swap_info[type]->flags & SWP_USED))
2857 			break;
2858 	}
2859 	if (type >= MAX_SWAPFILES) {
2860 		spin_unlock(&swap_lock);
2861 		kvfree(p);
2862 		return ERR_PTR(-EPERM);
2863 	}
2864 	if (type >= nr_swapfiles) {
2865 		p->type = type;
2866 		WRITE_ONCE(swap_info[type], p);
2867 		/*
2868 		 * Write swap_info[type] before nr_swapfiles, in case a
2869 		 * racing procfs swap_start() or swap_next() is reading them.
2870 		 * (We never shrink nr_swapfiles, we never free this entry.)
2871 		 */
2872 		smp_wmb();
2873 		WRITE_ONCE(nr_swapfiles, nr_swapfiles + 1);
2874 	} else {
2875 		defer = p;
2876 		p = swap_info[type];
2877 		/*
2878 		 * Do not memset this entry: a racing procfs swap_next()
2879 		 * would be relying on p->type to remain valid.
2880 		 */
2881 	}
2882 	p->swap_extent_root = RB_ROOT;
2883 	plist_node_init(&p->list, 0);
2884 	for_each_node(i)
2885 		plist_node_init(&p->avail_lists[i], 0);
2886 	p->flags = SWP_USED;
2887 	spin_unlock(&swap_lock);
2888 	kvfree(defer);
2889 	spin_lock_init(&p->lock);
2890 	spin_lock_init(&p->cont_lock);
2891 
2892 	return p;
2893 }
2894 
2895 static int claim_swapfile(struct swap_info_struct *p, struct inode *inode)
2896 {
2897 	int error;
2898 
2899 	if (S_ISBLK(inode->i_mode)) {
2900 		p->bdev = bdgrab(I_BDEV(inode));
2901 		error = blkdev_get(p->bdev,
2902 				   FMODE_READ | FMODE_WRITE | FMODE_EXCL, p);
2903 		if (error < 0) {
2904 			p->bdev = NULL;
2905 			return error;
2906 		}
2907 		p->old_block_size = block_size(p->bdev);
2908 		error = set_blocksize(p->bdev, PAGE_SIZE);
2909 		if (error < 0)
2910 			return error;
2911 		p->flags |= SWP_BLKDEV;
2912 	} else if (S_ISREG(inode->i_mode)) {
2913 		p->bdev = inode->i_sb->s_bdev;
2914 	}
2915 
2916 	return 0;
2917 }
2918 
2919 
2920 /*
2921  * Find out how many pages are allowed for a single swap device. There
2922  * are two limiting factors:
2923  * 1) the number of bits for the swap offset in the swp_entry_t type, and
2924  * 2) the number of bits in the swap pte, as defined by the different
2925  * architectures.
2926  *
2927  * In order to find the largest possible bit mask, a swap entry with
2928  * swap type 0 and swap offset ~0UL is created, encoded to a swap pte,
2929  * decoded to a swp_entry_t again, and finally the swap offset is
2930  * extracted.
2931  *
2932  * This will mask all the bits from the initial ~0UL mask that can't
2933  * be encoded in either the swp_entry_t or the architecture definition
2934  * of a swap pte.
2935  */
2936 unsigned long generic_max_swapfile_size(void)
2937 {
2938 	return swp_offset(pte_to_swp_entry(
2939 			swp_entry_to_pte(swp_entry(0, ~0UL)))) + 1;
2940 }
2941 
2942 /* Can be overridden by an architecture for additional checks. */
2943 __weak unsigned long max_swapfile_size(void)
2944 {
2945 	return generic_max_swapfile_size();
2946 }
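
/*
 * Worked example for the round trip above, on a hypothetical architecture:
 * if the arch swap pte can only encode 32 bits of offset, swp_entry(0, ~0UL)
 * comes back from pte_to_swp_entry(swp_entry_to_pte(...)) with offset
 * 0xffffffff, so generic_max_swapfile_size() returns 2^32 pages, i.e. 16TiB
 * of swap with 4KiB pages.  An architecture overrides max_swapfile_size()
 * when it has to clamp this further.
 */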
2947 
2948 static unsigned long read_swap_header(struct swap_info_struct *p,
2949 					union swap_header *swap_header,
2950 					struct inode *inode)
2951 {
2952 	int i;
2953 	unsigned long maxpages;
2954 	unsigned long swapfilepages;
2955 	unsigned long last_page;
2956 
2957 	if (memcmp("SWAPSPACE2", swap_header->magic.magic, 10)) {
2958 		pr_err("Unable to find swap-space signature\n");
2959 		return 0;
2960 	}
2961 
2962 	/* swap partition endianness hack... */
2963 	if (swab32(swap_header->info.version) == 1) {
2964 		swab32s(&swap_header->info.version);
2965 		swab32s(&swap_header->info.last_page);
2966 		swab32s(&swap_header->info.nr_badpages);
2967 		if (swap_header->info.nr_badpages > MAX_SWAP_BADPAGES)
2968 			return 0;
2969 		for (i = 0; i < swap_header->info.nr_badpages; i++)
2970 			swab32s(&swap_header->info.badpages[i]);
2971 	}
2972 	/* Check the swap header's sub-version */
2973 	if (swap_header->info.version != 1) {
2974 		pr_warn("Unable to handle swap header version %d\n",
2975 			swap_header->info.version);
2976 		return 0;
2977 	}
2978 
2979 	p->lowest_bit  = 1;
2980 	p->cluster_next = 1;
2981 	p->cluster_nr = 0;
2982 
2983 	maxpages = max_swapfile_size();
2984 	last_page = swap_header->info.last_page;
2985 	if (!last_page) {
2986 		pr_warn("Empty swap-file\n");
2987 		return 0;
2988 	}
2989 	if (last_page > maxpages) {
2990 		pr_warn("Truncating oversized swap area, only using %luk out of %luk\n",
2991 			maxpages << (PAGE_SHIFT - 10),
2992 			last_page << (PAGE_SHIFT - 10));
2993 	}
2994 	if (maxpages > last_page) {
2995 		maxpages = last_page + 1;
2996 		/* p->max is an unsigned int: don't overflow it */
2997 		if ((unsigned int)maxpages == 0)
2998 			maxpages = UINT_MAX;
2999 	}
3000 	p->highest_bit = maxpages - 1;
3001 
3002 	if (!maxpages)
3003 		return 0;
3004 	swapfilepages = i_size_read(inode) >> PAGE_SHIFT;
3005 	if (swapfilepages && maxpages > swapfilepages) {
3006 		pr_warn("Swap area shorter than signature indicates\n");
3007 		return 0;
3008 	}
3009 	if (swap_header->info.nr_badpages && S_ISREG(inode->i_mode))
3010 		return 0;
3011 	if (swap_header->info.nr_badpages > MAX_SWAP_BADPAGES)
3012 		return 0;
3013 
3014 	return maxpages;
3015 }
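
/*
 * Layout being parsed above (see union swap_header in <linux/swap.h>): the
 * header occupies the first page of the swap area; info.version,
 * info.last_page and info.nr_badpages follow 1024 bytes reserved for boot
 * blocks/disklabels, and the "SWAPSPACE2" magic sits in the final 10 bytes
 * of the page.  This is also why page 0 is never used for swap data.
 */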
3016 
3017 #define SWAP_CLUSTER_INFO_COLS						\
3018 	DIV_ROUND_UP(L1_CACHE_BYTES, sizeof(struct swap_cluster_info))
3019 #define SWAP_CLUSTER_SPACE_COLS						\
3020 	DIV_ROUND_UP(SWAP_ADDRESS_SPACE_PAGES, SWAPFILE_CLUSTER)
3021 #define SWAP_CLUSTER_COLS						\
3022 	max_t(unsigned int, SWAP_CLUSTER_INFO_COLS, SWAP_CLUSTER_SPACE_COLS)
3023 
3024 static int setup_swap_map_and_extents(struct swap_info_struct *p,
3025 					union swap_header *swap_header,
3026 					unsigned char *swap_map,
3027 					struct swap_cluster_info *cluster_info,
3028 					unsigned long maxpages,
3029 					sector_t *span)
3030 {
3031 	unsigned int j, k;
3032 	unsigned int nr_good_pages;
3033 	int nr_extents;
3034 	unsigned long nr_clusters = DIV_ROUND_UP(maxpages, SWAPFILE_CLUSTER);
3035 	unsigned long col = p->cluster_next / SWAPFILE_CLUSTER % SWAP_CLUSTER_COLS;
3036 	unsigned long i, idx;
3037 
3038 	nr_good_pages = maxpages - 1;	/* omit header page */
3039 
3040 	cluster_list_init(&p->free_clusters);
3041 	cluster_list_init(&p->discard_clusters);
3042 
3043 	for (i = 0; i < swap_header->info.nr_badpages; i++) {
3044 		unsigned int page_nr = swap_header->info.badpages[i];
3045 		if (page_nr == 0 || page_nr > swap_header->info.last_page)
3046 			return -EINVAL;
3047 		if (page_nr < maxpages) {
3048 			swap_map[page_nr] = SWAP_MAP_BAD;
3049 			nr_good_pages--;
3050 			/*
3051 			 * Haven't marked the cluster free yet, no list
3052 			 * operation involved
3053 			 */
3054 			inc_cluster_info_page(p, cluster_info, page_nr);
3055 		}
3056 	}
3057 
3058 	/* Haven't marked the cluster free yet, no list operation involved */
3059 	for (i = maxpages; i < round_up(maxpages, SWAPFILE_CLUSTER); i++)
3060 		inc_cluster_info_page(p, cluster_info, i);
3061 
3062 	if (nr_good_pages) {
3063 		swap_map[0] = SWAP_MAP_BAD;
3064 		/*
3065 		 * Haven't marked the cluster free yet, no list
3066 		 * operation involved
3067 		 */
3068 		inc_cluster_info_page(p, cluster_info, 0);
3069 		p->max = maxpages;
3070 		p->pages = nr_good_pages;
3071 		nr_extents = setup_swap_extents(p, span);
3072 		if (nr_extents < 0)
3073 			return nr_extents;
3074 		nr_good_pages = p->pages;
3075 	}
3076 	if (!nr_good_pages) {
3077 		pr_warn("Empty swap-file\n");
3078 		return -EINVAL;
3079 	}
3080 
3081 	if (!cluster_info)
3082 		return nr_extents;
3083 
3084 
3085 	/*
3086 	 * Reduce false cache line sharing between cluster_info entries
3087 	 * that share the same swap address space.
3088 	 */
3089 	for (k = 0; k < SWAP_CLUSTER_COLS; k++) {
3090 		j = (k + col) % SWAP_CLUSTER_COLS;
3091 		for (i = 0; i < DIV_ROUND_UP(nr_clusters, SWAP_CLUSTER_COLS); i++) {
3092 			idx = i * SWAP_CLUSTER_COLS + j;
3093 			if (idx >= nr_clusters)
3094 				continue;
3095 			if (cluster_count(&cluster_info[idx]))
3096 				continue;
3097 			cluster_set_flag(&cluster_info[idx], CLUSTER_FLAG_FREE);
3098 			cluster_list_add_tail(&p->free_clusters, cluster_info,
3099 					      idx);
3100 		}
3101 	}
3102 	return nr_extents;
3103 }
3104 
3105 /*
3106  * Helper to sys_swapon determining if a given swap
3107  * backing device queue supports DISCARD operations.
3108  */
3109 static bool swap_discardable(struct swap_info_struct *si)
3110 {
3111 	struct request_queue *q = bdev_get_queue(si->bdev);
3112 
3113 	if (!q || !blk_queue_discard(q))
3114 		return false;
3115 
3116 	return true;
3117 }
3118 
3119 SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
3120 {
3121 	struct swap_info_struct *p;
3122 	struct filename *name;
3123 	struct file *swap_file = NULL;
3124 	struct address_space *mapping;
3125 	int prio;
3126 	int error;
3127 	union swap_header *swap_header;
3128 	int nr_extents;
3129 	sector_t span;
3130 	unsigned long maxpages;
3131 	unsigned char *swap_map = NULL;
3132 	struct swap_cluster_info *cluster_info = NULL;
3133 	unsigned long *frontswap_map = NULL;
3134 	struct page *page = NULL;
3135 	struct inode *inode = NULL;
3136 	bool inced_nr_rotate_swap = false;
3137 
3138 	if (swap_flags & ~SWAP_FLAGS_VALID)
3139 		return -EINVAL;
3140 
3141 	if (!capable(CAP_SYS_ADMIN))
3142 		return -EPERM;
3143 
3144 	if (!swap_avail_heads)
3145 		return -ENOMEM;
3146 
3147 	p = alloc_swap_info();
3148 	if (IS_ERR(p))
3149 		return PTR_ERR(p);
3150 
3151 	INIT_WORK(&p->discard_work, swap_discard_work);
3152 
3153 	name = getname(specialfile);
3154 	if (IS_ERR(name)) {
3155 		error = PTR_ERR(name);
3156 		name = NULL;
3157 		goto bad_swap;
3158 	}
3159 	swap_file = file_open_name(name, O_RDWR|O_LARGEFILE, 0);
3160 	if (IS_ERR(swap_file)) {
3161 		error = PTR_ERR(swap_file);
3162 		swap_file = NULL;
3163 		goto bad_swap;
3164 	}
3165 
3166 	p->swap_file = swap_file;
3167 	mapping = swap_file->f_mapping;
3168 	inode = mapping->host;
3169 
3170 	error = claim_swapfile(p, inode);
3171 	if (unlikely(error))
3172 		goto bad_swap;
3173 
3174 	inode_lock(inode);
3175 	if (IS_SWAPFILE(inode)) {
3176 		error = -EBUSY;
3177 		goto bad_swap_unlock_inode;
3178 	}
3179 	/*
3180 	 * Read the swap header.
3181 	 */
3182 	if (!mapping->a_ops->readpage) {
3183 		error = -EINVAL;
3184 		goto bad_swap_unlock_inode;
3185 	}
3186 	page = read_mapping_page(mapping, 0, swap_file);
3187 	if (IS_ERR(page)) {
3188 		error = PTR_ERR(page);
3189 		goto bad_swap_unlock_inode;
3190 	}
3191 	swap_header = kmap(page);
3192 
3193 	maxpages = read_swap_header(p, swap_header, inode);
3194 	if (unlikely(!maxpages)) {
3195 		error = -EINVAL;
3196 		goto bad_swap_unlock_inode;
3197 	}
3198 
3199 	/* OK, set up the swap map and apply the bad block list */
3200 	swap_map = vzalloc(maxpages);
3201 	if (!swap_map) {
3202 		error = -ENOMEM;
3203 		goto bad_swap_unlock_inode;
3204 	}
3205 
3206 	if (bdi_cap_stable_pages_required(inode_to_bdi(inode)))
3207 		p->flags |= SWP_STABLE_WRITES;
3208 
3209 	if (bdi_cap_synchronous_io(inode_to_bdi(inode)))
3210 		p->flags |= SWP_SYNCHRONOUS_IO;
3211 
3212 	if (p->bdev && blk_queue_nonrot(bdev_get_queue(p->bdev))) {
3213 		int cpu;
3214 		unsigned long ci, nr_cluster;
3215 
3216 		p->flags |= SWP_SOLIDSTATE;
3217 		/*
3218 		 * select a random position to start with to help SSD wear
3219 		 * leveling
3220 		 */
3221 		p->cluster_next = 1 + (prandom_u32() % p->highest_bit);
3222 		nr_cluster = DIV_ROUND_UP(maxpages, SWAPFILE_CLUSTER);
3223 
3224 		cluster_info = kvcalloc(nr_cluster, sizeof(*cluster_info),
3225 					GFP_KERNEL);
3226 		if (!cluster_info) {
3227 			error = -ENOMEM;
3228 			goto bad_swap_unlock_inode;
3229 		}
3230 
3231 		for (ci = 0; ci < nr_cluster; ci++)
3232 			spin_lock_init(&((cluster_info + ci)->lock));
3233 
3234 		p->percpu_cluster = alloc_percpu(struct percpu_cluster);
3235 		if (!p->percpu_cluster) {
3236 			error = -ENOMEM;
3237 			goto bad_swap_unlock_inode;
3238 		}
3239 		for_each_possible_cpu(cpu) {
3240 			struct percpu_cluster *cluster;
3241 			cluster = per_cpu_ptr(p->percpu_cluster, cpu);
3242 			cluster_set_null(&cluster->index);
3243 		}
3244 	} else {
3245 		atomic_inc(&nr_rotate_swap);
3246 		inced_nr_rotate_swap = true;
3247 	}
3248 
3249 	error = swap_cgroup_swapon(p->type, maxpages);
3250 	if (error)
3251 		goto bad_swap_unlock_inode;
3252 
3253 	nr_extents = setup_swap_map_and_extents(p, swap_header, swap_map,
3254 		cluster_info, maxpages, &span);
3255 	if (unlikely(nr_extents < 0)) {
3256 		error = nr_extents;
3257 		goto bad_swap_unlock_inode;
3258 	}
3259 	/* frontswap enabled? set up bit-per-page map for frontswap */
3260 	if (IS_ENABLED(CONFIG_FRONTSWAP))
3261 		frontswap_map = kvcalloc(BITS_TO_LONGS(maxpages),
3262 					 sizeof(long),
3263 					 GFP_KERNEL);
3264 
3265 	if (p->bdev && (swap_flags & SWAP_FLAG_DISCARD) && swap_discardable(p)) {
3266 		/*
3267 		 * When discard is enabled for swap with no particular
3268 		 * policy flagged, we set all swap discard flags here in
3269 		 * order to sustain backward compatibility with older
3270 		 * swapon(8) releases.
3271 		 */
3272 		p->flags |= (SWP_DISCARDABLE | SWP_AREA_DISCARD |
3273 			     SWP_PAGE_DISCARD);
3274 
3275 		/*
3276 		 * By flagging sys_swapon, a sysadmin can tell us to
3277 		 * either do single-time area discards only, or to just
3278 		 * perform discards for released swap page-clusters.
3279 		 * Now it's time to adjust the p->flags accordingly.
3280 		 */
3281 		if (swap_flags & SWAP_FLAG_DISCARD_ONCE)
3282 			p->flags &= ~SWP_PAGE_DISCARD;
3283 		else if (swap_flags & SWAP_FLAG_DISCARD_PAGES)
3284 			p->flags &= ~SWP_AREA_DISCARD;
3285 
3286 		/* issue a swapon-time discard if it's still required */
3287 		if (p->flags & SWP_AREA_DISCARD) {
3288 			int err = discard_swap(p);
3289 			if (unlikely(err))
3290 				pr_err("swapon: discard_swap(%p): %d\n",
3291 					p, err);
3292 		}
3293 	}
3294 
3295 	error = init_swap_address_space(p->type, maxpages);
3296 	if (error)
3297 		goto bad_swap_unlock_inode;
3298 
3299 	/*
3300 	 * Flush any pending IO and dirty mappings before we start using this
3301 	 * swap device.
3302 	 */
3303 	inode->i_flags |= S_SWAPFILE;
3304 	error = inode_drain_writes(inode);
3305 	if (error) {
3306 		inode->i_flags &= ~S_SWAPFILE;
3307 		goto free_swap_address_space;
3308 	}
3309 
3310 	mutex_lock(&swapon_mutex);
3311 	prio = -1;
3312 	if (swap_flags & SWAP_FLAG_PREFER)
3313 		prio =
3314 		  (swap_flags & SWAP_FLAG_PRIO_MASK) >> SWAP_FLAG_PRIO_SHIFT;
3315 	enable_swap_info(p, prio, swap_map, cluster_info, frontswap_map);
3316 
3317 	pr_info("Adding %uk swap on %s.  Priority:%d extents:%d across:%lluk %s%s%s%s%s\n",
3318 		p->pages<<(PAGE_SHIFT-10), name->name, p->prio,
3319 		nr_extents, (unsigned long long)span<<(PAGE_SHIFT-10),
3320 		(p->flags & SWP_SOLIDSTATE) ? "SS" : "",
3321 		(p->flags & SWP_DISCARDABLE) ? "D" : "",
3322 		(p->flags & SWP_AREA_DISCARD) ? "s" : "",
3323 		(p->flags & SWP_PAGE_DISCARD) ? "c" : "",
3324 		(frontswap_map) ? "FS" : "");
3325 
3326 	mutex_unlock(&swapon_mutex);
3327 	atomic_inc(&proc_poll_event);
3328 	wake_up_interruptible(&proc_poll_wait);
3329 
3330 	error = 0;
3331 	goto out;
3332 free_swap_address_space:
3333 	exit_swap_address_space(p->type);
3334 bad_swap_unlock_inode:
3335 	inode_unlock(inode);
3336 bad_swap:
3337 	free_percpu(p->percpu_cluster);
3338 	p->percpu_cluster = NULL;
3339 	if (inode && S_ISBLK(inode->i_mode) && p->bdev) {
3340 		set_blocksize(p->bdev, p->old_block_size);
3341 		blkdev_put(p->bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
3342 	}
3343 	inode = NULL;
3344 	destroy_swap_extents(p);
3345 	swap_cgroup_swapoff(p->type);
3346 	spin_lock(&swap_lock);
3347 	p->swap_file = NULL;
3348 	p->flags = 0;
3349 	spin_unlock(&swap_lock);
3350 	vfree(swap_map);
3351 	kvfree(cluster_info);
3352 	kvfree(frontswap_map);
3353 	if (inced_nr_rotate_swap)
3354 		atomic_dec(&nr_rotate_swap);
3355 	if (swap_file)
3356 		filp_close(swap_file, NULL);
3357 out:
3358 	if (page && !IS_ERR(page)) {
3359 		kunmap(page);
3360 		put_page(page);
3361 	}
3362 	if (name)
3363 		putname(name);
3364 	if (inode)
3365 		inode_unlock(inode);
3366 	if (!error)
3367 		enable_swap_slots_cache();
3368 	return error;
3369 }
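
/*
 * Userspace view (illustrative sketch only): the priority handling above is
 * what swapon(8) relies on when packing "-p <prio>" into the flags argument
 * of the swapon(2) wrapper from <sys/swap.h>.  A minimal caller, using a
 * hypothetical device path, might look like:
 *
 *	#include <sys/swap.h>
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		int prio = 10;
 *		int flags = SWAP_FLAG_PREFER |
 *			    ((prio << SWAP_FLAG_PRIO_SHIFT) & SWAP_FLAG_PRIO_MASK);
 *
 *		if (swapon("/dev/sdb1", flags) != 0)
 *			perror("swapon");
 *		return 0;
 *	}
 *
 * Without SWAP_FLAG_PREFER the area simply gets the next auto-assigned
 * negative priority, as with the prio = -1 default above.
 */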
3370 
3371 void si_swapinfo(struct sysinfo *val)
3372 {
3373 	unsigned int type;
3374 	unsigned long nr_to_be_unused = 0;
3375 
3376 	spin_lock(&swap_lock);
3377 	for (type = 0; type < nr_swapfiles; type++) {
3378 		struct swap_info_struct *si = swap_info[type];
3379 
3380 		if ((si->flags & SWP_USED) && !(si->flags & SWP_WRITEOK))
3381 			nr_to_be_unused += si->inuse_pages;
3382 	}
3383 	val->freeswap = atomic_long_read(&nr_swap_pages) + nr_to_be_unused;
3384 	val->totalswap = total_swap_pages + nr_to_be_unused;
3385 	spin_unlock(&swap_lock);
3386 }
3387 
3388 /*
3389  * Verify that a swap entry is valid and increment its swap map count.
3390  *
3391  * Returns an error code in the following cases:
3392  * - success -> 0
3393  * - swp_entry is invalid -> EINVAL
3394  * - swp_entry is migration entry -> EINVAL
3395  * - swap-cache reference is requested but there is already one. -> EEXIST
3396  * - swap-cache reference is requested but the entry is not used. -> ENOENT
3397  * - swap-mapped reference requested but needs continued swap count. -> ENOMEM
3398  */
3399 static int __swap_duplicate(swp_entry_t entry, unsigned char usage)
3400 {
3401 	struct swap_info_struct *p;
3402 	struct swap_cluster_info *ci;
3403 	unsigned long offset;
3404 	unsigned char count;
3405 	unsigned char has_cache;
3406 	int err = -EINVAL;
3407 
3408 	p = get_swap_device(entry);
3409 	if (!p)
3410 		goto out;
3411 
3412 	offset = swp_offset(entry);
3413 	ci = lock_cluster_or_swap_info(p, offset);
3414 
3415 	count = p->swap_map[offset];
3416 
3417 	/*
3418 	 * swapin_readahead() doesn't check if a swap entry is valid, so the
3419 	 * swap entry could be SWAP_MAP_BAD. Check here with lock held.
3420 	 */
3421 	if (unlikely(swap_count(count) == SWAP_MAP_BAD)) {
3422 		err = -ENOENT;
3423 		goto unlock_out;
3424 	}
3425 
3426 	has_cache = count & SWAP_HAS_CACHE;
3427 	count &= ~SWAP_HAS_CACHE;
3428 	err = 0;
3429 
3430 	if (usage == SWAP_HAS_CACHE) {
3431 
3432 		/* set SWAP_HAS_CACHE if there is no cache and entry is used */
3433 		if (!has_cache && count)
3434 			has_cache = SWAP_HAS_CACHE;
3435 		else if (has_cache)		/* someone else added cache */
3436 			err = -EEXIST;
3437 		else				/* no users remaining */
3438 			err = -ENOENT;
3439 
3440 	} else if (count || has_cache) {
3441 
3442 		if ((count & ~COUNT_CONTINUED) < SWAP_MAP_MAX)
3443 			count += usage;
3444 		else if ((count & ~COUNT_CONTINUED) > SWAP_MAP_MAX)
3445 			err = -EINVAL;
3446 		else if (swap_count_continued(p, offset, count))
3447 			count = COUNT_CONTINUED;
3448 		else
3449 			err = -ENOMEM;
3450 	} else
3451 		err = -ENOENT;			/* unused swap entry */
3452 
3453 	p->swap_map[offset] = count | has_cache;
3454 
3455 unlock_out:
3456 	unlock_cluster_or_swap_info(p, ci);
3457 out:
3458 	if (p)
3459 		put_swap_device(p);
3460 	return err;
3461 }
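
/*
 * As a reading aid for the logic above, each swap_map byte packs (assuming
 * the usual definitions in <linux/swap.h>): bits 0-5 hold the map count up
 * to SWAP_MAP_MAX (0x3e), 0x3f marks a bad slot, SWAP_HAS_CACHE (0x40) says
 * the slot is also held by the swap cache, and COUNT_CONTINUED (0x80) says
 * the count overflowed into a continuation page.  So a value of 0x42 means
 * "two page table references plus a swap cache page", and
 * __swap_duplicate(entry, 1) on it simply bumps it to 0x43.
 */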
3462 
3463 /*
3464  * Help swapoff by noting that swap entry belongs to shmem/tmpfs
3465  * (in which case its reference count is never incremented).
3466  */
3467 void swap_shmem_alloc(swp_entry_t entry)
3468 {
3469 	__swap_duplicate(entry, SWAP_MAP_SHMEM);
3470 }
3471 
3472 /*
3473  * Increase reference count of swap entry by 1.
3474  * Returns 0 for success, or -ENOMEM if a swap_count_continuation is required
3475  * but could not be atomically allocated.  Returns 0, just as if it succeeded,
3476  * if __swap_duplicate() fails for another reason (-EINVAL or -ENOENT), which
3477  * might occur if a page table entry has got corrupted.
3478  */
3479 int swap_duplicate(swp_entry_t entry)
3480 {
3481 	int err = 0;
3482 
3483 	while (!err && __swap_duplicate(entry, 1) == -ENOMEM)
3484 		err = add_swap_count_continuation(entry, GFP_ATOMIC);
3485 	return err;
3486 }
3487 
3488 /*
3489  * @entry: swap entry for which we allocate swap cache.
3490  *
3491  * Called when allocating swap cache for an existing swap entry.
3492  * This can return error codes; it returns 0 on success.
3493  * -EEXIST means there is already a swap cache for the entry.
3494  * Note: return code is different from swap_duplicate().
3495  */
3496 int swapcache_prepare(swp_entry_t entry)
3497 {
3498 	return __swap_duplicate(entry, SWAP_HAS_CACHE);
3499 }
3500 
3501 struct swap_info_struct *swp_swap_info(swp_entry_t entry)
3502 {
3503 	return swap_type_to_swap_info(swp_type(entry));
3504 }
3505 
3506 struct swap_info_struct *page_swap_info(struct page *page)
3507 {
3508 	swp_entry_t entry = { .val = page_private(page) };
3509 	return swp_swap_info(entry);
3510 }
3511 
3512 /*
3513  * out-of-line __page_file_ methods to avoid include hell.
3514  */
__page_file_mapping(struct page * page)3515 struct address_space *__page_file_mapping(struct page *page)
3516 {
3517 	return page_swap_info(page)->swap_file->f_mapping;
3518 }
3519 EXPORT_SYMBOL_GPL(__page_file_mapping);
3520 
__page_file_index(struct page * page)3521 pgoff_t __page_file_index(struct page *page)
3522 {
3523 	swp_entry_t swap = { .val = page_private(page) };
3524 	return swp_offset(swap);
3525 }
3526 EXPORT_SYMBOL_GPL(__page_file_index);
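
/*
 * Illustrative context (editor's sketch): the corresponding inline wrappers
 * in the mm headers only fall back to these out-of-line helpers for
 * swapcache pages, roughly:
 *
 *	static inline struct address_space *page_file_mapping(struct page *page)
 *	{
 *		if (unlikely(PageSwapCache(page)))
 *			return __page_file_mapping(page);
 *		return page->mapping;
 *	}
 */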

/*
 * add_swap_count_continuation - called when a swap count is duplicated
 * beyond SWAP_MAP_MAX: it allocates a new page and links that to the entry's
 * page of the original vmalloc'ed swap_map, to hold the continuation count
 * (for that entry and for its neighbouring PAGE_SIZE swap entries).  Called
 * again when count is duplicated beyond SWAP_MAP_MAX * SWAP_CONT_MAX, etc.
 *
 * These continuation pages are seldom referenced: the common paths all work
 * on the original swap_map, only referring to a continuation page when the
 * low "digit" of a count is incremented or decremented through SWAP_MAP_MAX.
 *
 * add_swap_count_continuation(, GFP_ATOMIC) can be called while holding
 * page table locks; if it fails, add_swap_count_continuation(, GFP_KERNEL)
 * can be called after dropping locks.
 */
int add_swap_count_continuation(swp_entry_t entry, gfp_t gfp_mask)
{
	struct swap_info_struct *si;
	struct swap_cluster_info *ci;
	struct page *head;
	struct page *page;
	struct page *list_page;
	pgoff_t offset;
	unsigned char count;
	int ret = 0;

	/*
	 * When debugging, it's easier to use __GFP_ZERO here; but it's better
	 * for latency not to zero a page while GFP_ATOMIC and holding locks.
	 */
	page = alloc_page(gfp_mask | __GFP_HIGHMEM);

	si = get_swap_device(entry);
	if (!si) {
		/*
		 * An acceptable race has occurred since the failing
		 * __swap_duplicate(): the swap device may have been swapped off.
		 */
		goto outer;
	}
	spin_lock(&si->lock);

	offset = swp_offset(entry);

	ci = lock_cluster(si, offset);

	count = si->swap_map[offset] & ~SWAP_HAS_CACHE;

	if ((count & ~COUNT_CONTINUED) != SWAP_MAP_MAX) {
		/*
		 * The higher the swap count, the more likely it is that tasks
		 * will race to add swap count continuation: we need to avoid
		 * over-provisioning.
		 */
		goto out;
	}

	if (!page) {
		ret = -ENOMEM;
		goto out;
	}

	/*
	 * We are fortunate that although vmalloc_to_page uses pte_offset_map,
	 * no architecture is using highmem pages for kernel page tables: so it
	 * will not corrupt the GFP_ATOMIC caller's atomic page table kmaps.
	 */
	head = vmalloc_to_page(si->swap_map + offset);
	offset &= ~PAGE_MASK;

	spin_lock(&si->cont_lock);
	/*
	 * Page allocation does not initialize the page's lru field,
	 * but it does always reset its private field.
	 */
	if (!page_private(head)) {
		BUG_ON(count & COUNT_CONTINUED);
		INIT_LIST_HEAD(&head->lru);
		set_page_private(head, SWP_CONTINUED);
		si->flags |= SWP_CONTINUED;
	}

	list_for_each_entry(list_page, &head->lru, lru) {
		unsigned char *map;

		/*
		 * If the previous map said no continuation, but we've found
		 * a continuation page, free our allocation and use this one.
		 */
		if (!(count & COUNT_CONTINUED))
			goto out_unlock_cont;

		map = kmap_atomic(list_page) + offset;
		count = *map;
		kunmap_atomic(map);

		/*
		 * If this continuation count now has some space in it,
		 * free our allocation and use this one.
		 */
		if ((count & ~COUNT_CONTINUED) != SWAP_CONT_MAX)
			goto out_unlock_cont;
	}

	list_add_tail(&page->lru, &head->lru);
	page = NULL;			/* now it's attached, don't free it */
out_unlock_cont:
	spin_unlock(&si->cont_lock);
out:
	unlock_cluster(ci);
	spin_unlock(&si->lock);
	put_swap_device(si);
outer:
	if (page)
		__free_page(page);
	return ret;
}
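
/*
 * Illustrative layout (editor's sketch): for one PAGE_SIZE-sized chunk of the
 * vmalloc'ed swap_map, continuation counts live in pages chained off the lru
 * list of that chunk's backing page ("head" above):
 *
 *	swap_map chunk's page (head, private == SWP_CONTINUED)
 *	    head->lru ---> cont page 1 ---> cont page 2 ---> back to head
 *
 * Each continuation page holds one extra count byte per swap entry, at the
 * same offset within the page as that entry's byte in the swap_map chunk.
 */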

/*
 * swap_count_continued - when the original swap_map count is incremented
 * from SWAP_MAP_MAX, check if there is already a continuation page to carry
 * into, carry if so, or else fail until a new continuation page is allocated;
 * when the original swap_map count is decremented from 0 with continuation,
 * borrow from the continuation and report whether it still holds more.
 * Called while __swap_duplicate() or swap_entry_free() holds swap or cluster
 * lock.
 */
static bool swap_count_continued(struct swap_info_struct *si,
				 pgoff_t offset, unsigned char count)
{
	struct page *head;
	struct page *page;
	unsigned char *map;
	bool ret;

	head = vmalloc_to_page(si->swap_map + offset);
	if (page_private(head) != SWP_CONTINUED) {
		BUG_ON(count & COUNT_CONTINUED);
		return false;		/* need to add count continuation */
	}

	spin_lock(&si->cont_lock);
	offset &= ~PAGE_MASK;
	page = list_entry(head->lru.next, struct page, lru);
	map = kmap_atomic(page) + offset;

	if (count == SWAP_MAP_MAX)	/* initial increment from swap_map */
		goto init_map;		/* jump over SWAP_CONT_MAX checks */

	if (count == (SWAP_MAP_MAX | COUNT_CONTINUED)) { /* incrementing */
		/*
		 * Think of how you add 1 to 999
		 */
		while (*map == (SWAP_CONT_MAX | COUNT_CONTINUED)) {
			kunmap_atomic(map);
			page = list_entry(page->lru.next, struct page, lru);
			BUG_ON(page == head);
			map = kmap_atomic(page) + offset;
		}
		if (*map == SWAP_CONT_MAX) {
			kunmap_atomic(map);
			page = list_entry(page->lru.next, struct page, lru);
			if (page == head) {
				ret = false;	/* add count continuation */
				goto out;
			}
			map = kmap_atomic(page) + offset;
init_map:		*map = 0;		/* we didn't zero the page */
		}
		*map += 1;
		kunmap_atomic(map);
		page = list_entry(page->lru.prev, struct page, lru);
		while (page != head) {
			map = kmap_atomic(page) + offset;
			*map = COUNT_CONTINUED;
			kunmap_atomic(map);
			page = list_entry(page->lru.prev, struct page, lru);
		}
		ret = true;			/* incremented */

	} else {				/* decrementing */
		/*
		 * Think of how you subtract 1 from 1000
		 */
		BUG_ON(count != COUNT_CONTINUED);
		while (*map == COUNT_CONTINUED) {
			kunmap_atomic(map);
			page = list_entry(page->lru.next, struct page, lru);
			BUG_ON(page == head);
			map = kmap_atomic(page) + offset;
		}
		BUG_ON(*map == 0);
		*map -= 1;
		if (*map == 0)
			count = 0;
		kunmap_atomic(map);
		page = list_entry(page->lru.prev, struct page, lru);
		while (page != head) {
			map = kmap_atomic(page) + offset;
			*map = SWAP_CONT_MAX | count;
			count = COUNT_CONTINUED;
			kunmap_atomic(map);
			page = list_entry(page->lru.prev, struct page, lru);
		}
		ret = count == COUNT_CONTINUED;
	}
out:
	spin_unlock(&si->cont_lock);
	return ret;
}
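
/*
 * Worked example (editor's note, not part of the original source): with
 * SWAP_MAP_MAX = 0x3e (62) and SWAP_CONT_MAX = 0x7f (127), an entry's
 * reference count is a low "digit" in swap_map plus higher digits in the
 * continuation pages:
 *
 *	total = low + (SWAP_MAP_MAX + 1) * (d1 + (SWAP_CONT_MAX + 1) * d2 + ...)
 *
 * e.g. the 63rd reference carries out of swap_map: the swap_map byte becomes
 * COUNT_CONTINUED (low digit 0) and the first continuation byte becomes 1;
 * the 64th reference then simply increments the low digit again, exactly
 * like adding 1 to 999 and then to 1000.
 */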

/*
 * free_swap_count_continuations - called from swapoff to free all the
 * continuation pages appended to the swap_map, after swap_map is quiesced,
 * before vfree'ing it.
 */
static void free_swap_count_continuations(struct swap_info_struct *si)
{
	pgoff_t offset;

	/* each page of the vmalloc'ed swap_map covers PAGE_SIZE entries */
	for (offset = 0; offset < si->max; offset += PAGE_SIZE) {
		struct page *head;
		head = vmalloc_to_page(si->swap_map + offset);
		if (page_private(head)) {
			struct page *page, *next;

			list_for_each_entry_safe(page, next, &head->lru, lru) {
				list_del(&page->lru);
				__free_page(page);
			}
		}
	}
}

#if defined(CONFIG_MEMCG) && defined(CONFIG_BLK_CGROUP)
void mem_cgroup_throttle_swaprate(struct mem_cgroup *memcg, int node,
				  gfp_t gfp_mask)
{
	struct swap_info_struct *si, *next;
	if (!(gfp_mask & __GFP_IO) || !memcg)
		return;

	if (!blk_cgroup_congested())
		return;

	/*
	 * We've already scheduled a throttle; avoid taking the global swap
	 * lock.
	 */
	if (current->throttle_queue)
		return;

	spin_lock(&swap_avail_lock);
	plist_for_each_entry_safe(si, next, &swap_avail_heads[node],
				  avail_lists[node]) {
		if (si->bdev) {
			blkcg_schedule_throttle(bdev_get_queue(si->bdev),
						true);
			break;
		}
	}
	spin_unlock(&swap_avail_lock);
}
#endif

static int __init swapfile_init(void)
{
	int nid;

	swap_avail_heads = kmalloc_array(nr_node_ids, sizeof(struct plist_head),
					 GFP_KERNEL);
	if (!swap_avail_heads) {
		pr_emerg("Not enough memory for swap heads, swap is disabled\n");
		return -ENOMEM;
	}

	for_each_node(nid)
		plist_head_init(&swap_avail_heads[nid]);

	return 0;
}
subsys_initcall(swapfile_init);