1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  *  linux/mm/swapfile.c
4  *
5  *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
6  *  Swap reorganised 29.12.95, Stephen Tweedie
7  */
8 
9 #include <linux/mm.h>
10 #include <linux/sched/mm.h>
11 #include <linux/sched/task.h>
12 #include <linux/hugetlb.h>
13 #include <linux/mman.h>
14 #include <linux/slab.h>
15 #include <linux/kernel_stat.h>
16 #include <linux/swap.h>
17 #include <linux/vmalloc.h>
18 #include <linux/pagemap.h>
19 #include <linux/namei.h>
20 #include <linux/shmem_fs.h>
21 #include <linux/blkdev.h>
22 #include <linux/random.h>
23 #include <linux/writeback.h>
24 #include <linux/proc_fs.h>
25 #include <linux/seq_file.h>
26 #include <linux/init.h>
27 #include <linux/ksm.h>
28 #include <linux/rmap.h>
29 #include <linux/security.h>
30 #include <linux/backing-dev.h>
31 #include <linux/mutex.h>
32 #include <linux/capability.h>
33 #include <linux/syscalls.h>
34 #include <linux/memcontrol.h>
35 #include <linux/poll.h>
36 #include <linux/oom.h>
37 #include <linux/frontswap.h>
38 #include <linux/swapfile.h>
39 #include <linux/export.h>
40 #include <linux/swap_slots.h>
41 #include <linux/sort.h>
42 
43 #include <asm/tlbflush.h>
44 #include <linux/swapops.h>
45 #include <linux/swap_cgroup.h>
46 #include <trace/hooks/mm.h>
47 
48 static bool swap_count_continued(struct swap_info_struct *, pgoff_t,
49 				 unsigned char);
50 static void free_swap_count_continuations(struct swap_info_struct *);
51 static sector_t map_swap_entry(swp_entry_t, struct block_device**);
52 
53 DEFINE_SPINLOCK(swap_lock);
54 static unsigned int nr_swapfiles;
55 atomic_long_t nr_swap_pages;
56 /*
57  * Some modules use swappable objects and may try to swap them out under
58  * memory pressure (via the shrinker). Before doing so, they may wish to
59  * check to see if any swap space is available.
60  */
61 EXPORT_SYMBOL_GPL(nr_swap_pages);
62 /* protected with swap_lock. reading in vm_swap_full() doesn't need lock */
63 long total_swap_pages;
64 static int least_priority = -1;
65 
66 static const char Bad_file[] = "Bad swap file entry ";
67 static const char Unused_file[] = "Unused swap file entry ";
68 static const char Bad_offset[] = "Bad swap offset entry ";
69 static const char Unused_offset[] = "Unused swap offset entry ";
70 
71 /*
72  * all active swap_info_structs
73  * protected with swap_lock, and ordered by priority.
74  */
75 PLIST_HEAD(swap_active_head);
76 
77 /*
78  * all available (active, not full) swap_info_structs
79  * protected with swap_avail_lock, ordered by priority.
80  * This is used by get_swap_page() instead of swap_active_head
81  * because swap_active_head includes all swap_info_structs,
82  * but get_swap_page() doesn't need to look at full ones.
83  * This uses its own lock instead of swap_lock because when a
84  * swap_info_struct changes between not-full/full, it needs to
85  * add/remove itself to/from this list, but the swap_info_struct->lock
86  * is held and the locking order requires swap_lock to be taken
87  * before any swap_info_struct->lock.
88  */
89 static struct plist_head *swap_avail_heads;
90 static DEFINE_SPINLOCK(swap_avail_lock);
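
/*
 * Lock ordering, as a summary drawn from the comments and callers in this
 * file (not an exhaustive statement): swap_lock is taken before any
 * swap_info_struct->lock (see the comment above), and swap_avail_lock nests
 * inside si->lock, e.g. swap_range_alloc() runs with si->lock held and calls
 * del_from_avail_list(), which takes swap_avail_lock.
 *
 *	spin_lock(&swap_lock);
 *	spin_lock(&si->lock);
 *	spin_lock(&swap_avail_lock);	/­* innermost *­/
 */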
91 
92 struct swap_info_struct *swap_info[MAX_SWAPFILES];
93 
94 static DEFINE_MUTEX(swapon_mutex);
95 
96 static DECLARE_WAIT_QUEUE_HEAD(proc_poll_wait);
97 /* Activity counter to indicate that a swapon or swapoff has occurred */
98 static atomic_t proc_poll_event = ATOMIC_INIT(0);
99 
100 atomic_t nr_rotate_swap = ATOMIC_INIT(0);
101 
102 struct swap_info_struct *swap_type_to_swap_info(int type)
103 {
104 	if (type >= READ_ONCE(nr_swapfiles))
105 		return NULL;
106 
107 	smp_rmb();	/* Pairs with smp_wmb in alloc_swap_info. */
108 	return READ_ONCE(swap_info[type]);
109 }
110 EXPORT_SYMBOL_GPL(swap_type_to_swap_info);
111 
112 static inline unsigned char swap_count(unsigned char ent)
113 {
114 	return ent & ~SWAP_HAS_CACHE;	/* may include COUNT_CONTINUED flag */
115 }
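
/*
 * For illustration: swap_count() only strips SWAP_HAS_CACHE, so a swap_map
 * value of (SWAP_HAS_CACHE | 2) reports a count of 2 (two map references
 * plus a swapcache reference), while a value with COUNT_CONTINUED set is
 * reported with that flag still included.
 */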
116 
117 /* Reclaim the swap entry anyway if possible */
118 #define TTRS_ANYWAY		0x1
119 /*
120  * Reclaim the swap entry if there are no more mappings of the
121  * corresponding page
122  */
123 #define TTRS_UNMAPPED		0x2
124 /* Reclaim the swap entry if swap is getting full */
125 #define TTRS_FULL		0x4
126 
127 /* returns 1 if swap entry is freed */
128 static int __try_to_reclaim_swap(struct swap_info_struct *si,
129 				 unsigned long offset, unsigned long flags)
130 {
131 	swp_entry_t entry = swp_entry(si->type, offset);
132 	struct page *page;
133 	int ret = 0;
134 
135 	page = find_get_page(swap_address_space(entry), offset);
136 	if (!page)
137 		return 0;
138 	/*
139 	 * This function is called from scan_swap_map_slots(), which in turn
140 	 * is called by vmscan.c while reclaiming pages, so a page lock may
141 	 * already be held here. Use trylock to avoid deadlock. This is a
142 	 * special case; in usual operations use try_to_free_swap() with an
143 	 * explicit lock_page().
144 	 */
145 	if (trylock_page(page)) {
146 		if ((flags & TTRS_ANYWAY) ||
147 		    ((flags & TTRS_UNMAPPED) && !page_mapped(page)) ||
148 		    ((flags & TTRS_FULL) && mem_cgroup_swap_full(page)))
149 			ret = try_to_free_swap(page);
150 		unlock_page(page);
151 	}
152 	put_page(page);
153 	return ret;
154 }
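
/*
 * Illustrative caller pattern (see free_swap_and_cache() below): reclaim
 * only when the page is no longer mapped or swap is getting full:
 *
 *	__try_to_reclaim_swap(p, swp_offset(entry),
 *			      TTRS_UNMAPPED | TTRS_FULL);
 */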
155 
156 static inline struct swap_extent *first_se(struct swap_info_struct *sis)
157 {
158 	struct rb_node *rb = rb_first(&sis->swap_extent_root);
159 	return rb_entry(rb, struct swap_extent, rb_node);
160 }
161 
162 static inline struct swap_extent *next_se(struct swap_extent *se)
163 {
164 	struct rb_node *rb = rb_next(&se->rb_node);
165 	return rb ? rb_entry(rb, struct swap_extent, rb_node) : NULL;
166 }
167 
168 /*
169  * swapon tells the device that all the old swap contents can be discarded,
170  * to allow the swap device to optimize its wear-levelling.
171  */
172 static int discard_swap(struct swap_info_struct *si)
173 {
174 	struct swap_extent *se;
175 	sector_t start_block;
176 	sector_t nr_blocks;
177 	int err = 0;
178 
179 	/* Do not discard the swap header page! */
180 	se = first_se(si);
181 	start_block = (se->start_block + 1) << (PAGE_SHIFT - 9);
182 	nr_blocks = ((sector_t)se->nr_pages - 1) << (PAGE_SHIFT - 9);
183 	if (nr_blocks) {
184 		err = blkdev_issue_discard(si->bdev, start_block,
185 				nr_blocks, GFP_KERNEL, 0);
186 		if (err)
187 			return err;
188 		cond_resched();
189 	}
190 
191 	for (se = next_se(se); se; se = next_se(se)) {
192 		start_block = se->start_block << (PAGE_SHIFT - 9);
193 		nr_blocks = (sector_t)se->nr_pages << (PAGE_SHIFT - 9);
194 
195 		err = blkdev_issue_discard(si->bdev, start_block,
196 				nr_blocks, GFP_KERNEL, 0);
197 		if (err)
198 			break;
199 
200 		cond_resched();
201 	}
202 	return err;		/* That will often be -EOPNOTSUPP */
203 }
204 
205 static struct swap_extent *
206 offset_to_swap_extent(struct swap_info_struct *sis, unsigned long offset)
207 {
208 	struct swap_extent *se;
209 	struct rb_node *rb;
210 
211 	rb = sis->swap_extent_root.rb_node;
212 	while (rb) {
213 		se = rb_entry(rb, struct swap_extent, rb_node);
214 		if (offset < se->start_page)
215 			rb = rb->rb_left;
216 		else if (offset >= se->start_page + se->nr_pages)
217 			rb = rb->rb_right;
218 		else
219 			return se;
220 	}
221 	/* It *must* be present */
222 	BUG();
223 }
224 
225 sector_t swap_page_sector(struct page *page)
226 {
227 	struct swap_info_struct *sis = page_swap_info(page);
228 	struct swap_extent *se;
229 	sector_t sector;
230 	pgoff_t offset;
231 
232 	offset = __page_file_index(page);
233 	se = offset_to_swap_extent(sis, offset);
234 	sector = se->start_block + (offset - se->start_page);
235 	return sector << (PAGE_SHIFT - 9);
236 }
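
/*
 * Worked example, assuming 4K pages (PAGE_SHIFT == 12, so PAGE_SHIFT - 9 == 3
 * and each page spans eight 512-byte sectors): a swap offset 5 pages into an
 * extent whose start_block is 1000 maps to sector (1000 + 5) << 3 == 8040.
 */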
237 
238 /*
239  * swap allocation tells the device that a cluster of swap can now be discarded,
240  * to allow the swap device to optimize its wear-levelling.
241  */
242 static void discard_swap_cluster(struct swap_info_struct *si,
243 				 pgoff_t start_page, pgoff_t nr_pages)
244 {
245 	struct swap_extent *se = offset_to_swap_extent(si, start_page);
246 
247 	while (nr_pages) {
248 		pgoff_t offset = start_page - se->start_page;
249 		sector_t start_block = se->start_block + offset;
250 		sector_t nr_blocks = se->nr_pages - offset;
251 
252 		if (nr_blocks > nr_pages)
253 			nr_blocks = nr_pages;
254 		start_page += nr_blocks;
255 		nr_pages -= nr_blocks;
256 
257 		start_block <<= PAGE_SHIFT - 9;
258 		nr_blocks <<= PAGE_SHIFT - 9;
259 		if (blkdev_issue_discard(si->bdev, start_block,
260 					nr_blocks, GFP_NOIO, 0))
261 			break;
262 
263 		se = next_se(se);
264 	}
265 }
266 
267 #ifdef CONFIG_THP_SWAP
268 #define SWAPFILE_CLUSTER	HPAGE_PMD_NR
269 
270 #define swap_entry_size(size)	(size)
271 #else
272 #define SWAPFILE_CLUSTER	256
273 
274 /*
275  * Define swap_entry_size() as a constant to let the compiler optimize
276  * out some code if !CONFIG_THP_SWAP
277  */
278 #define swap_entry_size(size)	1
279 #endif
280 #define LATENCY_LIMIT		256
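
/*
 * For illustration: a swap offset maps to a cluster index by integer
 * division, idx = offset / SWAPFILE_CLUSTER (see lock_cluster() and
 * inc_cluster_info_page() below). With the non-THP value of 256, offsets
 * 0..255 share cluster 0, offsets 256..511 share cluster 1, and so on.
 */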
281 
282 static inline void cluster_set_flag(struct swap_cluster_info *info,
283 	unsigned int flag)
284 {
285 	info->flags = flag;
286 }
287 
288 static inline unsigned int cluster_count(struct swap_cluster_info *info)
289 {
290 	return info->data;
291 }
292 
293 static inline void cluster_set_count(struct swap_cluster_info *info,
294 				     unsigned int c)
295 {
296 	info->data = c;
297 }
298 
299 static inline void cluster_set_count_flag(struct swap_cluster_info *info,
300 					 unsigned int c, unsigned int f)
301 {
302 	info->flags = f;
303 	info->data = c;
304 }
305 
306 static inline unsigned int cluster_next(struct swap_cluster_info *info)
307 {
308 	return info->data;
309 }
310 
311 static inline void cluster_set_next(struct swap_cluster_info *info,
312 				    unsigned int n)
313 {
314 	info->data = n;
315 }
316 
317 static inline void cluster_set_next_flag(struct swap_cluster_info *info,
318 					 unsigned int n, unsigned int f)
319 {
320 	info->flags = f;
321 	info->data = n;
322 }
323 
324 static inline bool cluster_is_free(struct swap_cluster_info *info)
325 {
326 	return info->flags & CLUSTER_FLAG_FREE;
327 }
328 
329 static inline bool cluster_is_null(struct swap_cluster_info *info)
330 {
331 	return info->flags & CLUSTER_FLAG_NEXT_NULL;
332 }
333 
334 static inline void cluster_set_null(struct swap_cluster_info *info)
335 {
336 	info->flags = CLUSTER_FLAG_NEXT_NULL;
337 	info->data = 0;
338 }
339 
340 static inline bool cluster_is_huge(struct swap_cluster_info *info)
341 {
342 	if (IS_ENABLED(CONFIG_THP_SWAP))
343 		return info->flags & CLUSTER_FLAG_HUGE;
344 	return false;
345 }
346 
347 static inline void cluster_clear_huge(struct swap_cluster_info *info)
348 {
349 	info->flags &= ~CLUSTER_FLAG_HUGE;
350 }
351 
352 static inline struct swap_cluster_info *lock_cluster(struct swap_info_struct *si,
353 						     unsigned long offset)
354 {
355 	struct swap_cluster_info *ci;
356 
357 	ci = si->cluster_info;
358 	if (ci) {
359 		ci += offset / SWAPFILE_CLUSTER;
360 		spin_lock(&ci->lock);
361 	}
362 	return ci;
363 }
364 
365 static inline void unlock_cluster(struct swap_cluster_info *ci)
366 {
367 	if (ci)
368 		spin_unlock(&ci->lock);
369 }
370 
371 /*
372  * Determine the locking method in use for this device.  Return
373  * swap_cluster_info if SSD-style cluster-based locking is in place.
374  */
375 static inline struct swap_cluster_info *lock_cluster_or_swap_info(
376 		struct swap_info_struct *si, unsigned long offset)
377 {
378 	struct swap_cluster_info *ci;
379 
380 	/* Try to use fine-grained SSD-style locking if available: */
381 	ci = lock_cluster(si, offset);
382 	/* Otherwise, fall back to traditional, coarse locking: */
383 	if (!ci)
384 		spin_lock(&si->lock);
385 
386 	return ci;
387 }
388 
389 static inline void unlock_cluster_or_swap_info(struct swap_info_struct *si,
390 					       struct swap_cluster_info *ci)
391 {
392 	if (ci)
393 		unlock_cluster(ci);
394 	else
395 		spin_unlock(&si->lock);
396 }
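
/*
 * Typical usage of the pair above (as in __swap_entry_free() below):
 *
 *	ci = lock_cluster_or_swap_info(si, offset);
 *	... operate on si->swap_map[offset] ...
 *	unlock_cluster_or_swap_info(si, ci);
 */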
397 
398 static inline bool cluster_list_empty(struct swap_cluster_list *list)
399 {
400 	return cluster_is_null(&list->head);
401 }
402 
403 static inline unsigned int cluster_list_first(struct swap_cluster_list *list)
404 {
405 	return cluster_next(&list->head);
406 }
407 
408 static void cluster_list_init(struct swap_cluster_list *list)
409 {
410 	cluster_set_null(&list->head);
411 	cluster_set_null(&list->tail);
412 }
413 
414 static void cluster_list_add_tail(struct swap_cluster_list *list,
415 				  struct swap_cluster_info *ci,
416 				  unsigned int idx)
417 {
418 	if (cluster_list_empty(list)) {
419 		cluster_set_next_flag(&list->head, idx, 0);
420 		cluster_set_next_flag(&list->tail, idx, 0);
421 	} else {
422 		struct swap_cluster_info *ci_tail;
423 		unsigned int tail = cluster_next(&list->tail);
424 
425 		/*
426 		 * Nested cluster lock, but both cluster locks are
427 		 * only acquired while we hold swap_info_struct->lock.
428 		 */
429 		ci_tail = ci + tail;
430 		spin_lock_nested(&ci_tail->lock, SINGLE_DEPTH_NESTING);
431 		cluster_set_next(ci_tail, idx);
432 		spin_unlock(&ci_tail->lock);
433 		cluster_set_next_flag(&list->tail, idx, 0);
434 	}
435 }
436 
437 static unsigned int cluster_list_del_first(struct swap_cluster_list *list,
438 					   struct swap_cluster_info *ci)
439 {
440 	unsigned int idx;
441 
442 	idx = cluster_next(&list->head);
443 	if (cluster_next(&list->tail) == idx) {
444 		cluster_set_null(&list->head);
445 		cluster_set_null(&list->tail);
446 	} else
447 		cluster_set_next_flag(&list->head,
448 				      cluster_next(&ci[idx]), 0);
449 
450 	return idx;
451 }
452 
453 /* Add a cluster to the discard list and schedule the discard work */
454 static void swap_cluster_schedule_discard(struct swap_info_struct *si,
455 		unsigned int idx)
456 {
457 	/*
458 	 * If scan_swap_map() can't find a free cluster, it will check
459 	 * si->swap_map directly. To make sure the discarding cluster isn't
460 	 * taken by scan_swap_map(), mark the swap entries bad (occupied). They
461 	 * will be cleared after the discard completes.
462 	 */
463 	memset(si->swap_map + idx * SWAPFILE_CLUSTER,
464 			SWAP_MAP_BAD, SWAPFILE_CLUSTER);
465 
466 	cluster_list_add_tail(&si->discard_clusters, si->cluster_info, idx);
467 
468 	schedule_work(&si->discard_work);
469 }
470 
471 static void __free_cluster(struct swap_info_struct *si, unsigned long idx)
472 {
473 	struct swap_cluster_info *ci = si->cluster_info;
474 
475 	cluster_set_flag(ci + idx, CLUSTER_FLAG_FREE);
476 	cluster_list_add_tail(&si->free_clusters, ci, idx);
477 }
478 
479 /*
480  * Do the discard work. After a cluster discard is finished, the cluster
481  * will be added to the free cluster list. The caller should hold si->lock.
482  */
483 static void swap_do_scheduled_discard(struct swap_info_struct *si)
484 {
485 	struct swap_cluster_info *info, *ci;
486 	unsigned int idx;
487 
488 	info = si->cluster_info;
489 
490 	while (!cluster_list_empty(&si->discard_clusters)) {
491 		idx = cluster_list_del_first(&si->discard_clusters, info);
492 		spin_unlock(&si->lock);
493 
494 		discard_swap_cluster(si, idx * SWAPFILE_CLUSTER,
495 				SWAPFILE_CLUSTER);
496 
497 		spin_lock(&si->lock);
498 		ci = lock_cluster(si, idx * SWAPFILE_CLUSTER);
499 		__free_cluster(si, idx);
500 		memset(si->swap_map + idx * SWAPFILE_CLUSTER,
501 				0, SWAPFILE_CLUSTER);
502 		unlock_cluster(ci);
503 	}
504 }
505 
506 static void swap_discard_work(struct work_struct *work)
507 {
508 	struct swap_info_struct *si;
509 
510 	si = container_of(work, struct swap_info_struct, discard_work);
511 
512 	spin_lock(&si->lock);
513 	swap_do_scheduled_discard(si);
514 	spin_unlock(&si->lock);
515 }
516 
517 static void alloc_cluster(struct swap_info_struct *si, unsigned long idx)
518 {
519 	struct swap_cluster_info *ci = si->cluster_info;
520 
521 	VM_BUG_ON(cluster_list_first(&si->free_clusters) != idx);
522 	cluster_list_del_first(&si->free_clusters, ci);
523 	cluster_set_count_flag(ci + idx, 0, 0);
524 }
525 
526 static void free_cluster(struct swap_info_struct *si, unsigned long idx)
527 {
528 	struct swap_cluster_info *ci = si->cluster_info + idx;
529 
530 	VM_BUG_ON(cluster_count(ci) != 0);
531 	/*
532 	 * If the swap is discardable, schedule a discard of the cluster
533 	 * instead of freeing it immediately. The cluster will be freed
534 	 * after discard.
535 	 */
536 	if ((si->flags & (SWP_WRITEOK | SWP_PAGE_DISCARD)) ==
537 	    (SWP_WRITEOK | SWP_PAGE_DISCARD)) {
538 		swap_cluster_schedule_discard(si, idx);
539 		return;
540 	}
541 
542 	__free_cluster(si, idx);
543 }
544 
545 /*
546  * The cluster corresponding to page_nr will be used. The cluster will be
547  * removed from the free cluster list and its usage counter will be increased.
548  */
549 static void inc_cluster_info_page(struct swap_info_struct *p,
550 	struct swap_cluster_info *cluster_info, unsigned long page_nr)
551 {
552 	unsigned long idx = page_nr / SWAPFILE_CLUSTER;
553 
554 	if (!cluster_info)
555 		return;
556 	if (cluster_is_free(&cluster_info[idx]))
557 		alloc_cluster(p, idx);
558 
559 	VM_BUG_ON(cluster_count(&cluster_info[idx]) >= SWAPFILE_CLUSTER);
560 	cluster_set_count(&cluster_info[idx],
561 		cluster_count(&cluster_info[idx]) + 1);
562 }
563 
564 /*
565  * The cluster corresponding to page_nr loses one usage. If the usage
566  * counter becomes 0, meaning no page in the cluster is in use, we can
567  * optionally discard the cluster and add it to the free cluster list.
568  */
569 static void dec_cluster_info_page(struct swap_info_struct *p,
570 	struct swap_cluster_info *cluster_info, unsigned long page_nr)
571 {
572 	unsigned long idx = page_nr / SWAPFILE_CLUSTER;
573 
574 	if (!cluster_info)
575 		return;
576 
577 	VM_BUG_ON(cluster_count(&cluster_info[idx]) == 0);
578 	cluster_set_count(&cluster_info[idx],
579 		cluster_count(&cluster_info[idx]) - 1);
580 
581 	if (cluster_count(&cluster_info[idx]) == 0)
582 		free_cluster(p, idx);
583 }
584 
585 /*
586  * It's possible for scan_swap_map() to use a free cluster from the middle of
587  * the free cluster list. Detect and avoid such use to prevent list corruption.
588  */
589 static bool
590 scan_swap_map_ssd_cluster_conflict(struct swap_info_struct *si,
591 	unsigned long offset)
592 {
593 	struct percpu_cluster *percpu_cluster;
594 	bool conflict;
595 
596 	offset /= SWAPFILE_CLUSTER;
597 	conflict = !cluster_list_empty(&si->free_clusters) &&
598 		offset != cluster_list_first(&si->free_clusters) &&
599 		cluster_is_free(&si->cluster_info[offset]);
600 
601 	if (!conflict)
602 		return false;
603 
604 	percpu_cluster = this_cpu_ptr(si->percpu_cluster);
605 	cluster_set_null(&percpu_cluster->index);
606 	return true;
607 }
608 
609 /*
610  * Try to get a swap entry from current cpu's swap entry pool (a cluster). This
611  * might involve allocating a new cluster for current CPU too.
612  */
613 static bool scan_swap_map_try_ssd_cluster(struct swap_info_struct *si,
614 	unsigned long *offset, unsigned long *scan_base)
615 {
616 	struct percpu_cluster *cluster;
617 	struct swap_cluster_info *ci;
618 	unsigned long tmp, max;
619 
620 new_cluster:
621 	cluster = this_cpu_ptr(si->percpu_cluster);
622 	if (cluster_is_null(&cluster->index)) {
623 		if (!cluster_list_empty(&si->free_clusters)) {
624 			cluster->index = si->free_clusters.head;
625 			cluster->next = cluster_next(&cluster->index) *
626 					SWAPFILE_CLUSTER;
627 		} else if (!cluster_list_empty(&si->discard_clusters)) {
628 			/*
629 			 * We have no free clusters, but some clusters are being
630 			 * discarded: do the discard now and reclaim them, then
631 			 * reread cluster_next_cpu since we dropped si->lock.
632 			 */
633 			swap_do_scheduled_discard(si);
634 			*scan_base = this_cpu_read(*si->cluster_next_cpu);
635 			*offset = *scan_base;
636 			goto new_cluster;
637 		} else
638 			return false;
639 	}
640 
641 	/*
642 	 * Other CPUs can use our cluster if they can't find a free cluster,
643 	 * check whether there is still a free entry in the cluster.
644 	 */
645 	tmp = cluster->next;
646 	max = min_t(unsigned long, si->max,
647 		    (cluster_next(&cluster->index) + 1) * SWAPFILE_CLUSTER);
648 	if (tmp < max) {
649 		ci = lock_cluster(si, tmp);
650 		while (tmp < max) {
651 			if (!si->swap_map[tmp])
652 				break;
653 			tmp++;
654 		}
655 		unlock_cluster(ci);
656 	}
657 	if (tmp >= max) {
658 		cluster_set_null(&cluster->index);
659 		goto new_cluster;
660 	}
661 	cluster->next = tmp + 1;
662 	*offset = tmp;
663 	*scan_base = tmp;
664 	return true;
665 }
666 
667 static void __del_from_avail_list(struct swap_info_struct *p)
668 {
669 	int nid;
670 
671 	assert_spin_locked(&p->lock);
672 	for_each_node(nid)
673 		plist_del(&p->avail_lists[nid], &swap_avail_heads[nid]);
674 }
675 
676 static void del_from_avail_list(struct swap_info_struct *p)
677 {
678 	bool skip = false;
679 
680 	trace_android_vh_del_from_avail_list(p, &skip);
681 	if (skip)
682 		return;
683 
684 	spin_lock(&swap_avail_lock);
685 	__del_from_avail_list(p);
686 	spin_unlock(&swap_avail_lock);
687 }
688 
689 static void swap_range_alloc(struct swap_info_struct *si, unsigned long offset,
690 			     unsigned int nr_entries)
691 {
692 	unsigned int end = offset + nr_entries - 1;
693 
694 	if (offset == si->lowest_bit)
695 		si->lowest_bit += nr_entries;
696 	if (end == si->highest_bit)
697 		WRITE_ONCE(si->highest_bit, si->highest_bit - nr_entries);
698 	si->inuse_pages += nr_entries;
699 	if (si->inuse_pages == si->pages) {
700 		si->lowest_bit = si->max;
701 		si->highest_bit = 0;
702 		del_from_avail_list(si);
703 	}
704 }
705 
706 static void add_to_avail_list(struct swap_info_struct *p)
707 {
708 	int nid;
709 	bool skip = false;
710 
711 	trace_android_vh_add_to_avail_list(p, &skip);
712 	if (skip)
713 		return;
714 
715 	spin_lock(&swap_avail_lock);
716 	for_each_node(nid) {
717 		WARN_ON(!plist_node_empty(&p->avail_lists[nid]));
718 		plist_add(&p->avail_lists[nid], &swap_avail_heads[nid]);
719 	}
720 	spin_unlock(&swap_avail_lock);
721 }
722 
723 static void swap_range_free(struct swap_info_struct *si, unsigned long offset,
724 			    unsigned int nr_entries)
725 {
726 	unsigned long begin = offset;
727 	unsigned long end = offset + nr_entries - 1;
728 	void (*swap_slot_free_notify)(struct block_device *, unsigned long);
729 	bool skip = false;
730 
731 	if (offset < si->lowest_bit)
732 		si->lowest_bit = offset;
733 	if (end > si->highest_bit) {
734 		bool was_full = !si->highest_bit;
735 
736 		WRITE_ONCE(si->highest_bit, end);
737 		if (was_full && (si->flags & SWP_WRITEOK))
738 			add_to_avail_list(si);
739 	}
740 	trace_android_vh_account_swap_pages(si, &skip);
741 	if (!skip)
742 		atomic_long_add(nr_entries, &nr_swap_pages);
743 	si->inuse_pages -= nr_entries;
744 	if (si->flags & SWP_BLKDEV)
745 		swap_slot_free_notify =
746 			si->bdev->bd_disk->fops->swap_slot_free_notify;
747 	else
748 		swap_slot_free_notify = NULL;
749 	while (offset <= end) {
750 		arch_swap_invalidate_page(si->type, offset);
751 		frontswap_invalidate_page(si->type, offset);
752 		if (swap_slot_free_notify)
753 			swap_slot_free_notify(si->bdev, offset);
754 		offset++;
755 	}
756 	clear_shadow_from_swap_cache(si->type, begin, end);
757 }
758 
759 static void set_cluster_next(struct swap_info_struct *si, unsigned long next)
760 {
761 	unsigned long prev;
762 
763 	if (!(si->flags & SWP_SOLIDSTATE)) {
764 		si->cluster_next = next;
765 		return;
766 	}
767 
768 	prev = this_cpu_read(*si->cluster_next_cpu);
769 	/*
770 	 * When crossing into a different swap-address-space-sized chunk,
771 	 * choose another chunk at random to avoid lock contention on the
772 	 * swap address space, if possible.
773 	 */
774 	if ((prev >> SWAP_ADDRESS_SPACE_SHIFT) !=
775 	    (next >> SWAP_ADDRESS_SPACE_SHIFT)) {
776 		/* No free swap slots available */
777 		if (si->highest_bit <= si->lowest_bit)
778 			return;
779 		next = si->lowest_bit +
780 			prandom_u32_max(si->highest_bit - si->lowest_bit + 1);
781 		next = ALIGN_DOWN(next, SWAP_ADDRESS_SPACE_PAGES);
782 		next = max_t(unsigned int, next, si->lowest_bit);
783 	}
784 	this_cpu_write(*si->cluster_next_cpu, next);
785 }
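
/*
 * Worked example, assuming the usual SWAP_ADDRESS_SPACE_SHIFT of 14
 * (16384 pages per swap address space chunk, i.e. 64MB with 4K pages):
 * if the previous per-cpu position was in chunk 0 and the new one would
 * land in chunk 1, a random offset in [lowest_bit, highest_bit] is chosen,
 * rounded down to a 16384-page boundary, and clamped to lowest_bit before
 * being stored in cluster_next_cpu.
 */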
786 
787 int scan_swap_map_slots(struct swap_info_struct *si,
788 			       unsigned char usage, int nr,
789 			       swp_entry_t slots[])
790 {
791 	struct swap_cluster_info *ci;
792 	unsigned long offset;
793 	unsigned long scan_base;
794 	unsigned long last_in_cluster = 0;
795 	int latency_ration = LATENCY_LIMIT;
796 	int n_ret = 0;
797 	bool scanned_many = false;
798 
799 	/*
800 	 * We try to cluster swap pages by allocating them sequentially
801 	 * in swap.  Once we've allocated SWAPFILE_CLUSTER pages this
802 	 * way, however, we resort to first-free allocation, starting
803 	 * a new cluster.  This prevents us from scattering swap pages
804 	 * all over the entire swap partition, so that we reduce
805 	 * overall disk seek times between swap pages.  -- sct
806 	 * But we do now try to find an empty cluster.  -Andrea
807 	 * And we let swap pages go all over an SSD partition.  Hugh
808 	 */
809 
810 	si->flags += SWP_SCANNING;
811 	/*
812 	 * Use percpu scan base for SSD to reduce lock contention on
813 	 * cluster and swap cache.  For HDD, sequential access is more
814 	 * important.
815 	 */
816 	if (si->flags & SWP_SOLIDSTATE)
817 		scan_base = this_cpu_read(*si->cluster_next_cpu);
818 	else
819 		scan_base = si->cluster_next;
820 	offset = scan_base;
821 
822 	/* SSD algorithm */
823 	if (si->cluster_info) {
824 		if (!scan_swap_map_try_ssd_cluster(si, &offset, &scan_base))
825 			goto scan;
826 	} else if (unlikely(!si->cluster_nr--)) {
827 		if (si->pages - si->inuse_pages < SWAPFILE_CLUSTER) {
828 			si->cluster_nr = SWAPFILE_CLUSTER - 1;
829 			goto checks;
830 		}
831 
832 		spin_unlock(&si->lock);
833 
834 		/*
835 		 * If seek is expensive, start searching for new cluster from
836 		 * start of partition, to minimize the span of allocated swap.
837 		 * If seek is cheap, that is the SWP_SOLIDSTATE si->cluster_info
838 		 * case, just handled by scan_swap_map_try_ssd_cluster() above.
839 		 */
840 		scan_base = offset = si->lowest_bit;
841 		last_in_cluster = offset + SWAPFILE_CLUSTER - 1;
842 
843 		/* Locate the first empty (unaligned) cluster */
844 		for (; last_in_cluster <= si->highest_bit; offset++) {
845 			if (si->swap_map[offset])
846 				last_in_cluster = offset + SWAPFILE_CLUSTER;
847 			else if (offset == last_in_cluster) {
848 				spin_lock(&si->lock);
849 				offset -= SWAPFILE_CLUSTER - 1;
850 				si->cluster_next = offset;
851 				si->cluster_nr = SWAPFILE_CLUSTER - 1;
852 				goto checks;
853 			}
854 			if (unlikely(--latency_ration < 0)) {
855 				cond_resched();
856 				latency_ration = LATENCY_LIMIT;
857 			}
858 		}
859 
860 		offset = scan_base;
861 		spin_lock(&si->lock);
862 		si->cluster_nr = SWAPFILE_CLUSTER - 1;
863 	}
864 
865 checks:
866 	if (si->cluster_info) {
867 		while (scan_swap_map_ssd_cluster_conflict(si, offset)) {
868 		/* take a break if we already got some slots */
869 			if (n_ret)
870 				goto done;
871 			if (!scan_swap_map_try_ssd_cluster(si, &offset,
872 							&scan_base))
873 				goto scan;
874 		}
875 	}
876 	if (!(si->flags & SWP_WRITEOK))
877 		goto no_page;
878 	if (!si->highest_bit)
879 		goto no_page;
880 	if (offset > si->highest_bit)
881 		scan_base = offset = si->lowest_bit;
882 
883 	ci = lock_cluster(si, offset);
884 	/* reuse swap entry of cache-only swap if not busy. */
885 	if (vm_swap_full() && si->swap_map[offset] == SWAP_HAS_CACHE) {
886 		int swap_was_freed;
887 		unlock_cluster(ci);
888 		spin_unlock(&si->lock);
889 		swap_was_freed = __try_to_reclaim_swap(si, offset, TTRS_ANYWAY);
890 		spin_lock(&si->lock);
891 		/* entry was freed successfully, try to use this again */
892 		if (swap_was_freed)
893 			goto checks;
894 		goto scan; /* check next one */
895 	}
896 
897 	if (si->swap_map[offset]) {
898 		unlock_cluster(ci);
899 		if (!n_ret)
900 			goto scan;
901 		else
902 			goto done;
903 	}
904 	WRITE_ONCE(si->swap_map[offset], usage);
905 	inc_cluster_info_page(si, si->cluster_info, offset);
906 	unlock_cluster(ci);
907 
908 	swap_range_alloc(si, offset, 1);
909 	slots[n_ret++] = swp_entry(si->type, offset);
910 
911 	/* got enough slots or reach max slots? */
912 	if ((n_ret == nr) || (offset >= si->highest_bit))
913 		goto done;
914 
915 	/* search for next available slot */
916 
917 	/* time to take a break? */
918 	if (unlikely(--latency_ration < 0)) {
919 		if (n_ret)
920 			goto done;
921 		spin_unlock(&si->lock);
922 		cond_resched();
923 		spin_lock(&si->lock);
924 		latency_ration = LATENCY_LIMIT;
925 	}
926 
927 	/* try to get more slots in cluster */
928 	if (si->cluster_info) {
929 		if (scan_swap_map_try_ssd_cluster(si, &offset, &scan_base))
930 			goto checks;
931 	} else if (si->cluster_nr && !si->swap_map[++offset]) {
932 		/* non-ssd case, still more slots in cluster? */
933 		--si->cluster_nr;
934 		goto checks;
935 	}
936 
937 	/*
938 	 * Even if there are no free clusters available (fragmented),
939 	 * try to scan a little more quickly with lock held unless we
940 	 * have scanned too many slots already.
941 	 */
942 	if (!scanned_many) {
943 		unsigned long scan_limit;
944 
945 		if (offset < scan_base)
946 			scan_limit = scan_base;
947 		else
948 			scan_limit = si->highest_bit;
949 		for (; offset <= scan_limit && --latency_ration > 0;
950 		     offset++) {
951 			if (!si->swap_map[offset])
952 				goto checks;
953 		}
954 	}
955 
956 done:
957 	set_cluster_next(si, offset + 1);
958 	si->flags -= SWP_SCANNING;
959 	return n_ret;
960 
961 scan:
962 	spin_unlock(&si->lock);
963 	while (++offset <= READ_ONCE(si->highest_bit)) {
964 		if (data_race(!si->swap_map[offset])) {
965 			spin_lock(&si->lock);
966 			goto checks;
967 		}
968 		if (vm_swap_full() &&
969 		    READ_ONCE(si->swap_map[offset]) == SWAP_HAS_CACHE) {
970 			spin_lock(&si->lock);
971 			goto checks;
972 		}
973 		if (unlikely(--latency_ration < 0)) {
974 			cond_resched();
975 			latency_ration = LATENCY_LIMIT;
976 			scanned_many = true;
977 		}
978 	}
979 	offset = si->lowest_bit;
980 	while (offset < scan_base) {
981 		if (data_race(!si->swap_map[offset])) {
982 			spin_lock(&si->lock);
983 			goto checks;
984 		}
985 		if (vm_swap_full() &&
986 		    READ_ONCE(si->swap_map[offset]) == SWAP_HAS_CACHE) {
987 			spin_lock(&si->lock);
988 			goto checks;
989 		}
990 		if (unlikely(--latency_ration < 0)) {
991 			cond_resched();
992 			latency_ration = LATENCY_LIMIT;
993 			scanned_many = true;
994 		}
995 		offset++;
996 	}
997 	spin_lock(&si->lock);
998 
999 no_page:
1000 	si->flags -= SWP_SCANNING;
1001 	return n_ret;
1002 }
1003 EXPORT_SYMBOL_GPL(scan_swap_map_slots);
1004 
1005 int swap_alloc_cluster(struct swap_info_struct *si, swp_entry_t *slot)
1006 {
1007 	unsigned long idx;
1008 	struct swap_cluster_info *ci;
1009 	unsigned long offset, i;
1010 	unsigned char *map;
1011 
1012 	/*
1013 	 * Should not even be attempting cluster allocations when huge
1014 	 * page swap is disabled.  Warn and fail the allocation.
1015 	 */
1016 	if (!IS_ENABLED(CONFIG_THP_SWAP)) {
1017 		VM_WARN_ON_ONCE(1);
1018 		return 0;
1019 	}
1020 
1021 	if (cluster_list_empty(&si->free_clusters))
1022 		return 0;
1023 
1024 	idx = cluster_list_first(&si->free_clusters);
1025 	offset = idx * SWAPFILE_CLUSTER;
1026 	ci = lock_cluster(si, offset);
1027 	alloc_cluster(si, idx);
1028 	cluster_set_count_flag(ci, SWAPFILE_CLUSTER, CLUSTER_FLAG_HUGE);
1029 
1030 	map = si->swap_map + offset;
1031 	for (i = 0; i < SWAPFILE_CLUSTER; i++)
1032 		map[i] = SWAP_HAS_CACHE;
1033 	unlock_cluster(ci);
1034 	swap_range_alloc(si, offset, SWAPFILE_CLUSTER);
1035 	*slot = swp_entry(si->type, offset);
1036 
1037 	return 1;
1038 }
1039 EXPORT_SYMBOL_GPL(swap_alloc_cluster);
1040 
1041 static void swap_free_cluster(struct swap_info_struct *si, unsigned long idx)
1042 {
1043 	unsigned long offset = idx * SWAPFILE_CLUSTER;
1044 	struct swap_cluster_info *ci;
1045 
1046 	ci = lock_cluster(si, offset);
1047 	memset(si->swap_map + offset, 0, SWAPFILE_CLUSTER);
1048 	cluster_set_count_flag(ci, 0, 0);
1049 	free_cluster(si, idx);
1050 	unlock_cluster(ci);
1051 	swap_range_free(si, offset, SWAPFILE_CLUSTER);
1052 }
1053 
1054 static unsigned long scan_swap_map(struct swap_info_struct *si,
1055 				   unsigned char usage)
1056 {
1057 	swp_entry_t entry;
1058 	int n_ret;
1059 
1060 	n_ret = scan_swap_map_slots(si, usage, 1, &entry);
1061 
1062 	if (n_ret)
1063 		return swp_offset(entry);
1064 	else
1065 		return 0;
1066 
1067 }
1068 
1069 int get_swap_pages(int n_goal, swp_entry_t swp_entries[], int entry_size)
1070 {
1071 	unsigned long size = swap_entry_size(entry_size);
1072 	struct swap_info_struct *si, *next;
1073 	long avail_pgs;
1074 	int n_ret = 0;
1075 	int node;
1076 
1077 	/* Only single cluster request supported */
1078 	WARN_ON_ONCE(n_goal > 1 && size == SWAPFILE_CLUSTER);
1079 
1080 	spin_lock(&swap_avail_lock);
1081 
1082 	avail_pgs = atomic_long_read(&nr_swap_pages) / size;
1083 	if (avail_pgs <= 0) {
1084 		spin_unlock(&swap_avail_lock);
1085 		goto noswap;
1086 	}
1087 
1088 	n_goal = min3((long)n_goal, (long)SWAP_BATCH, avail_pgs);
1089 
1090 	atomic_long_sub(n_goal * size, &nr_swap_pages);
1091 
1092 start_over:
1093 	node = numa_node_id();
1094 	plist_for_each_entry_safe(si, next, &swap_avail_heads[node], avail_lists[node]) {
1095 		/* requeue si to after same-priority siblings */
1096 		plist_requeue(&si->avail_lists[node], &swap_avail_heads[node]);
1097 		spin_unlock(&swap_avail_lock);
1098 		spin_lock(&si->lock);
1099 		if (!si->highest_bit || !(si->flags & SWP_WRITEOK)) {
1100 			spin_lock(&swap_avail_lock);
1101 			if (plist_node_empty(&si->avail_lists[node])) {
1102 				spin_unlock(&si->lock);
1103 				goto nextsi;
1104 			}
1105 			WARN(!si->highest_bit,
1106 			     "swap_info %d in list but !highest_bit\n",
1107 			     si->type);
1108 			WARN(!(si->flags & SWP_WRITEOK),
1109 			     "swap_info %d in list but !SWP_WRITEOK\n",
1110 			     si->type);
1111 			__del_from_avail_list(si);
1112 			spin_unlock(&si->lock);
1113 			goto nextsi;
1114 		}
1115 		if (size == SWAPFILE_CLUSTER) {
1116 			if (si->flags & SWP_BLKDEV)
1117 				n_ret = swap_alloc_cluster(si, swp_entries);
1118 		} else
1119 			n_ret = scan_swap_map_slots(si, SWAP_HAS_CACHE,
1120 						    n_goal, swp_entries);
1121 		spin_unlock(&si->lock);
1122 		if (n_ret || size == SWAPFILE_CLUSTER)
1123 			goto check_out;
1124 		pr_debug("scan_swap_map of si %d failed to find offset\n",
1125 			si->type);
1126 		cond_resched();
1127 
1128 		spin_lock(&swap_avail_lock);
1129 nextsi:
1130 		/*
1131 		 * if we got here, it's likely that si was almost full before,
1132 		 * and since scan_swap_map() can drop the si->lock, multiple
1133 		 * callers probably all tried to get a page from the same si
1134 		 * and it filled up before we could get one; or, the si filled
1135 		 * up between us dropping swap_avail_lock and taking si->lock.
1136 		 * Since we dropped the swap_avail_lock, the swap_avail_head
1137 		 * list may have been modified; so if next is still in the
1138 		 * swap_avail_head list then try it, otherwise start over
1139 		 * if we have not gotten any slots.
1140 		 */
1141 		if (plist_node_empty(&next->avail_lists[node]))
1142 			goto start_over;
1143 	}
1144 
1145 	spin_unlock(&swap_avail_lock);
1146 
1147 check_out:
1148 	if (n_ret < n_goal)
1149 		atomic_long_add((long)(n_goal - n_ret) * size,
1150 				&nr_swap_pages);
1151 noswap:
1152 	return n_ret;
1153 }
1154 
1155 /* The only caller of this function is now the suspend routine */
1156 swp_entry_t get_swap_page_of_type(int type)
1157 {
1158 	struct swap_info_struct *si = swap_type_to_swap_info(type);
1159 	pgoff_t offset;
1160 	bool skip = false;
1161 
1162 	if (!si)
1163 		goto fail;
1164 
1165 	spin_lock(&si->lock);
1166 	if (si->flags & SWP_WRITEOK) {
1167 		/* This is called for allocating swap entry, not cache */
1168 		offset = scan_swap_map(si, 1);
1169 		if (offset) {
1170 			trace_android_vh_account_swap_pages(si, &skip);
1171 			if (!skip)
1172 				atomic_long_dec(&nr_swap_pages);
1173 			spin_unlock(&si->lock);
1174 			return swp_entry(type, offset);
1175 		}
1176 	}
1177 	spin_unlock(&si->lock);
1178 fail:
1179 	return (swp_entry_t) {0};
1180 }
1181 
1182 static struct swap_info_struct *__swap_info_get(swp_entry_t entry)
1183 {
1184 	struct swap_info_struct *p;
1185 	unsigned long offset;
1186 
1187 	if (!entry.val)
1188 		goto out;
1189 	p = swp_swap_info(entry);
1190 	if (!p)
1191 		goto bad_nofile;
1192 	if (data_race(!(p->flags & SWP_USED)))
1193 		goto bad_device;
1194 	offset = swp_offset(entry);
1195 	if (offset >= p->max)
1196 		goto bad_offset;
1197 	return p;
1198 
1199 bad_offset:
1200 	pr_err("swap_info_get: %s%08lx\n", Bad_offset, entry.val);
1201 	goto out;
1202 bad_device:
1203 	pr_err("swap_info_get: %s%08lx\n", Unused_file, entry.val);
1204 	goto out;
1205 bad_nofile:
1206 	pr_err("swap_info_get: %s%08lx\n", Bad_file, entry.val);
1207 out:
1208 	return NULL;
1209 }
1210 
1211 static struct swap_info_struct *_swap_info_get(swp_entry_t entry)
1212 {
1213 	struct swap_info_struct *p;
1214 
1215 	p = __swap_info_get(entry);
1216 	if (!p)
1217 		goto out;
1218 	if (data_race(!p->swap_map[swp_offset(entry)]))
1219 		goto bad_free;
1220 	return p;
1221 
1222 bad_free:
1223 	pr_err("swap_info_get: %s%08lx\n", Unused_offset, entry.val);
1224 out:
1225 	return NULL;
1226 }
1227 
1228 static struct swap_info_struct *swap_info_get(swp_entry_t entry)
1229 {
1230 	struct swap_info_struct *p;
1231 
1232 	p = _swap_info_get(entry);
1233 	if (p)
1234 		spin_lock(&p->lock);
1235 	return p;
1236 }
1237 
1238 static struct swap_info_struct *swap_info_get_cont(swp_entry_t entry,
1239 					struct swap_info_struct *q)
1240 {
1241 	struct swap_info_struct *p;
1242 
1243 	p = _swap_info_get(entry);
1244 
1245 	if (p != q) {
1246 		if (q != NULL)
1247 			spin_unlock(&q->lock);
1248 		if (p != NULL)
1249 			spin_lock(&p->lock);
1250 	}
1251 	return p;
1252 }
1253 
1254 static unsigned char __swap_entry_free_locked(struct swap_info_struct *p,
1255 					      unsigned long offset,
1256 					      unsigned char usage)
1257 {
1258 	unsigned char count;
1259 	unsigned char has_cache;
1260 
1261 	count = p->swap_map[offset];
1262 
1263 	has_cache = count & SWAP_HAS_CACHE;
1264 	count &= ~SWAP_HAS_CACHE;
1265 
1266 	if (usage == SWAP_HAS_CACHE) {
1267 		VM_BUG_ON(!has_cache);
1268 		has_cache = 0;
1269 	} else if (count == SWAP_MAP_SHMEM) {
1270 		/*
1271 		 * Or we could insist on shmem.c using a special
1272 		 * swap_shmem_free() and free_shmem_swap_and_cache()...
1273 		 */
1274 		count = 0;
1275 	} else if ((count & ~COUNT_CONTINUED) <= SWAP_MAP_MAX) {
1276 		if (count == COUNT_CONTINUED) {
1277 			if (swap_count_continued(p, offset, count))
1278 				count = SWAP_MAP_MAX | COUNT_CONTINUED;
1279 			else
1280 				count = SWAP_MAP_MAX;
1281 		} else
1282 			count--;
1283 	}
1284 
1285 	usage = count | has_cache;
1286 	if (usage)
1287 		WRITE_ONCE(p->swap_map[offset], usage);
1288 	else
1289 		WRITE_ONCE(p->swap_map[offset], SWAP_HAS_CACHE);
1290 
1291 	return usage;
1292 }
1293 
1294 /*
1295  * Check whether swap entry is valid in the swap device.  If so,
1296  * return pointer to swap_info_struct, and keep the swap entry valid
1297  * via preventing the swap device from being swapoff, until
1298  * put_swap_device() is called.  Otherwise return NULL.
1299  *
1300  * The entirety of the RCU read critical section must come before the
1301  * return from or after the call to synchronize_rcu() in
1302  * enable_swap_info() or swapoff().  So if "si->flags & SWP_VALID" is
1303  * true, the si->swap_map, si->cluster_info, etc. must be valid in the
1304  * critical section.
1305  *
1306  * Notice that swapoff or swapoff+swapon can still happen before the
1307  * rcu_read_lock() in get_swap_device() or after the rcu_read_unlock()
1308  * in put_swap_device() if there isn't any other way to prevent
1309  * swapoff, such as page lock, page table lock, etc.  The caller must
1310  * be prepared for that.  For example, the following situation is
1311  * possible.
1312  *
1313  *   CPU1				CPU2
1314  *   do_swap_page()
1315  *     ...				swapoff+swapon
1316  *     __read_swap_cache_async()
1317  *       swapcache_prepare()
1318  *         __swap_duplicate()
1319  *           // check swap_map
1320  *     // verify PTE not changed
1321  *
1322  * In __swap_duplicate(), the swap_map needs to be checked before
1323  * changing partly because the specified swap entry may be for another
1324  * swap device which has been swapoff.  And in do_swap_page(), after
1325  * the page is read from the swap device, the PTE is verified not
1326  * changed with the page table locked to check whether the swap device
1327  * has been swapoff or swapoff+swapon.
1328  */
1329 struct swap_info_struct *get_swap_device(swp_entry_t entry)
1330 {
1331 	struct swap_info_struct *si;
1332 	unsigned long offset;
1333 
1334 	if (!entry.val)
1335 		goto out;
1336 	si = swp_swap_info(entry);
1337 	if (!si)
1338 		goto bad_nofile;
1339 
1340 	rcu_read_lock();
1341 	if (data_race(!(si->flags & SWP_VALID)))
1342 		goto unlock_out;
1343 	offset = swp_offset(entry);
1344 	if (offset >= si->max)
1345 		goto unlock_out;
1346 
1347 	return si;
1348 bad_nofile:
1349 	pr_err("%s: %s%08lx\n", __func__, Bad_file, entry.val);
1350 out:
1351 	return NULL;
1352 unlock_out:
1353 	rcu_read_unlock();
1354 	return NULL;
1355 }
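
/*
 * Illustrative usage, mirroring __swap_count() below: pin the device, read
 * what is needed, then drop the reference with put_swap_device():
 *
 *	si = get_swap_device(entry);
 *	if (si) {
 *		count = swap_count(si->swap_map[swp_offset(entry)]);
 *		put_swap_device(si);
 *	}
 */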
1356 
1357 static unsigned char __swap_entry_free(struct swap_info_struct *p,
1358 				       swp_entry_t entry)
1359 {
1360 	struct swap_cluster_info *ci;
1361 	unsigned long offset = swp_offset(entry);
1362 	unsigned char usage;
1363 
1364 	ci = lock_cluster_or_swap_info(p, offset);
1365 	usage = __swap_entry_free_locked(p, offset, 1);
1366 	unlock_cluster_or_swap_info(p, ci);
1367 	if (!usage)
1368 		free_swap_slot(entry);
1369 
1370 	return usage;
1371 }
1372 
1373 static void swap_entry_free(struct swap_info_struct *p, swp_entry_t entry)
1374 {
1375 	struct swap_cluster_info *ci;
1376 	unsigned long offset = swp_offset(entry);
1377 	unsigned char count;
1378 
1379 	ci = lock_cluster(p, offset);
1380 	count = p->swap_map[offset];
1381 	VM_BUG_ON(count != SWAP_HAS_CACHE);
1382 	p->swap_map[offset] = 0;
1383 	dec_cluster_info_page(p, p->cluster_info, offset);
1384 	unlock_cluster(ci);
1385 
1386 	mem_cgroup_uncharge_swap(entry, 1);
1387 	swap_range_free(p, offset, 1);
1388 }
1389 
1390 /*
1391  * Caller has made sure that the swap device corresponding to entry
1392  * is still around or has not been recycled.
1393  */
1394 void swap_free(swp_entry_t entry)
1395 {
1396 	struct swap_info_struct *p;
1397 
1398 	p = _swap_info_get(entry);
1399 	if (p)
1400 		__swap_entry_free(p, entry);
1401 }
1402 
1403 /*
1404  * Called after dropping swapcache to decrease refcnt to swap entries.
1405  */
1406 void put_swap_page(struct page *page, swp_entry_t entry)
1407 {
1408 	unsigned long offset = swp_offset(entry);
1409 	unsigned long idx = offset / SWAPFILE_CLUSTER;
1410 	struct swap_cluster_info *ci;
1411 	struct swap_info_struct *si;
1412 	unsigned char *map;
1413 	unsigned int i, free_entries = 0;
1414 	unsigned char val;
1415 	int size = swap_entry_size(thp_nr_pages(page));
1416 
1417 	si = _swap_info_get(entry);
1418 	if (!si)
1419 		return;
1420 
1421 	ci = lock_cluster_or_swap_info(si, offset);
1422 	if (size == SWAPFILE_CLUSTER) {
1423 		VM_BUG_ON(!cluster_is_huge(ci));
1424 		map = si->swap_map + offset;
1425 		for (i = 0; i < SWAPFILE_CLUSTER; i++) {
1426 			val = map[i];
1427 			VM_BUG_ON(!(val & SWAP_HAS_CACHE));
1428 			if (val == SWAP_HAS_CACHE)
1429 				free_entries++;
1430 		}
1431 		cluster_clear_huge(ci);
1432 		if (free_entries == SWAPFILE_CLUSTER) {
1433 			unlock_cluster_or_swap_info(si, ci);
1434 			spin_lock(&si->lock);
1435 			mem_cgroup_uncharge_swap(entry, SWAPFILE_CLUSTER);
1436 			swap_free_cluster(si, idx);
1437 			spin_unlock(&si->lock);
1438 			return;
1439 		}
1440 	}
1441 	for (i = 0; i < size; i++, entry.val++) {
1442 		if (!__swap_entry_free_locked(si, offset + i, SWAP_HAS_CACHE)) {
1443 			unlock_cluster_or_swap_info(si, ci);
1444 			free_swap_slot(entry);
1445 			if (i == size - 1)
1446 				return;
1447 			lock_cluster_or_swap_info(si, offset);
1448 		}
1449 	}
1450 	unlock_cluster_or_swap_info(si, ci);
1451 }
1452 
1453 #ifdef CONFIG_THP_SWAP
1454 int split_swap_cluster(swp_entry_t entry)
1455 {
1456 	struct swap_info_struct *si;
1457 	struct swap_cluster_info *ci;
1458 	unsigned long offset = swp_offset(entry);
1459 
1460 	si = _swap_info_get(entry);
1461 	if (!si)
1462 		return -EBUSY;
1463 	ci = lock_cluster(si, offset);
1464 	cluster_clear_huge(ci);
1465 	unlock_cluster(ci);
1466 	return 0;
1467 }
1468 #endif
1469 
1470 static int swp_entry_cmp(const void *ent1, const void *ent2)
1471 {
1472 	const swp_entry_t *e1 = ent1, *e2 = ent2;
1473 
1474 	return (int)swp_type(*e1) - (int)swp_type(*e2);
1475 }
1476 
1477 void swapcache_free_entries(swp_entry_t *entries, int n)
1478 {
1479 	struct swap_info_struct *p, *prev;
1480 	int i;
1481 
1482 	if (n <= 0)
1483 		return;
1484 
1485 	prev = NULL;
1486 	p = NULL;
1487 
1488 	/*
1489 	 * Sort swap entries by swap device, so each lock is only taken once.
1490 	 * nr_swapfiles isn't absolutely correct, but the overhead of sort() is
1491 	 * so low that it isn't necessary to optimize further.
1492 	 */
1493 	if (nr_swapfiles > 1)
1494 		sort(entries, n, sizeof(entries[0]), swp_entry_cmp, NULL);
1495 	for (i = 0; i < n; ++i) {
1496 		p = swap_info_get_cont(entries[i], prev);
1497 		if (p)
1498 			swap_entry_free(p, entries[i]);
1499 		prev = p;
1500 	}
1501 	if (p)
1502 		spin_unlock(&p->lock);
1503 }
1504 EXPORT_SYMBOL_GPL(swapcache_free_entries);
1505 
1506 /*
1507  * How many references to page are currently swapped out?
1508  * This does not give an exact answer when swap count is continued,
1509  * but does include the high COUNT_CONTINUED flag to allow for that.
1510  */
1511 int page_swapcount(struct page *page)
1512 {
1513 	int count = 0;
1514 	struct swap_info_struct *p;
1515 	struct swap_cluster_info *ci;
1516 	swp_entry_t entry;
1517 	unsigned long offset;
1518 
1519 	entry.val = page_private(page);
1520 	p = _swap_info_get(entry);
1521 	if (p) {
1522 		offset = swp_offset(entry);
1523 		ci = lock_cluster_or_swap_info(p, offset);
1524 		count = swap_count(p->swap_map[offset]);
1525 		unlock_cluster_or_swap_info(p, ci);
1526 	}
1527 	return count;
1528 }
1529 
1530 int __swap_count(swp_entry_t entry)
1531 {
1532 	struct swap_info_struct *si;
1533 	pgoff_t offset = swp_offset(entry);
1534 	int count = 0;
1535 
1536 	si = get_swap_device(entry);
1537 	if (si) {
1538 		count = swap_count(si->swap_map[offset]);
1539 		put_swap_device(si);
1540 	}
1541 	return count;
1542 }
1543 
1544 static int swap_swapcount(struct swap_info_struct *si, swp_entry_t entry)
1545 {
1546 	int count = 0;
1547 	pgoff_t offset = swp_offset(entry);
1548 	struct swap_cluster_info *ci;
1549 
1550 	ci = lock_cluster_or_swap_info(si, offset);
1551 	count = swap_count(si->swap_map[offset]);
1552 	unlock_cluster_or_swap_info(si, ci);
1553 	return count;
1554 }
1555 
1556 /*
1557  * How many references to @entry are currently swapped out?
1558  * This does not give an exact answer when swap count is continued,
1559  * but does include the high COUNT_CONTINUED flag to allow for that.
1560  */
1561 int __swp_swapcount(swp_entry_t entry)
1562 {
1563 	int count = 0;
1564 	struct swap_info_struct *si;
1565 
1566 	si = get_swap_device(entry);
1567 	if (si) {
1568 		count = swap_swapcount(si, entry);
1569 		put_swap_device(si);
1570 	}
1571 	return count;
1572 }
1573 
1574 /*
1575  * How many references to @entry are currently swapped out?
1576  * This considers COUNT_CONTINUED so it returns exact answer.
1577  */
1578 int swp_swapcount(swp_entry_t entry)
1579 {
1580 	int count, tmp_count, n;
1581 	struct swap_info_struct *p;
1582 	struct swap_cluster_info *ci;
1583 	struct page *page;
1584 	pgoff_t offset;
1585 	unsigned char *map;
1586 
1587 	p = _swap_info_get(entry);
1588 	if (!p)
1589 		return 0;
1590 
1591 	offset = swp_offset(entry);
1592 
1593 	ci = lock_cluster_or_swap_info(p, offset);
1594 
1595 	count = swap_count(p->swap_map[offset]);
1596 	if (!(count & COUNT_CONTINUED))
1597 		goto out;
1598 
1599 	count &= ~COUNT_CONTINUED;
1600 	n = SWAP_MAP_MAX + 1;
1601 
1602 	page = vmalloc_to_page(p->swap_map + offset);
1603 	offset &= ~PAGE_MASK;
1604 	VM_BUG_ON(page_private(page) != SWP_CONTINUED);
1605 
1606 	do {
1607 		page = list_next_entry(page, lru);
1608 		map = kmap_atomic(page);
1609 		tmp_count = map[offset];
1610 		kunmap_atomic(map);
1611 
1612 		count += (tmp_count & ~COUNT_CONTINUED) * n;
1613 		n *= (SWAP_CONT_MAX + 1);
1614 	} while (tmp_count & COUNT_CONTINUED);
1615 out:
1616 	unlock_cluster_or_swap_info(p, ci);
1617 	return count;
1618 }
1619 
1620 static bool swap_page_trans_huge_swapped(struct swap_info_struct *si,
1621 					 swp_entry_t entry)
1622 {
1623 	struct swap_cluster_info *ci;
1624 	unsigned char *map = si->swap_map;
1625 	unsigned long roffset = swp_offset(entry);
1626 	unsigned long offset = round_down(roffset, SWAPFILE_CLUSTER);
1627 	int i;
1628 	bool ret = false;
1629 
1630 	ci = lock_cluster_or_swap_info(si, offset);
1631 	if (!ci || !cluster_is_huge(ci)) {
1632 		if (swap_count(map[roffset]))
1633 			ret = true;
1634 		goto unlock_out;
1635 	}
1636 	for (i = 0; i < SWAPFILE_CLUSTER; i++) {
1637 		if (swap_count(map[offset + i])) {
1638 			ret = true;
1639 			break;
1640 		}
1641 	}
1642 unlock_out:
1643 	unlock_cluster_or_swap_info(si, ci);
1644 	return ret;
1645 }
1646 
1647 static bool page_swapped(struct page *page)
1648 {
1649 	swp_entry_t entry;
1650 	struct swap_info_struct *si;
1651 
1652 	if (!IS_ENABLED(CONFIG_THP_SWAP) || likely(!PageTransCompound(page)))
1653 		return page_swapcount(page) != 0;
1654 
1655 	page = compound_head(page);
1656 	entry.val = page_private(page);
1657 	si = _swap_info_get(entry);
1658 	if (si)
1659 		return swap_page_trans_huge_swapped(si, entry);
1660 	return false;
1661 }
1662 
1663 static int page_trans_huge_map_swapcount(struct page *page, int *total_mapcount,
1664 					 int *total_swapcount)
1665 {
1666 	int i, map_swapcount, _total_mapcount, _total_swapcount;
1667 	unsigned long offset = 0;
1668 	struct swap_info_struct *si;
1669 	struct swap_cluster_info *ci = NULL;
1670 	unsigned char *map = NULL;
1671 	int mapcount, swapcount = 0;
1672 
1673 	/* hugetlbfs shouldn't call it */
1674 	VM_BUG_ON_PAGE(PageHuge(page), page);
1675 
1676 	if (!IS_ENABLED(CONFIG_THP_SWAP) || likely(!PageTransCompound(page))) {
1677 		mapcount = page_trans_huge_mapcount(page, total_mapcount);
1678 		if (PageSwapCache(page))
1679 			swapcount = page_swapcount(page);
1680 		if (total_swapcount)
1681 			*total_swapcount = swapcount;
1682 		return mapcount + swapcount;
1683 	}
1684 
1685 	page = compound_head(page);
1686 
1687 	_total_mapcount = _total_swapcount = map_swapcount = 0;
1688 	if (PageSwapCache(page)) {
1689 		swp_entry_t entry;
1690 
1691 		entry.val = page_private(page);
1692 		si = _swap_info_get(entry);
1693 		if (si) {
1694 			map = si->swap_map;
1695 			offset = swp_offset(entry);
1696 		}
1697 	}
1698 	if (map)
1699 		ci = lock_cluster(si, offset);
1700 	for (i = 0; i < HPAGE_PMD_NR; i++) {
1701 		mapcount = atomic_read(&page[i]._mapcount) + 1;
1702 		_total_mapcount += mapcount;
1703 		if (map) {
1704 			swapcount = swap_count(map[offset + i]);
1705 			_total_swapcount += swapcount;
1706 		}
1707 		map_swapcount = max(map_swapcount, mapcount + swapcount);
1708 	}
1709 	unlock_cluster(ci);
1710 	if (PageDoubleMap(page)) {
1711 		map_swapcount -= 1;
1712 		_total_mapcount -= HPAGE_PMD_NR;
1713 	}
1714 	mapcount = compound_mapcount(page);
1715 	map_swapcount += mapcount;
1716 	_total_mapcount += mapcount;
1717 	if (total_mapcount)
1718 		*total_mapcount = _total_mapcount;
1719 	if (total_swapcount)
1720 		*total_swapcount = _total_swapcount;
1721 
1722 	return map_swapcount;
1723 }
1724 
1725 /*
1726  * We can write to an anon page without COW if there are no other references
1727  * to it.  And as a side-effect, free up its swap: because the old content
1728  * on disk will never be read, and seeking back there to write new content
1729  * later would only waste time away from clustering.
1730  *
1731  * NOTE: total_map_swapcount should not be relied upon by the caller if
1732  * reuse_swap_page() returns false, but it may always be overwritten
1733  * (see the other implementation for CONFIG_SWAP=n).
1734  */
1735 bool reuse_swap_page(struct page *page, int *total_map_swapcount)
1736 {
1737 	int count, total_mapcount, total_swapcount;
1738 
1739 	VM_BUG_ON_PAGE(!PageLocked(page), page);
1740 	if (unlikely(PageKsm(page)))
1741 		return false;
1742 	count = page_trans_huge_map_swapcount(page, &total_mapcount,
1743 					      &total_swapcount);
1744 	if (total_map_swapcount)
1745 		*total_map_swapcount = total_mapcount + total_swapcount;
1746 	if (count == 1 && PageSwapCache(page) &&
1747 	    (likely(!PageTransCompound(page)) ||
1748 	     /* The remaining swap count will be freed soon */
1749 	     total_swapcount == page_swapcount(page))) {
1750 		if (!PageWriteback(page)) {
1751 			page = compound_head(page);
1752 			delete_from_swap_cache(page);
1753 			SetPageDirty(page);
1754 		} else {
1755 			swp_entry_t entry;
1756 			struct swap_info_struct *p;
1757 
1758 			entry.val = page_private(page);
1759 			p = swap_info_get(entry);
1760 			if (p->flags & SWP_STABLE_WRITES) {
1761 				spin_unlock(&p->lock);
1762 				return false;
1763 			}
1764 			spin_unlock(&p->lock);
1765 		}
1766 	}
1767 
1768 	return count <= 1;
1769 }
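/*
 * Illustrative sketch only (not verbatim kernel code): a write-fault path
 * would typically use reuse_swap_page() along these lines, reusing the page
 * in place when this mapping is the sole owner and falling back to
 * copy-on-write otherwise:
 *
 *	int reuse = 0;
 *	if (trylock_page(page)) {
 *		reuse = reuse_swap_page(page, NULL);
 *		unlock_page(page);
 *	}
 *	if (reuse)
 *		... map the existing page writable ...
 *	else
 *		... COW into a freshly allocated page ...
 */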
1770 
1771 /*
1772  * If swap is getting full, or if there are no more mappings of this page,
1773  * then try_to_free_swap is called to free its swap space.
1774  */
1775 int try_to_free_swap(struct page *page)
1776 {
1777 	VM_BUG_ON_PAGE(!PageLocked(page), page);
1778 
1779 	if (!PageSwapCache(page))
1780 		return 0;
1781 	if (PageWriteback(page))
1782 		return 0;
1783 	if (page_swapped(page))
1784 		return 0;
1785 
1786 	/*
1787 	 * Once hibernation has begun to create its image of memory,
1788 	 * there's a danger that one of the calls to try_to_free_swap()
1789 	 * - most probably a call from __try_to_reclaim_swap() while
1790 	 * hibernation is allocating its own swap pages for the image,
1791 	 * but conceivably even a call from memory reclaim - will free
1792 	 * the swap from a page which has already been recorded in the
1793 	 * image as a clean swapcache page, and then reuse its swap for
1794 	 * another page of the image.  On waking from hibernation, the
1795 	 * original page might be freed under memory pressure, then
1796 	 * later read back in from swap, now with the wrong data.
1797 	 *
1798 	 * Hibernation suspends storage while it is writing the image
1799 	 * to disk so check that here.
1800 	 */
1801 	if (pm_suspended_storage())
1802 		return 0;
1803 
1804 	page = compound_head(page);
1805 	delete_from_swap_cache(page);
1806 	SetPageDirty(page);
1807 	return 1;
1808 }
1809 
1810 /*
1811  * Free the swap entry like above, but also try to
1812  * free the page cache entry if it is the last user.
1813  */
1814 int free_swap_and_cache(swp_entry_t entry)
1815 {
1816 	struct swap_info_struct *p;
1817 	unsigned char count;
1818 
1819 	if (non_swap_entry(entry))
1820 		return 1;
1821 
1822 	p = _swap_info_get(entry);
1823 	if (p) {
1824 		count = __swap_entry_free(p, entry);
1825 		if (count == SWAP_HAS_CACHE &&
1826 		    !swap_page_trans_huge_swapped(p, entry))
1827 			__try_to_reclaim_swap(p, swp_offset(entry),
1828 					      TTRS_UNMAPPED | TTRS_FULL);
1829 	}
1830 	return p != NULL;
1831 }
1832 
1833 #ifdef CONFIG_HIBERNATION
1834 /*
1835  * Find the swap type that corresponds to given device (if any).
1836  *
1837  * @offset - number of the PAGE_SIZE-sized block of the device, starting
1838  * from 0, in which the swap header is expected to be located.
1839  *
1840  * This is needed for the suspend to disk (aka swsusp).
1841  */
1842 int swap_type_of(dev_t device, sector_t offset)
1843 {
1844 	int type;
1845 
1846 	if (!device)
1847 		return -1;
1848 
1849 	spin_lock(&swap_lock);
1850 	for (type = 0; type < nr_swapfiles; type++) {
1851 		struct swap_info_struct *sis = swap_info[type];
1852 
1853 		if (!(sis->flags & SWP_WRITEOK))
1854 			continue;
1855 
1856 		if (device == sis->bdev->bd_dev) {
1857 			struct swap_extent *se = first_se(sis);
1858 
1859 			if (se->start_block == offset) {
1860 				spin_unlock(&swap_lock);
1861 				return type;
1862 			}
1863 		}
1864 	}
1865 	spin_unlock(&swap_lock);
1866 	return -ENODEV;
1867 }
1868 
1869 int find_first_swap(dev_t *device)
1870 {
1871 	int type;
1872 
1873 	spin_lock(&swap_lock);
1874 	for (type = 0; type < nr_swapfiles; type++) {
1875 		struct swap_info_struct *sis = swap_info[type];
1876 
1877 		if (!(sis->flags & SWP_WRITEOK))
1878 			continue;
1879 		*device = sis->bdev->bd_dev;
1880 		spin_unlock(&swap_lock);
1881 		return type;
1882 	}
1883 	spin_unlock(&swap_lock);
1884 	return -ENODEV;
1885 }
1886 
1887 /*
1888  * Get the (PAGE_SIZE) block corresponding to given offset on the swapdev
1889  * corresponding to given index in swap_info (swap type).
1890  */
1891 sector_t swapdev_block(int type, pgoff_t offset)
1892 {
1893 	struct block_device *bdev;
1894 	struct swap_info_struct *si = swap_type_to_swap_info(type);
1895 
1896 	if (!si || !(si->flags & SWP_WRITEOK))
1897 		return 0;
1898 	return map_swap_entry(swp_entry(type, offset), &bdev);
1899 }
1900 
1901 /*
1902  * Return either the total number of swap pages of given type, or the number
1903  * of free pages of that type (depending on @free)
1904  *
1905  * This is needed for software suspend
1906  */
1907 unsigned int count_swap_pages(int type, int free)
1908 {
1909 	unsigned int n = 0;
1910 
1911 	spin_lock(&swap_lock);
1912 	if ((unsigned int)type < nr_swapfiles) {
1913 		struct swap_info_struct *sis = swap_info[type];
1914 
1915 		spin_lock(&sis->lock);
1916 		if (sis->flags & SWP_WRITEOK) {
1917 			n = sis->pages;
1918 			if (free)
1919 				n -= sis->inuse_pages;
1920 		}
1921 		spin_unlock(&sis->lock);
1922 	}
1923 	spin_unlock(&swap_lock);
1924 	return n;
1925 }
1926 #endif /* CONFIG_HIBERNATION */
1927 
1928 static inline int pte_same_as_swp(pte_t pte, pte_t swp_pte)
1929 {
1930 	return pte_same(pte_swp_clear_flags(pte), swp_pte);
1931 }
1932 
1933 /*
1934  * No need to decide whether this PTE shares the swap entry with others,
1935  * just let do_wp_page work it out if a write is requested later - to
1936  * force COW, vm_page_prot omits write permission from any private vma.
1937  */
1938 static int unuse_pte(struct vm_area_struct *vma, pmd_t *pmd,
1939 		unsigned long addr, swp_entry_t entry, struct page *page)
1940 {
1941 	struct page *swapcache;
1942 	spinlock_t *ptl;
1943 	pte_t *pte;
1944 	int ret = 1;
1945 
1946 	swapcache = page;
1947 	page = ksm_might_need_to_copy(page, vma, addr);
1948 	if (unlikely(!page))
1949 		return -ENOMEM;
1950 
1951 	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
1952 	if (unlikely(!pte_same_as_swp(*pte, swp_entry_to_pte(entry)))) {
1953 		ret = 0;
1954 		goto out;
1955 	}
1956 
1957 	dec_mm_counter(vma->vm_mm, MM_SWAPENTS);
1958 	inc_mm_counter(vma->vm_mm, MM_ANONPAGES);
1959 	get_page(page);
1960 	set_pte_at(vma->vm_mm, addr, pte,
1961 		   pte_mkold(mk_pte(page, vma->vm_page_prot)));
1962 	if (page == swapcache) {
1963 		page_add_anon_rmap(page, vma, addr, false);
1964 	} else { /* ksm created a completely new copy */
1965 		page_add_new_anon_rmap(page, vma, addr, false);
1966 		lru_cache_add_inactive_or_unevictable(page, vma);
1967 	}
1968 	swap_free(entry);
1969 out:
1970 	pte_unmap_unlock(pte, ptl);
1971 	if (page != swapcache) {
1972 		unlock_page(page);
1973 		put_page(page);
1974 	}
1975 	return ret;
1976 }
1977 
1978 static int unuse_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
1979 			unsigned long addr, unsigned long end,
1980 			unsigned int type, bool frontswap,
1981 			unsigned long *fs_pages_to_unuse)
1982 {
1983 	struct page *page;
1984 	swp_entry_t entry;
1985 	pte_t *pte;
1986 	struct swap_info_struct *si;
1987 	unsigned long offset;
1988 	int ret = 0;
1989 	volatile unsigned char *swap_map;
1990 
1991 	si = swap_info[type];
1992 	pte = pte_offset_map(pmd, addr);
1993 	do {
1994 		if (!is_swap_pte(*pte))
1995 			continue;
1996 
1997 		entry = pte_to_swp_entry(*pte);
1998 		if (swp_type(entry) != type)
1999 			continue;
2000 
2001 		offset = swp_offset(entry);
2002 		if (frontswap && !frontswap_test(si, offset))
2003 			continue;
2004 
2005 		pte_unmap(pte);
2006 		swap_map = &si->swap_map[offset];
2007 		page = lookup_swap_cache(entry, vma, addr);
2008 		if (!page) {
2009 			struct vm_fault vmf = {
2010 				.vma = vma,
2011 				.address = addr,
2012 				.pmd = pmd,
2013 			};
2014 
2015 			page = swapin_readahead(entry, GFP_HIGHUSER_MOVABLE,
2016 						&vmf);
2017 		}
2018 		if (!page) {
2019 			if (*swap_map == 0 || *swap_map == SWAP_MAP_BAD)
2020 				goto try_next;
2021 			return -ENOMEM;
2022 		}
2023 
2024 		lock_page(page);
2025 		wait_on_page_writeback(page);
2026 		ret = unuse_pte(vma, pmd, addr, entry, page);
2027 		if (ret < 0) {
2028 			unlock_page(page);
2029 			put_page(page);
2030 			goto out;
2031 		}
2032 
2033 		try_to_free_swap(page);
2034 		trace_android_vh_unuse_swap_page(si, page);
2035 		unlock_page(page);
2036 		put_page(page);
2037 
2038 		if (*fs_pages_to_unuse && !--(*fs_pages_to_unuse)) {
2039 			ret = FRONTSWAP_PAGES_UNUSED;
2040 			goto out;
2041 		}
2042 try_next:
2043 		pte = pte_offset_map(pmd, addr);
2044 	} while (pte++, addr += PAGE_SIZE, addr != end);
2045 	pte_unmap(pte - 1);
2046 
2047 	ret = 0;
2048 out:
2049 	return ret;
2050 }
2051 
2052 static inline int unuse_pmd_range(struct vm_area_struct *vma, pud_t *pud,
2053 				unsigned long addr, unsigned long end,
2054 				unsigned int type, bool frontswap,
2055 				unsigned long *fs_pages_to_unuse)
2056 {
2057 	pmd_t *pmd;
2058 	unsigned long next;
2059 	int ret;
2060 
2061 	pmd = pmd_offset(pud, addr);
2062 	do {
2063 		cond_resched();
2064 		next = pmd_addr_end(addr, end);
2065 		if (pmd_none_or_trans_huge_or_clear_bad(pmd))
2066 			continue;
2067 		ret = unuse_pte_range(vma, pmd, addr, next, type,
2068 				      frontswap, fs_pages_to_unuse);
2069 		if (ret)
2070 			return ret;
2071 	} while (pmd++, addr = next, addr != end);
2072 	return 0;
2073 }
2074 
2075 static inline int unuse_pud_range(struct vm_area_struct *vma, p4d_t *p4d,
2076 				unsigned long addr, unsigned long end,
2077 				unsigned int type, bool frontswap,
2078 				unsigned long *fs_pages_to_unuse)
2079 {
2080 	pud_t *pud;
2081 	unsigned long next;
2082 	int ret;
2083 
2084 	pud = pud_offset(p4d, addr);
2085 	do {
2086 		next = pud_addr_end(addr, end);
2087 		if (pud_none_or_clear_bad(pud))
2088 			continue;
2089 		ret = unuse_pmd_range(vma, pud, addr, next, type,
2090 				      frontswap, fs_pages_to_unuse);
2091 		if (ret)
2092 			return ret;
2093 	} while (pud++, addr = next, addr != end);
2094 	return 0;
2095 }
2096 
2097 static inline int unuse_p4d_range(struct vm_area_struct *vma, pgd_t *pgd,
2098 				unsigned long addr, unsigned long end,
2099 				unsigned int type, bool frontswap,
2100 				unsigned long *fs_pages_to_unuse)
2101 {
2102 	p4d_t *p4d;
2103 	unsigned long next;
2104 	int ret;
2105 
2106 	p4d = p4d_offset(pgd, addr);
2107 	do {
2108 		next = p4d_addr_end(addr, end);
2109 		if (p4d_none_or_clear_bad(p4d))
2110 			continue;
2111 		ret = unuse_pud_range(vma, p4d, addr, next, type,
2112 				      frontswap, fs_pages_to_unuse);
2113 		if (ret)
2114 			return ret;
2115 	} while (p4d++, addr = next, addr != end);
2116 	return 0;
2117 }
2118 
2119 static int unuse_vma(struct vm_area_struct *vma, unsigned int type,
2120 		     bool frontswap, unsigned long *fs_pages_to_unuse)
2121 {
2122 	pgd_t *pgd;
2123 	unsigned long addr, end, next;
2124 	int ret;
2125 
2126 	addr = vma->vm_start;
2127 	end = vma->vm_end;
2128 
2129 	pgd = pgd_offset(vma->vm_mm, addr);
2130 	do {
2131 		next = pgd_addr_end(addr, end);
2132 		if (pgd_none_or_clear_bad(pgd))
2133 			continue;
2134 		ret = unuse_p4d_range(vma, pgd, addr, next, type,
2135 				      frontswap, fs_pages_to_unuse);
2136 		if (ret)
2137 			return ret;
2138 	} while (pgd++, addr = next, addr != end);
2139 	return 0;
2140 }
2141 
2142 static int unuse_mm(struct mm_struct *mm, unsigned int type,
2143 		    bool frontswap, unsigned long *fs_pages_to_unuse)
2144 {
2145 	struct vm_area_struct *vma;
2146 	int ret = 0;
2147 
2148 	mmap_read_lock(mm);
2149 	for (vma = mm->mmap; vma; vma = vma->vm_next) {
2150 		if (vma->anon_vma) {
2151 			ret = unuse_vma(vma, type, frontswap,
2152 					fs_pages_to_unuse);
2153 			if (ret)
2154 				break;
2155 		}
2156 		cond_resched();
2157 	}
2158 	mmap_read_unlock(mm);
2159 	return ret;
2160 }
2161 
2162 /*
2163  * Scan swap_map (or frontswap_map if frontswap parameter is true)
2164  * from current position to next entry still in use. Return 0
2165  * if there are no inuse entries after prev till end of the map.
2166  */
2167 static unsigned int find_next_to_unuse(struct swap_info_struct *si,
2168 					unsigned int prev, bool frontswap)
2169 {
2170 	unsigned int i;
2171 	unsigned char count;
2172 
2173 	/*
2174 	 * No need for swap_lock here: we're just looking
2175 	 * for whether an entry is in use, not modifying it; false
2176 	 * hits are okay, and sys_swapoff() has already prevented new
2177 	 * allocations from this area (while holding swap_lock).
2178 	 */
2179 	for (i = prev + 1; i < si->max; i++) {
2180 		count = READ_ONCE(si->swap_map[i]);
2181 		if (count && swap_count(count) != SWAP_MAP_BAD)
2182 			if (!frontswap || frontswap_test(si, i))
2183 				break;
2184 		if ((i % LATENCY_LIMIT) == 0)
2185 			cond_resched();
2186 	}
2187 
2188 	if (i == si->max)
2189 		i = 0;
2190 
2191 	return i;
2192 }
2193 
2194 /*
2195  * If the boolean frontswap is true, only unuse pages_to_unuse pages;
2196  * pages_to_unuse==0 means all pages; ignored if frontswap is false
2197  */
2198 int try_to_unuse(unsigned int type, bool frontswap,
2199 		 unsigned long pages_to_unuse)
2200 {
2201 	struct mm_struct *prev_mm;
2202 	struct mm_struct *mm;
2203 	struct list_head *p;
2204 	int retval = 0;
2205 	struct swap_info_struct *si = swap_info[type];
2206 	struct page *page;
2207 	swp_entry_t entry;
2208 	unsigned int i;
2209 
2210 	if (!READ_ONCE(si->inuse_pages))
2211 		return 0;
2212 
2213 	if (!frontswap)
2214 		pages_to_unuse = 0;
2215 
2216 retry:
2217 	retval = shmem_unuse(type, frontswap, &pages_to_unuse);
2218 	if (retval)
2219 		goto out;
2220 
2221 	prev_mm = &init_mm;
2222 	mmget(prev_mm);
2223 
2224 	spin_lock(&mmlist_lock);
2225 	p = &init_mm.mmlist;
2226 	while (READ_ONCE(si->inuse_pages) &&
2227 	       !signal_pending(current) &&
2228 	       (p = p->next) != &init_mm.mmlist) {
2229 
2230 		mm = list_entry(p, struct mm_struct, mmlist);
2231 		if (!mmget_not_zero(mm))
2232 			continue;
2233 		spin_unlock(&mmlist_lock);
2234 		mmput(prev_mm);
2235 		prev_mm = mm;
2236 		retval = unuse_mm(mm, type, frontswap, &pages_to_unuse);
2237 
2238 		if (retval) {
2239 			mmput(prev_mm);
2240 			goto out;
2241 		}
2242 
2243 		/*
2244 		 * Make sure that we aren't completely killing
2245 		 * interactive performance.
2246 		 */
2247 		cond_resched();
2248 		spin_lock(&mmlist_lock);
2249 	}
2250 	spin_unlock(&mmlist_lock);
2251 
2252 	mmput(prev_mm);
2253 
2254 	i = 0;
2255 	while (READ_ONCE(si->inuse_pages) &&
2256 	       !signal_pending(current) &&
2257 	       (i = find_next_to_unuse(si, i, frontswap)) != 0) {
2258 
2259 		entry = swp_entry(type, i);
2260 		page = find_get_page(swap_address_space(entry), i);
2261 		if (!page)
2262 			continue;
2263 
2264 		/*
2265 		 * It is conceivable that a racing task removed this page from
2266 		 * swap cache just before we acquired the page lock. The page
2267 		 * might even be back in swap cache on another swap area. But
2268 		 * that is okay, try_to_free_swap() only removes stale pages.
2269 		 */
2270 		lock_page(page);
2271 		wait_on_page_writeback(page);
2272 		try_to_free_swap(page);
2273 		trace_android_vh_unuse_swap_page(si, page);
2274 		unlock_page(page);
2275 		put_page(page);
2276 
2277 		/*
2278 		 * For frontswap, we just need to unuse pages_to_unuse, if
2279 		 * it was specified. Need not check frontswap again here as
2280 		 * we already zeroed out pages_to_unuse if not frontswap.
2281 		 */
2282 		if (pages_to_unuse && --pages_to_unuse == 0)
2283 			goto out;
2284 	}
2285 
2286 	/*
2287 	 * Let's check again to see if there are still swap entries in the map.
2288 	 * If yes, we need to retry the unuse logic again.
2289 	 * Under global memory pressure, swap entries can be reinserted back
2290 	 * into process space after the mmlist loop above passes over them.
2291 	 *
2292 	 * Limit the number of retries? No: when mmget_not_zero() above fails,
2293 	 * that mm is likely to be freeing swap from exit_mmap(), which proceeds
2294 	 * at its own independent pace; and even shmem_writepage() could have
2295 	 * been preempted after get_swap_page(), temporarily hiding that swap.
2296 	 * It's easy and robust (though cpu-intensive) just to keep retrying.
2297 	 */
2298 	if (READ_ONCE(si->inuse_pages)) {
2299 		if (!signal_pending(current))
2300 			goto retry;
2301 		retval = -EINTR;
2302 	}
2303 out:
2304 	return (retval == FRONTSWAP_PAGES_UNUSED) ? 0 : retval;
2305 }
2306 
2307 /*
2308  * After a successful try_to_unuse, if no swap is now in use, we know
2309  * we can empty the mmlist.  swap_lock must be held on entry and exit.
2310  * Note that mmlist_lock nests inside swap_lock, and an mm must be
2311  * added to the mmlist just after page_duplicate - before would be racy.
2312  */
2313 static void drain_mmlist(void)
2314 {
2315 	struct list_head *p, *next;
2316 	unsigned int type;
2317 
2318 	for (type = 0; type < nr_swapfiles; type++)
2319 		if (swap_info[type]->inuse_pages)
2320 			return;
2321 	spin_lock(&mmlist_lock);
2322 	list_for_each_safe(p, next, &init_mm.mmlist)
2323 		list_del_init(p);
2324 	spin_unlock(&mmlist_lock);
2325 }
2326 
2327 /*
2328  * Use this swapdev's extent info to locate the (PAGE_SIZE) block which
2329  * corresponds to page offset for the specified swap entry.
2330  * Note that the return type is sector_t, but it returns the page offset
2331  * into the bdev, not a sector offset.
2332  */
2333 static sector_t map_swap_entry(swp_entry_t entry, struct block_device **bdev)
2334 {
2335 	struct swap_info_struct *sis;
2336 	struct swap_extent *se;
2337 	pgoff_t offset;
2338 
2339 	sis = swp_swap_info(entry);
2340 	*bdev = sis->bdev;
2341 
2342 	offset = swp_offset(entry);
2343 	se = offset_to_swap_extent(sis, offset);
2344 	return se->start_block + (offset - se->start_page);
2345 }
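/*
 * Worked example with made-up extent values: if the extent covering the
 * entry has start_page == 100 and start_block == 5000, an entry whose
 * swp_offset() is 103 maps to PAGE_SIZE block 5000 + (103 - 100) = 5003
 * on the backing device.
 */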
2346 
2347 /*
2348  * Returns the page offset into bdev for the specified page's swap entry.
2349  */
2350 sector_t map_swap_page(struct page *page, struct block_device **bdev)
2351 {
2352 	swp_entry_t entry;
2353 	entry.val = page_private(page);
2354 	return map_swap_entry(entry, bdev);
2355 }
2356 
2357 /*
2358  * Free all of a swapdev's extent information
2359  */
2360 static void destroy_swap_extents(struct swap_info_struct *sis)
2361 {
2362 	while (!RB_EMPTY_ROOT(&sis->swap_extent_root)) {
2363 		struct rb_node *rb = sis->swap_extent_root.rb_node;
2364 		struct swap_extent *se = rb_entry(rb, struct swap_extent, rb_node);
2365 
2366 		rb_erase(rb, &sis->swap_extent_root);
2367 		kfree(se);
2368 	}
2369 
2370 	if (sis->flags & SWP_ACTIVATED) {
2371 		struct file *swap_file = sis->swap_file;
2372 		struct address_space *mapping = swap_file->f_mapping;
2373 
2374 		sis->flags &= ~SWP_ACTIVATED;
2375 		if (mapping->a_ops->swap_deactivate)
2376 			mapping->a_ops->swap_deactivate(swap_file);
2377 	}
2378 }
2379 
2380 /*
2381  * Add a block range (and the corresponding page range) into this swapdev's
2382  * extent tree.
2383  *
2384  * This function rather assumes that it is called in ascending page order.
2385  */
2386 int
2387 add_swap_extent(struct swap_info_struct *sis, unsigned long start_page,
2388 		unsigned long nr_pages, sector_t start_block)
2389 {
2390 	struct rb_node **link = &sis->swap_extent_root.rb_node, *parent = NULL;
2391 	struct swap_extent *se;
2392 	struct swap_extent *new_se;
2393 
2394 	/*
2395 	 * place the new node at the rightmost position since the
2396 	 * function is called in ascending page order.
2397 	 */
2398 	while (*link) {
2399 		parent = *link;
2400 		link = &parent->rb_right;
2401 	}
2402 
2403 	if (parent) {
2404 		se = rb_entry(parent, struct swap_extent, rb_node);
2405 		BUG_ON(se->start_page + se->nr_pages != start_page);
2406 		if (se->start_block + se->nr_pages == start_block) {
2407 			/* Merge it */
2408 			se->nr_pages += nr_pages;
2409 			return 0;
2410 		}
2411 	}
2412 
2413 	/* No merge, insert a new extent. */
2414 	new_se = kmalloc(sizeof(*se), GFP_KERNEL);
2415 	if (new_se == NULL)
2416 		return -ENOMEM;
2417 	new_se->start_page = start_page;
2418 	new_se->nr_pages = nr_pages;
2419 	new_se->start_block = start_block;
2420 
2421 	rb_link_node(&new_se->rb_node, parent, link);
2422 	rb_insert_color(&new_se->rb_node, &sis->swap_extent_root);
2423 	return 1;
2424 }
2425 EXPORT_SYMBOL_GPL(add_swap_extent);
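/*
 * Example of the merge rule above, with made-up numbers:
 * add_swap_extent(sis, 0, 256, 1000) followed by
 * add_swap_extent(sis, 256, 128, 1256) leaves one 384-page extent starting
 * at block 1000, because the second range continues the first one's blocks.
 * A later add_swap_extent(sis, 384, 64, 9000) starts a new extent, since
 * block 9000 is not contiguous with block 1000 + 384.
 */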
2426 
2427 /*
2428  * A `swap extent' is a simple thing which maps a contiguous range of pages
2429  * onto a contiguous range of disk blocks.  An ordered list of swap extents
2430  * is built at swapon time and is then used at swap_writepage/swap_readpage
2431  * time for locating where on disk a page belongs.
2432  *
2433  * If the swapfile is an S_ISBLK block device, a single extent is installed.
2434  * This is done so that the main operating code can treat S_ISBLK and S_ISREG
2435  * swap files identically.
2436  *
2437  * Whether the swapdev is an S_ISREG file or an S_ISBLK blockdev, the swap
2438  * extent list operates in PAGE_SIZE disk blocks.  Both S_ISREG and S_ISBLK
2439  * swapfiles are handled *identically* after swapon time.
2440  *
2441  * For S_ISREG swapfiles, setup_swap_extents() will walk all the file's blocks
2442  * and will parse them into an ordered extent list, in PAGE_SIZE chunks.  If
2443  * some stray blocks are found which do not fall within the PAGE_SIZE alignment
2444  * requirements, they are simply tossed out - we will never use those blocks
2445  * for swapping.
2446  *
2447  * For all swap devices we set S_SWAPFILE across the life of the swapon.  This
2448  * prevents users from writing to the swap device, which will corrupt memory.
2449  *
2450  * The amount of disk space which a single swap extent represents varies.
2451  * Typically it is in the 1-4 megabyte range.  So we can have hundreds of
2452  * extents in the list.  To keep lookups cheap, the extents are held in an
2453  * rbtree (swap_extent_root), and offset_to_swap_extent() descends it to
2454  * find the extent covering a given offset, so map_swap_page() does not
2455  * need to walk the whole extent list.
2456  */
2457 static int setup_swap_extents(struct swap_info_struct *sis, sector_t *span)
2458 {
2459 	struct file *swap_file = sis->swap_file;
2460 	struct address_space *mapping = swap_file->f_mapping;
2461 	struct inode *inode = mapping->host;
2462 	int ret;
2463 
2464 	if (S_ISBLK(inode->i_mode)) {
2465 		ret = add_swap_extent(sis, 0, sis->max, 0);
2466 		*span = sis->pages;
2467 		return ret;
2468 	}
2469 
2470 	if (mapping->a_ops->swap_activate) {
2471 		ret = mapping->a_ops->swap_activate(sis, swap_file, span);
2472 		if (ret >= 0)
2473 			sis->flags |= SWP_ACTIVATED;
2474 		if (!ret) {
2475 			sis->flags |= SWP_FS_OPS;
2476 			ret = add_swap_extent(sis, 0, sis->max, 0);
2477 			*span = sis->pages;
2478 		}
2479 		return ret;
2480 	}
2481 
2482 	return generic_swapfile_activate(sis, swap_file, span);
2483 }
2484 
2485 static int swap_node(struct swap_info_struct *p)
2486 {
2487 	struct block_device *bdev;
2488 
2489 	if (p->bdev)
2490 		bdev = p->bdev;
2491 	else
2492 		bdev = p->swap_file->f_inode->i_sb->s_bdev;
2493 
2494 	return bdev ? bdev->bd_disk->node_id : NUMA_NO_NODE;
2495 }
2496 
2497 static void setup_swap_info(struct swap_info_struct *p, int prio,
2498 			    unsigned char *swap_map,
2499 			    struct swap_cluster_info *cluster_info)
2500 {
2501 	int i;
2502 
2503 	if (prio >= 0)
2504 		p->prio = prio;
2505 	else
2506 		p->prio = --least_priority;
2507 	/*
2508 	 * the plist prio is negated because plist ordering is
2509 	 * low-to-high, while swap ordering is high-to-low
2510 	 */
2511 	p->list.prio = -p->prio;
2512 	for_each_node(i) {
2513 		if (p->prio >= 0)
2514 			p->avail_lists[i].prio = -p->prio;
2515 		else {
2516 			if (swap_node(p) == i)
2517 				p->avail_lists[i].prio = 1;
2518 			else
2519 				p->avail_lists[i].prio = -p->prio;
2520 		}
2521 	}
2522 	p->swap_map = swap_map;
2523 	p->cluster_info = cluster_info;
2524 }
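/*
 * Example of the priority negation above, with made-up values: a device
 * registered with prio 5 gets list.prio == -5 and one with prio 2 gets
 * list.prio == -2, so the prio-5 device sorts first in the low-to-high
 * plists and is tried first by get_swap_page().  For auto-assigned
 * (negative) priorities, the avail list entry on the device's own NUMA
 * node is given prio 1, so a node prefers its local device over other
 * auto-prioritized ones.
 */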
2525 
2526 static void _enable_swap_info(struct swap_info_struct *p)
2527 {
2528 	bool skip = false;
2529 
2530 	p->flags |= SWP_WRITEOK | SWP_VALID;
2531 	trace_android_vh_account_swap_pages(p, &skip);
2532 	if (!skip) {
2533 		atomic_long_add(p->pages, &nr_swap_pages);
2534 		total_swap_pages += p->pages;
2535 	}
2536 	assert_spin_locked(&swap_lock);
2537 	/*
2538 	 * both lists are plists, and thus priority ordered.
2539 	 * swap_active_head needs to be priority ordered for swapoff(),
2540 	 * which on removal of any swap_info_struct with an auto-assigned
2541 	 * (i.e. negative) priority increments the auto-assigned priority
2542 	 * of any lower-priority swap_info_structs.
2543 	 * swap_avail_head needs to be priority ordered for get_swap_page(),
2544 	 * which allocates swap pages from the highest available priority
2545 	 * swap_info_struct.
2546 	 */
2547 	plist_add(&p->list, &swap_active_head);
2548 	add_to_avail_list(p);
2549 }
2550 
2551 static void enable_swap_info(struct swap_info_struct *p, int prio,
2552 				unsigned char *swap_map,
2553 				struct swap_cluster_info *cluster_info,
2554 				unsigned long *frontswap_map)
2555 {
2556 	frontswap_init(p->type, frontswap_map);
2557 	spin_lock(&swap_lock);
2558 	spin_lock(&p->lock);
2559 	setup_swap_info(p, prio, swap_map, cluster_info);
2560 	spin_unlock(&p->lock);
2561 	spin_unlock(&swap_lock);
2562 	/*
2563 	 * Guarantee swap_map, cluster_info, etc. fields are valid
2564 	 * between get/put_swap_device() if SWP_VALID bit is set
2565 	 */
2566 	synchronize_rcu();
2567 	spin_lock(&swap_lock);
2568 	spin_lock(&p->lock);
2569 	_enable_swap_info(p);
2570 	spin_unlock(&p->lock);
2571 	spin_unlock(&swap_lock);
2572 }
2573 
2574 static void reinsert_swap_info(struct swap_info_struct *p)
2575 {
2576 	spin_lock(&swap_lock);
2577 	spin_lock(&p->lock);
2578 	setup_swap_info(p, p->prio, p->swap_map, p->cluster_info);
2579 	_enable_swap_info(p);
2580 	spin_unlock(&p->lock);
2581 	spin_unlock(&swap_lock);
2582 }
2583 
2584 bool has_usable_swap(void)
2585 {
2586 	bool ret = true;
2587 
2588 	spin_lock(&swap_lock);
2589 	if (plist_head_empty(&swap_active_head))
2590 		ret = false;
2591 	spin_unlock(&swap_lock);
2592 	return ret;
2593 }
2594 
2595 SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
2596 {
2597 	struct swap_info_struct *p = NULL;
2598 	unsigned char *swap_map;
2599 	struct swap_cluster_info *cluster_info;
2600 	unsigned long *frontswap_map;
2601 	struct file *swap_file, *victim;
2602 	struct address_space *mapping;
2603 	struct inode *inode;
2604 	struct filename *pathname;
2605 	int err, found = 0;
2606 	unsigned int old_block_size;
2607 	bool skip = false;
2608 
2609 	if (!capable(CAP_SYS_ADMIN))
2610 		return -EPERM;
2611 
2612 	BUG_ON(!current->mm);
2613 
2614 	pathname = getname(specialfile);
2615 	if (IS_ERR(pathname))
2616 		return PTR_ERR(pathname);
2617 
2618 	victim = file_open_name(pathname, O_RDWR|O_LARGEFILE, 0);
2619 	err = PTR_ERR(victim);
2620 	if (IS_ERR(victim))
2621 		goto out;
2622 
2623 	mapping = victim->f_mapping;
2624 	spin_lock(&swap_lock);
2625 	plist_for_each_entry(p, &swap_active_head, list) {
2626 		if (p->flags & SWP_WRITEOK) {
2627 			if (p->swap_file->f_mapping == mapping) {
2628 				found = 1;
2629 				break;
2630 			}
2631 		}
2632 	}
2633 	if (!found) {
2634 		err = -EINVAL;
2635 		spin_unlock(&swap_lock);
2636 		goto out_dput;
2637 	}
2638 	if (!security_vm_enough_memory_mm(current->mm, p->pages))
2639 		vm_unacct_memory(p->pages);
2640 	else {
2641 		err = -ENOMEM;
2642 		spin_unlock(&swap_lock);
2643 		goto out_dput;
2644 	}
2645 	spin_lock(&p->lock);
2646 	del_from_avail_list(p);
2647 	if (p->prio < 0) {
2648 		struct swap_info_struct *si = p;
2649 		int nid;
2650 
2651 		plist_for_each_entry_continue(si, &swap_active_head, list) {
2652 			si->prio++;
2653 			si->list.prio--;
2654 			for_each_node(nid) {
2655 				if (si->avail_lists[nid].prio != 1)
2656 					si->avail_lists[nid].prio--;
2657 			}
2658 		}
2659 		least_priority++;
2660 	}
2661 	plist_del(&p->list, &swap_active_head);
2662 	trace_android_vh_account_swap_pages(p, &skip);
2663 	if (!skip) {
2664 		atomic_long_sub(p->pages, &nr_swap_pages);
2665 		total_swap_pages -= p->pages;
2666 	}
2667 	p->flags &= ~SWP_WRITEOK;
2668 	spin_unlock(&p->lock);
2669 	spin_unlock(&swap_lock);
2670 
2671 	disable_swap_slots_cache_lock();
2672 
2673 	set_current_oom_origin();
2674 	err = try_to_unuse(p->type, false, 0); /* force unuse all pages */
2675 	clear_current_oom_origin();
2676 
2677 	if (err) {
2678 		/* re-insert swap space back into swap_list */
2679 		reinsert_swap_info(p);
2680 		reenable_swap_slots_cache_unlock();
2681 		goto out_dput;
2682 	}
2683 
2684 	reenable_swap_slots_cache_unlock();
2685 
2686 	spin_lock(&swap_lock);
2687 	spin_lock(&p->lock);
2688 	p->flags &= ~SWP_VALID;		/* mark swap device as invalid */
2689 	spin_unlock(&p->lock);
2690 	spin_unlock(&swap_lock);
2691 	/*
2692 	 * wait for swap operations protected by get/put_swap_device()
2693 	 * to complete
2694 	 */
2695 	synchronize_rcu();
2696 
2697 	flush_work(&p->discard_work);
2698 
2699 	destroy_swap_extents(p);
2700 	if (p->flags & SWP_CONTINUED)
2701 		free_swap_count_continuations(p);
2702 
2703 	if (!p->bdev || !blk_queue_nonrot(bdev_get_queue(p->bdev)))
2704 		atomic_dec(&nr_rotate_swap);
2705 
2706 	mutex_lock(&swapon_mutex);
2707 	spin_lock(&swap_lock);
2708 	spin_lock(&p->lock);
2709 	drain_mmlist();
2710 
2711 	/* wait for anyone still in scan_swap_map */
2712 	p->highest_bit = 0;		/* cuts scans short */
2713 	while (p->flags >= SWP_SCANNING) {
2714 		spin_unlock(&p->lock);
2715 		spin_unlock(&swap_lock);
2716 		schedule_timeout_uninterruptible(1);
2717 		spin_lock(&swap_lock);
2718 		spin_lock(&p->lock);
2719 	}
2720 
2721 	swap_file = p->swap_file;
2722 	old_block_size = p->old_block_size;
2723 	p->swap_file = NULL;
2724 	p->max = 0;
2725 	swap_map = p->swap_map;
2726 	p->swap_map = NULL;
2727 	cluster_info = p->cluster_info;
2728 	p->cluster_info = NULL;
2729 	frontswap_map = frontswap_map_get(p);
2730 	spin_unlock(&p->lock);
2731 	spin_unlock(&swap_lock);
2732 	arch_swap_invalidate_area(p->type);
2733 	frontswap_invalidate_area(p->type);
2734 	frontswap_map_set(p, NULL);
2735 	mutex_unlock(&swapon_mutex);
2736 	free_percpu(p->percpu_cluster);
2737 	p->percpu_cluster = NULL;
2738 	free_percpu(p->cluster_next_cpu);
2739 	p->cluster_next_cpu = NULL;
2740 	vfree(swap_map);
2741 	kvfree(cluster_info);
2742 	kvfree(frontswap_map);
2743 	/* Destroy swap account information */
2744 	swap_cgroup_swapoff(p->type);
2745 	exit_swap_address_space(p->type);
2746 
2747 	inode = mapping->host;
2748 	if (S_ISBLK(inode->i_mode)) {
2749 		struct block_device *bdev = I_BDEV(inode);
2750 
2751 		set_blocksize(bdev, old_block_size);
2752 		blkdev_put(bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
2753 	}
2754 
2755 	inode_lock(inode);
2756 	inode->i_flags &= ~S_SWAPFILE;
2757 	inode_unlock(inode);
2758 	filp_close(swap_file, NULL);
2759 
2760 	/*
2761 	 * Clear the SWP_USED flag after all resources are freed so that swapon
2762 	 * can reuse this swap_info in alloc_swap_info() safely.  It is ok to
2763 	 * not hold p->lock after we cleared its SWP_WRITEOK.
2764 	 */
2765 	spin_lock(&swap_lock);
2766 	p->flags = 0;
2767 	spin_unlock(&swap_lock);
2768 
2769 	err = 0;
2770 	atomic_inc(&proc_poll_event);
2771 	wake_up_interruptible(&proc_poll_wait);
2772 
2773 out_dput:
2774 	filp_close(victim, NULL);
2775 out:
2776 	putname(pathname);
2777 	return err;
2778 }
2779 
2780 #ifdef CONFIG_PROC_FS
2781 static __poll_t swaps_poll(struct file *file, poll_table *wait)
2782 {
2783 	struct seq_file *seq = file->private_data;
2784 
2785 	poll_wait(file, &proc_poll_wait, wait);
2786 
2787 	if (seq->poll_event != atomic_read(&proc_poll_event)) {
2788 		seq->poll_event = atomic_read(&proc_poll_event);
2789 		return EPOLLIN | EPOLLRDNORM | EPOLLERR | EPOLLPRI;
2790 	}
2791 
2792 	return EPOLLIN | EPOLLRDNORM;
2793 }
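/*
 * Userspace note (illustrative, not part of this file): this makes
 * /proc/swaps pollable.  A monitor can open and read the file, then poll()
 * it with POLLPRI in the event mask; poll() reports POLLERR | POLLPRI once
 * a swapon or swapoff has bumped proc_poll_event, after which the file can
 * be re-read from offset 0 to pick up the new state.
 */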
2794 
2795 /* iterator */
2796 static void *swap_start(struct seq_file *swap, loff_t *pos)
2797 {
2798 	struct swap_info_struct *si;
2799 	int type;
2800 	loff_t l = *pos;
2801 
2802 	mutex_lock(&swapon_mutex);
2803 
2804 	if (!l)
2805 		return SEQ_START_TOKEN;
2806 
2807 	for (type = 0; (si = swap_type_to_swap_info(type)); type++) {
2808 		if (!(si->flags & SWP_USED) || !si->swap_map)
2809 			continue;
2810 		if (!--l)
2811 			return si;
2812 	}
2813 
2814 	return NULL;
2815 }
2816 
2817 static void *swap_next(struct seq_file *swap, void *v, loff_t *pos)
2818 {
2819 	struct swap_info_struct *si = v;
2820 	int type;
2821 
2822 	if (v == SEQ_START_TOKEN)
2823 		type = 0;
2824 	else
2825 		type = si->type + 1;
2826 
2827 	++(*pos);
2828 	for (; (si = swap_type_to_swap_info(type)); type++) {
2829 		if (!(si->flags & SWP_USED) || !si->swap_map)
2830 			continue;
2831 		return si;
2832 	}
2833 
2834 	return NULL;
2835 }
2836 
2837 static void swap_stop(struct seq_file *swap, void *v)
2838 {
2839 	mutex_unlock(&swapon_mutex);
2840 }
2841 
2842 static int swap_show(struct seq_file *swap, void *v)
2843 {
2844 	struct swap_info_struct *si = v;
2845 	struct file *file;
2846 	int len;
2847 	unsigned int bytes, inuse;
2848 
2849 	if (si == SEQ_START_TOKEN) {
2850 		seq_puts(swap,"Filename\t\t\t\tType\t\tSize\t\tUsed\t\tPriority\n");
2851 		return 0;
2852 	}
2853 
2854 	bytes = si->pages << (PAGE_SHIFT - 10);
2855 	inuse = si->inuse_pages << (PAGE_SHIFT - 10);
2856 
2857 	file = si->swap_file;
2858 	len = seq_file_path(swap, file, " \t\n\\");
2859 	seq_printf(swap, "%*s%s\t%u\t%s%u\t%s%d\n",
2860 			len < 40 ? 40 - len : 1, " ",
2861 			S_ISBLK(file_inode(file)->i_mode) ?
2862 				"partition" : "file\t",
2863 			bytes, bytes < 10000000 ? "\t" : "",
2864 			inuse, inuse < 10000000 ? "\t" : "",
2865 			si->prio);
2866 	return 0;
2867 }
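/*
 * The resulting /proc/swaps output looks roughly like this (values are
 * illustrative only):
 *
 *	Filename                                Type            Size    Used    Priority
 *	/dev/sda2                               partition       8388604 1024    -2
 *	/swapfile                               file            2097148 0       -3
 */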
2868 
2869 static const struct seq_operations swaps_op = {
2870 	.start =	swap_start,
2871 	.next =		swap_next,
2872 	.stop =		swap_stop,
2873 	.show =		swap_show
2874 };
2875 
2876 static int swaps_open(struct inode *inode, struct file *file)
2877 {
2878 	struct seq_file *seq;
2879 	int ret;
2880 
2881 	ret = seq_open(file, &swaps_op);
2882 	if (ret)
2883 		return ret;
2884 
2885 	seq = file->private_data;
2886 	seq->poll_event = atomic_read(&proc_poll_event);
2887 	return 0;
2888 }
2889 
2890 static const struct proc_ops swaps_proc_ops = {
2891 	.proc_flags	= PROC_ENTRY_PERMANENT,
2892 	.proc_open	= swaps_open,
2893 	.proc_read	= seq_read,
2894 	.proc_lseek	= seq_lseek,
2895 	.proc_release	= seq_release,
2896 	.proc_poll	= swaps_poll,
2897 };
2898 
2899 static int __init procswaps_init(void)
2900 {
2901 	proc_create("swaps", 0, NULL, &swaps_proc_ops);
2902 	return 0;
2903 }
2904 __initcall(procswaps_init);
2905 #endif /* CONFIG_PROC_FS */
2906 
2907 #ifdef MAX_SWAPFILES_CHECK
2908 static int __init max_swapfiles_check(void)
2909 {
2910 	MAX_SWAPFILES_CHECK();
2911 	return 0;
2912 }
2913 late_initcall(max_swapfiles_check);
2914 #endif
2915 
2916 static struct swap_info_struct *alloc_swap_info(void)
2917 {
2918 	struct swap_info_struct *p = NULL;
2919 	struct swap_info_struct *defer = NULL;
2920 	unsigned int type;
2921 	int i;
2922 	bool skip = false;
2923 
2924 	trace_android_rvh_alloc_si(&p, &skip);
2925 	trace_android_vh_alloc_si(&p, &skip);
2926 	if (!skip)
2927 		p = kvzalloc(struct_size(p, avail_lists, nr_node_ids), GFP_KERNEL);
2928 	if (!p)
2929 		return ERR_PTR(-ENOMEM);
2930 
2931 	spin_lock(&swap_lock);
2932 	for (type = 0; type < nr_swapfiles; type++) {
2933 		if (!(swap_info[type]->flags & SWP_USED))
2934 			break;
2935 	}
2936 	if (type >= MAX_SWAPFILES) {
2937 		spin_unlock(&swap_lock);
2938 		kvfree(p);
2939 		return ERR_PTR(-EPERM);
2940 	}
2941 	if (type >= nr_swapfiles) {
2942 		p->type = type;
2943 		WRITE_ONCE(swap_info[type], p);
2944 		/*
2945 		 * Write swap_info[type] before nr_swapfiles, in case a
2946 		 * racing procfs swap_start() or swap_next() is reading them.
2947 		 * (We never shrink nr_swapfiles, we never free this entry.)
2948 		 */
2949 		smp_wmb();
2950 		WRITE_ONCE(nr_swapfiles, nr_swapfiles + 1);
2951 	} else {
2952 		defer = p;
2953 		p = swap_info[type];
2954 		/*
2955 		 * Do not memset this entry: a racing procfs swap_next()
2956 		 * would be relying on p->type to remain valid.
2957 		 */
2958 	}
2959 	p->swap_extent_root = RB_ROOT;
2960 	plist_node_init(&p->list, 0);
2961 	for_each_node(i)
2962 		plist_node_init(&p->avail_lists[i], 0);
2963 	p->flags = SWP_USED;
2964 	spin_unlock(&swap_lock);
2965 	kvfree(defer);
2966 	spin_lock_init(&p->lock);
2967 	spin_lock_init(&p->cont_lock);
2968 
2969 	return p;
2970 }
2971 
2972 static int claim_swapfile(struct swap_info_struct *p, struct inode *inode)
2973 {
2974 	int error;
2975 
2976 	if (S_ISBLK(inode->i_mode)) {
2977 		p->bdev = blkdev_get_by_dev(inode->i_rdev,
2978 				   FMODE_READ | FMODE_WRITE | FMODE_EXCL, p);
2979 		if (IS_ERR(p->bdev)) {
2980 			error = PTR_ERR(p->bdev);
2981 			p->bdev = NULL;
2982 			return error;
2983 		}
2984 		p->old_block_size = block_size(p->bdev);
2985 		error = set_blocksize(p->bdev, PAGE_SIZE);
2986 		if (error < 0)
2987 			return error;
2988 		/*
2989 		 * Zoned block devices contain zones that have a sequential
2990 		 * write only restriction.  Hence zoned block devices are not
2991 		 * suitable for swapping.  Disallow them here.
2992 		 */
2993 		if (blk_queue_is_zoned(p->bdev->bd_disk->queue))
2994 			return -EINVAL;
2995 		p->flags |= SWP_BLKDEV;
2996 	} else if (S_ISREG(inode->i_mode)) {
2997 		p->bdev = inode->i_sb->s_bdev;
2998 	}
2999 
3000 	return 0;
3001 }
3002 
3003 
3004 /*
3005  * Find out how many pages are allowed for a single swap device. There
3006  * are two limiting factors:
3007  * 1) the number of bits for the swap offset in the swp_entry_t type, and
3008  * 2) the number of bits in the swap pte, as defined by the different
3009  * architectures.
3010  *
3011  * In order to find the largest possible bit mask, a swap entry with
3012  * swap type 0 and swap offset ~0UL is created, encoded to a swap pte,
3013  * decoded to a swp_entry_t again, and finally the swap offset is
3014  * extracted.
3015  *
3016  * This will mask all the bits from the initial ~0UL mask that can't
3017  * be encoded in either the swp_entry_t or the architecture definition
3018  * of a swap pte.
3019  */
3020 unsigned long generic_max_swapfile_size(void)
3021 {
3022 	return swp_offset(pte_to_swp_entry(
3023 			swp_entry_to_pte(swp_entry(0, ~0UL)))) + 1;
3024 }
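/*
 * Worked example (the width is architecture-dependent; 50 bits is assumed
 * here): if the swap pte can encode a 50-bit offset, the round trip above
 * masks the initial ~0UL down to (1UL << 50) - 1, and the function returns
 * 1UL << 50, i.e. the number of distinct page offsets one swap device can
 * hold.
 */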
3025 
3026 /* Can be overridden by an architecture for additional checks. */
3027 __weak unsigned long max_swapfile_size(void)
3028 {
3029 	return generic_max_swapfile_size();
3030 }
3031 
3032 static unsigned long read_swap_header(struct swap_info_struct *p,
3033 					union swap_header *swap_header,
3034 					struct inode *inode)
3035 {
3036 	int i;
3037 	unsigned long maxpages;
3038 	unsigned long swapfilepages;
3039 	unsigned long last_page;
3040 
3041 	if (memcmp("SWAPSPACE2", swap_header->magic.magic, 10)) {
3042 		pr_err("Unable to find swap-space signature\n");
3043 		return 0;
3044 	}
3045 
3046 	/* swap partition endianness hack... */
3047 	if (swab32(swap_header->info.version) == 1) {
3048 		swab32s(&swap_header->info.version);
3049 		swab32s(&swap_header->info.last_page);
3050 		swab32s(&swap_header->info.nr_badpages);
3051 		if (swap_header->info.nr_badpages > MAX_SWAP_BADPAGES)
3052 			return 0;
3053 		for (i = 0; i < swap_header->info.nr_badpages; i++)
3054 			swab32s(&swap_header->info.badpages[i]);
3055 	}
3056 	/* Check the swap header's sub-version */
3057 	if (swap_header->info.version != 1) {
3058 		pr_warn("Unable to handle swap header version %d\n",
3059 			swap_header->info.version);
3060 		return 0;
3061 	}
3062 
3063 	p->lowest_bit  = 1;
3064 	p->cluster_next = 1;
3065 	p->cluster_nr = 0;
3066 
3067 	maxpages = max_swapfile_size();
3068 	last_page = swap_header->info.last_page;
3069 	if (!last_page) {
3070 		pr_warn("Empty swap-file\n");
3071 		return 0;
3072 	}
3073 	if (last_page > maxpages) {
3074 		pr_warn("Truncating oversized swap area, only using %luk out of %luk\n",
3075 			maxpages << (PAGE_SHIFT - 10),
3076 			last_page << (PAGE_SHIFT - 10));
3077 	}
3078 	if (maxpages > last_page) {
3079 		maxpages = last_page + 1;
3080 		/* p->max is an unsigned int: don't overflow it */
3081 		if ((unsigned int)maxpages == 0)
3082 			maxpages = UINT_MAX;
3083 	}
3084 	p->highest_bit = maxpages - 1;
3085 
3086 	if (!maxpages)
3087 		return 0;
3088 	swapfilepages = i_size_read(inode) >> PAGE_SHIFT;
3089 	if (swapfilepages && maxpages > swapfilepages) {
3090 		pr_warn("Swap area shorter than signature indicates\n");
3091 		return 0;
3092 	}
3093 	if (swap_header->info.nr_badpages && S_ISREG(inode->i_mode))
3094 		return 0;
3095 	if (swap_header->info.nr_badpages > MAX_SWAP_BADPAGES)
3096 		return 0;
3097 
3098 	return maxpages;
3099 }
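/*
 * Example of the clamping above, with assumed numbers: if the header's
 * last_page is 1048575 and max_swapfile_size() allows more than that,
 * maxpages becomes last_page + 1 == 1048576 page slots (slot 0 is reserved
 * for the header later, in setup_swap_map_and_extents()).  If last_page
 * exceeded the architecture limit instead, the area would be truncated to
 * maxpages and a warning printed.
 */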
3100 
3101 #define SWAP_CLUSTER_INFO_COLS						\
3102 	DIV_ROUND_UP(L1_CACHE_BYTES, sizeof(struct swap_cluster_info))
3103 #define SWAP_CLUSTER_SPACE_COLS						\
3104 	DIV_ROUND_UP(SWAP_ADDRESS_SPACE_PAGES, SWAPFILE_CLUSTER)
3105 #define SWAP_CLUSTER_COLS						\
3106 	max_t(unsigned int, SWAP_CLUSTER_INFO_COLS, SWAP_CLUSTER_SPACE_COLS)
3107 
3108 static int setup_swap_map_and_extents(struct swap_info_struct *p,
3109 					union swap_header *swap_header,
3110 					unsigned char *swap_map,
3111 					struct swap_cluster_info *cluster_info,
3112 					unsigned long maxpages,
3113 					sector_t *span)
3114 {
3115 	unsigned int j, k;
3116 	unsigned int nr_good_pages;
3117 	int nr_extents;
3118 	unsigned long nr_clusters = DIV_ROUND_UP(maxpages, SWAPFILE_CLUSTER);
3119 	unsigned long col = p->cluster_next / SWAPFILE_CLUSTER % SWAP_CLUSTER_COLS;
3120 	unsigned long i, idx;
3121 
3122 	nr_good_pages = maxpages - 1;	/* omit header page */
3123 
3124 	cluster_list_init(&p->free_clusters);
3125 	cluster_list_init(&p->discard_clusters);
3126 
3127 	for (i = 0; i < swap_header->info.nr_badpages; i++) {
3128 		unsigned int page_nr = swap_header->info.badpages[i];
3129 		if (page_nr == 0 || page_nr > swap_header->info.last_page)
3130 			return -EINVAL;
3131 		if (page_nr < maxpages) {
3132 			swap_map[page_nr] = SWAP_MAP_BAD;
3133 			nr_good_pages--;
3134 			/*
3135 			 * Haven't marked the cluster free yet, no list
3136 			 * operation involved
3137 			 */
3138 			inc_cluster_info_page(p, cluster_info, page_nr);
3139 		}
3140 	}
3141 
3142 	/* Haven't marked the cluster free yet, no list operation involved */
3143 	for (i = maxpages; i < round_up(maxpages, SWAPFILE_CLUSTER); i++)
3144 		inc_cluster_info_page(p, cluster_info, i);
3145 
3146 	if (nr_good_pages) {
3147 		swap_map[0] = SWAP_MAP_BAD;
3148 		/*
3149 		 * Haven't marked the cluster free yet, no list
3150 		 * operation involved
3151 		 */
3152 		inc_cluster_info_page(p, cluster_info, 0);
3153 		p->max = maxpages;
3154 		p->pages = nr_good_pages;
3155 		nr_extents = setup_swap_extents(p, span);
3156 		if (nr_extents < 0)
3157 			return nr_extents;
3158 		nr_good_pages = p->pages;
3159 	}
3160 	if (!nr_good_pages) {
3161 		pr_warn("Empty swap-file\n");
3162 		return -EINVAL;
3163 	}
3164 
3165 	if (!cluster_info)
3166 		return nr_extents;
3167 
3168 
3169 	/*
3170 	 * Reduce false cache line sharing between cluster_info entries
3171 	 * that belong to the same swap address space.
3172 	 */
3173 	for (k = 0; k < SWAP_CLUSTER_COLS; k++) {
3174 		j = (k + col) % SWAP_CLUSTER_COLS;
3175 		for (i = 0; i < DIV_ROUND_UP(nr_clusters, SWAP_CLUSTER_COLS); i++) {
3176 			idx = i * SWAP_CLUSTER_COLS + j;
3177 			if (idx >= nr_clusters)
3178 				continue;
3179 			if (cluster_count(&cluster_info[idx]))
3180 				continue;
3181 			cluster_set_flag(&cluster_info[idx], CLUSTER_FLAG_FREE);
3182 			cluster_list_add_tail(&p->free_clusters, cluster_info,
3183 					      idx);
3184 		}
3185 	}
3186 	return nr_extents;
3187 }
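/*
 * Example of the column interleaving above, with assumed values: if
 * SWAP_CLUSTER_COLS were 4 and col were 1, free clusters would be queued in
 * index order 1, 5, 9, ... then 2, 6, 10, ... then 3, 7, ... and finally
 * 0, 4, 8, ..., so clusters handed out consecutively from the free list
 * tend to have their cluster_info entries in different cache lines.
 */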
3188 
3189 /*
3190  * Helper to sys_swapon determining if a given swap
3191  * backing device queue supports DISCARD operations.
3192  */
3193 static bool swap_discardable(struct swap_info_struct *si)
3194 {
3195 	struct request_queue *q = bdev_get_queue(si->bdev);
3196 
3197 	if (!q || !blk_queue_discard(q))
3198 		return false;
3199 
3200 	return true;
3201 }
3202 
3203 SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
3204 {
3205 	struct swap_info_struct *p;
3206 	struct filename *name;
3207 	struct file *swap_file = NULL;
3208 	struct address_space *mapping;
3209 	int prio;
3210 	int error;
3211 	union swap_header *swap_header;
3212 	int nr_extents;
3213 	sector_t span;
3214 	unsigned long maxpages;
3215 	unsigned char *swap_map = NULL;
3216 	struct swap_cluster_info *cluster_info = NULL;
3217 	unsigned long *frontswap_map = NULL;
3218 	struct page *page = NULL;
3219 	struct inode *inode = NULL;
3220 	bool inced_nr_rotate_swap = false;
3221 
3222 	if (swap_flags & ~SWAP_FLAGS_VALID)
3223 		return -EINVAL;
3224 
3225 	if (!capable(CAP_SYS_ADMIN))
3226 		return -EPERM;
3227 
3228 	if (!swap_avail_heads)
3229 		return -ENOMEM;
3230 
3231 	p = alloc_swap_info();
3232 	if (IS_ERR(p))
3233 		return PTR_ERR(p);
3234 
3235 	INIT_WORK(&p->discard_work, swap_discard_work);
3236 
3237 	name = getname(specialfile);
3238 	if (IS_ERR(name)) {
3239 		error = PTR_ERR(name);
3240 		name = NULL;
3241 		goto bad_swap;
3242 	}
3243 	swap_file = file_open_name(name, O_RDWR|O_LARGEFILE, 0);
3244 	if (IS_ERR(swap_file)) {
3245 		error = PTR_ERR(swap_file);
3246 		swap_file = NULL;
3247 		goto bad_swap;
3248 	}
3249 
3250 	p->swap_file = swap_file;
3251 	mapping = swap_file->f_mapping;
3252 	inode = mapping->host;
3253 
3254 	error = claim_swapfile(p, inode);
3255 	if (unlikely(error))
3256 		goto bad_swap;
3257 
3258 	inode_lock(inode);
3259 	if (IS_SWAPFILE(inode)) {
3260 		error = -EBUSY;
3261 		goto bad_swap_unlock_inode;
3262 	}
3263 
3264 	/*
3265 	 * Read the swap header.
3266 	 */
3267 	if (!mapping->a_ops->readpage) {
3268 		error = -EINVAL;
3269 		goto bad_swap_unlock_inode;
3270 	}
3271 	page = read_mapping_page(mapping, 0, swap_file);
3272 	if (IS_ERR(page)) {
3273 		error = PTR_ERR(page);
3274 		goto bad_swap_unlock_inode;
3275 	}
3276 	swap_header = kmap(page);
3277 
3278 	maxpages = read_swap_header(p, swap_header, inode);
3279 	if (unlikely(!maxpages)) {
3280 		error = -EINVAL;
3281 		goto bad_swap_unlock_inode;
3282 	}
3283 
3284 	/* OK, set up the swap map and apply the bad block list */
3285 	swap_map = vzalloc(maxpages);
3286 	if (!swap_map) {
3287 		error = -ENOMEM;
3288 		goto bad_swap_unlock_inode;
3289 	}
3290 
3291 	if (p->bdev && blk_queue_stable_writes(p->bdev->bd_disk->queue))
3292 		p->flags |= SWP_STABLE_WRITES;
3293 
3294 	if (p->bdev && p->bdev->bd_disk->fops->rw_page)
3295 		p->flags |= SWP_SYNCHRONOUS_IO;
3296 
3297 	if (p->bdev && blk_queue_nonrot(bdev_get_queue(p->bdev))) {
3298 		int cpu;
3299 		unsigned long ci, nr_cluster;
3300 
3301 		p->flags |= SWP_SOLIDSTATE;
3302 		p->cluster_next_cpu = alloc_percpu(unsigned int);
3303 		if (!p->cluster_next_cpu) {
3304 			error = -ENOMEM;
3305 			goto bad_swap_unlock_inode;
3306 		}
3307 		/*
3308 		 * select a random position to start with to help the SSD's
3309 		 * wear leveling
3310 		 */
3311 		for_each_possible_cpu(cpu) {
3312 			per_cpu(*p->cluster_next_cpu, cpu) =
3313 				1 + prandom_u32_max(p->highest_bit);
3314 		}
3315 		nr_cluster = DIV_ROUND_UP(maxpages, SWAPFILE_CLUSTER);
3316 
3317 		cluster_info = kvcalloc(nr_cluster, sizeof(*cluster_info),
3318 					GFP_KERNEL);
3319 		if (!cluster_info) {
3320 			error = -ENOMEM;
3321 			goto bad_swap_unlock_inode;
3322 		}
3323 
3324 		for (ci = 0; ci < nr_cluster; ci++)
3325 			spin_lock_init(&((cluster_info + ci)->lock));
3326 
3327 		p->percpu_cluster = alloc_percpu(struct percpu_cluster);
3328 		if (!p->percpu_cluster) {
3329 			error = -ENOMEM;
3330 			goto bad_swap_unlock_inode;
3331 		}
3332 		for_each_possible_cpu(cpu) {
3333 			struct percpu_cluster *cluster;
3334 			cluster = per_cpu_ptr(p->percpu_cluster, cpu);
3335 			cluster_set_null(&cluster->index);
3336 		}
3337 	} else {
3338 		atomic_inc(&nr_rotate_swap);
3339 		inced_nr_rotate_swap = true;
3340 	}
3341 
3342 	error = swap_cgroup_swapon(p->type, maxpages);
3343 	if (error)
3344 		goto bad_swap_unlock_inode;
3345 
3346 	nr_extents = setup_swap_map_and_extents(p, swap_header, swap_map,
3347 		cluster_info, maxpages, &span);
3348 	if (unlikely(nr_extents < 0)) {
3349 		error = nr_extents;
3350 		goto bad_swap_unlock_inode;
3351 	}
3352 	/* frontswap enabled? set up bit-per-page map for frontswap */
3353 	if (IS_ENABLED(CONFIG_FRONTSWAP))
3354 		frontswap_map = kvcalloc(BITS_TO_LONGS(maxpages),
3355 					 sizeof(long),
3356 					 GFP_KERNEL);
3357 
3358 	if (p->bdev && (swap_flags & SWAP_FLAG_DISCARD) && swap_discardable(p)) {
3359 		/*
3360 		 * When discard is enabled for swap with no particular
3361 		 * policy flagged, we set all swap discard flags here in
3362 		 * order to sustain backward compatibility with older
3363 		 * swapon(8) releases.
3364 		 */
3365 		p->flags |= (SWP_DISCARDABLE | SWP_AREA_DISCARD |
3366 			     SWP_PAGE_DISCARD);
3367 
3368 		/*
3369 		 * By flagging sys_swapon, a sysadmin can tell us to
3370 		 * either do single-time area discards only, or to just
3371 		 * perform discards for released swap page-clusters.
3372 		 * Now it's time to adjust the p->flags accordingly.
3373 		 */
3374 		if (swap_flags & SWAP_FLAG_DISCARD_ONCE)
3375 			p->flags &= ~SWP_PAGE_DISCARD;
3376 		else if (swap_flags & SWAP_FLAG_DISCARD_PAGES)
3377 			p->flags &= ~SWP_AREA_DISCARD;
3378 
3379 		/* issue a swapon-time discard if it's still required */
3380 		if (p->flags & SWP_AREA_DISCARD) {
3381 			int err = discard_swap(p);
3382 			if (unlikely(err))
3383 				pr_err("swapon: discard_swap(%p): %d\n",
3384 					p, err);
3385 		}
3386 	}
3387 
3388 	error = init_swap_address_space(p->type, maxpages);
3389 	if (error)
3390 		goto bad_swap_unlock_inode;
3391 
3392 	/*
3393 	 * Flush any pending IO and dirty mappings before we start using this
3394 	 * swap device.
3395 	 */
3396 	inode->i_flags |= S_SWAPFILE;
3397 	error = inode_drain_writes(inode);
3398 	if (error) {
3399 		inode->i_flags &= ~S_SWAPFILE;
3400 		goto free_swap_address_space;
3401 	}
3402 
3403 	mutex_lock(&swapon_mutex);
3404 	prio = -1;
3405 	if (swap_flags & SWAP_FLAG_PREFER)
3406 		prio =
3407 		  (swap_flags & SWAP_FLAG_PRIO_MASK) >> SWAP_FLAG_PRIO_SHIFT;
3408 
3409 	trace_android_vh_swap_avail_heads_init(swap_avail_heads);
3410 	enable_swap_info(p, prio, swap_map, cluster_info, frontswap_map);
3411 
3412 	trace_android_vh_init_swap_info_struct(p, swap_avail_heads);
3413 	pr_info("Adding %uk swap on %s.  Priority:%d extents:%d across:%lluk %s%s%s%s%s\n",
3414 		p->pages<<(PAGE_SHIFT-10), name->name, p->prio,
3415 		nr_extents, (unsigned long long)span<<(PAGE_SHIFT-10),
3416 		(p->flags & SWP_SOLIDSTATE) ? "SS" : "",
3417 		(p->flags & SWP_DISCARDABLE) ? "D" : "",
3418 		(p->flags & SWP_AREA_DISCARD) ? "s" : "",
3419 		(p->flags & SWP_PAGE_DISCARD) ? "c" : "",
3420 		(frontswap_map) ? "FS" : "");
3421 
3422 	mutex_unlock(&swapon_mutex);
3423 	atomic_inc(&proc_poll_event);
3424 	wake_up_interruptible(&proc_poll_wait);
3425 
3426 	error = 0;
3427 	goto out;
3428 free_swap_address_space:
3429 	exit_swap_address_space(p->type);
3430 bad_swap_unlock_inode:
3431 	inode_unlock(inode);
3432 bad_swap:
3433 	free_percpu(p->percpu_cluster);
3434 	p->percpu_cluster = NULL;
3435 	free_percpu(p->cluster_next_cpu);
3436 	p->cluster_next_cpu = NULL;
3437 	if (inode && S_ISBLK(inode->i_mode) && p->bdev) {
3438 		set_blocksize(p->bdev, p->old_block_size);
3439 		blkdev_put(p->bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
3440 	}
3441 	inode = NULL;
3442 	destroy_swap_extents(p);
3443 	swap_cgroup_swapoff(p->type);
3444 	spin_lock(&swap_lock);
3445 	p->swap_file = NULL;
3446 	p->flags = 0;
3447 	spin_unlock(&swap_lock);
3448 	vfree(swap_map);
3449 	kvfree(cluster_info);
3450 	kvfree(frontswap_map);
3451 	if (inced_nr_rotate_swap)
3452 		atomic_dec(&nr_rotate_swap);
3453 	if (swap_file)
3454 		filp_close(swap_file, NULL);
3455 out:
3456 	if (page && !IS_ERR(page)) {
3457 		kunmap(page);
3458 		put_page(page);
3459 	}
3460 	if (name)
3461 		putname(name);
3462 	if (inode)
3463 		inode_unlock(inode);
3464 	if (!error)
3465 		enable_swap_slots_cache();
3466 	return error;
3467 }
3468 
3469 void si_swapinfo(struct sysinfo *val)
3470 {
3471 	unsigned int type;
3472 	unsigned long nr_to_be_unused = 0;
3473 
3474 	spin_lock(&swap_lock);
3475 	for (type = 0; type < nr_swapfiles; type++) {
3476 		struct swap_info_struct *si = swap_info[type];
3477 		bool skip = false;
3478 
3479 		trace_android_vh_si_swapinfo(si, &skip);
3480 		if (!skip && (si->flags & SWP_USED) && !(si->flags & SWP_WRITEOK))
3481 			nr_to_be_unused += si->inuse_pages;
3482 	}
3483 	val->freeswap = atomic_long_read(&nr_swap_pages) + nr_to_be_unused;
3484 	val->totalswap = total_swap_pages + nr_to_be_unused;
3485 	spin_unlock(&swap_lock);
3486 }
3487 EXPORT_SYMBOL_GPL(si_swapinfo);
3488 
3489 /*
3490  * Verify that a swap entry is valid and increment its swap map count.
3491  *
3492  * Returns error code in following case.
3493  * - success -> 0
3494  * - swp_entry is invalid -> EINVAL
3495  * - swp_entry is migration entry -> EINVAL
3496  * - swap-cache reference is requested but there is already one. -> EEXIST
3497  * - swap-cache reference is requested but the entry is not used. -> ENOENT
3498  * - swap-mapped reference requested but needs continued swap count. -> ENOMEM
3499  */
3500 static int __swap_duplicate(swp_entry_t entry, unsigned char usage)
3501 {
3502 	struct swap_info_struct *p;
3503 	struct swap_cluster_info *ci;
3504 	unsigned long offset;
3505 	unsigned char count;
3506 	unsigned char has_cache;
3507 	int err = -EINVAL;
3508 
3509 	p = get_swap_device(entry);
3510 	if (!p)
3511 		goto out;
3512 
3513 	offset = swp_offset(entry);
3514 	ci = lock_cluster_or_swap_info(p, offset);
3515 
3516 	count = p->swap_map[offset];
3517 
3518 	/*
3519 	 * swapin_readahead() doesn't check if a swap entry is valid, so the
3520 	 * swap entry could be SWAP_MAP_BAD. Check here with lock held.
3521 	 */
3522 	if (unlikely(swap_count(count) == SWAP_MAP_BAD)) {
3523 		err = -ENOENT;
3524 		goto unlock_out;
3525 	}
3526 
3527 	has_cache = count & SWAP_HAS_CACHE;
3528 	count &= ~SWAP_HAS_CACHE;
3529 	err = 0;
3530 
3531 	if (usage == SWAP_HAS_CACHE) {
3532 
3533 		/* set SWAP_HAS_CACHE if there is no cache and entry is used */
3534 		if (!has_cache && count)
3535 			has_cache = SWAP_HAS_CACHE;
3536 		else if (has_cache)		/* someone else added cache */
3537 			err = -EEXIST;
3538 		else				/* no users remaining */
3539 			err = -ENOENT;
3540 
3541 	} else if (count || has_cache) {
3542 
3543 		if ((count & ~COUNT_CONTINUED) < SWAP_MAP_MAX)
3544 			count += usage;
3545 		else if ((count & ~COUNT_CONTINUED) > SWAP_MAP_MAX)
3546 			err = -EINVAL;
3547 		else if (swap_count_continued(p, offset, count))
3548 			count = COUNT_CONTINUED;
3549 		else
3550 			err = -ENOMEM;
3551 	} else
3552 		err = -ENOENT;			/* unused swap entry */
3553 
3554 	WRITE_ONCE(p->swap_map[offset], count | has_cache);
3555 
3556 unlock_out:
3557 	unlock_cluster_or_swap_info(p, ci);
3558 out:
3559 	if (p)
3560 		put_swap_device(p);
3561 	return err;
3562 }
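/*
 * For reference, the swap_map byte manipulated above packs (values as
 * defined in <linux/swap.h>):
 *
 *	bit 7 (COUNT_CONTINUED, 0x80)  count continues in a continuation page
 *	bit 6 (SWAP_HAS_CACHE,  0x40)  a swap cache page exists for the entry
 *	bits 5..0                      map count, up to SWAP_MAP_MAX (0x3e);
 *	                               0x3f is SWAP_MAP_BAD
 *
 * SWAP_MAP_SHMEM (0xbf) marks an entry owned by shmem/tmpfs, whose count
 * is never incremented (see swap_shmem_alloc() below).
 */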
3563 
3564 /*
3565  * Help swapoff by noting that swap entry belongs to shmem/tmpfs
3566  * (in which case its reference count is never incremented).
3567  */
3568 void swap_shmem_alloc(swp_entry_t entry)
3569 {
3570 	__swap_duplicate(entry, SWAP_MAP_SHMEM);
3571 }
3572 
3573 /*
3574  * Increase reference count of swap entry by 1.
3575  * Returns 0 for success, or -ENOMEM if a swap_count_continuation is required
3576  * but could not be atomically allocated.  Returns 0, just as if it succeeded,
3577  * if __swap_duplicate() fails for another reason (-EINVAL or -ENOENT), which
3578  * might occur if a page table entry has got corrupted.
3579  */
3580 int swap_duplicate(swp_entry_t entry)
3581 {
3582 	int err = 0;
3583 
3584 	while (!err && __swap_duplicate(entry, 1) == -ENOMEM)
3585 		err = add_swap_count_continuation(entry, GFP_ATOMIC);
3586 	return err;
3587 }
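/*
 * Typical caller pattern (a sketch of the fork/copy-page-table path; see
 * mm/memory.c for the real code):
 *
 *	swp_entry_t entry = pte_to_swp_entry(orig_pte);
 *
 *	if (swap_duplicate(entry) < 0) {
 *		// -ENOMEM: drop the page table lock, call
 *		// add_swap_count_continuation(entry, GFP_KERNEL),
 *		// then retry copying this pte.
 *	}
 */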
3588 
3589 /*
3590  * @entry: swap entry for which we allocate swap cache.
3591  *
3592  * Called when allocating swap cache for an existing swap entry.
3593  * This can return error codes; it returns 0 on success.
3594  * -EEXIST means a swap cache already exists for the entry.
3595  * Note: the return codes differ from those of swap_duplicate().
3596  */
3597 int swapcache_prepare(swp_entry_t entry)
3598 {
3599 	return __swap_duplicate(entry, SWAP_HAS_CACHE);
3600 }
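/*
 * Sketch of how the swap-cache allocation path is expected to use this
 * (see __read_swap_cache_async() in mm/swap_state.c for the real loop):
 *
 *	err = swapcache_prepare(entry);
 *	if (!err)
 *		// we own SWAP_HAS_CACHE: add the page to the swap cache
 *	else if (err == -EEXIST)
 *		// someone else is adding it: back off and retry
 *	else
 *		// the entry went away (e.g. -ENOENT): give up
 */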
3601 
3602 struct swap_info_struct *swp_swap_info(swp_entry_t entry)
3603 {
3604 	return swap_type_to_swap_info(swp_type(entry));
3605 }
3606 EXPORT_SYMBOL_GPL(swp_swap_info);
3607 
3608 struct swap_info_struct *page_swap_info(struct page *page)
3609 {
3610 	swp_entry_t entry = { .val = page_private(page) };
3611 	return swp_swap_info(entry);
3612 }
3613 
3614 /*
3615  * out-of-line __page_file_ methods to avoid include hell.
3616  */
3617 struct address_space *__page_file_mapping(struct page *page)
3618 {
3619 	return page_swap_info(page)->swap_file->f_mapping;
3620 }
3621 EXPORT_SYMBOL_GPL(__page_file_mapping);
3622 
3623 pgoff_t __page_file_index(struct page *page)
3624 {
3625 	swp_entry_t swap = { .val = page_private(page) };
3626 	return swp_offset(swap);
3627 }
3628 EXPORT_SYMBOL_GPL(__page_file_index);
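/*
 * These helpers are reached through the page_file_mapping()/page_file_index()
 * wrappers in the core headers, which (roughly) do:
 *
 *	if (unlikely(PageSwapCache(page)))
 *		return __page_file_mapping(page);   // or __page_file_index()
 *	return page->mapping;                       // or page->index
 *
 * so that swap-over-filesystem I/O can treat swapcache pages like ordinary
 * file pages.
 */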
3629 
3630 /*
3631  * add_swap_count_continuation - called when a swap count is duplicated
3632  * beyond SWAP_MAP_MAX; it allocates a new page and links it to the entry's
3633  * page of the original vmalloc'ed swap_map, to hold the continuation count
3634  * (for that entry and for its neighbouring PAGE_SIZE swap entries).  Called
3635  * again when count is duplicated beyond SWAP_MAP_MAX * SWAP_CONT_MAX, etc.
3636  *
3637  * These continuation pages are seldom referenced: the common paths all work
3638  * on the original swap_map, only referring to a continuation page when the
3639  * low "digit" of a count is incremented or decremented through SWAP_MAP_MAX.
3640  *
3641  * add_swap_count_continuation(, GFP_ATOMIC) can be called while holding
3642  * page table locks; if it fails, add_swap_count_continuation(, GFP_KERNEL)
3643  * can be called after dropping locks.
3644  */
3645 int add_swap_count_continuation(swp_entry_t entry, gfp_t gfp_mask)
3646 {
3647 	struct swap_info_struct *si;
3648 	struct swap_cluster_info *ci;
3649 	struct page *head;
3650 	struct page *page;
3651 	struct page *list_page;
3652 	pgoff_t offset;
3653 	unsigned char count;
3654 	int ret = 0;
3655 
3656 	/*
3657 	 * When debugging, it's easier to use __GFP_ZERO here; but it's better
3658 	 * for latency not to zero a page while GFP_ATOMIC and holding locks.
3659 	 */
3660 	page = alloc_page(gfp_mask | __GFP_HIGHMEM);
3661 
3662 	si = get_swap_device(entry);
3663 	if (!si) {
3664 		/*
3665 		 * An acceptable race has occurred since the failing
3666		 * __swap_duplicate(): the swap device may have been turned off (swapoff)
3667 		 */
3668 		goto outer;
3669 	}
3670 	spin_lock(&si->lock);
3671 
3672 	offset = swp_offset(entry);
3673 
3674 	ci = lock_cluster(si, offset);
3675 
3676 	count = si->swap_map[offset] & ~SWAP_HAS_CACHE;
3677 
3678 	if ((count & ~COUNT_CONTINUED) != SWAP_MAP_MAX) {
3679 		/*
3680 		 * The higher the swap count, the more likely it is that tasks
3681 		 * will race to add swap count continuation: we need to avoid
3682 		 * over-provisioning.
3683 		 */
3684 		goto out;
3685 	}
3686 
3687 	if (!page) {
3688 		ret = -ENOMEM;
3689 		goto out;
3690 	}
3691 
3692 	/*
3693 	 * We are fortunate that although vmalloc_to_page uses pte_offset_map,
3694 	 * no architecture is using highmem pages for kernel page tables: so it
3695 	 * will not corrupt the GFP_ATOMIC caller's atomic page table kmaps.
3696 	 */
3697 	head = vmalloc_to_page(si->swap_map + offset);
3698 	offset &= ~PAGE_MASK;
3699 
3700 	spin_lock(&si->cont_lock);
3701 	/*
3702 	 * Page allocation does not initialize the page's lru field,
3703 	 * but it does always reset its private field.
3704 	 */
3705 	if (!page_private(head)) {
3706 		BUG_ON(count & COUNT_CONTINUED);
3707 		INIT_LIST_HEAD(&head->lru);
3708 		set_page_private(head, SWP_CONTINUED);
3709 		si->flags |= SWP_CONTINUED;
3710 	}
3711 
3712 	list_for_each_entry(list_page, &head->lru, lru) {
3713 		unsigned char *map;
3714 
3715 		/*
3716 		 * If the previous map said no continuation, but we've found
3717 		 * a continuation page, free our allocation and use this one.
3718 		 */
3719 		if (!(count & COUNT_CONTINUED))
3720 			goto out_unlock_cont;
3721 
3722 		map = kmap_atomic(list_page) + offset;
3723 		count = *map;
3724 		kunmap_atomic(map);
3725 
3726 		/*
3727 		 * If this continuation count now has some space in it,
3728 		 * free our allocation and use this one.
3729 		 */
3730 		if ((count & ~COUNT_CONTINUED) != SWAP_CONT_MAX)
3731 			goto out_unlock_cont;
3732 	}
3733 
3734 	list_add_tail(&page->lru, &head->lru);
3735 	page = NULL;			/* now it's attached, don't free it */
3736 out_unlock_cont:
3737 	spin_unlock(&si->cont_lock);
3738 out:
3739 	unlock_cluster(ci);
3740 	spin_unlock(&si->lock);
3741 	put_swap_device(si);
3742 outer:
3743 	if (page)
3744 		__free_page(page);
3745 	return ret;
3746 }
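/*
 * Layout sketch: one vmalloc'ed swap_map page covers PAGE_SIZE one-byte
 * entries.  Its continuation pages hang off head->lru, and each of them
 * carries one extra count byte per entry, at the same offset within the
 * page:
 *
 *	swap_map page (head)        continuation 1       continuation 2
 *	[ count | flags ] byte  ->  [ extra digit ]  ->  [ extra digit ] ...
 *	       ^offset                   ^offset              ^offset
 */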
3747 
3748 /*
3749  * swap_count_continued - when the original swap_map count is incremented
3750  * from SWAP_MAP_MAX, check if there is already a continuation page to carry
3751  * into, carry if so, or else fail until a new continuation page is allocated;
3752  * when the original swap_map count is decremented from 0 with continuation,
3753  * borrow from the continuation and report whether it still holds more.
3754  * Called while __swap_duplicate() or swap_entry_free() holds swap or cluster
3755  * lock.
3756  */
3757 static bool swap_count_continued(struct swap_info_struct *si,
3758 				 pgoff_t offset, unsigned char count)
3759 {
3760 	struct page *head;
3761 	struct page *page;
3762 	unsigned char *map;
3763 	bool ret;
3764 
3765 	head = vmalloc_to_page(si->swap_map + offset);
3766 	if (page_private(head) != SWP_CONTINUED) {
3767 		BUG_ON(count & COUNT_CONTINUED);
3768 		return false;		/* need to add count continuation */
3769 	}
3770 
3771 	spin_lock(&si->cont_lock);
3772 	offset &= ~PAGE_MASK;
3773 	page = list_next_entry(head, lru);
3774 	map = kmap_atomic(page) + offset;
3775 
3776 	if (count == SWAP_MAP_MAX)	/* initial increment from swap_map */
3777 		goto init_map;		/* jump over SWAP_CONT_MAX checks */
3778 
3779 	if (count == (SWAP_MAP_MAX | COUNT_CONTINUED)) { /* incrementing */
3780 		/*
3781 		 * Think of how you add 1 to 999
3782 		 */
3783 		while (*map == (SWAP_CONT_MAX | COUNT_CONTINUED)) {
3784 			kunmap_atomic(map);
3785 			page = list_next_entry(page, lru);
3786 			BUG_ON(page == head);
3787 			map = kmap_atomic(page) + offset;
3788 		}
3789 		if (*map == SWAP_CONT_MAX) {
3790 			kunmap_atomic(map);
3791 			page = list_next_entry(page, lru);
3792 			if (page == head) {
3793 				ret = false;	/* add count continuation */
3794 				goto out;
3795 			}
3796 			map = kmap_atomic(page) + offset;
3797 init_map:		*map = 0;		/* we didn't zero the page */
3798 		}
3799 		*map += 1;
3800 		kunmap_atomic(map);
3801 		while ((page = list_prev_entry(page, lru)) != head) {
3802 			map = kmap_atomic(page) + offset;
3803 			*map = COUNT_CONTINUED;
3804 			kunmap_atomic(map);
3805 		}
3806 		ret = true;			/* incremented */
3807 
3808 	} else {				/* decrementing */
3809 		/*
3810 		 * Think of how you subtract 1 from 1000
3811 		 */
3812 		BUG_ON(count != COUNT_CONTINUED);
3813 		while (*map == COUNT_CONTINUED) {
3814 			kunmap_atomic(map);
3815 			page = list_next_entry(page, lru);
3816 			BUG_ON(page == head);
3817 			map = kmap_atomic(page) + offset;
3818 		}
3819 		BUG_ON(*map == 0);
3820 		*map -= 1;
3821 		if (*map == 0)
3822 			count = 0;
3823 		kunmap_atomic(map);
3824 		while ((page = list_prev_entry(page, lru)) != head) {
3825 			map = kmap_atomic(page) + offset;
3826 			*map = SWAP_CONT_MAX | count;
3827 			count = COUNT_CONTINUED;
3828 			kunmap_atomic(map);
3829 		}
3830 		ret = count == COUNT_CONTINUED;
3831 	}
3832 out:
3833 	spin_unlock(&si->cont_lock);
3834 	return ret;
3835 }
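/*
 * Worked example of the carry/borrow above (a rough sketch using the
 * constants from <linux/swap.h>): once the base swap_map byte has counted
 * up to SWAP_MAP_MAX (0x3e), the next duplicate carries one unit into the
 * first continuation byte (init_map sets it to 1) and the base count
 * restarts at 0 with COUNT_CONTINUED set.  When a continuation byte itself
 * reaches SWAP_CONT_MAX (0x7f), the carry moves on to the next continuation
 * page on head->lru, much like adding 1 to 999 ripples through the digits.
 */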
3836 
3837 /*
3838  * free_swap_count_continuations - called by swapoff to free all the continuation
3839  * pages appended to the swap_map, after the swap_map is quiesced and before vfree'ing it.
3840  */
3841 static void free_swap_count_continuations(struct swap_info_struct *si)
3842 {
3843 	pgoff_t offset;
3844 
3845 	for (offset = 0; offset < si->max; offset += PAGE_SIZE) {
3846 		struct page *head;
3847 		head = vmalloc_to_page(si->swap_map + offset);
3848 		if (page_private(head)) {
3849 			struct page *page, *next;
3850 
3851 			list_for_each_entry_safe(page, next, &head->lru, lru) {
3852 				list_del(&page->lru);
3853 				__free_page(page);
3854 			}
3855 		}
3856 	}
3857 }
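/*
 * Note on the loop stride: swap_map entries are one byte each, so stepping
 * "offset" by PAGE_SIZE visits each vmalloc'ed swap_map page exactly once;
 * any continuation pages for that page are threaded on head->lru.
 */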
3858 
3859 #if defined(CONFIG_MEMCG) && defined(CONFIG_BLK_CGROUP)
3860 void __cgroup_throttle_swaprate(struct page *page, gfp_t gfp_mask)
3861 {
3862 	struct swap_info_struct *si, *next;
3863 	int nid = page_to_nid(page);
3864 	bool skip = false;
3865 
3866 	if (!(gfp_mask & __GFP_IO))
3867 		return;
3868 
3869 	if (!blk_cgroup_congested())
3870 		return;
3871 
3872 	/*
3873 	 * We've already scheduled a throttle, avoid taking the global swap
3874 	 * lock.
3875 	 */
3876 	if (current->throttle_queue)
3877 		return;
3878 
3879 	trace_android_vh___cgroup_throttle_swaprate(nid, &skip);
3880 	if (skip)
3881 		return;
3882 
3883 	spin_lock(&swap_avail_lock);
3884 	plist_for_each_entry_safe(si, next, &swap_avail_heads[nid],
3885 				  avail_lists[nid]) {
3886 		if (si->bdev) {
3887 			blkcg_schedule_throttle(bdev_get_queue(si->bdev), true);
3888 			break;
3889 		}
3890 	}
3891 	spin_unlock(&swap_avail_lock);
3892 }
3893 #endif
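/*
 * Note: the per-node avail list above is ordered by priority, so the first
 * entry with a bdev is (presumably) the device most likely to receive this
 * task's next swapout, which is why its request queue is the one throttled.
 */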
3894 
3895 static int __init swapfile_init(void)
3896 {
3897 	int nid;
3898 
3899 	swap_avail_heads = kmalloc_array(nr_node_ids, sizeof(struct plist_head),
3900 					 GFP_KERNEL);
3901 	if (!swap_avail_heads) {
3902 		pr_emerg("Not enough memory for swap heads, swap is disabled\n");
3903 		return -ENOMEM;
3904 	}
3905 
3906 	for_each_node(nid)
3907 		plist_head_init(&swap_avail_heads[nid]);
3908 
3909 	return 0;
3910 }
3911 subsys_initcall(swapfile_init);
3912