Lines matching full:si — every reference to the identifier si, a struct swap_info_struct pointer, in the kernel's mm/swapfile.c. Each hit shows the source line number, the matching line, and the enclosing function; the trailing argument/local tags mark the lines where si is declared.
134 static int __try_to_reclaim_swap(struct swap_info_struct *si, in __try_to_reclaim_swap() argument
137 swp_entry_t entry = swp_entry(si->type, offset); in __try_to_reclaim_swap()
178 static int discard_swap(struct swap_info_struct *si) in discard_swap() argument
186 se = first_se(si); in discard_swap()
190 err = blkdev_issue_discard(si->bdev, start_block, in discard_swap()
201 err = blkdev_issue_discard(si->bdev, start_block, in discard_swap()
248 static void discard_swap_cluster(struct swap_info_struct *si, in discard_swap_cluster() argument
251 struct swap_extent *se = offset_to_swap_extent(si, start_page); in discard_swap_cluster()
265 if (blkdev_issue_discard(si->bdev, start_block, in discard_swap_cluster()
358 static inline struct swap_cluster_info *lock_cluster(struct swap_info_struct *si, in lock_cluster() argument
363 ci = si->cluster_info; in lock_cluster()
382 struct swap_info_struct *si, unsigned long offset) in lock_cluster_or_swap_info() argument
387 ci = lock_cluster(si, offset); in lock_cluster_or_swap_info()
390 spin_lock(&si->lock); in lock_cluster_or_swap_info()
395 static inline void unlock_cluster_or_swap_info(struct swap_info_struct *si, in unlock_cluster_or_swap_info() argument
401 spin_unlock(&si->lock); in unlock_cluster_or_swap_info()
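The lock_cluster()/lock_cluster_or_swap_info() fragments above show the file's two-tier locking: when a cluster_info array exists (the SSD layout), only the cluster covering the offset is locked; otherwise everything falls back to the device-wide si->lock. A minimal userspace model of that choice, with pthread mutexes and simplified types standing in for the kernel's spinlocks and real structures:

#include <pthread.h>
#include <stddef.h>

#define SWAPFILE_CLUSTER 256    /* slots per cluster, as in the kernel */

struct swap_cluster_info { pthread_mutex_t lock; };

struct swap_info_struct {
        pthread_mutex_t lock;                   /* device-wide fallback lock */
        struct swap_cluster_info *cluster_info; /* NULL on rotational media */
};

static struct swap_cluster_info *
lock_cluster_or_swap_info(struct swap_info_struct *si, unsigned long offset)
{
        if (si->cluster_info) {
                struct swap_cluster_info *ci =
                        &si->cluster_info[offset / SWAPFILE_CLUSTER];
                pthread_mutex_lock(&ci->lock);  /* fine-grained: one cluster */
                return ci;
        }
        pthread_mutex_lock(&si->lock);          /* coarse: whole device */
        return NULL;
}

static void
unlock_cluster_or_swap_info(struct swap_info_struct *si,
                            struct swap_cluster_info *ci)
{
        if (ci)
                pthread_mutex_unlock(&ci->lock);
        else
                pthread_mutex_unlock(&si->lock);
}

The returned cluster pointer doubles as the unlock token: NULL means "the device lock was taken instead", exactly the shape the unlock fragment above tests.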
460 static void swap_cluster_schedule_discard(struct swap_info_struct *si, in swap_cluster_schedule_discard() argument
465 * si->swap_map directly. To make sure the discarding cluster isn't in swap_cluster_schedule_discard()
469 memset(si->swap_map + idx * SWAPFILE_CLUSTER, in swap_cluster_schedule_discard()
472 cluster_list_add_tail(&si->discard_clusters, si->cluster_info, idx); in swap_cluster_schedule_discard()
474 schedule_work(&si->discard_work); in swap_cluster_schedule_discard()
477 static void __free_cluster(struct swap_info_struct *si, unsigned long idx) in __free_cluster() argument
479 struct swap_cluster_info *ci = si->cluster_info; in __free_cluster()
482 cluster_list_add_tail(&si->free_clusters, ci, idx); in __free_cluster()
487 * will be added to free cluster list. caller should hold si->lock.
489 static void swap_do_scheduled_discard(struct swap_info_struct *si) in swap_do_scheduled_discard() argument
494 info = si->cluster_info; in swap_do_scheduled_discard()
496 while (!cluster_list_empty(&si->discard_clusters)) { in swap_do_scheduled_discard()
497 idx = cluster_list_del_first(&si->discard_clusters, info); in swap_do_scheduled_discard()
498 spin_unlock(&si->lock); in swap_do_scheduled_discard()
500 discard_swap_cluster(si, idx * SWAPFILE_CLUSTER, in swap_do_scheduled_discard()
503 spin_lock(&si->lock); in swap_do_scheduled_discard()
504 ci = lock_cluster(si, idx * SWAPFILE_CLUSTER); in swap_do_scheduled_discard()
505 __free_cluster(si, idx); in swap_do_scheduled_discard()
506 memset(si->swap_map + idx * SWAPFILE_CLUSTER, in swap_do_scheduled_discard()
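The swap_do_scheduled_discard() fragments show a classic kernel pattern: si->lock cannot be held across blkdev_issue_discard(), which may block, so each iteration pops a cluster, drops the lock for the I/O, then retakes it before freeing the cluster. A hedged userspace skeleton of that dance (stub helpers and illustrative types, not the kernel's):

#include <pthread.h>
#include <stdbool.h>

struct swap_dev { pthread_mutex_t lock; };

/* Stubs standing in for cluster_list_del_first(), discard_swap_cluster()
 * and __free_cluster(); the real implementations live in swapfile.c. */
static bool pop_discard_cluster(struct swap_dev *si, unsigned long *idx)
{ (void)si; (void)idx; return false; }
static void issue_discard(struct swap_dev *si, unsigned long idx)
{ (void)si; (void)idx; /* may sleep */ }
static void free_cluster_locked(struct swap_dev *si, unsigned long idx)
{ (void)si; (void)idx; }

/* Caller must hold si->lock, as the kernel comment above requires. */
static void do_scheduled_discard(struct swap_dev *si)
{
        unsigned long idx;

        while (pop_discard_cluster(si, &idx)) {
                pthread_mutex_unlock(&si->lock);        /* drop for blocking I/O */
                issue_discard(si, idx);
                pthread_mutex_lock(&si->lock);          /* retake before freeing */
                free_cluster_locked(si, idx);
        }
}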
514 struct swap_info_struct *si; in swap_discard_work() local
516 si = container_of(work, struct swap_info_struct, discard_work); in swap_discard_work()
518 spin_lock(&si->lock); in swap_discard_work()
519 swap_do_scheduled_discard(si); in swap_discard_work()
520 spin_unlock(&si->lock); in swap_discard_work()
525 struct swap_info_struct *si; in swap_users_ref_free() local
527 si = container_of(ref, struct swap_info_struct, users); in swap_users_ref_free()
528 complete(&si->comp); in swap_users_ref_free()
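Both swap_discard_work() and swap_users_ref_free() recover their swap_info_struct from a pointer to a member embedded inside it, via container_of(). A self-contained illustration of that idiom, with userspace stand-ins for the kernel types:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct work_struct { int pending; };

struct swap_info_struct {
        int type;                       /* swap device index */
        struct work_struct discard_work;
};

static void swap_discard_work(struct work_struct *work)
{
        /* Recover the enclosing swap_info_struct from the embedded member. */
        struct swap_info_struct *si =
                container_of(work, struct swap_info_struct, discard_work);

        printf("discard work for swap device %d\n", si->type);
}

int main(void)
{
        struct swap_info_struct s = { .type = 1 };

        swap_discard_work(&s.discard_work);     /* recovers &s */
        return 0;
}

The same recovery works in swap_users_ref_free(), only with the users percpu_ref as the embedded member.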
531 static void alloc_cluster(struct swap_info_struct *si, unsigned long idx) in alloc_cluster() argument
533 struct swap_cluster_info *ci = si->cluster_info; in alloc_cluster()
535 VM_BUG_ON(cluster_list_first(&si->free_clusters) != idx); in alloc_cluster()
536 cluster_list_del_first(&si->free_clusters, ci); in alloc_cluster()
540 static void free_cluster(struct swap_info_struct *si, unsigned long idx) in free_cluster() argument
542 struct swap_cluster_info *ci = si->cluster_info + idx; in free_cluster()
550 if ((si->flags & (SWP_WRITEOK | SWP_PAGE_DISCARD)) == in free_cluster()
552 swap_cluster_schedule_discard(si, idx); in free_cluster()
556 __free_cluster(si, idx); in free_cluster()
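free_cluster()'s test in this group encodes the discard policy: a cluster is routed through the async discard worker only when the device is both writable (SWP_WRITEOK) and configured for per-cluster discard (SWP_PAGE_DISCARD); otherwise it goes straight back on the free list. The check, isolated (bit positions here are illustrative, not the kernel's):

#include <stdbool.h>

#define SWP_WRITEOK      (1UL << 0)     /* illustrative bit positions */
#define SWP_PAGE_DISCARD (1UL << 1)

static bool should_schedule_discard(unsigned long flags)
{
        /* Both bits must be set; testing (flags & mask) != 0 alone would
         * also fire when only one of the two is present. */
        return (flags & (SWP_WRITEOK | SWP_PAGE_DISCARD)) ==
               (SWP_WRITEOK | SWP_PAGE_DISCARD);
}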
604 scan_swap_map_ssd_cluster_conflict(struct swap_info_struct *si, in scan_swap_map_ssd_cluster_conflict() argument
611 conflict = !cluster_list_empty(&si->free_clusters) && in scan_swap_map_ssd_cluster_conflict()
612 offset != cluster_list_first(&si->free_clusters) && in scan_swap_map_ssd_cluster_conflict()
613 cluster_is_free(&si->cluster_info[offset]); in scan_swap_map_ssd_cluster_conflict()
618 percpu_cluster = this_cpu_ptr(si->percpu_cluster); in scan_swap_map_ssd_cluster_conflict()
627 static bool scan_swap_map_try_ssd_cluster(struct swap_info_struct *si, in scan_swap_map_try_ssd_cluster() argument
635 cluster = this_cpu_ptr(si->percpu_cluster); in scan_swap_map_try_ssd_cluster()
637 if (!cluster_list_empty(&si->free_clusters)) { in scan_swap_map_try_ssd_cluster()
638 cluster->index = si->free_clusters.head; in scan_swap_map_try_ssd_cluster()
641 } else if (!cluster_list_empty(&si->discard_clusters)) { in scan_swap_map_try_ssd_cluster()
645 * reread cluster_next_cpu since we dropped si->lock in scan_swap_map_try_ssd_cluster()
647 swap_do_scheduled_discard(si); in scan_swap_map_try_ssd_cluster()
648 *scan_base = this_cpu_read(*si->cluster_next_cpu); in scan_swap_map_try_ssd_cluster()
660 max = min_t(unsigned long, si->max, in scan_swap_map_try_ssd_cluster()
663 ci = lock_cluster(si, tmp); in scan_swap_map_try_ssd_cluster()
665 if (!si->swap_map[tmp]) in scan_swap_map_try_ssd_cluster()
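scan_swap_map_try_ssd_cluster() works from a per-CPU cluster cache: each CPU hands out consecutive slots from a cluster it owns and refills from the global free list only when that cluster is exhausted (or, as the fragments show, after kicking off pending discards). A much-simplified userspace model of the fast path, with a lock-protected stack standing in for si->free_clusters:

#include <pthread.h>
#include <stdbool.h>

#define SWAPFILE_CLUSTER 256

struct percpu_cluster {
        unsigned long index;    /* cluster this CPU currently owns */
        unsigned long next;     /* next slot to hand out inside it */
        bool valid;
};

/* Lock-protected stack of free cluster indices; stands in for
 * si->free_clusters + si->lock. Populate free_stack before use. */
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned long free_stack[1024];
static int free_top;

static bool refill(struct percpu_cluster *pc)
{
        bool ok = false;

        pthread_mutex_lock(&lock);
        if (free_top > 0) {
                pc->index = free_stack[--free_top];
                pc->next = pc->index * SWAPFILE_CLUSTER;
                pc->valid = true;
                ok = true;
        }
        pthread_mutex_unlock(&lock);
        return ok;
}

/* Returns a slot number, or -1 when no free cluster remains. */
static long alloc_slot(struct percpu_cluster *pc)
{
        if (!pc->valid && !refill(pc))
                return -1;
        if (pc->next == (pc->index + 1) * SWAPFILE_CLUSTER && !refill(pc))
                return -1;      /* cluster exhausted, nothing to refill from */
        return (long)pc->next++;
}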
697 static void swap_range_alloc(struct swap_info_struct *si, unsigned long offset, in swap_range_alloc() argument
702 if (offset == si->lowest_bit) in swap_range_alloc()
703 si->lowest_bit += nr_entries; in swap_range_alloc()
704 if (end == si->highest_bit) in swap_range_alloc()
705 WRITE_ONCE(si->highest_bit, si->highest_bit - nr_entries); in swap_range_alloc()
706 WRITE_ONCE(si->inuse_pages, si->inuse_pages + nr_entries); in swap_range_alloc()
707 if (si->inuse_pages == si->pages) { in swap_range_alloc()
708 si->lowest_bit = si->max; in swap_range_alloc()
709 si->highest_bit = 0; in swap_range_alloc()
710 del_from_avail_list(si); in swap_range_alloc()
724 static void swap_range_free(struct swap_info_struct *si, unsigned long offset, in swap_range_free() argument
731 if (offset < si->lowest_bit) in swap_range_free()
732 si->lowest_bit = offset; in swap_range_free()
733 if (end > si->highest_bit) { in swap_range_free()
734 bool was_full = !si->highest_bit; in swap_range_free()
736 WRITE_ONCE(si->highest_bit, end); in swap_range_free()
737 if (was_full && (si->flags & SWP_WRITEOK)) in swap_range_free()
738 add_to_avail_list(si); in swap_range_free()
741 WRITE_ONCE(si->inuse_pages, si->inuse_pages - nr_entries); in swap_range_free()
742 if (si->flags & SWP_BLKDEV) in swap_range_free()
744 si->bdev->bd_disk->fops->swap_slot_free_notify; in swap_range_free()
748 arch_swap_invalidate_page(si->type, offset); in swap_range_free()
749 zswap_invalidate(si->type, offset); in swap_range_free()
751 swap_slot_free_notify(si->bdev, offset); in swap_range_free()
754 clear_shadow_from_swap_cache(si->type, begin, end); in swap_range_free()
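swap_range_alloc() and swap_range_free() maintain the [lowest_bit, highest_bit] window that brackets where free slots may still exist; a completely full device is marked by inverting the window (lowest_bit = max, highest_bit = 0), which is also what the avail-list add/remove decisions key off. A hedged single-threaded model of just that bookkeeping, with no locking, notifiers, or avail lists:

#include <stdbool.h>

struct swap_window {
        unsigned long lowest_bit, highest_bit;  /* free-slot search window */
        unsigned long max, pages, inuse_pages;
};

static void range_alloc(struct swap_window *si,
                        unsigned long offset, unsigned int nr)
{
        unsigned long end = offset + nr - 1;

        if (offset == si->lowest_bit)
                si->lowest_bit += nr;
        if (end == si->highest_bit)
                si->highest_bit -= nr;
        si->inuse_pages += nr;
        if (si->inuse_pages == si->pages) {
                si->lowest_bit = si->max;       /* inverted window: full */
                si->highest_bit = 0;            /* kernel also leaves avail list */
        }
}

static void range_free(struct swap_window *si,
                       unsigned long offset, unsigned int nr)
{
        unsigned long end = offset + nr - 1;

        if (offset < si->lowest_bit)
                si->lowest_bit = offset;
        if (end > si->highest_bit) {
                bool was_full = !si->highest_bit;
                si->highest_bit = end;
                if (was_full) {
                        /* kernel: add_to_avail_list(si) if SWP_WRITEOK */
                }
        }
        si->inuse_pages -= nr;
}

The inverted window makes "device full" testable without a separate flag: !highest_bit is exactly the was_full check in the free fragment above.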
757 static void set_cluster_next(struct swap_info_struct *si, unsigned long next) in set_cluster_next() argument
761 if (!(si->flags & SWP_SOLIDSTATE)) { in set_cluster_next()
762 si->cluster_next = next; in set_cluster_next()
766 prev = this_cpu_read(*si->cluster_next_cpu); in set_cluster_next()
775 if (si->highest_bit <= si->lowest_bit) in set_cluster_next()
777 next = get_random_u32_inclusive(si->lowest_bit, si->highest_bit); in set_cluster_next()
779 next = max_t(unsigned int, next, si->lowest_bit); in set_cluster_next()
781 this_cpu_write(*si->cluster_next_cpu, next); in set_cluster_next()
784 static bool swap_offset_available_and_locked(struct swap_info_struct *si, in swap_offset_available_and_locked() argument
787 if (data_race(!si->swap_map[offset])) { in swap_offset_available_and_locked()
788 spin_lock(&si->lock); in swap_offset_available_and_locked()
792 if (vm_swap_full() && READ_ONCE(si->swap_map[offset]) == SWAP_HAS_CACHE) { in swap_offset_available_and_locked()
793 spin_lock(&si->lock); in swap_offset_available_and_locked()
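swap_offset_available_and_locked() peeks at the slot count without holding si->lock (hence the data_race()/READ_ONCE() annotations in the fragments) and only takes the lock once the slot looks usable; because the unlocked read can be stale, the caller must recheck under the lock. A hedged C11-atomics analogue of that optimistic check:

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>

#define SWAP_HAS_CACHE 0x40     /* illustrative value, not the kernel's */

struct swap_dev {
        pthread_mutex_t lock;
        _Atomic unsigned char *swap_map;
};

static bool vm_swap_full(void) { return false; }        /* stub */

/* Returns true with si->lock held; the unlocked peek may be stale, so the
 * caller must recheck the slot under the lock before using it. */
static bool offset_available_and_locked(struct swap_dev *si,
                                        unsigned long offset)
{
        unsigned char c = atomic_load_explicit(&si->swap_map[offset],
                                               memory_order_relaxed);

        if (!c || (vm_swap_full() && c == SWAP_HAS_CACHE)) {
                pthread_mutex_lock(&si->lock);
                return true;
        }
        return false;
}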
800 static int scan_swap_map_slots(struct swap_info_struct *si, in scan_swap_map_slots() argument
823 si->flags += SWP_SCANNING; in scan_swap_map_slots()
829 if (si->flags & SWP_SOLIDSTATE) in scan_swap_map_slots()
830 scan_base = this_cpu_read(*si->cluster_next_cpu); in scan_swap_map_slots()
832 scan_base = si->cluster_next; in scan_swap_map_slots()
836 if (si->cluster_info) { in scan_swap_map_slots()
837 if (!scan_swap_map_try_ssd_cluster(si, &offset, &scan_base)) in scan_swap_map_slots()
839 } else if (unlikely(!si->cluster_nr--)) { in scan_swap_map_slots()
840 if (si->pages - si->inuse_pages < SWAPFILE_CLUSTER) { in scan_swap_map_slots()
841 si->cluster_nr = SWAPFILE_CLUSTER - 1; in scan_swap_map_slots()
845 spin_unlock(&si->lock); in scan_swap_map_slots()
850 * If seek is cheap, that is the SWP_SOLIDSTATE si->cluster_info in scan_swap_map_slots()
853 scan_base = offset = si->lowest_bit; in scan_swap_map_slots()
857 for (; last_in_cluster <= si->highest_bit; offset++) { in scan_swap_map_slots()
858 if (si->swap_map[offset]) in scan_swap_map_slots()
861 spin_lock(&si->lock); in scan_swap_map_slots()
863 si->cluster_next = offset; in scan_swap_map_slots()
864 si->cluster_nr = SWAPFILE_CLUSTER - 1; in scan_swap_map_slots()
874 spin_lock(&si->lock); in scan_swap_map_slots()
875 si->cluster_nr = SWAPFILE_CLUSTER - 1; in scan_swap_map_slots()
879 if (si->cluster_info) { in scan_swap_map_slots()
880 while (scan_swap_map_ssd_cluster_conflict(si, offset)) { in scan_swap_map_slots()
884 if (!scan_swap_map_try_ssd_cluster(si, &offset, in scan_swap_map_slots()
889 if (!(si->flags & SWP_WRITEOK)) in scan_swap_map_slots()
891 if (!si->highest_bit) in scan_swap_map_slots()
893 if (offset > si->highest_bit) in scan_swap_map_slots()
894 scan_base = offset = si->lowest_bit; in scan_swap_map_slots()
896 ci = lock_cluster(si, offset); in scan_swap_map_slots()
898 if (vm_swap_full() && si->swap_map[offset] == SWAP_HAS_CACHE) { in scan_swap_map_slots()
901 spin_unlock(&si->lock); in scan_swap_map_slots()
902 swap_was_freed = __try_to_reclaim_swap(si, offset, TTRS_ANYWAY); in scan_swap_map_slots()
903 spin_lock(&si->lock); in scan_swap_map_slots()
910 if (si->swap_map[offset]) { in scan_swap_map_slots()
917 WRITE_ONCE(si->swap_map[offset], usage); in scan_swap_map_slots()
918 inc_cluster_info_page(si, si->cluster_info, offset); in scan_swap_map_slots()
921 swap_range_alloc(si, offset, 1); in scan_swap_map_slots()
922 slots[n_ret++] = swp_entry(si->type, offset); in scan_swap_map_slots()
925 if ((n_ret == nr) || (offset >= si->highest_bit)) in scan_swap_map_slots()
934 spin_unlock(&si->lock); in scan_swap_map_slots()
936 spin_lock(&si->lock); in scan_swap_map_slots()
941 if (si->cluster_info) { in scan_swap_map_slots()
942 if (scan_swap_map_try_ssd_cluster(si, &offset, &scan_base)) in scan_swap_map_slots()
944 } else if (si->cluster_nr && !si->swap_map[++offset]) { in scan_swap_map_slots()
946 --si->cluster_nr; in scan_swap_map_slots()
961 scan_limit = si->highest_bit; in scan_swap_map_slots()
964 if (!si->swap_map[offset]) in scan_swap_map_slots()
970 set_cluster_next(si, offset + 1); in scan_swap_map_slots()
971 si->flags -= SWP_SCANNING; in scan_swap_map_slots()
975 spin_unlock(&si->lock); in scan_swap_map_slots()
976 while (++offset <= READ_ONCE(si->highest_bit)) { in scan_swap_map_slots()
982 if (swap_offset_available_and_locked(si, offset)) in scan_swap_map_slots()
985 offset = si->lowest_bit; in scan_swap_map_slots()
992 if (swap_offset_available_and_locked(si, offset)) in scan_swap_map_slots()
996 spin_lock(&si->lock); in scan_swap_map_slots()
999 si->flags -= SWP_SCANNING; in scan_swap_map_slots()
1003 static int swap_alloc_cluster(struct swap_info_struct *si, swp_entry_t *slot) in swap_alloc_cluster() argument
1018 if (cluster_list_empty(&si->free_clusters)) in swap_alloc_cluster()
1021 idx = cluster_list_first(&si->free_clusters); in swap_alloc_cluster()
1023 ci = lock_cluster(si, offset); in swap_alloc_cluster()
1024 alloc_cluster(si, idx); in swap_alloc_cluster()
1027 memset(si->swap_map + offset, SWAP_HAS_CACHE, SWAPFILE_CLUSTER); in swap_alloc_cluster()
1029 swap_range_alloc(si, offset, SWAPFILE_CLUSTER); in swap_alloc_cluster()
1030 *slot = swp_entry(si->type, offset); in swap_alloc_cluster()
1035 static void swap_free_cluster(struct swap_info_struct *si, unsigned long idx) in swap_free_cluster() argument
1040 ci = lock_cluster(si, offset); in swap_free_cluster()
1041 memset(si->swap_map + offset, 0, SWAPFILE_CLUSTER); in swap_free_cluster()
1043 free_cluster(si, idx); in swap_free_cluster()
1045 swap_range_free(si, offset, SWAPFILE_CLUSTER); in swap_free_cluster()
1051 struct swap_info_struct *si, *next; in get_swap_pages() local
1073 plist_for_each_entry_safe(si, next, &swap_avail_heads[node], avail_lists[node]) { in get_swap_pages()
1074 /* requeue si to after same-priority siblings */ in get_swap_pages()
1075 plist_requeue(&si->avail_lists[node], &swap_avail_heads[node]); in get_swap_pages()
1077 spin_lock(&si->lock); in get_swap_pages()
1078 if (!si->highest_bit || !(si->flags & SWP_WRITEOK)) { in get_swap_pages()
1080 if (plist_node_empty(&si->avail_lists[node])) { in get_swap_pages()
1081 spin_unlock(&si->lock); in get_swap_pages()
1084 WARN(!si->highest_bit, in get_swap_pages()
1086 si->type); in get_swap_pages()
1087 WARN(!(si->flags & SWP_WRITEOK), in get_swap_pages()
1089 si->type); in get_swap_pages()
1090 __del_from_avail_list(si); in get_swap_pages()
1091 spin_unlock(&si->lock); in get_swap_pages()
1095 if (si->flags & SWP_BLKDEV) in get_swap_pages()
1096 n_ret = swap_alloc_cluster(si, swp_entries); in get_swap_pages()
1098 n_ret = scan_swap_map_slots(si, SWAP_HAS_CACHE, in get_swap_pages()
1100 spin_unlock(&si->lock); in get_swap_pages()
1108 * if we got here, it's likely that si was almost full before, in get_swap_pages()
1109 * and since scan_swap_map_slots() can drop the si->lock, in get_swap_pages()
1111 * same si and it filled up before we could get one; or, the si in get_swap_pages()
1113 * si->lock. Since we dropped the swap_avail_lock, the in get_swap_pages()
1264 struct swap_info_struct *si; in get_swap_device() local
1269 si = swp_swap_info(entry); in get_swap_device()
1270 if (!si) in get_swap_device()
1272 if (!percpu_ref_tryget_live(&si->users)) in get_swap_device()
1275 * Guarantee the si->users are checked before accessing other in get_swap_device()
1283 if (offset >= si->max) in get_swap_device()
1286 return si; in get_swap_device()
1293 percpu_ref_put(&si->users); in get_swap_device()
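get_swap_device() pins the device through percpu_ref_tryget_live(&si->users) and immediately undoes the get when the offset is out of range; every successful call must later be paired with put_swap_device(). A hedged model of that lifetime rule, with a plain atomic counter standing in for the kernel's percpu_ref:

#include <stdatomic.h>
#include <stddef.h>

struct swap_info_struct {
        atomic_int users;       /* > 0: live; 0: dying, no new users */
        unsigned long max;      /* one past the last valid offset */
};

static struct swap_info_struct *
get_swap_device(struct swap_info_struct *si, unsigned long offset)
{
        int old = atomic_load(&si->users);

        do {                    /* tryget_live: fail once the count hit 0 */
                if (old == 0)
                        return NULL;
        } while (!atomic_compare_exchange_weak(&si->users, &old, old + 1));

        if (offset >= si->max) {        /* stale entry: undo the get */
                atomic_fetch_sub(&si->users, 1);
                return NULL;
        }
        return si;
}

static void put_swap_device(struct swap_info_struct *si)
{
        atomic_fetch_sub(&si->users, 1);
}

Callers then follow the usual pattern: si = get_swap_device(...); if (si) { ... touch si->swap_map safely ...; put_swap_device(si); }.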
1351 struct swap_info_struct *si; in put_swap_folio() local
1357 si = _swap_info_get(entry); in put_swap_folio()
1358 if (!si) in put_swap_folio()
1361 ci = lock_cluster_or_swap_info(si, offset); in put_swap_folio()
1364 map = si->swap_map + offset; in put_swap_folio()
1373 unlock_cluster_or_swap_info(si, ci); in put_swap_folio()
1374 spin_lock(&si->lock); in put_swap_folio()
1376 swap_free_cluster(si, idx); in put_swap_folio()
1377 spin_unlock(&si->lock); in put_swap_folio()
1382 if (!__swap_entry_free_locked(si, offset + i, SWAP_HAS_CACHE)) { in put_swap_folio()
1383 unlock_cluster_or_swap_info(si, ci); in put_swap_folio()
1387 lock_cluster_or_swap_info(si, offset); in put_swap_folio()
1390 unlock_cluster_or_swap_info(si, ci); in put_swap_folio()
1396 struct swap_info_struct *si; in split_swap_cluster() local
1400 si = _swap_info_get(entry); in split_swap_cluster()
1401 if (!si) in split_swap_cluster()
1403 ci = lock_cluster(si, offset); in split_swap_cluster()
1447 struct swap_info_struct *si = swp_swap_info(entry); in __swap_count() local
1450 return swap_count(si->swap_map[offset]); in __swap_count()
1458 int swap_swapcount(struct swap_info_struct *si, swp_entry_t entry) in swap_swapcount() argument
1464 ci = lock_cluster_or_swap_info(si, offset); in swap_swapcount()
1465 count = swap_count(si->swap_map[offset]); in swap_swapcount()
1466 unlock_cluster_or_swap_info(si, ci); in swap_swapcount()
1516 static bool swap_page_trans_huge_swapped(struct swap_info_struct *si, in swap_page_trans_huge_swapped() argument
1520 unsigned char *map = si->swap_map; in swap_page_trans_huge_swapped()
1526 ci = lock_cluster_or_swap_info(si, offset); in swap_page_trans_huge_swapped()
1539 unlock_cluster_or_swap_info(si, ci); in swap_page_trans_huge_swapped()
1546 struct swap_info_struct *si = _swap_info_get(entry); in folio_swapped() local
1548 if (!si) in folio_swapped()
1552 return swap_swapcount(si, entry) != 0; in folio_swapped()
1554 return swap_page_trans_huge_swapped(si, entry); in folio_swapped()
1633 struct swap_info_struct *si = swap_type_to_swap_info(type); in get_swap_page_of_type() local
1636 if (!si) in get_swap_page_of_type()
1640 spin_lock(&si->lock); in get_swap_page_of_type()
1641 if ((si->flags & SWP_WRITEOK) && scan_swap_map_slots(si, 1, 1, &entry)) in get_swap_page_of_type()
1643 spin_unlock(&si->lock); in get_swap_page_of_type()
1707 struct swap_info_struct *si = swap_type_to_swap_info(type); in swapdev_block() local
1710 if (!si || !(si->flags & SWP_WRITEOK)) in swapdev_block()
1712 se = offset_to_swap_extent(si, offset); in swapdev_block()
1848 struct swap_info_struct *si; in unuse_pte_range() local
1850 si = swap_info[type]; in unuse_pte_range()
1894 swp_count = READ_ONCE(si->swap_map[offset]); in unuse_pte_range()
2024 static unsigned int find_next_to_unuse(struct swap_info_struct *si, in find_next_to_unuse() argument
2036 for (i = prev + 1; i < si->max; i++) { in find_next_to_unuse()
2037 count = READ_ONCE(si->swap_map[i]); in find_next_to_unuse()
2044 if (i == si->max) in find_next_to_unuse()
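find_next_to_unuse() is a plain forward scan over the swap map for the next still-referenced slot after prev, wrapping its "nothing found" result to 0. A hedged model of just the loop (the kernel additionally uses READ_ONCE() and skips SWAP_MAP_BAD slots):

static unsigned int next_in_use(const unsigned char *map,
                                unsigned int max, unsigned int prev)
{
        unsigned int i;

        for (i = prev + 1; i < max; i++)
                if (map[i])     /* kernel also skips SWAP_MAP_BAD slots */
                        return i;
        return 0;               /* slot 0 is never allocated: 0 == "none" */
}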
2056 struct swap_info_struct *si = swap_info[type]; in try_to_unuse() local
2061 if (!READ_ONCE(si->inuse_pages)) in try_to_unuse()
2074 while (READ_ONCE(si->inuse_pages) && in try_to_unuse()
2102 while (READ_ONCE(si->inuse_pages) && in try_to_unuse()
2104 (i = find_next_to_unuse(si, i)) != 0) { in try_to_unuse()
2137 if (READ_ONCE(si->inuse_pages)) { in try_to_unuse()
2451 struct swap_info_struct *si = p; in SYSCALL_DEFINE1() local
2454 plist_for_each_entry_continue(si, &swap_active_head, list) { in SYSCALL_DEFINE1()
2455 si->prio++; in SYSCALL_DEFINE1()
2456 si->list.prio--; in SYSCALL_DEFINE1()
2458 if (si->avail_lists[nid].prio != 1) in SYSCALL_DEFINE1()
2459 si->avail_lists[nid].prio--; in SYSCALL_DEFINE1()
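The paired si->prio++ / si->list.prio-- updates in the swapoff fragments rely on plists sorting in ascending order: swap priority is stored negated in the plist node (list.prio == -si->prio), so the two fields must always move in opposite directions. A tiny demo of that invariant (field names simplified; the real key lives inside a plist_node):

#include <assert.h>

struct prio_pair {
        int prio;               /* swap priority, negative when auto-assigned */
        int list_prio;          /* plist key, kept equal to -prio */
};

static void bump_auto_priority(struct prio_pair *si)
{
        si->prio++;             /* e.g. -3 becomes -2 */
        si->list_prio--;        /* preserves list_prio == -prio */
}

int main(void)
{
        struct prio_pair si = { .prio = -3, .list_prio = 3 };

        bump_auto_priority(&si);
        assert(si.list_prio == -si.prio);
        return 0;
}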
2595 struct swap_info_struct *si; in swap_start() local
2604 for (type = 0; (si = swap_type_to_swap_info(type)); type++) { in swap_start()
2605 if (!(si->flags & SWP_USED) || !si->swap_map) in swap_start()
2608 return si; in swap_start()
2616 struct swap_info_struct *si = v; in swap_next() local
2622 type = si->type + 1; in swap_next()
2625 for (; (si = swap_type_to_swap_info(type)); type++) { in swap_next()
2626 if (!(si->flags & SWP_USED) || !si->swap_map) in swap_next()
2628 return si; in swap_next()
2641 struct swap_info_struct *si = v; in swap_show() local
2646 if (si == SEQ_START_TOKEN) { in swap_show()
2651 bytes = K(si->pages); in swap_show()
2652 inuse = K(READ_ONCE(si->inuse_pages)); in swap_show()
2654 file = si->swap_file; in swap_show()
2662 si->prio); in swap_show()
3259 struct swap_info_struct *si = swap_info[type]; in si_swapinfo() local
3261 if ((si->flags & SWP_USED) && !(si->flags & SWP_WRITEOK)) in si_swapinfo()
3262 nr_to_be_unused += READ_ONCE(si->inuse_pages); in si_swapinfo()
3278 struct swap_info_struct *si = swap_info[type]; in free_swap_is_low() local
3280 if ((si->flags & SWP_USED) && !(si->flags & SWP_WRITEOK)) in free_swap_is_low()
3281 nr_to_be_unused += si->inuse_pages; in free_swap_is_low()
3399 void swapcache_clear(struct swap_info_struct *si, swp_entry_t entry) in swapcache_clear() argument
3405 ci = lock_cluster_or_swap_info(si, offset); in swapcache_clear()
3406 usage = __swap_entry_free_locked(si, offset, SWAP_HAS_CACHE); in swapcache_clear()
3407 unlock_cluster_or_swap_info(si, ci); in swapcache_clear()
3456 struct swap_info_struct *si; in add_swap_count_continuation() local
3471 si = get_swap_device(entry); in add_swap_count_continuation()
3472 if (!si) { in add_swap_count_continuation()
3479 spin_lock(&si->lock); in add_swap_count_continuation()
3483 ci = lock_cluster(si, offset); in add_swap_count_continuation()
3485 count = swap_count(si->swap_map[offset]); in add_swap_count_continuation()
3501 head = vmalloc_to_page(si->swap_map + offset); in add_swap_count_continuation()
3504 spin_lock(&si->cont_lock); in add_swap_count_continuation()
3513 si->flags |= SWP_CONTINUED; in add_swap_count_continuation()
3541 spin_unlock(&si->cont_lock); in add_swap_count_continuation()
3544 spin_unlock(&si->lock); in add_swap_count_continuation()
3545 put_swap_device(si); in add_swap_count_continuation()
3561 static bool swap_count_continued(struct swap_info_struct *si, in swap_count_continued() argument
3569 head = vmalloc_to_page(si->swap_map + offset); in swap_count_continued()
3575 spin_lock(&si->cont_lock); in swap_count_continued()
3637 spin_unlock(&si->cont_lock); in swap_count_continued()
3645 static void free_swap_count_continuations(struct swap_info_struct *si) in free_swap_count_continuations() argument
3649 for (offset = 0; offset < si->max; offset += PAGE_SIZE) { in free_swap_count_continuations()
3651 head = vmalloc_to_page(si->swap_map + offset); in free_swap_count_continuations()
3666 struct swap_info_struct *si, *next; in __folio_throttle_swaprate() local
3683 plist_for_each_entry_safe(si, next, &swap_avail_heads[nid], in __folio_throttle_swaprate()
3685 if (si->bdev) { in __folio_throttle_swaprate()
3686 blkcg_schedule_throttle(si->bdev->bd_disk, true); in __folio_throttle_swaprate()