Lines matching refs:si in mm/swapfile.c (Linux kernel). Each entry gives the source line number, the matching line, and the enclosing function; "argument" and "local" mark the lines where si is declared.

96 __try_to_reclaim_swap(struct swap_info_struct *si, unsigned long offset)  in __try_to_reclaim_swap()  argument
98 swp_entry_t entry = swp_entry(si->type, offset); in __try_to_reclaim_swap()
124 static int discard_swap(struct swap_info_struct *si) in discard_swap() argument
132 se = &si->first_swap_extent; in discard_swap()
136 err = blkdev_issue_discard(si->bdev, start_block, in discard_swap()
143 list_for_each_entry(se, &si->first_swap_extent.list, list) { in discard_swap()
147 err = blkdev_issue_discard(si->bdev, start_block, in discard_swap()
161 static void discard_swap_cluster(struct swap_info_struct *si, in discard_swap_cluster() argument
164 struct swap_extent *se = si->curr_swap_extent; in discard_swap_cluster()
182 si->curr_swap_extent = se; in discard_swap_cluster()
186 if (blkdev_issue_discard(si->bdev, start_block, in discard_swap_cluster()
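discard_swap() walks every swap extent and hands each contiguous block range to blkdev_issue_discard(), while discard_swap_cluster() does the same for just the extents covering one cluster, caching its position in si->curr_swap_extent. A minimal userspace sketch of the extent walk, with a stub in place of blkdev_issue_discard() (struct extent, issue_discard() and discard_all() are illustrative names, not kernel API):

    #include <stdio.h>

    /* Illustrative model of a swap extent: a run of contiguous disk blocks. */
    struct extent {
        unsigned long start_block;
        unsigned long nr_blocks;
    };

    /* Stand-in for blkdev_issue_discard(); just reports the range. */
    static int issue_discard(unsigned long start, unsigned long nr)
    {
        printf("discard blocks %lu..%lu\n", start, start + nr - 1);
        return 0;
    }

    /* Walk every extent and discard it, stopping on the first error,
     * mirroring the loop structure of discard_swap(). */
    static int discard_all(const struct extent *ext, int n)
    {
        for (int i = 0; i < n; i++) {
            int err = issue_discard(ext[i].start_block, ext[i].nr_blocks);
            if (err)
                return err;
        }
        return 0;
    }

    int main(void)
    {
        struct extent map[] = { { 8, 1024 }, { 4096, 512 } };
        return discard_all(map, 2);
    }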
258 static void swap_cluster_schedule_discard(struct swap_info_struct *si, in swap_cluster_schedule_discard() argument
267 memset(si->swap_map + idx * SWAPFILE_CLUSTER, in swap_cluster_schedule_discard()
270 if (cluster_is_null(&si->discard_cluster_head)) { in swap_cluster_schedule_discard()
271 cluster_set_next_flag(&si->discard_cluster_head, in swap_cluster_schedule_discard()
273 cluster_set_next_flag(&si->discard_cluster_tail, in swap_cluster_schedule_discard()
276 unsigned int tail = cluster_next(&si->discard_cluster_tail); in swap_cluster_schedule_discard()
277 cluster_set_next(&si->cluster_info[tail], idx); in swap_cluster_schedule_discard()
278 cluster_set_next_flag(&si->discard_cluster_tail, in swap_cluster_schedule_discard()
282 schedule_work(&si->discard_work); in swap_cluster_schedule_discard()
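swap_cluster_schedule_discard() marks the cluster's swap_map slice as SWAP_MAP_BAD, appends the cluster to a discard list threaded through the cluster_info[] array itself (discard_cluster_head/tail hold cluster indices, and each element stores the index of the next), then kicks si->discard_work. A toy model of that intrusive index-linked queue, assuming NIL stands in for cluster_is_null() and plain fields for the cluster_set_next*() helpers:

    #include <stdio.h>

    #define NIL ((unsigned int)-1)   /* models cluster_is_null() */

    struct cluster {
        unsigned int next;           /* index of the next cluster in the list */
    };

    static struct cluster info[16];
    static unsigned int head = NIL, tail = NIL;

    /* Append cluster idx to the discard list, as in
     * swap_cluster_schedule_discard(): an empty list sets both head
     * and tail; otherwise the old tail is linked to idx. */
    static void schedule_discard(unsigned int idx)
    {
        if (head == NIL) {
            head = tail = idx;
        } else {
            info[tail].next = idx;
            tail = idx;
        }
        info[idx].next = NIL;
    }

    int main(void)
    {
        schedule_discard(3);
        schedule_discard(7);
        for (unsigned int i = head; i != NIL; i = info[i].next)
            printf("queued cluster %u\n", i);
        return 0;
    }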
289 static void swap_do_scheduled_discard(struct swap_info_struct *si) in swap_do_scheduled_discard() argument
294 info = si->cluster_info; in swap_do_scheduled_discard()
296 while (!cluster_is_null(&si->discard_cluster_head)) { in swap_do_scheduled_discard()
297 idx = cluster_next(&si->discard_cluster_head); in swap_do_scheduled_discard()
299 cluster_set_next_flag(&si->discard_cluster_head, in swap_do_scheduled_discard()
301 if (cluster_next(&si->discard_cluster_tail) == idx) { in swap_do_scheduled_discard()
302 cluster_set_null(&si->discard_cluster_head); in swap_do_scheduled_discard()
303 cluster_set_null(&si->discard_cluster_tail); in swap_do_scheduled_discard()
305 spin_unlock(&si->lock); in swap_do_scheduled_discard()
307 discard_swap_cluster(si, idx * SWAPFILE_CLUSTER, in swap_do_scheduled_discard()
310 spin_lock(&si->lock); in swap_do_scheduled_discard()
312 if (cluster_is_null(&si->free_cluster_head)) { in swap_do_scheduled_discard()
313 cluster_set_next_flag(&si->free_cluster_head, in swap_do_scheduled_discard()
315 cluster_set_next_flag(&si->free_cluster_tail, in swap_do_scheduled_discard()
320 tail = cluster_next(&si->free_cluster_tail); in swap_do_scheduled_discard()
322 cluster_set_next_flag(&si->free_cluster_tail, in swap_do_scheduled_discard()
325 memset(si->swap_map + idx * SWAPFILE_CLUSTER, in swap_do_scheduled_discard()
332 struct swap_info_struct *si; in swap_discard_work() local
334 si = container_of(work, struct swap_info_struct, discard_work); in swap_discard_work()
336 spin_lock(&si->lock); in swap_discard_work()
337 swap_do_scheduled_discard(si); in swap_discard_work()
338 spin_unlock(&si->lock); in swap_discard_work()
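swap_do_scheduled_discard() pops clusters off that list and, crucially, drops si->lock around the blocking discard_swap_cluster() call (lines 305-310) before retaking it to move the cluster onto the free list and rezero its swap_map slice; swap_discard_work() is just the workqueue wrapper that takes the lock and calls it. A sketch of the drop-the-lock-around-blocking-I/O shape with pthreads (the queue layout and names are the model's own, and the kernel drains FIFO where this pops LIFO):

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

    /* Stand-in for the blocking discard_swap_cluster() call. */
    static void do_blocking_discard(unsigned int idx)
    {
        printf("discarding cluster %u\n", idx);
    }

    /* Pop entries while holding the lock, but release it across the
     * blocking I/O, as swap_do_scheduled_discard() does. */
    static void drain_discard_queue(unsigned int *queue, int *len)
    {
        pthread_mutex_lock(&lock);
        while (*len > 0) {
            unsigned int idx = queue[--*len];

            pthread_mutex_unlock(&lock);   /* never sleep under the lock */
            do_blocking_discard(idx);
            pthread_mutex_lock(&lock);
            /* back under the lock: the kernel now marks the cluster free */
        }
        pthread_mutex_unlock(&lock);
    }

    int main(void)
    {
        unsigned int queue[] = { 3, 7 };
        int len = 2;

        drain_discard_queue(queue, &len);
        return 0;
    }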
414 scan_swap_map_ssd_cluster_conflict(struct swap_info_struct *si, in scan_swap_map_ssd_cluster_conflict() argument
421 conflict = !cluster_is_null(&si->free_cluster_head) && in scan_swap_map_ssd_cluster_conflict()
422 offset != cluster_next(&si->free_cluster_head) && in scan_swap_map_ssd_cluster_conflict()
423 cluster_is_free(&si->cluster_info[offset]); in scan_swap_map_ssd_cluster_conflict()
428 percpu_cluster = this_cpu_ptr(si->percpu_cluster); in scan_swap_map_ssd_cluster_conflict()
437 static void scan_swap_map_try_ssd_cluster(struct swap_info_struct *si, in scan_swap_map_try_ssd_cluster() argument
445 cluster = this_cpu_ptr(si->percpu_cluster); in scan_swap_map_try_ssd_cluster()
447 if (!cluster_is_null(&si->free_cluster_head)) { in scan_swap_map_try_ssd_cluster()
448 cluster->index = si->free_cluster_head; in scan_swap_map_try_ssd_cluster()
451 } else if (!cluster_is_null(&si->discard_cluster_head)) { in scan_swap_map_try_ssd_cluster()
456 swap_do_scheduled_discard(si); in scan_swap_map_try_ssd_cluster()
457 *scan_base = *offset = si->cluster_next; in scan_swap_map_try_ssd_cluster()
470 while (tmp < si->max && tmp < (cluster_next(&cluster->index) + 1) * in scan_swap_map_try_ssd_cluster()
472 if (!si->swap_map[tmp]) { in scan_swap_map_try_ssd_cluster()
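scan_swap_map_try_ssd_cluster() gives each CPU its own allocation cluster (si->percpu_cluster) and allocates sequentially inside it; the loop at lines 470-472 advances tmp through the current cluster until it finds a zero swap_map byte, falling back to the free or discard lists when the cluster is spent. A single-threaded model of that inner scan; SWAPFILE_CLUSTER is 256 as in the kernel, the rest is illustrative:

    #include <stdio.h>

    #define SWAPFILE_CLUSTER 256   /* slots per cluster, as in the kernel */

    /* Scan forward inside cluster 'ci' of swap_map[] for a free slot,
     * mirroring the tmp loop in scan_swap_map_try_ssd_cluster().
     * Returns the offset found, or -1 if the cluster is exhausted. */
    static long scan_in_cluster(const unsigned char *swap_map,
                                unsigned long max,
                                unsigned long ci, unsigned long start)
    {
        unsigned long end = (ci + 1) * SWAPFILE_CLUSTER;

        for (unsigned long tmp = start; tmp < max && tmp < end; tmp++)
            if (!swap_map[tmp])
                return (long)tmp;
        return -1;
    }

    int main(void)
    {
        static unsigned char swap_map[512];

        swap_map[0] = swap_map[1] = 1;      /* first two slots in use */
        printf("free slot: %ld\n", scan_in_cluster(swap_map, 512, 0, 0));
        return 0;
    }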
487 static unsigned long scan_swap_map(struct swap_info_struct *si, in scan_swap_map() argument
506 si->flags += SWP_SCANNING; in scan_swap_map()
507 scan_base = offset = si->cluster_next; in scan_swap_map()
510 if (si->cluster_info) { in scan_swap_map()
511 scan_swap_map_try_ssd_cluster(si, &offset, &scan_base); in scan_swap_map()
515 if (unlikely(!si->cluster_nr--)) { in scan_swap_map()
516 if (si->pages - si->inuse_pages < SWAPFILE_CLUSTER) { in scan_swap_map()
517 si->cluster_nr = SWAPFILE_CLUSTER - 1; in scan_swap_map()
521 spin_unlock(&si->lock); in scan_swap_map()
529 scan_base = offset = si->lowest_bit; in scan_swap_map()
533 for (; last_in_cluster <= si->highest_bit; offset++) { in scan_swap_map()
534 if (si->swap_map[offset]) in scan_swap_map()
537 spin_lock(&si->lock); in scan_swap_map()
539 si->cluster_next = offset; in scan_swap_map()
540 si->cluster_nr = SWAPFILE_CLUSTER - 1; in scan_swap_map()
550 spin_lock(&si->lock); in scan_swap_map()
551 si->cluster_nr = SWAPFILE_CLUSTER - 1; in scan_swap_map()
555 if (si->cluster_info) { in scan_swap_map()
556 while (scan_swap_map_ssd_cluster_conflict(si, offset)) in scan_swap_map()
557 scan_swap_map_try_ssd_cluster(si, &offset, &scan_base); in scan_swap_map()
559 if (!(si->flags & SWP_WRITEOK)) in scan_swap_map()
561 if (!si->highest_bit) in scan_swap_map()
563 if (offset > si->highest_bit) in scan_swap_map()
564 scan_base = offset = si->lowest_bit; in scan_swap_map()
567 if (vm_swap_full() && si->swap_map[offset] == SWAP_HAS_CACHE) { in scan_swap_map()
569 spin_unlock(&si->lock); in scan_swap_map()
570 swap_was_freed = __try_to_reclaim_swap(si, offset); in scan_swap_map()
571 spin_lock(&si->lock); in scan_swap_map()
578 if (si->swap_map[offset]) in scan_swap_map()
581 if (offset == si->lowest_bit) in scan_swap_map()
582 si->lowest_bit++; in scan_swap_map()
583 if (offset == si->highest_bit) in scan_swap_map()
584 si->highest_bit--; in scan_swap_map()
585 si->inuse_pages++; in scan_swap_map()
586 if (si->inuse_pages == si->pages) { in scan_swap_map()
587 si->lowest_bit = si->max; in scan_swap_map()
588 si->highest_bit = 0; in scan_swap_map()
590 plist_del(&si->avail_list, &swap_avail_head); in scan_swap_map()
593 si->swap_map[offset] = usage; in scan_swap_map()
594 inc_cluster_info_page(si, si->cluster_info, offset); in scan_swap_map()
595 si->cluster_next = offset + 1; in scan_swap_map()
596 si->flags -= SWP_SCANNING; in scan_swap_map()
601 spin_unlock(&si->lock); in scan_swap_map()
602 while (++offset <= si->highest_bit) { in scan_swap_map()
603 if (!si->swap_map[offset]) { in scan_swap_map()
604 spin_lock(&si->lock); in scan_swap_map()
607 if (vm_swap_full() && si->swap_map[offset] == SWAP_HAS_CACHE) { in scan_swap_map()
608 spin_lock(&si->lock); in scan_swap_map()
616 offset = si->lowest_bit; in scan_swap_map()
618 if (!si->swap_map[offset]) { in scan_swap_map()
619 spin_lock(&si->lock); in scan_swap_map()
622 if (vm_swap_full() && si->swap_map[offset] == SWAP_HAS_CACHE) { in scan_swap_map()
623 spin_lock(&si->lock); in scan_swap_map()
632 spin_lock(&si->lock); in scan_swap_map()
635 si->flags -= SWP_SCANNING; in scan_swap_map()
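scan_swap_map() itself is the slot allocator: it searches swap_map[] between si->lowest_bit and si->highest_bit for a zero byte, narrows those bounds when a boundary slot is taken (lines 581-588), wraps back to lowest_bit once offset passes highest_bit, and drops the device from swap_avail_head when it fills. A compact model of that window bookkeeping, leaving out the locking, clustering and reclaim paths:

    #include <stdio.h>

    struct swap_dev {
        unsigned char map[64];
        unsigned long lowest_bit, highest_bit, inuse, pages;
    };

    /* Find and claim a free slot, shrinking the [lowest_bit, highest_bit]
     * window as its edges fill up, as scan_swap_map() does around lines
     * 581-588. Returns 0 on failure (offset 0 is never handed out). */
    static unsigned long claim_slot(struct swap_dev *d)
    {
        for (unsigned long off = d->lowest_bit; off <= d->highest_bit; off++) {
            if (d->map[off])
                continue;
            if (off == d->lowest_bit)
                d->lowest_bit++;
            if (off == d->highest_bit)
                d->highest_bit--;
            d->map[off] = 1;
            d->inuse++;
            return off;
        }
        return 0;
    }

    int main(void)
    {
        struct swap_dev d = { .lowest_bit = 1, .highest_bit = 63, .pages = 63 };

        printf("got offset %lu\n", claim_slot(&d));
        printf("got offset %lu\n", claim_slot(&d));
        return 0;
    }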
641 struct swap_info_struct *si, *next; in get_swap_page() local
651 plist_for_each_entry_safe(si, next, &swap_avail_head, avail_list) { in get_swap_page()
653 plist_requeue(&si->avail_list, &swap_avail_head); in get_swap_page()
655 spin_lock(&si->lock); in get_swap_page()
656 if (!si->highest_bit || !(si->flags & SWP_WRITEOK)) { in get_swap_page()
658 if (plist_node_empty(&si->avail_list)) { in get_swap_page()
659 spin_unlock(&si->lock); in get_swap_page()
662 WARN(!si->highest_bit, in get_swap_page()
664 si->type); in get_swap_page()
665 WARN(!(si->flags & SWP_WRITEOK), in get_swap_page()
667 si->type); in get_swap_page()
668 plist_del(&si->avail_list, &swap_avail_head); in get_swap_page()
669 spin_unlock(&si->lock); in get_swap_page()
674 offset = scan_swap_map(si, SWAP_HAS_CACHE); in get_swap_page()
675 spin_unlock(&si->lock); in get_swap_page()
677 return swp_entry(si->type, offset); in get_swap_page()
679 si->type); in get_swap_page()
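get_swap_page() picks a device by walking swap_avail_head, a plist sorted by priority; plist_requeue() at line 653 rotates equal-priority devices so allocations round-robin across them, and a full or non-writable device is deleted from the list at line 668. A userspace approximation over a plain array, where the rotate step loosely models plist_requeue():

    #include <stdio.h>

    struct dev { int prio; int free_slots; const char *name; };

    /* Try devices in priority order; rotate the one we used to the back
     * of its equal-priority run, loosely modeling plist_requeue() in
     * get_swap_page(). Returns the device allocated from, or NULL. */
    static struct dev *alloc_from(struct dev *v, int n)
    {
        for (int i = 0; i < n; i++) {
            if (v[i].free_slots == 0)
                continue;                /* kernel: plist_del() the device */
            v[i].free_slots--;

            /* rotate past devices of the same priority */
            int j = i;
            while (j + 1 < n && v[j + 1].prio == v[i].prio)
                j++;
            struct dev used = v[i];
            for (int k = i; k < j; k++)
                v[k] = v[k + 1];
            v[j] = used;
            return &v[j];
        }
        return NULL;
    }

    int main(void)
    {
        struct dev v[] = { { -1, 2, "sda2" }, { -1, 2, "sdb2" }, { -2, 8, "file" } };

        for (int i = 0; i < 5; i++) {
            struct dev *d = alloc_from(v, 3);
            printf("allocated from %s\n", d ? d->name : "(none)");
        }
        return 0;
    }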
706 struct swap_info_struct *si; in get_swap_page_of_type() local
709 si = swap_info[type]; in get_swap_page_of_type()
710 spin_lock(&si->lock); in get_swap_page_of_type()
711 if (si && (si->flags & SWP_WRITEOK)) { in get_swap_page_of_type()
714 offset = scan_swap_map(si, 1); in get_swap_page_of_type()
716 spin_unlock(&si->lock); in get_swap_page_of_type()
721 spin_unlock(&si->lock); in get_swap_page_of_type()
1324 static unsigned int find_next_to_unuse(struct swap_info_struct *si, in find_next_to_unuse() argument
1327 unsigned int max = si->max; in find_next_to_unuse()
1352 if (frontswap_test(si, i)) in find_next_to_unuse()
1357 count = ACCESS_ONCE(si->swap_map[i]); in find_next_to_unuse()
1375 struct swap_info_struct *si = swap_info[type]; in try_to_unuse() local
1410 while ((i = find_next_to_unuse(si, i, frontswap)) != 0) { in try_to_unuse()
1421 swap_map = &si->swap_map[i]; in try_to_unuse()
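find_next_to_unuse() hands try_to_unuse() the next offset whose swap_map count is nonzero, starting just past the previous hit and wrapping at si->max; a return of 0 means no in-use entries remain, which is swapoff's exit condition. A simplified model of that wrapping cursor, leaving out the frontswap filter (line 1352) and SWAP_MAP_BAD handling:

    #include <stdio.h>

    /* Return the next in-use offset after 'prev', wrapping at 'max';
     * 0 means none left, matching find_next_to_unuse()'s contract
     * (offset 0 is a header slot and never allocated). */
    static unsigned int next_to_unuse(const unsigned char *map,
                                      unsigned int max, unsigned int prev)
    {
        unsigned int i = prev + 1;

        for (unsigned int scanned = 0; scanned < max; scanned++, i++) {
            if (i >= max)
                i = 1;          /* wrap, skipping the header at 0 */
            if (map[i])
                return i;
        }
        return 0;
    }

    int main(void)
    {
        unsigned char map[8] = { 0, 0, 1, 0, 1, 0, 0, 0 };
        unsigned int i = 0;

        while ((i = next_to_unuse(map, 8, i)) != 0) {
            printf("unuse offset %u\n", i);
            map[i] = 0;         /* try_to_unuse() would page it back in */
        }
        return 0;
    }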
1898 struct swap_info_struct *si = p; in SYSCALL_DEFINE1() local
1900 plist_for_each_entry_continue(si, &swap_active_head, list) { in SYSCALL_DEFINE1()
1901 si->prio++; in SYSCALL_DEFINE1()
1902 si->list.prio--; in SYSCALL_DEFINE1()
1903 si->avail_list.prio--; in SYSCALL_DEFINE1()
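On swapoff of an auto-priority (negative) area, this loop renumbers every later area so the negative priorities stay dense: si->prio moves up by one while the plist keys move down, because plists sort ascending and therefore store the negated priority. A tiny model of the two counters moving in opposite directions (the field names are the model's):

    #include <stdio.h>

    struct area { int prio; int plist_prio; };

    /* After removing an auto-priority area, bump every later area's
     * priority by one, as the swapoff path at source lines 1900-1903
     * does; the plist keys hold the negated value and move the other
     * way. */
    static void renumber(struct area *a, int n)
    {
        for (int i = 0; i < n; i++) {
            a[i].prio++;
            a[i].plist_prio--;
        }
    }

    int main(void)
    {
        /* areas that followed the removed one: prio -3 and -4 */
        struct area rest[] = { { -3, 3 }, { -4, 4 } };

        renumber(rest, 2);
        for (int i = 0; i < 2; i++)
            printf("prio %d (plist key %d)\n", rest[i].prio, rest[i].plist_prio);
        return 0;
    }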
2017 struct swap_info_struct *si; in swap_start() local
2028 si = swap_info[type]; in swap_start()
2029 if (!(si->flags & SWP_USED) || !si->swap_map) in swap_start()
2032 return si; in swap_start()
2040 struct swap_info_struct *si = v; in swap_next() local
2046 type = si->type + 1; in swap_next()
2050 si = swap_info[type]; in swap_next()
2051 if (!(si->flags & SWP_USED) || !si->swap_map) in swap_next()
2054 return si; in swap_next()
2067 struct swap_info_struct *si = v; in swap_show() local
2071 if (si == SEQ_START_TOKEN) { in swap_show()
2076 file = si->swap_file; in swap_show()
2082 si->pages << (PAGE_SHIFT - 10), in swap_show()
2083 si->inuse_pages << (PAGE_SHIFT - 10), in swap_show()
2084 si->prio); in swap_show()
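swap_start(), swap_next() and swap_show() are the seq_file iterator behind /proc/swaps: start and next skip table slots that lack SWP_USED or a swap_map, and show converts page counts to KiB with pages << (PAGE_SHIFT - 10). A sketch of the same skip-the-holes walk and unit conversion, without the seq_file plumbing and with the Filename/Type columns omitted:

    #include <stdio.h>

    #define PAGE_SHIFT 12
    #define SWP_USED   0x01

    struct swap_info { unsigned int flags; unsigned long pages, inuse; int prio; };

    static struct swap_info table[4] = {
        { SWP_USED, 262144, 5120, -1 },   /* 1 GiB area, partly used */
        { 0 },                            /* unused slot: skipped */
        { SWP_USED, 131072, 0, -2 },
    };

    /* Return the next populated slot at or after 'type', or -1:
     * the same skip-the-holes walk as swap_start()/swap_next(). */
    static int next_used(int type)
    {
        for (; type < 4; type++)
            if (table[type].flags & SWP_USED)
                return type;
        return -1;
    }

    int main(void)
    {
        printf("Size\tUsed\tPriority\n");
        for (int t = next_used(0); t >= 0; t = next_used(t + 1))
            printf("%lu\t%lu\t%d\n",
                   table[t].pages << (PAGE_SHIFT - 10),   /* pages -> KiB */
                   table[t].inuse << (PAGE_SHIFT - 10),
                   table[t].prio);
        return 0;
    }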
2379 static bool swap_discardable(struct swap_info_struct *si) in swap_discardable() argument
2381 struct request_queue *q = bdev_get_queue(si->bdev); in swap_discardable()
2615 struct swap_info_struct *si = swap_info[type]; in si_swapinfo() local
2617 if ((si->flags & SWP_USED) && !(si->flags & SWP_WRITEOK)) in si_swapinfo()
2618 nr_to_be_unused += si->inuse_pages; in si_swapinfo()
2787 struct swap_info_struct *si; in add_swap_count_continuation() local
2800 si = swap_info_get(entry); in add_swap_count_continuation()
2801 if (!si) { in add_swap_count_continuation()
2811 count = si->swap_map[offset] & ~SWAP_HAS_CACHE; in add_swap_count_continuation()
2823 spin_unlock(&si->lock); in add_swap_count_continuation()
2832 head = vmalloc_to_page(si->swap_map + offset); in add_swap_count_continuation()
2843 si->flags |= SWP_CONTINUED; in add_swap_count_continuation()
2871 spin_unlock(&si->lock); in add_swap_count_continuation()
2886 static bool swap_count_continued(struct swap_info_struct *si, in swap_count_continued() argument
2893 head = vmalloc_to_page(si->swap_map + offset); in swap_count_continued()
2967 static void free_swap_count_continuations(struct swap_info_struct *si) in free_swap_count_continuations() argument
2971 for (offset = 0; offset < si->max; offset += PAGE_SIZE) { in free_swap_count_continuations()
2973 head = vmalloc_to_page(si->swap_map + offset); in free_swap_count_continuations()
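The continuation machinery exists because a swap entry's reference count lives in a single swap_map byte: when it would overflow SWAP_MAP_MAX, add_swap_count_continuation() chains an extra page onto the struct page behind that swap_map slot (found with vmalloc_to_page()) and sets SWP_CONTINUED; swap_count_continued() carries and borrows across the chain, and free_swap_count_continuations() frees the chains at swapoff, stepping one page of map bytes at a time (offset += PAGE_SIZE). A toy model of a saturating base field spilling into an overflow counter; the constants only mimic the kernel's SWAP_MAP_MAX/SWAP_CONT_* scheme:

    #include <stdio.h>

    #define COUNT_MAX 0x3f     /* models SWAP_MAP_MAX: base field saturates here */
    #define HAS_CONT  0x80     /* models COUNT_CONTINUED: overflow lives elsewhere */

    struct slot {
        unsigned char map;     /* low bits: count; high bit: continuation flag */
        unsigned long cont;    /* models the chained continuation pages */
    };

    /* Increment the reference count, spilling into the continuation
     * counter once the base field saturates, in the spirit of
     * swap_duplicate() + add_swap_count_continuation(). */
    static void get_ref(struct slot *s)
    {
        if ((s->map & COUNT_MAX) == COUNT_MAX) {
            s->map |= HAS_CONT;
            s->cont++;
        } else {
            s->map++;
        }
    }

    int main(void)
    {
        struct slot s = { 0, 0 };

        for (int i = 0; i < 100; i++)
            get_ref(&s);
        printf("base=%u cont=%lu total=%lu\n",
               s.map & COUNT_MAX, s.cont, (s.map & COUNT_MAX) + s.cont);
        return 0;
    }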