Lines matching refs: p (identifier search over mm/swapfile.c)
535 static void inc_cluster_info_page(struct swap_info_struct *p, in inc_cluster_info_page() argument
543 alloc_cluster(p, idx); in inc_cluster_info_page()
555 static void dec_cluster_info_page(struct swap_info_struct *p, in dec_cluster_info_page() argument
568 free_cluster(p, idx); in dec_cluster_info_page()
658 static void __del_from_avail_list(struct swap_info_struct *p) in __del_from_avail_list() argument
663 plist_del(&p->avail_lists[nid], &swap_avail_heads[nid]); in __del_from_avail_list()
666 static void del_from_avail_list(struct swap_info_struct *p) in del_from_avail_list() argument
669 __del_from_avail_list(p); in del_from_avail_list()
690 static void add_to_avail_list(struct swap_info_struct *p) in add_to_avail_list() argument
696 WARN_ON(!plist_node_empty(&p->avail_lists[nid])); in add_to_avail_list()
697 plist_add(&p->avail_lists[nid], &swap_avail_heads[nid]); in add_to_avail_list()
1106 struct swap_info_struct *p; in __swap_info_get() local
1111 p = swp_swap_info(entry); in __swap_info_get()
1112 if (!p) in __swap_info_get()
1114 if (!(p->flags & SWP_USED)) in __swap_info_get()
1117 if (offset >= p->max) in __swap_info_get()
1119 return p; in __swap_info_get()
1135 struct swap_info_struct *p; in _swap_info_get() local
1137 p = __swap_info_get(entry); in _swap_info_get()
1138 if (!p) in _swap_info_get()
1140 if (!p->swap_map[swp_offset(entry)]) in _swap_info_get()
1142 return p; in _swap_info_get()
1153 struct swap_info_struct *p; in swap_info_get() local
1155 p = _swap_info_get(entry); in swap_info_get()
1156 if (p) in swap_info_get()
1157 spin_lock(&p->lock); in swap_info_get()
1158 return p; in swap_info_get()
1164 struct swap_info_struct *p; in swap_info_get_cont() local
1166 p = _swap_info_get(entry); in swap_info_get_cont()
1168 if (p != q) { in swap_info_get_cont()
1171 if (p != NULL) in swap_info_get_cont()
1172 spin_lock(&p->lock); in swap_info_get_cont()
1174 return p; in swap_info_get_cont()
1177 static unsigned char __swap_entry_free_locked(struct swap_info_struct *p, in __swap_entry_free_locked() argument
1184 count = p->swap_map[offset]; in __swap_entry_free_locked()
1200 if (swap_count_continued(p, offset, count)) in __swap_entry_free_locked()
1209 p->swap_map[offset] = usage ? : SWAP_HAS_CACHE; in __swap_entry_free_locked()
1277 static unsigned char __swap_entry_free(struct swap_info_struct *p, in __swap_entry_free() argument
1283 ci = lock_cluster_or_swap_info(p, offset); in __swap_entry_free()
1284 usage = __swap_entry_free_locked(p, offset, usage); in __swap_entry_free()
1285 unlock_cluster_or_swap_info(p, ci); in __swap_entry_free()
1292 static void swap_entry_free(struct swap_info_struct *p, swp_entry_t entry) in swap_entry_free() argument
1298 ci = lock_cluster(p, offset); in swap_entry_free()
1299 count = p->swap_map[offset]; in swap_entry_free()
1301 p->swap_map[offset] = 0; in swap_entry_free()
1302 dec_cluster_info_page(p, p->cluster_info, offset); in swap_entry_free()
1306 swap_range_free(p, offset, 1); in swap_entry_free()
1315 struct swap_info_struct *p; in swap_free() local
1317 p = _swap_info_get(entry); in swap_free()
1318 if (p) in swap_free()
1319 __swap_entry_free(p, entry, 1); in swap_free()
1398 struct swap_info_struct *p, *prev; in swapcache_free_entries() local
1405 p = NULL; in swapcache_free_entries()
1415 p = swap_info_get_cont(entries[i], prev); in swapcache_free_entries()
1416 if (p) in swapcache_free_entries()
1417 swap_entry_free(p, entries[i]); in swapcache_free_entries()
1418 prev = p; in swapcache_free_entries()
1420 if (p) in swapcache_free_entries()
1421 spin_unlock(&p->lock); in swapcache_free_entries()
1432 struct swap_info_struct *p; in page_swapcount() local
1438 p = _swap_info_get(entry); in page_swapcount()
1439 if (p) { in page_swapcount()
1441 ci = lock_cluster_or_swap_info(p, offset); in page_swapcount()
1442 count = swap_count(p->swap_map[offset]); in page_swapcount()
1443 unlock_cluster_or_swap_info(p, ci); in page_swapcount()
1499 struct swap_info_struct *p; in swp_swapcount() local
1505 p = _swap_info_get(entry); in swp_swapcount()
1506 if (!p) in swp_swapcount()
1511 ci = lock_cluster_or_swap_info(p, offset); in swp_swapcount()
1513 count = swap_count(p->swap_map[offset]); in swp_swapcount()
1520 page = vmalloc_to_page(p->swap_map + offset); in swp_swapcount()
1534 unlock_cluster_or_swap_info(p, ci); in swp_swapcount()
1674 struct swap_info_struct *p; in reuse_swap_page() local
1677 p = swap_info_get(entry); in reuse_swap_page()
1678 if (p->flags & SWP_STABLE_WRITES) { in reuse_swap_page()
1679 spin_unlock(&p->lock); in reuse_swap_page()
1682 spin_unlock(&p->lock); in reuse_swap_page()
1734 struct swap_info_struct *p; in free_swap_and_cache() local
1740 p = _swap_info_get(entry); in free_swap_and_cache()
1741 if (p) { in free_swap_and_cache()
1742 count = __swap_entry_free(p, entry, 1); in free_swap_and_cache()
1744 !swap_page_trans_huge_swapped(p, entry)) in free_swap_and_cache()
1745 __try_to_reclaim_swap(p, swp_offset(entry), in free_swap_and_cache()
1748 return p != NULL; in free_swap_and_cache()
2128 struct list_head *p; in try_to_unuse() local
2150 p = &init_mm.mmlist; in try_to_unuse()
2153 (p = p->next) != &init_mm.mmlist) { in try_to_unuse()
2155 mm = list_entry(p, struct mm_struct, mmlist); in try_to_unuse()
2239 struct list_head *p, *next; in drain_mmlist() local
2246 list_for_each_safe(p, next, &init_mm.mmlist) in drain_mmlist()
2247 list_del_init(p); in drain_mmlist()
2409 static int swap_node(struct swap_info_struct *p) in swap_node() argument
2413 if (p->bdev) in swap_node()
2414 bdev = p->bdev; in swap_node()
2416 bdev = p->swap_file->f_inode->i_sb->s_bdev; in swap_node()
2421 static void setup_swap_info(struct swap_info_struct *p, int prio, in setup_swap_info() argument
2428 p->prio = prio; in setup_swap_info()
2430 p->prio = --least_priority; in setup_swap_info()
2435 p->list.prio = -p->prio; in setup_swap_info()
2437 if (p->prio >= 0) in setup_swap_info()
2438 p->avail_lists[i].prio = -p->prio; in setup_swap_info()
2440 if (swap_node(p) == i) in setup_swap_info()
2441 p->avail_lists[i].prio = 1; in setup_swap_info()
2443 p->avail_lists[i].prio = -p->prio; in setup_swap_info()
2446 p->swap_map = swap_map; in setup_swap_info()
2447 p->cluster_info = cluster_info; in setup_swap_info()
2450 static void _enable_swap_info(struct swap_info_struct *p) in _enable_swap_info() argument
2452 p->flags |= SWP_WRITEOK | SWP_VALID; in _enable_swap_info()
2453 atomic_long_add(p->pages, &nr_swap_pages); in _enable_swap_info()
2454 total_swap_pages += p->pages; in _enable_swap_info()
2467 plist_add(&p->list, &swap_active_head); in _enable_swap_info()
2468 add_to_avail_list(p); in _enable_swap_info()
2471 static void enable_swap_info(struct swap_info_struct *p, int prio, in enable_swap_info() argument
2476 frontswap_init(p->type, frontswap_map); in enable_swap_info()
2478 spin_lock(&p->lock); in enable_swap_info()
2479 setup_swap_info(p, prio, swap_map, cluster_info); in enable_swap_info()
2480 spin_unlock(&p->lock); in enable_swap_info()
2488 spin_lock(&p->lock); in enable_swap_info()
2489 _enable_swap_info(p); in enable_swap_info()
2490 spin_unlock(&p->lock); in enable_swap_info()
2494 static void reinsert_swap_info(struct swap_info_struct *p) in reinsert_swap_info() argument
2497 spin_lock(&p->lock); in reinsert_swap_info()
2498 setup_swap_info(p, p->prio, p->swap_map, p->cluster_info); in reinsert_swap_info()
2499 _enable_swap_info(p); in reinsert_swap_info()
2500 spin_unlock(&p->lock); in reinsert_swap_info()
2517 struct swap_info_struct *p = NULL; in SYSCALL_DEFINE1() local
2544 plist_for_each_entry(p, &swap_active_head, list) { in SYSCALL_DEFINE1()
2545 if (p->flags & SWP_WRITEOK) { in SYSCALL_DEFINE1()
2546 if (p->swap_file->f_mapping == mapping) { in SYSCALL_DEFINE1()
2557 if (!security_vm_enough_memory_mm(current->mm, p->pages)) in SYSCALL_DEFINE1()
2558 vm_unacct_memory(p->pages); in SYSCALL_DEFINE1()
2564 del_from_avail_list(p); in SYSCALL_DEFINE1()
2565 spin_lock(&p->lock); in SYSCALL_DEFINE1()
2566 if (p->prio < 0) { in SYSCALL_DEFINE1()
2567 struct swap_info_struct *si = p; in SYSCALL_DEFINE1()
2580 plist_del(&p->list, &swap_active_head); in SYSCALL_DEFINE1()
2581 atomic_long_sub(p->pages, &nr_swap_pages); in SYSCALL_DEFINE1()
2582 total_swap_pages -= p->pages; in SYSCALL_DEFINE1()
2583 p->flags &= ~SWP_WRITEOK; in SYSCALL_DEFINE1()
2584 spin_unlock(&p->lock); in SYSCALL_DEFINE1()
2590 err = try_to_unuse(p->type, false, 0); /* force unuse all pages */ in SYSCALL_DEFINE1()
2595 reinsert_swap_info(p); in SYSCALL_DEFINE1()
2603 spin_lock(&p->lock); in SYSCALL_DEFINE1()
2604 p->flags &= ~SWP_VALID; /* mark swap device as invalid */ in SYSCALL_DEFINE1()
2605 spin_unlock(&p->lock); in SYSCALL_DEFINE1()
2613 flush_work(&p->discard_work); in SYSCALL_DEFINE1()
2615 destroy_swap_extents(p); in SYSCALL_DEFINE1()
2616 if (p->flags & SWP_CONTINUED) in SYSCALL_DEFINE1()
2617 free_swap_count_continuations(p); in SYSCALL_DEFINE1()
2619 if (!p->bdev || !blk_queue_nonrot(bdev_get_queue(p->bdev))) in SYSCALL_DEFINE1()
2624 spin_lock(&p->lock); in SYSCALL_DEFINE1()
2628 p->highest_bit = 0; /* cuts scans short */ in SYSCALL_DEFINE1()
2629 while (p->flags >= SWP_SCANNING) { in SYSCALL_DEFINE1()
2630 spin_unlock(&p->lock); in SYSCALL_DEFINE1()
2634 spin_lock(&p->lock); in SYSCALL_DEFINE1()
2637 swap_file = p->swap_file; in SYSCALL_DEFINE1()
2638 old_block_size = p->old_block_size; in SYSCALL_DEFINE1()
2639 p->swap_file = NULL; in SYSCALL_DEFINE1()
2640 p->max = 0; in SYSCALL_DEFINE1()
2641 swap_map = p->swap_map; in SYSCALL_DEFINE1()
2642 p->swap_map = NULL; in SYSCALL_DEFINE1()
2643 cluster_info = p->cluster_info; in SYSCALL_DEFINE1()
2644 p->cluster_info = NULL; in SYSCALL_DEFINE1()
2645 frontswap_map = frontswap_map_get(p); in SYSCALL_DEFINE1()
2646 spin_unlock(&p->lock); in SYSCALL_DEFINE1()
2648 frontswap_invalidate_area(p->type); in SYSCALL_DEFINE1()
2649 frontswap_map_set(p, NULL); in SYSCALL_DEFINE1()
2651 free_percpu(p->percpu_cluster); in SYSCALL_DEFINE1()
2652 p->percpu_cluster = NULL; in SYSCALL_DEFINE1()
2657 swap_cgroup_swapoff(p->type); in SYSCALL_DEFINE1()
2658 exit_swap_address_space(p->type); in SYSCALL_DEFINE1()
2679 p->flags = 0; in SYSCALL_DEFINE1()
2826 struct swap_info_struct *p; in alloc_swap_info() local
2830 p = kvzalloc(struct_size(p, avail_lists, nr_node_ids), GFP_KERNEL); in alloc_swap_info()
2831 if (!p) in alloc_swap_info()
2841 kvfree(p); in alloc_swap_info()
2845 p->type = type; in alloc_swap_info()
2846 WRITE_ONCE(swap_info[type], p); in alloc_swap_info()
2855 kvfree(p); in alloc_swap_info()
2856 p = swap_info[type]; in alloc_swap_info()
2862 p->swap_extent_root = RB_ROOT; in alloc_swap_info()
2863 plist_node_init(&p->list, 0); in alloc_swap_info()
2865 plist_node_init(&p->avail_lists[i], 0); in alloc_swap_info()
2866 p->flags = SWP_USED; in alloc_swap_info()
2868 spin_lock_init(&p->lock); in alloc_swap_info()
2869 spin_lock_init(&p->cont_lock); in alloc_swap_info()
2871 return p; in alloc_swap_info()
2874 static int claim_swapfile(struct swap_info_struct *p, struct inode *inode) in claim_swapfile() argument
2879 p->bdev = bdgrab(I_BDEV(inode)); in claim_swapfile()
2880 error = blkdev_get(p->bdev, in claim_swapfile()
2881 FMODE_READ | FMODE_WRITE | FMODE_EXCL, p); in claim_swapfile()
2883 p->bdev = NULL; in claim_swapfile()
2886 p->old_block_size = block_size(p->bdev); in claim_swapfile()
2887 error = set_blocksize(p->bdev, PAGE_SIZE); in claim_swapfile()
2890 p->flags |= SWP_BLKDEV; in claim_swapfile()
2892 p->bdev = inode->i_sb->s_bdev; in claim_swapfile()
2931 static unsigned long read_swap_header(struct swap_info_struct *p, in read_swap_header() argument
2962 p->lowest_bit = 1; in read_swap_header()
2963 p->cluster_next = 1; in read_swap_header()
2964 p->cluster_nr = 0; in read_swap_header()
2983 p->highest_bit = maxpages - 1; in read_swap_header()
3007 static int setup_swap_map_and_extents(struct swap_info_struct *p, in setup_swap_map_and_extents() argument
3018 unsigned long col = p->cluster_next / SWAPFILE_CLUSTER % SWAP_CLUSTER_COLS; in setup_swap_map_and_extents()
3023 cluster_list_init(&p->free_clusters); in setup_swap_map_and_extents()
3024 cluster_list_init(&p->discard_clusters); in setup_swap_map_and_extents()
3037 inc_cluster_info_page(p, cluster_info, page_nr); in setup_swap_map_and_extents()
3043 inc_cluster_info_page(p, cluster_info, i); in setup_swap_map_and_extents()
3051 inc_cluster_info_page(p, cluster_info, 0); in setup_swap_map_and_extents()
3052 p->max = maxpages; in setup_swap_map_and_extents()
3053 p->pages = nr_good_pages; in setup_swap_map_and_extents()
3054 nr_extents = setup_swap_extents(p, span); in setup_swap_map_and_extents()
3057 nr_good_pages = p->pages; in setup_swap_map_and_extents()
3081 cluster_list_add_tail(&p->free_clusters, cluster_info, in setup_swap_map_and_extents()
3104 struct swap_info_struct *p; in SYSCALL_DEFINE2() local
3130 p = alloc_swap_info(); in SYSCALL_DEFINE2()
3131 if (IS_ERR(p)) in SYSCALL_DEFINE2()
3132 return PTR_ERR(p); in SYSCALL_DEFINE2()
3134 INIT_WORK(&p->discard_work, swap_discard_work); in SYSCALL_DEFINE2()
3149 p->swap_file = swap_file; in SYSCALL_DEFINE2()
3154 error = claim_swapfile(p, inode); in SYSCALL_DEFINE2()
3172 maxpages = read_swap_header(p, swap_header, inode); in SYSCALL_DEFINE2()
3186 p->flags |= SWP_STABLE_WRITES; in SYSCALL_DEFINE2()
3189 p->flags |= SWP_SYNCHRONOUS_IO; in SYSCALL_DEFINE2()
3191 if (p->bdev && blk_queue_nonrot(bdev_get_queue(p->bdev))) { in SYSCALL_DEFINE2()
3195 p->flags |= SWP_SOLIDSTATE; in SYSCALL_DEFINE2()
3200 p->cluster_next = 1 + (prandom_u32() % p->highest_bit); in SYSCALL_DEFINE2()
3213 p->percpu_cluster = alloc_percpu(struct percpu_cluster); in SYSCALL_DEFINE2()
3214 if (!p->percpu_cluster) { in SYSCALL_DEFINE2()
3220 cluster = per_cpu_ptr(p->percpu_cluster, cpu); in SYSCALL_DEFINE2()
3228 error = swap_cgroup_swapon(p->type, maxpages); in SYSCALL_DEFINE2()
3232 nr_extents = setup_swap_map_and_extents(p, swap_header, swap_map, in SYSCALL_DEFINE2()
3244 if (p->bdev && (swap_flags & SWAP_FLAG_DISCARD) && swap_discardable(p)) { in SYSCALL_DEFINE2()
3251 p->flags |= (SWP_DISCARDABLE | SWP_AREA_DISCARD | in SYSCALL_DEFINE2()
3261 p->flags &= ~SWP_PAGE_DISCARD; in SYSCALL_DEFINE2()
3263 p->flags &= ~SWP_AREA_DISCARD; in SYSCALL_DEFINE2()
3266 if (p->flags & SWP_AREA_DISCARD) { in SYSCALL_DEFINE2()
3267 int err = discard_swap(p); in SYSCALL_DEFINE2()
3270 p, err); in SYSCALL_DEFINE2()
3274 error = init_swap_address_space(p->type, maxpages); in SYSCALL_DEFINE2()
3294 enable_swap_info(p, prio, swap_map, cluster_info, frontswap_map); in SYSCALL_DEFINE2()
3297 p->pages<<(PAGE_SHIFT-10), name->name, p->prio, in SYSCALL_DEFINE2()
3299 (p->flags & SWP_SOLIDSTATE) ? "SS" : "", in SYSCALL_DEFINE2()
3300 (p->flags & SWP_DISCARDABLE) ? "D" : "", in SYSCALL_DEFINE2()
3301 (p->flags & SWP_AREA_DISCARD) ? "s" : "", in SYSCALL_DEFINE2()
3302 (p->flags & SWP_PAGE_DISCARD) ? "c" : "", in SYSCALL_DEFINE2()
3312 free_percpu(p->percpu_cluster); in SYSCALL_DEFINE2()
3313 p->percpu_cluster = NULL; in SYSCALL_DEFINE2()
3314 if (inode && S_ISBLK(inode->i_mode) && p->bdev) { in SYSCALL_DEFINE2()
3315 set_blocksize(p->bdev, p->old_block_size); in SYSCALL_DEFINE2()
3316 blkdev_put(p->bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL); in SYSCALL_DEFINE2()
3318 destroy_swap_extents(p); in SYSCALL_DEFINE2()
3319 swap_cgroup_swapoff(p->type); in SYSCALL_DEFINE2()
3321 p->swap_file = NULL; in SYSCALL_DEFINE2()
3322 p->flags = 0; in SYSCALL_DEFINE2()
3380 struct swap_info_struct *p; in __swap_duplicate() local
3387 p = get_swap_device(entry); in __swap_duplicate()
3388 if (!p) in __swap_duplicate()
3392 ci = lock_cluster_or_swap_info(p, offset); in __swap_duplicate()
3394 count = p->swap_map[offset]; in __swap_duplicate()
3425 else if (swap_count_continued(p, offset, count)) in __swap_duplicate()
3432 p->swap_map[offset] = count | has_cache; in __swap_duplicate()
3435 unlock_cluster_or_swap_info(p, ci); in __swap_duplicate()
3437 if (p) in __swap_duplicate()
3438 put_swap_device(p); in __swap_duplicate()
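
Taken together, these matches trace how the swap_info_struct pointer p is used: the lookup helpers (__swap_info_get, _swap_info_get, swap_info_get) resolve a swp_entry_t to its swap device, optionally taking p->lock; the free paths (__swap_entry_free, swap_entry_free, swapcache_free_entries) drop per-slot counts under cluster or device locking; and the swapon/swapoff syscalls allocate, enable, and tear down p. As an illustration only, the sketch below shows the lookup/lock/operate pattern implied by the listing. It is a hypothetical helper (example_swap_entry_users is not a kernel function) and assumes it would live inside mm/swapfile.c, where swap_info_get() and swap_count() are visible as static helpers.

/*
 * Hypothetical example, not part of the kernel tree: report the use
 * count of a single swap entry with the same lookup/lock pattern the
 * matches above show (compare swap_info_get() and page_swapcount()).
 */
static int example_swap_entry_users(swp_entry_t entry)
{
	struct swap_info_struct *p;
	unsigned long offset = swp_offset(entry);
	int count = 0;

	/* resolves entry to its device and takes p->lock on success */
	p = swap_info_get(entry);
	if (!p)
		return 0;

	/* swap_map[offset] holds the slot's use count plus flag bits */
	count = swap_count(p->swap_map[offset]);

	spin_unlock(&p->lock);
	return count;
}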