
Lines Matching full:pool in mm/z3fold.c

51 * freelists maintained in each pool. NCHUNKS_ORDER of 6 means that the
56 * be 63, or 62, respectively, freelists per pool.
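
To make the "63, or 62" figure concrete: with NCHUNKS_ORDER of 6 a page is divided into 64 chunks, and the z3fold header occupies the first one or two of them depending on its aligned size. A minimal user-space sketch of that arithmetic follows; PAGE_SHIFT of 12 (4 KiB pages) is an assumption for illustration, the other constants mirror z3fold.c:

#include <stdio.h>

/* Constants mirroring z3fold.c; PAGE_SHIFT 12 is assumed for illustration. */
#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define NCHUNKS_ORDER	6
#define CHUNK_SHIFT	(PAGE_SHIFT - NCHUNKS_ORDER)	/* 6 -> 64-byte chunks */
#define TOTAL_CHUNKS	(PAGE_SIZE >> CHUNK_SHIFT)	/* 64 chunks per page */

int main(void)
{
	/* The header takes 1 or 2 chunks depending on its aligned size. */
	for (unsigned long zhdr_chunks = 1; zhdr_chunks <= 2; zhdr_chunks++)
		printf("header = %lu chunk(s) -> %lu freelists per pool\n",
		       zhdr_chunks, TOTAL_CHUNKS - zhdr_chunks);
	return 0;
}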
76 int (*evict)(struct z3fold_pool *pool, unsigned long handle);
93 unsigned long pool; /* back link */ member
102 * pool
107 * @pool: pointer to the containing pool
121 struct z3fold_pool *pool; member
133 * struct z3fold_pool - stores metadata for each z3fold pool
134 * @name: pool name
135 * @lock: protects pool unbuddied/lru lists
136 * @stale_lock: protects pool stale page list
143 * @pages_nr: number of z3fold pages in the pool.
146 * pool creation time.
152 * This structure is allocated at pool creation time and maintains metadata
153 * pertaining to a particular z3fold pool.
210 static inline struct z3fold_buddy_slots *alloc_slots(struct z3fold_pool *pool, in alloc_slots() argument
215 slots = kmem_cache_zalloc(pool->c_handle, in alloc_slots()
221 slots->pool = (unsigned long)pool; in alloc_slots()
230 return (struct z3fold_pool *)(s->pool & ~HANDLE_FLAG_MASK); in slots_to_pool()
322 if (test_bit(HANDLES_NOFREE, &slots->pool)) { in free_handle()
340 struct z3fold_pool *pool = slots_to_pool(slots); in free_handle() local
344 kmem_cache_free(pool->c_handle, slots); in free_handle()
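
The `pool` field of the handle slots (line 93 above) is a tagged pointer: slots allocations are aligned, so the low bits of the pool address are zero and can carry flags such as HANDLES_NOFREE, which slots_to_pool() strips with ~HANDLE_FLAG_MASK (line 230). A user-space sketch of the same encoding; the 4-bit mask width and the flag's bit index are illustrative assumptions:

#include <assert.h>
#include <stdlib.h>

#define HANDLE_FLAG_MASK	0xfUL	/* assumed width of the flag area */
#define HANDLES_NOFREE		0	/* assumed bit index */

struct pool;	/* opaque stand-in for struct z3fold_pool */

struct slots {
	unsigned long pool;	/* back link: pool pointer | flag bits */
};

static struct pool *slots_to_pool(const struct slots *s)
{
	return (struct pool *)(s->pool & ~HANDLE_FLAG_MASK);
}

int main(void)
{
	/* Alignment guarantees the low bits of the address are clear. */
	struct pool *p = aligned_alloc(16, 64);
	struct slots s = { .pool = (unsigned long)p | (1UL << HANDLES_NOFREE) };

	assert(slots_to_pool(&s) == p);			/* flags mask off */
	assert(s.pool & (1UL << HANDLES_NOFREE));	/* flag still readable */
	free(p);
	return 0;
}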
377 static int z3fold_register_migration(struct z3fold_pool *pool) in z3fold_register_migration() argument
379 pool->inode = alloc_anon_inode(z3fold_mnt->mnt_sb); in z3fold_register_migration()
380 if (IS_ERR(pool->inode)) { in z3fold_register_migration()
381 pool->inode = NULL; in z3fold_register_migration()
385 pool->inode->i_mapping->private_data = pool; in z3fold_register_migration()
386 pool->inode->i_mapping->a_ops = &z3fold_aops; in z3fold_register_migration()
390 static void z3fold_unregister_migration(struct z3fold_pool *pool) in z3fold_unregister_migration() argument
392 if (pool->inode) in z3fold_unregister_migration()
393 iput(pool->inode); in z3fold_unregister_migration()
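
z3fold_register_migration() gives the pool an anonymous inode whose address_space carries the pool back-pointer and the z3fold_aops table; pages later marked with __SetPageMovable() (lines 1165/1169 below) are migrated through those hooks. Presumably the table is wired as below, using the pre-5.19 movable-page callbacks that the isolate/migrate/putback functions in this listing plug into (a sketch; field names per that kernel era):

static const struct address_space_operations z3fold_aops = {
	.isolate_page	= z3fold_page_isolate,
	.migratepage	= z3fold_page_migrate,
	.putback_page	= z3fold_page_putback,
};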
398 struct z3fold_pool *pool, gfp_t gfp) in init_z3fold_page() argument
412 slots = alloc_slots(pool, gfp); in init_z3fold_page()
427 zhdr->pool = pool; in init_z3fold_page()
453 * Pool lock should be held as this function accesses first_num
519 return zhdr->pool; in zhdr_to_pool()
525 struct z3fold_pool *pool = zhdr_to_pool(zhdr); in __release_z3fold_page() local
530 spin_lock(&pool->lock); in __release_z3fold_page()
533 spin_unlock(&pool->lock); in __release_z3fold_page()
538 spin_lock(&pool->stale_lock); in __release_z3fold_page()
539 list_add(&zhdr->buddy, &pool->stale); in __release_z3fold_page()
540 queue_work(pool->release_wq, &pool->work); in __release_z3fold_page()
541 spin_unlock(&pool->stale_lock); in __release_z3fold_page()
564 struct z3fold_pool *pool = zhdr_to_pool(zhdr); in release_z3fold_page_locked_list() local
566 spin_lock(&pool->lock); in release_z3fold_page_locked_list()
568 spin_unlock(&pool->lock); in release_z3fold_page_locked_list()
576 struct z3fold_pool *pool = container_of(w, struct z3fold_pool, work); in free_pages_work() local
578 spin_lock(&pool->stale_lock); in free_pages_work()
579 while (!list_empty(&pool->stale)) { in free_pages_work()
580 struct z3fold_header *zhdr = list_first_entry(&pool->stale, in free_pages_work()
587 spin_unlock(&pool->stale_lock); in free_pages_work()
591 spin_lock(&pool->stale_lock); in free_pages_work()
593 spin_unlock(&pool->stale_lock); in free_pages_work()
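
__release_z3fold_page() cannot free the page on the spot (it may run under locks), so it parks the header on pool->stale and queues pool->work; free_pages_work() then drains the list, dropping stale_lock around the actual free because freeing may sleep. The worker's shape, reconstructed from the matched lines (a sketch, not the verbatim kernel code; free_z3fold_page() and its headless flag are the page-freeing helper of the full source):

static void free_pages_work_sketch(struct work_struct *w)
{
	struct z3fold_pool *pool = container_of(w, struct z3fold_pool, work);

	spin_lock(&pool->stale_lock);
	while (!list_empty(&pool->stale)) {
		struct z3fold_header *zhdr = list_first_entry(&pool->stale,
						struct z3fold_header, buddy);
		struct page *page = virt_to_page(zhdr);

		list_del(&zhdr->buddy);
		spin_unlock(&pool->stale_lock);	/* the free may sleep */
		free_z3fold_page(page, false);
		cond_resched();
		spin_lock(&pool->stale_lock);
	}
	spin_unlock(&pool->stale_lock);
}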
621 static inline void add_to_unbuddied(struct z3fold_pool *pool, in add_to_unbuddied() argument
626 struct list_head *unbuddied = get_cpu_ptr(pool->unbuddied); in add_to_unbuddied()
629 spin_lock(&pool->lock); in add_to_unbuddied()
631 spin_unlock(&pool->lock); in add_to_unbuddied()
633 put_cpu_ptr(pool->unbuddied); in add_to_unbuddied()
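
add_to_unbuddied() files a page that still has free space on a per-CPU freelist indexed by free-chunk count. get_cpu_ptr()/put_cpu_ptr() pin the task to one CPU's array while pool->lock still guards the lists themselves, since __z3fold_alloc() (below) may steal entries from other CPUs via per_cpu_ptr(). A sketch of that pattern, with 'freechunks' standing in for the index the full source computes:

static void add_to_unbuddied_sketch(struct z3fold_pool *pool,
				    struct z3fold_header *zhdr, int freechunks)
{
	/* get_cpu_ptr() disables preemption, pinning us to this CPU's array */
	struct list_head *unbuddied = get_cpu_ptr(pool->unbuddied);

	spin_lock(&pool->lock);		/* other CPUs may steal from this list */
	list_add(&zhdr->buddy, &unbuddied[freechunks]);
	spin_unlock(&pool->lock);
	zhdr->cpu = smp_processor_id();
	put_cpu_ptr(pool->unbuddied);	/* re-enables preemption */
}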
677 struct z3fold_pool *pool = zhdr_to_pool(zhdr); in compact_single_buddy() local
713 new_zhdr = __z3fold_alloc(pool, sz, false); in compact_single_buddy()
749 add_to_unbuddied(pool, new_zhdr); in compact_single_buddy()
760 atomic64_dec(&pool->pages_nr); in compact_single_buddy()
762 add_to_unbuddied(pool, new_zhdr); in compact_single_buddy()
821 struct z3fold_pool *pool = zhdr_to_pool(zhdr); in do_compact_page() local
833 spin_lock(&pool->lock); in do_compact_page()
835 spin_unlock(&pool->lock); in do_compact_page()
838 atomic64_dec(&pool->pages_nr); in do_compact_page()
851 atomic64_dec(&pool->pages_nr); in do_compact_page()
860 add_to_unbuddied(pool, zhdr); in do_compact_page()
874 static inline struct z3fold_header *__z3fold_alloc(struct z3fold_pool *pool, in __z3fold_alloc() argument
884 unbuddied = get_cpu_ptr(pool->unbuddied); in __z3fold_alloc()
895 spin_lock(&pool->lock); in __z3fold_alloc()
900 spin_unlock(&pool->lock); in __z3fold_alloc()
902 put_cpu_ptr(pool->unbuddied); in __z3fold_alloc()
909 spin_unlock(&pool->lock); in __z3fold_alloc()
916 put_cpu_ptr(pool->unbuddied); in __z3fold_alloc()
924 * list while pool lock was held, and then we've taken in __z3fold_alloc()
931 put_cpu_ptr(pool->unbuddied); in __z3fold_alloc()
940 unbuddied = per_cpu_ptr(pool->unbuddied, cpu); in __z3fold_alloc()
941 spin_lock(&pool->lock); in __z3fold_alloc()
948 spin_unlock(&pool->lock); in __z3fold_alloc()
954 spin_unlock(&pool->lock); in __z3fold_alloc()
971 zhdr->slots = alloc_slots(pool, in __z3fold_alloc()
981 * z3fold_create_pool() - create a new z3fold pool
982 * @name: pool name
983 * @gfp: gfp flags when allocating the z3fold pool structure
984 * @ops: user-defined operations for the z3fold pool
986 * Return: pointer to the new z3fold pool or NULL if the metadata allocation
992 struct z3fold_pool *pool = NULL; in z3fold_create_pool() local
995 pool = kzalloc(sizeof(struct z3fold_pool), gfp); in z3fold_create_pool()
996 if (!pool) in z3fold_create_pool()
998 pool->c_handle = kmem_cache_create("z3fold_handle", in z3fold_create_pool()
1001 if (!pool->c_handle) in z3fold_create_pool()
1003 spin_lock_init(&pool->lock); in z3fold_create_pool()
1004 spin_lock_init(&pool->stale_lock); in z3fold_create_pool()
1005 pool->unbuddied = __alloc_percpu(sizeof(struct list_head)*NCHUNKS, 2); in z3fold_create_pool()
1006 if (!pool->unbuddied) in z3fold_create_pool()
1010 per_cpu_ptr(pool->unbuddied, cpu); in z3fold_create_pool()
1014 INIT_LIST_HEAD(&pool->lru); in z3fold_create_pool()
1015 INIT_LIST_HEAD(&pool->stale); in z3fold_create_pool()
1016 atomic64_set(&pool->pages_nr, 0); in z3fold_create_pool()
1017 pool->name = name; in z3fold_create_pool()
1018 pool->compact_wq = create_singlethread_workqueue(pool->name); in z3fold_create_pool()
1019 if (!pool->compact_wq) in z3fold_create_pool()
1021 pool->release_wq = create_singlethread_workqueue(pool->name); in z3fold_create_pool()
1022 if (!pool->release_wq) in z3fold_create_pool()
1024 if (z3fold_register_migration(pool)) in z3fold_create_pool()
1026 INIT_WORK(&pool->work, free_pages_work); in z3fold_create_pool()
1027 pool->ops = ops; in z3fold_create_pool()
1028 return pool; in z3fold_create_pool()
1031 destroy_workqueue(pool->release_wq); in z3fold_create_pool()
1033 destroy_workqueue(pool->compact_wq); in z3fold_create_pool()
1035 free_percpu(pool->unbuddied); in z3fold_create_pool()
1037 kmem_cache_destroy(pool->c_handle); in z3fold_create_pool()
1039 kfree(pool); in z3fold_create_pool()
1045 * z3fold_destroy_pool() - destroys an existing z3fold pool
1046 * @pool: the z3fold pool to be destroyed
1048 * The pool should be emptied before this function is called.
1050 static void z3fold_destroy_pool(struct z3fold_pool *pool) in z3fold_destroy_pool() argument
1052 kmem_cache_destroy(pool->c_handle); in z3fold_destroy_pool()
1055 * We need to destroy pool->compact_wq before pool->release_wq, in z3fold_destroy_pool()
1056 * as any pending work on pool->compact_wq will call in z3fold_destroy_pool()
1057 * queue_work(pool->release_wq, &pool->work). in z3fold_destroy_pool()
1063 destroy_workqueue(pool->compact_wq); in z3fold_destroy_pool()
1064 destroy_workqueue(pool->release_wq); in z3fold_destroy_pool()
1065 z3fold_unregister_migration(pool); in z3fold_destroy_pool()
1066 free_percpu(pool->unbuddied); in z3fold_destroy_pool()
1067 kfree(pool); in z3fold_destroy_pool()
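
Taken together, setup and teardown pair as below. A caller-side sketch; 'demo_ops' is a hypothetical struct z3fold_ops supplying an .evict callback, and error paths are trimmed:

static int pool_lifecycle_demo(void)
{
	struct z3fold_pool *pool;

	pool = z3fold_create_pool("demo", GFP_KERNEL, &demo_ops);
	if (!pool)
		return -ENOMEM;

	/* ... z3fold_alloc()/z3fold_free() traffic ... */

	/*
	 * The pool must be emptied first; destroy then tears down the
	 * handle cache, compact_wq strictly before release_wq (pending
	 * compaction work can still queue release work, per the comment
	 * at line 1055 above), migration state and the percpu lists.
	 */
	z3fold_destroy_pool(pool);
	return 0;
}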
1072 * @pool: z3fold pool from which to allocate
1074 * @gfp: gfp flags used if the pool needs to grow
1077 * This function will attempt to find a free region in the pool large enough to
1080 * allocated and added to the pool to satisfy the request.
1083 * as z3fold pool pages.
1086 * gfp arguments are invalid or -ENOMEM if the pool was unable to allocate
1089 static int z3fold_alloc(struct z3fold_pool *pool, size_t size, gfp_t gfp, in z3fold_alloc() argument
1108 zhdr = __z3fold_alloc(pool, size, can_sleep); in z3fold_alloc()
1114 atomic64_dec(&pool->pages_nr); in z3fold_alloc()
1129 spin_lock(&pool->stale_lock); in z3fold_alloc()
1130 zhdr = list_first_entry_or_null(&pool->stale, in z3fold_alloc()
1139 spin_unlock(&pool->stale_lock); in z3fold_alloc()
1143 spin_unlock(&pool->stale_lock); in z3fold_alloc()
1152 zhdr = init_z3fold_page(page, bud == HEADLESS, pool, gfp); in z3fold_alloc()
1157 atomic64_inc(&pool->pages_nr); in z3fold_alloc()
1165 __SetPageMovable(page, pool->inode->i_mapping); in z3fold_alloc()
1169 __SetPageMovable(page, pool->inode->i_mapping); in z3fold_alloc()
1184 add_to_unbuddied(pool, zhdr); in z3fold_alloc()
1187 spin_lock(&pool->lock); in z3fold_alloc()
1192 list_add(&page->lru, &pool->lru); in z3fold_alloc()
1195 spin_unlock(&pool->lock); in z3fold_alloc()
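
The allocate/map/use/free cycle implied by the kernel-doc above, as a caller-side sketch (store_object() is a hypothetical helper; 'size' must fit in a z3fold page or -ENOSPC comes back):

static int store_object(struct z3fold_pool *pool, const void *src, size_t size)
{
	unsigned long handle;
	void *dst;
	int err;

	err = z3fold_alloc(pool, size, GFP_KERNEL, &handle);
	if (err)		/* -ENOSPC, -EINVAL or -ENOMEM */
		return err;

	dst = z3fold_map(pool, handle);	/* kernel address, valid until unmap */
	memcpy(dst, src, size);
	z3fold_unmap(pool, handle);

	z3fold_free(pool, handle);	/* or keep the handle for later reads */
	return 0;
}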
1204 * @pool: pool in which the allocation resided
1212 static void z3fold_free(struct z3fold_pool *pool, unsigned long handle) in z3fold_free() argument
1230 spin_lock(&pool->lock); in z3fold_free()
1232 spin_unlock(&pool->lock); in z3fold_free()
1235 atomic64_dec(&pool->pages_nr); in z3fold_free()
1263 atomic64_dec(&pool->pages_nr); in z3fold_free()
1277 spin_lock(&pool->lock); in z3fold_free()
1279 spin_unlock(&pool->lock); in z3fold_free()
1288 queue_work_on(zhdr->cpu, pool->compact_wq, &zhdr->work); in z3fold_free()
1293 * z3fold_reclaim_page() - evicts allocations from a pool page and frees it
1294 * @pool: pool from which a page will attempt to be evicted
1307 * z3fold_reclaim_page() will remove a z3fold page from the pool LRU list and
1308 * call the user-defined eviction handler with the pool and handle as
1328 static int z3fold_reclaim_page(struct z3fold_pool *pool, unsigned int retries) in z3fold_reclaim_page() argument
1338 slots.pool = (unsigned long)pool | (1 << HANDLES_NOFREE); in z3fold_reclaim_page()
1340 spin_lock(&pool->lock); in z3fold_reclaim_page()
1341 if (!pool->ops || !pool->ops->evict || retries == 0) { in z3fold_reclaim_page()
1342 spin_unlock(&pool->lock); in z3fold_reclaim_page()
1346 if (list_empty(&pool->lru)) { in z3fold_reclaim_page()
1347 spin_unlock(&pool->lock); in z3fold_reclaim_page()
1350 list_for_each_prev(pos, &pool->lru) { in z3fold_reclaim_page()
1378 atomic64_dec(&pool->pages_nr); in z3fold_reclaim_page()
1391 atomic64_dec(&pool->pages_nr); in z3fold_reclaim_page()
1406 spin_unlock(&pool->lock); in z3fold_reclaim_page()
1439 ret = pool->ops->evict(pool, middle_handle); in z3fold_reclaim_page()
1444 ret = pool->ops->evict(pool, first_handle); in z3fold_reclaim_page()
1449 ret = pool->ops->evict(pool, last_handle); in z3fold_reclaim_page()
1457 atomic64_dec(&pool->pages_nr); in z3fold_reclaim_page()
1460 spin_lock(&pool->lock); in z3fold_reclaim_page()
1461 list_add(&page->lru, &pool->lru); in z3fold_reclaim_page()
1462 spin_unlock(&pool->lock); in z3fold_reclaim_page()
1469 kmem_cache_free(pool->c_handle, slots); in z3fold_reclaim_page()
1470 atomic64_dec(&pool->pages_nr); in z3fold_reclaim_page()
1475 * free. Take the global pool lock then to be able in z3fold_reclaim_page()
1478 spin_lock(&pool->lock); in z3fold_reclaim_page()
1479 list_add(&page->lru, &pool->lru); in z3fold_reclaim_page()
1480 spin_unlock(&pool->lock); in z3fold_reclaim_page()
1485 /* We started off locked, so we need to lock the pool back */ in z3fold_reclaim_page()
1486 spin_lock(&pool->lock); in z3fold_reclaim_page()
1488 spin_unlock(&pool->lock); in z3fold_reclaim_page()
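
z3fold_reclaim_page() hands each live handle of the victim page to pool->ops->evict (lines 1439-1449 above). A sketch of what such a handler looks like, following the zbud/z3fold eviction contract: write the object back, free the handle and return 0, or return an error so the page is put back on the LRU. writeback_object() is a hypothetical stand-in for the user's writeback step:

static int demo_evict(struct z3fold_pool *pool, unsigned long handle)
{
	void *obj = z3fold_map(pool, handle);
	int err = writeback_object(obj);	/* hypothetical */

	z3fold_unmap(pool, handle);
	if (err)
		return err;	/* reclaim puts the page back on the LRU */

	z3fold_free(pool, handle);
	return 0;
}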
1494 * @pool: pool in which the allocation resides
1502 static void *z3fold_map(struct z3fold_pool *pool, unsigned long handle) in z3fold_map() argument
1544 * @pool: pool in which the allocation resides
1547 static void z3fold_unmap(struct z3fold_pool *pool, unsigned long handle) in z3fold_unmap() argument
1567 * z3fold_get_pool_size() - gets the z3fold pool size in pages
1568 * @pool: pool whose size is being queried
1570 * Returns: size in pages of the given pool.
1572 static u64 z3fold_get_pool_size(struct z3fold_pool *pool) in z3fold_get_pool_size() argument
1574 return atomic64_read(&pool->pages_nr); in z3fold_get_pool_size()
1580 struct z3fold_pool *pool; in z3fold_page_isolate() local
1599 pool = zhdr_to_pool(zhdr); in z3fold_page_isolate()
1600 spin_lock(&pool->lock); in z3fold_page_isolate()
1605 spin_unlock(&pool->lock); in z3fold_page_isolate()
1620 struct z3fold_pool *pool; in z3fold_page_migrate() local
1629 pool = zhdr_to_pool(zhdr); in z3fold_page_migrate()
1668 spin_lock(&pool->lock); in z3fold_page_migrate()
1669 list_add(&newpage->lru, &pool->lru); in z3fold_page_migrate()
1670 spin_unlock(&pool->lock); in z3fold_page_migrate()
1674 queue_work_on(new_zhdr->cpu, pool->compact_wq, &new_zhdr->work); in z3fold_page_migrate()
1685 struct z3fold_pool *pool; in z3fold_page_putback() local
1688 pool = zhdr_to_pool(zhdr); in z3fold_page_putback()
1695 atomic64_dec(&pool->pages_nr); in z3fold_page_putback()
1698 spin_lock(&pool->lock); in z3fold_page_putback()
1699 list_add(&page->lru, &pool->lru); in z3fold_page_putback()
1700 spin_unlock(&pool->lock); in z3fold_page_putback()
1715 static int z3fold_zpool_evict(struct z3fold_pool *pool, unsigned long handle) in z3fold_zpool_evict() argument
1717 if (pool->zpool && pool->zpool_ops && pool->zpool_ops->evict) in z3fold_zpool_evict()
1718 return pool->zpool_ops->evict(pool->zpool, handle); in z3fold_zpool_evict()
1731 struct z3fold_pool *pool; in z3fold_zpool_create() local
1733 pool = z3fold_create_pool(name, gfp, in z3fold_zpool_create()
1735 if (pool) { in z3fold_zpool_create()
1736 pool->zpool = zpool; in z3fold_zpool_create()
1737 pool->zpool_ops = zpool_ops; in z3fold_zpool_create()
1739 return pool; in z3fold_zpool_create()
1742 static void z3fold_zpool_destroy(void *pool) in z3fold_zpool_destroy() argument
1744 z3fold_destroy_pool(pool); in z3fold_zpool_destroy()
1747 static int z3fold_zpool_malloc(void *pool, size_t size, gfp_t gfp, in z3fold_zpool_malloc() argument
1750 return z3fold_alloc(pool, size, gfp, handle); in z3fold_zpool_malloc()
1752 static void z3fold_zpool_free(void *pool, unsigned long handle) in z3fold_zpool_free() argument
1754 z3fold_free(pool, handle); in z3fold_zpool_free()
1757 static int z3fold_zpool_shrink(void *pool, unsigned int pages, in z3fold_zpool_shrink() argument
1764 ret = z3fold_reclaim_page(pool, 8); in z3fold_zpool_shrink()
1776 static void *z3fold_zpool_map(void *pool, unsigned long handle, in z3fold_zpool_map() argument
1779 return z3fold_map(pool, handle); in z3fold_zpool_map()
1781 static void z3fold_zpool_unmap(void *pool, unsigned long handle) in z3fold_zpool_unmap() argument
1783 z3fold_unmap(pool, handle); in z3fold_zpool_unmap()
1786 static u64 z3fold_zpool_total_size(void *pool) in z3fold_zpool_total_size() argument
1788 return z3fold_get_pool_size(pool) * PAGE_SIZE; in z3fold_zpool_total_size()
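
This table is how users normally reach z3fold: through the generic zpool layer, selecting the driver by its "z3fold" type string (e.g. via zswap's zpool parameter). A hedged caller-side sketch against the zpool API of the same kernel era as this listing:

static int zpool_demo(const struct zpool_ops *ops)
{
	struct zpool *zp;
	unsigned long handle;
	void *p;
	int err;

	zp = zpool_create_pool("z3fold", "demo", GFP_KERNEL, ops);
	if (!zp)
		return -ENOMEM;

	err = zpool_malloc(zp, 100, GFP_KERNEL, &handle);  /* -> z3fold_alloc() */
	if (!err) {
		p = zpool_map_handle(zp, handle, ZPOOL_MM_RW);
		memset(p, 0, 100);
		zpool_unmap_handle(zp, handle);
		zpool_free(zp, handle);
	}
	pr_info("pool size: %llu bytes\n",
		(unsigned long long)zpool_get_total_size(zp));
	zpool_destroy_pool(zp);
	return err;
}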