
Lines Matching +full:pre +full:- +full:verified

1 // SPDX-License-Identifier: GPL-2.0-or-later
3 * zswap.c - zswap driver file
7 * RAM-based memory pool. This can result in a significant I/O reduction on
32 #include <linux/page-flags.h>
48 /* The number of same-value filled pages currently stored in zswap */
128 * Enable/disable handling same-value filled pages (enabled by default).
129 * If disabled, every page is considered non-same-value filled.
135 /* Enable/disable handling non-same-value filled pages (enabled by default) */
160 * The lock ordering is zswap_tree.lock -> zswap_pool.lru_lock.
163 * needs to be verified that it's still valid in the tree.
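The two comment fragments above (lines 160-163) describe zswap's lock ordering and its reference-then-verify idiom: an entry found via the LRU is not trusted until it is looked up again under zswap_tree.lock. The tree-side half of that protocol is the find-and-pin helper used by zswap_load (line 1415); a minimal reconstruction, with the details around the matched lines assumed:

        /* caller must hold tree->lock; pins the entry against free */
        static struct zswap_entry *zswap_entry_find_get(struct rb_root *root,
                                                        pgoff_t offset)
        {
                struct zswap_entry *entry;

                entry = zswap_rb_search(root, offset);
                if (entry)
                        zswap_entry_get(entry);
                return entry;
        }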
184 * rbnode - links the entry into red-black tree for the appropriate swap type
185 * swpentry - associated swap entry, the offset indexes into the red-black tree
186 * refcount - the number of outstanding references to the entry. This is needed
192 * length - the length in bytes of the compressed page data. Needed during
195 * pool - the zswap_pool the entry's data is in
196 * handle - zpool allocation handle that stores the compressed page data
197 * value - value of the same-value filled pages, which all have the same content
198 * objcg - the obj_cgroup that the compressed memory is charged to
199 * lru - handle to the pool's lru used to evict pages.
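Read together, the field comments above (lines 184-199) imply roughly the following layout. This is a reconstruction from those comments, not the verbatim definition; in particular, the union of handle and value is an assumption based on the note that same-value filled entries store a fill value instead of a zpool allocation handle:

        struct zswap_entry {
                struct rb_node rbnode;          /* links into the per-type rbtree */
                swp_entry_t swpentry;           /* offset is the rbtree key */
                int refcount;                   /* outstanding references */
                unsigned int length;            /* compressed size; 0 = same-filled */
                struct zswap_pool *pool;
                union {
                        unsigned long handle;   /* zpool handle, when length != 0 */
                        unsigned long value;    /* fill pattern, when length == 0 */
                };
                struct obj_cgroup *objcg;       /* memcg the memory is charged to */
                struct list_head lru;           /* per-pool eviction list */
        };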
217 * - the rbtree
218 * - the refcount field of each entry in the tree
227 /* RCU-protected iteration */
253 pr_debug("%s pool %s/%s\n", msg, (p)->tfm_name, \
254 zpool_get_type((p)->zpools[0]))
284 total += zpool_get_total_size(pool->zpools[i]); in zswap_update_total_size()
302 entry->refcount = 1; in zswap_entry_cache_alloc()
303 RB_CLEAR_NODE(&entry->rbnode); in zswap_entry_cache_alloc()
317 struct rb_node *node = root->rb_node; in zswap_rb_search()
323 entry_offset = swp_offset(entry->swpentry); in zswap_rb_search()
325 node = node->rb_left; in zswap_rb_search()
327 node = node->rb_right; in zswap_rb_search()
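The matched lines only show the pointer walk of the lookup; filling in the comparisons (an assumption, but one consistent with the left/right steps shown) gives approximately:

        static struct zswap_entry *zswap_rb_search(struct rb_root *root,
                                                   pgoff_t offset)
        {
                struct rb_node *node = root->rb_node;
                struct zswap_entry *entry;
                pgoff_t entry_offset;

                while (node) {
                        entry = rb_entry(node, struct zswap_entry, rbnode);
                        entry_offset = swp_offset(entry->swpentry);
                        if (entry_offset > offset)
                                node = node->rb_left;
                        else if (entry_offset < offset)
                                node = node->rb_right;
                        else
                                return entry;
                }
                return NULL;
        }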
336 * the existing entry is stored in dupentry and the function returns -EEXIST
341 struct rb_node **link = &root->rb_node, *parent = NULL; in zswap_rb_insert()
343 pgoff_t myentry_offset, entry_offset = swp_offset(entry->swpentry); in zswap_rb_insert()
348 myentry_offset = swp_offset(myentry->swpentry); in zswap_rb_insert()
350 link = &(*link)->rb_left; in zswap_rb_insert()
352 link = &(*link)->rb_right; in zswap_rb_insert()
355 return -EEXIST; in zswap_rb_insert()
358 rb_link_node(&entry->rbnode, parent, link); in zswap_rb_insert()
359 rb_insert_color(&entry->rbnode, root); in zswap_rb_insert()
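Combining the fragments with the documented -EEXIST contract (line 336) suggests the insert looks roughly like this; the exact comparison directions are an assumption matching the search above:

        static int zswap_rb_insert(struct rb_root *root, struct zswap_entry *entry,
                                   struct zswap_entry **dupentry)
        {
                struct rb_node **link = &root->rb_node, *parent = NULL;
                struct zswap_entry *myentry;
                pgoff_t myentry_offset, entry_offset = swp_offset(entry->swpentry);

                while (*link) {
                        parent = *link;
                        myentry = rb_entry(parent, struct zswap_entry, rbnode);
                        myentry_offset = swp_offset(myentry->swpentry);
                        if (myentry_offset > entry_offset)
                                link = &(*link)->rb_left;
                        else if (myentry_offset < entry_offset)
                                link = &(*link)->rb_right;
                        else {
                                /* report the duplicate; caller invalidates it */
                                *dupentry = myentry;
                                return -EEXIST;
                        }
                }
                rb_link_node(&entry->rbnode, parent, link);
                rb_insert_color(&entry->rbnode, root);
                return 0;
        }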
365 if (!RB_EMPTY_NODE(&entry->rbnode)) { in zswap_rb_erase()
366 rb_erase(&entry->rbnode, root); in zswap_rb_erase()
367 RB_CLEAR_NODE(&entry->rbnode); in zswap_rb_erase()
380 return entry->pool->zpools[i]; in zswap_find_zpool()
389 if (entry->objcg) { in zswap_free_entry()
390 obj_cgroup_uncharge_zswap(entry->objcg, entry->length); in zswap_free_entry()
391 obj_cgroup_put(entry->objcg); in zswap_free_entry()
393 if (!entry->length) in zswap_free_entry()
396 spin_lock(&entry->pool->lru_lock); in zswap_free_entry()
397 list_del(&entry->lru); in zswap_free_entry()
398 spin_unlock(&entry->pool->lru_lock); in zswap_free_entry()
399 zpool_free(zswap_find_zpool(entry), entry->handle); in zswap_free_entry()
400 zswap_pool_put(entry->pool); in zswap_free_entry()
410 entry->refcount++; in zswap_entry_get()
419 int refcount = --entry->refcount; in zswap_entry_put()
423 WARN_ON_ONCE(!RB_EMPTY_NODE(&entry->rbnode)); in zswap_entry_put()
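Lines 410-423 are the two halves of the refcount protocol; a reconstruction, assuming (as the tree-lock comments above suggest) that both are called under tree->lock and that the final put frees the entry:

        /* caller must hold the tree lock */
        static void zswap_entry_get(struct zswap_entry *entry)
        {
                entry->refcount++;
        }

        /* caller must hold the tree lock; frees the entry when the count hits 0 */
        static void zswap_entry_put(struct zswap_tree *tree,
                                    struct zswap_entry *entry)
        {
                int refcount = --entry->refcount;

                WARN_ON_ONCE(refcount < 0);
                if (refcount == 0) {
                        /* must already be erased from the rbtree */
                        WARN_ON_ONCE(!RB_EMPTY_NODE(&entry->rbnode));
                        zswap_free_entry(entry);
                }
        }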
442 * per-cpu code
448 * are sharing dstmem. So we need this mutex to be per-cpu.
459 return -ENOMEM; in zswap_dstmem_prepare()
464 return -ENOMEM; in zswap_dstmem_prepare()
492 struct crypto_acomp_ctx *acomp_ctx = per_cpu_ptr(pool->acomp_ctx, cpu); in zswap_cpu_comp_prepare()
496 acomp = crypto_alloc_acomp_node(pool->tfm_name, 0, 0, cpu_to_node(cpu)); in zswap_cpu_comp_prepare()
499 pool->tfm_name, PTR_ERR(acomp)); in zswap_cpu_comp_prepare()
502 acomp_ctx->acomp = acomp; in zswap_cpu_comp_prepare()
504 req = acomp_request_alloc(acomp_ctx->acomp); in zswap_cpu_comp_prepare()
507 pool->tfm_name); in zswap_cpu_comp_prepare()
508 crypto_free_acomp(acomp_ctx->acomp); in zswap_cpu_comp_prepare()
509 return -ENOMEM; in zswap_cpu_comp_prepare()
511 acomp_ctx->req = req; in zswap_cpu_comp_prepare()
513 crypto_init_wait(&acomp_ctx->wait); in zswap_cpu_comp_prepare()
520 crypto_req_done, &acomp_ctx->wait); in zswap_cpu_comp_prepare()
522 acomp_ctx->mutex = per_cpu(zswap_mutex, cpu); in zswap_cpu_comp_prepare()
523 acomp_ctx->dstmem = per_cpu(zswap_dstmem, cpu); in zswap_cpu_comp_prepare()
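Line 520 is the tail of the callback registration; the full call presumably reads as below, wiring the standard crypto completion helper to the per-cpu wait so that crypto_wait_req() can sleep until a possibly asynchronous compressor finishes:

        acomp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
                                   crypto_req_done, &acomp_ctx->wait);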
531 struct crypto_acomp_ctx *acomp_ctx = per_cpu_ptr(pool->acomp_ctx, cpu); in zswap_cpu_comp_dead()
534 if (!IS_ERR_OR_NULL(acomp_ctx->req)) in zswap_cpu_comp_dead()
535 acomp_request_free(acomp_ctx->req); in zswap_cpu_comp_dead()
536 if (!IS_ERR_OR_NULL(acomp_ctx->acomp)) in zswap_cpu_comp_dead()
537 crypto_free_acomp(acomp_ctx->acomp); in zswap_cpu_comp_dead()
598 /* type and compressor must be null-terminated */
606 if (strcmp(pool->tfm_name, compressor)) in zswap_pool_find_get()
609 if (strcmp(zpool_get_type(pool->zpools[0]), type)) in zswap_pool_find_get()
628 if (zswap_rb_erase(&tree->rbroot, entry)) in zswap_invalidate_entry()
640 spin_lock(&pool->lru_lock); in zswap_reclaim_entry()
641 if (list_empty(&pool->lru)) { in zswap_reclaim_entry()
642 spin_unlock(&pool->lru_lock); in zswap_reclaim_entry()
643 return -EINVAL; in zswap_reclaim_entry()
645 entry = list_last_entry(&pool->lru, struct zswap_entry, lru); in zswap_reclaim_entry()
646 list_del_init(&entry->lru); in zswap_reclaim_entry()
650 * until the entry is verified to still be alive in the tree. in zswap_reclaim_entry()
652 swpoffset = swp_offset(entry->swpentry); in zswap_reclaim_entry()
653 tree = zswap_trees[swp_type(entry->swpentry)]; in zswap_reclaim_entry()
654 spin_unlock(&pool->lru_lock); in zswap_reclaim_entry()
657 spin_lock(&tree->lock); in zswap_reclaim_entry()
658 if (entry != zswap_rb_search(&tree->rbroot, swpoffset)) { in zswap_reclaim_entry()
659 ret = -EAGAIN; in zswap_reclaim_entry()
664 spin_unlock(&tree->lock); in zswap_reclaim_entry()
668 spin_lock(&tree->lock); in zswap_reclaim_entry()
671 spin_lock(&pool->lru_lock); in zswap_reclaim_entry()
672 list_move(&entry->lru, &pool->lru); in zswap_reclaim_entry()
673 spin_unlock(&pool->lru_lock); in zswap_reclaim_entry()
679 * swapcache. Drop the entry from zswap - unless invalidate already in zswap_reclaim_entry()
680 * took it out while we had the tree->lock released for IO. in zswap_reclaim_entry()
688 spin_unlock(&tree->lock); in zswap_reclaim_entry()
689 return ret ? -EAGAIN : 0; in zswap_reclaim_entry()
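The fragments from zswap_reclaim_entry() above are the fullest example of the reference-then-verify idiom from lines 160-163. A condensed sketch of the whole flow (error paths flattened, some details assumed):

        static int zswap_reclaim_entry(struct zswap_pool *pool)
        {
                struct zswap_entry *entry;
                struct zswap_tree *tree;
                pgoff_t swpoffset;
                int ret;

                /* pop the coldest entry off the pool LRU */
                spin_lock(&pool->lru_lock);
                if (list_empty(&pool->lru)) {
                        spin_unlock(&pool->lru_lock);
                        return -EINVAL;
                }
                entry = list_last_entry(&pool->lru, struct zswap_entry, lru);
                list_del_init(&entry->lru);
                /* remember the key; the entry itself is not yet pinned */
                swpoffset = swp_offset(entry->swpentry);
                tree = zswap_trees[swp_type(entry->swpentry)];
                spin_unlock(&pool->lru_lock);

                /* verify the entry is still alive in the tree before using it */
                spin_lock(&tree->lock);
                if (entry != zswap_rb_search(&tree->rbroot, swpoffset)) {
                        spin_unlock(&tree->lock);
                        return -EAGAIN;
                }
                zswap_entry_get(entry);         /* pin across the IO */
                spin_unlock(&tree->lock);

                ret = zswap_writeback_entry(entry, tree);

                spin_lock(&tree->lock);
                if (ret) {
                        /* writeback failed; put the entry back on the LRU */
                        spin_lock(&pool->lru_lock);
                        list_move(&entry->lru, &pool->lru);
                        spin_unlock(&pool->lru_lock);
                } else {
                        /* page now belongs to the swapcache; drop it from zswap */
                        zswap_invalidate_entry(tree, entry);
                }
                zswap_entry_put(tree, entry);
                spin_unlock(&tree->lock);

                return ret ? -EAGAIN : 0;
        }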
702 if (ret != -EAGAIN) in shrink_worker()
740 pool->zpools[i] = zpool_create_pool(type, name, gfp); in zswap_pool_create()
741 if (!pool->zpools[i]) { in zswap_pool_create()
746 pr_debug("using %s zpool\n", zpool_get_type(pool->zpools[0])); in zswap_pool_create()
748 strscpy(pool->tfm_name, compressor, sizeof(pool->tfm_name)); in zswap_pool_create()
750 pool->acomp_ctx = alloc_percpu(*pool->acomp_ctx); in zswap_pool_create()
751 if (!pool->acomp_ctx) { in zswap_pool_create()
757 &pool->node); in zswap_pool_create()
760 pr_debug("using %s compressor\n", pool->tfm_name); in zswap_pool_create()
765 kref_init(&pool->kref); in zswap_pool_create()
766 INIT_LIST_HEAD(&pool->list); in zswap_pool_create()
767 INIT_LIST_HEAD(&pool->lru); in zswap_pool_create()
768 spin_lock_init(&pool->lru_lock); in zswap_pool_create()
769 INIT_WORK(&pool->shrink_work, shrink_worker); in zswap_pool_create()
776 if (pool->acomp_ctx) in zswap_pool_create()
777 free_percpu(pool->acomp_ctx); in zswap_pool_create()
778 while (i--) in zswap_pool_create()
779 zpool_destroy_pool(pool->zpools[i]); in zswap_pool_create()
832 cpuhp_state_remove_instance(CPUHP_MM_ZSWP_POOL_PREPARE, &pool->node); in zswap_pool_destroy()
833 free_percpu(pool->acomp_ctx); in zswap_pool_destroy()
835 zpool_destroy_pool(pool->zpools[i]); in zswap_pool_destroy()
844 return kref_get_unless_zero(&pool->kref); in zswap_pool_get()
855 WARN_ON(kref_get_unless_zero(&pool->kref)); in __zswap_pool_release()
871 list_del_rcu(&pool->list); in __zswap_pool_empty()
873 INIT_WORK(&pool->release_work, __zswap_pool_release); in __zswap_pool_empty()
874 schedule_work(&pool->release_work); in __zswap_pool_empty()
881 kref_put(&pool->kref, __zswap_pool_empty); in zswap_pool_put()
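Lines 855-881 trace the pool teardown path: the last zswap_pool_put() drops the kref, and the release callback unlinks the pool under RCU, deferring actual destruction to a workqueue so RCU readers can drain first. A condensed sketch, with the locking details assumed:

        static void __zswap_pool_empty(struct kref *kref)
        {
                struct zswap_pool *pool = container_of(kref, struct zswap_pool, kref);

                spin_lock(&zswap_pools_lock);
                list_del_rcu(&pool->list);
                /* destruction must wait out RCU readers; punt to a worker */
                INIT_WORK(&pool->release_work, __zswap_pool_release);
                schedule_work(&pool->release_work);
                spin_unlock(&zswap_pools_lock);
        }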
891 if (!strcmp(s, *(char **)kp->arg) && zswap_has_pool) in zswap_pool_changed()
896 /* val must be a null-terminated string */
908 /* if this is load-time (pre-init) param setting, in __zswap_param_set()
918 ret = -ENODEV; in __zswap_param_set()
929 return -ENOENT; in __zswap_param_set()
935 return -ENOENT; in __zswap_param_set()
940 return -EINVAL; in __zswap_param_set()
949 list_del_rcu(&pool->list); in __zswap_param_set()
960 ret = -EINVAL; in __zswap_param_set()
966 list_add_rcu(&pool->list, &zswap_pools); in __zswap_param_set()
969 /* add the possibly pre-existing pool to the end of the pools in __zswap_param_set()
973 list_add_tail_rcu(&pool->list, &zswap_pools); in __zswap_param_set()
983 * when the other param is changed. We already verified this in __zswap_param_set()
1014 int ret = -ENODEV; in zswap_enabled_param_set()
1016 /* if this is load-time (pre-init) param setting, only set param. */ in zswap_enabled_param_set()
1058 swp_entry_t swpentry = entry->swpentry; in zswap_writeback_entry()
1074 return -ENOMEM; in zswap_writeback_entry()
1081 ret = -ENOMEM; in zswap_writeback_entry()
1088 ret = -EEXIST; in zswap_writeback_entry()
1099 spin_lock(&tree->lock); in zswap_writeback_entry()
1100 if (zswap_rb_search(&tree->rbroot, swp_offset(entry->swpentry)) != entry) { in zswap_writeback_entry()
1101 spin_unlock(&tree->lock); in zswap_writeback_entry()
1105 ret = -ENOMEM; in zswap_writeback_entry()
1108 spin_unlock(&tree->lock); in zswap_writeback_entry()
1111 acomp_ctx = raw_cpu_ptr(entry->pool->acomp_ctx); in zswap_writeback_entry()
1114 src = zpool_map_handle(pool, entry->handle, ZPOOL_MM_RO); in zswap_writeback_entry()
1116 memcpy(tmp, src, entry->length); in zswap_writeback_entry()
1118 zpool_unmap_handle(pool, entry->handle); in zswap_writeback_entry()
1121 mutex_lock(acomp_ctx->mutex); in zswap_writeback_entry()
1122 sg_init_one(&input, src, entry->length); in zswap_writeback_entry()
1125 acomp_request_set_params(acomp_ctx->req, &input, &output, entry->length, dlen); in zswap_writeback_entry()
1126 ret = crypto_wait_req(crypto_acomp_decompress(acomp_ctx->req), &acomp_ctx->wait); in zswap_writeback_entry()
1127 dlen = acomp_ctx->req->dlen; in zswap_writeback_entry()
1128 mutex_unlock(acomp_ctx->mutex); in zswap_writeback_entry()
1133 zpool_unmap_handle(pool, entry->handle); in zswap_writeback_entry()
1167 unsigned int pos, last_pos = PAGE_SIZE / sizeof(*page) - 1; in zswap_is_page_same_filled()
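Line 1167 computes the index of the last machine word in the page; the rest of the same-value check presumably compares every word against the first. In this reconstruction, testing the last word before the loop is an assumption that matches the declared last_pos (a cheap early reject, since a non-uniform page rarely has matching ends):

        static bool zswap_is_page_same_filled(void *ptr, unsigned long *value)
        {
                unsigned long *page = ptr;
                unsigned long val = page[0];
                unsigned int pos, last_pos = PAGE_SIZE / sizeof(*page) - 1;

                if (val != page[last_pos])
                        return false;

                for (pos = 1; pos < last_pos; pos++) {
                        if (val != page[pos])
                                return false;
                }

                *value = val;   /* remember the pattern for zswap_fill_page() */
                return true;
        }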
1195 swp_entry_t swp = folio->swap; in zswap_store()
1198 struct page *page = &folio->page; in zswap_store()
1228 spin_lock(&tree->lock); in zswap_store()
1229 dupentry = zswap_rb_search(&tree->rbroot, offset); in zswap_store()
1234 spin_unlock(&tree->lock); in zswap_store()
1241 * cgroup-aware entry LRU, we will push out entries system-wide based on in zswap_store()
1273 entry->swpentry = swp_entry(type, offset); in zswap_store()
1274 entry->length = 0; in zswap_store()
1275 entry->value = value; in zswap_store()
1286 entry->pool = zswap_pool_current_get(); in zswap_store()
1287 if (!entry->pool) in zswap_store()
1291 acomp_ctx = raw_cpu_ptr(entry->pool->acomp_ctx); in zswap_store()
1293 mutex_lock(acomp_ctx->mutex); in zswap_store()
1295 dst = acomp_ctx->dstmem; in zswap_store()
1301 acomp_request_set_params(acomp_ctx->req, &input, &output, PAGE_SIZE, dlen); in zswap_store()
1314 ret = crypto_wait_req(crypto_acomp_compress(acomp_ctx->req), &acomp_ctx->wait); in zswap_store()
1315 dlen = acomp_ctx->req->dlen; in zswap_store()
1326 if (ret == -ENOSPC) { in zswap_store()
1337 mutex_unlock(acomp_ctx->mutex); in zswap_store()
1340 entry->swpentry = swp_entry(type, offset); in zswap_store()
1341 entry->handle = handle; in zswap_store()
1342 entry->length = dlen; in zswap_store()
1345 entry->objcg = objcg; in zswap_store()
1347 obj_cgroup_charge_zswap(objcg, entry->length); in zswap_store()
1353 spin_lock(&tree->lock); in zswap_store()
1360 while (zswap_rb_insert(&tree->rbroot, entry, &dupentry) == -EEXIST) { in zswap_store()
1365 if (entry->length) { in zswap_store()
1366 spin_lock(&entry->pool->lru_lock); in zswap_store()
1367 list_add(&entry->lru, &entry->pool->lru); in zswap_store()
1368 spin_unlock(&entry->pool->lru_lock); in zswap_store()
1370 spin_unlock(&tree->lock); in zswap_store()
1380 mutex_unlock(acomp_ctx->mutex); in zswap_store()
1381 zswap_pool_put(entry->pool); in zswap_store()
1391 if (pool && !queue_work(shrink_wq, &pool->shrink_work)) in zswap_store()
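Lines 1353-1370 are the publish step of zswap_store(): the entry goes into the tree under tree->lock, any raced duplicate at the same offset is invalidated first, and compressed entries are added to the pool LRU. A reconstruction of that section; the statistics counter is an assumption:

        spin_lock(&tree->lock);
        while (zswap_rb_insert(&tree->rbroot, entry, &dupentry) == -EEXIST) {
                WARN_ON(1);                     /* a store to this offset raced us */
                zswap_duplicate_entry++;
                zswap_invalidate_entry(tree, dupentry);
        }
        if (entry->length) {
                /* same-filled entries (length == 0) never go on the LRU */
                spin_lock(&entry->pool->lru_lock);
                list_add(&entry->lru, &entry->pool->lru);
                spin_unlock(&entry->pool->lru_lock);
        }
        spin_unlock(&tree->lock);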
1398 swp_entry_t swp = folio->swap; in zswap_load()
1401 struct page *page = &folio->page; in zswap_load()
1414 spin_lock(&tree->lock); in zswap_load()
1415 entry = zswap_entry_find_get(&tree->rbroot, offset); in zswap_load()
1417 spin_unlock(&tree->lock); in zswap_load()
1420 spin_unlock(&tree->lock); in zswap_load()
1422 if (!entry->length) { in zswap_load()
1424 zswap_fill_page(dst, entry->value); in zswap_load()
1432 tmp = kmalloc(entry->length, GFP_KERNEL); in zswap_load()
1441 src = zpool_map_handle(zpool, entry->handle, ZPOOL_MM_RO); in zswap_load()
1444 memcpy(tmp, src, entry->length); in zswap_load()
1446 zpool_unmap_handle(zpool, entry->handle); in zswap_load()
1449 acomp_ctx = raw_cpu_ptr(entry->pool->acomp_ctx); in zswap_load()
1450 mutex_lock(acomp_ctx->mutex); in zswap_load()
1451 sg_init_one(&input, src, entry->length); in zswap_load()
1454 acomp_request_set_params(acomp_ctx->req, &input, &output, entry->length, dlen); in zswap_load()
1455 if (crypto_wait_req(crypto_acomp_decompress(acomp_ctx->req), &acomp_ctx->wait)) in zswap_load()
1457 mutex_unlock(acomp_ctx->mutex); in zswap_load()
1460 zpool_unmap_handle(zpool, entry->handle); in zswap_load()
1467 if (entry->objcg) in zswap_load()
1468 count_objcg_event(entry->objcg, ZSWPIN); in zswap_load()
1470 spin_lock(&tree->lock); in zswap_load()
1474 } else if (entry->length) { in zswap_load()
1475 spin_lock(&entry->pool->lru_lock); in zswap_load()
1476 list_move(&entry->lru, &entry->pool->lru); in zswap_load()
1477 spin_unlock(&entry->pool->lru_lock); in zswap_load()
1480 spin_unlock(&tree->lock); in zswap_load()
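For the same-value filled case (line 1424), zswap_load() skips decompression entirely and regenerates the page from the stored word. A reconstruction of the fill helper, assuming the kernel's memset_l() word-fill:

        static void zswap_fill_page(void *ptr, unsigned long value)
        {
                unsigned long *page = ptr;

                /* replicate the saved word across the whole page */
                memset_l(page, value, PAGE_SIZE / sizeof(unsigned long));
        }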
1491 spin_lock(&tree->lock); in zswap_invalidate()
1492 entry = zswap_rb_search(&tree->rbroot, offset); in zswap_invalidate()
1495 spin_unlock(&tree->lock); in zswap_invalidate()
1499 spin_unlock(&tree->lock); in zswap_invalidate()
1512 tree->rbroot = RB_ROOT; in zswap_swapon()
1513 spin_lock_init(&tree->lock); in zswap_swapon()
1526 spin_lock(&tree->lock); in zswap_swapoff()
1527 rbtree_postorder_for_each_entry_safe(entry, n, &tree->rbroot, rbnode) in zswap_swapoff()
1529 tree->rbroot = RB_ROOT; in zswap_swapoff()
1530 spin_unlock(&tree->lock); in zswap_swapoff()
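Lines 1526-1530 show the swapoff teardown walk: a postorder traversal frees every entry (safe because the tree is not rebalanced mid-walk) before the root is reset. A sketch of the full function, with the final freeing of the tree itself assumed:

        void zswap_swapoff(int type)
        {
                struct zswap_tree *tree = zswap_trees[type];
                struct zswap_entry *entry, *n;

                if (!tree)
                        return;

                /* walk the tree and free everything */
                spin_lock(&tree->lock);
                rbtree_postorder_for_each_entry_safe(entry, n, &tree->rbroot, rbnode)
                        zswap_free_entry(entry);
                tree->rbroot = RB_ROOT;
                spin_unlock(&tree->lock);

                kfree(tree);
                zswap_trees[type] = NULL;
        }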
1546 return -ENODEV; in zswap_debugfs_init()
1610 pr_info("loaded using pool %s/%s\n", pool->tfm_name, in zswap_setup()
1611 zpool_get_type(pool->zpools[0])); in zswap_setup()
1612 list_add(&pool->list, &zswap_pools); in zswap_setup()
1619 shrink_wq = create_workqueue("zswap-shrink"); in zswap_setup()
1636 /* if built-in, we aren't unloaded on failure; don't allow use */ in zswap_setup()
1639 return -ENOMEM; in zswap_setup()