Lines matching refs: c — uses of struct dm_bufio_client *c in the Linux kernel's drivers/md/dm-bufio.c

357 	struct dm_bufio_client *c;  member
1025 static void dm_bufio_lock(struct dm_bufio_client *c) in dm_bufio_lock() argument
1027 if (static_branch_unlikely(&no_sleep_enabled) && c->no_sleep) in dm_bufio_lock()
1028 spin_lock_bh(&c->spinlock); in dm_bufio_lock()
1030 mutex_lock_nested(&c->lock, dm_bufio_in_request()); in dm_bufio_lock()
1033 static void dm_bufio_unlock(struct dm_bufio_client *c) in dm_bufio_unlock() argument
1035 if (static_branch_unlikely(&no_sleep_enabled) && c->no_sleep) in dm_bufio_unlock()
1036 spin_unlock_bh(&c->spinlock); in dm_bufio_unlock()
1038 mutex_unlock(&c->lock); in dm_bufio_unlock()
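
The two fragments above come from dm-bufio's dual locking scheme: a client created with DM_BUFIO_CLIENT_NO_SLEEP takes a BH-safe spinlock (it may be called from contexts that cannot sleep), while ordinary clients take a mutex. A minimal reconstruction of both helpers, with the else branches inferred from the upstream source:

static void dm_bufio_lock(struct dm_bufio_client *c)
{
	/* no-sleep clients must never block, so they use a spinlock */
	if (static_branch_unlikely(&no_sleep_enabled) && c->no_sleep)
		spin_lock_bh(&c->spinlock);
	else
		mutex_lock_nested(&c->lock, dm_bufio_in_request());
}

static void dm_bufio_unlock(struct dm_bufio_client *c)
{
	if (static_branch_unlikely(&no_sleep_enabled) && c->no_sleep)
		spin_unlock_bh(&c->spinlock);
	else
		mutex_unlock(&c->lock);
}
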
1116 diff = (long)b->c->block_size; in adjust_total_allocated()
1180 static void *alloc_buffer_data(struct dm_bufio_client *c, gfp_t gfp_mask, in alloc_buffer_data() argument
1183 if (unlikely(c->slab_cache != NULL)) { in alloc_buffer_data()
1185 return kmem_cache_alloc(c->slab_cache, gfp_mask); in alloc_buffer_data()
1188 if (c->block_size <= KMALLOC_MAX_SIZE && in alloc_buffer_data()
1192 c->sectors_per_block_bits - (PAGE_SHIFT - SECTOR_SHIFT)); in alloc_buffer_data()
1197 return __vmalloc(c->block_size, gfp_mask); in alloc_buffer_data()
1203 static void free_buffer_data(struct dm_bufio_client *c, in free_buffer_data() argument
1208 kmem_cache_free(c->slab_cache, data); in free_buffer_data()
1213 c->sectors_per_block_bits - (PAGE_SHIFT - SECTOR_SHIFT)); in free_buffer_data()
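
alloc_buffer_data() picks one of three allocators depending on the block size and allocation pressure, and records the choice in *data_mode so free_buffer_data() can undo it symmetrically. A sketch reconstructed around the matched lines; the DATA_MODE_* constants, the __GFP_NORETRY condition, and the error branch follow the upstream dm-bufio source and may differ in detail:

static void *alloc_buffer_data(struct dm_bufio_client *c, gfp_t gfp_mask,
			       unsigned char *data_mode)
{
	/* small or oddly-sized blocks come from a dedicated slab cache */
	if (unlikely(c->slab_cache != NULL)) {
		*data_mode = DATA_MODE_SLAB;
		return kmem_cache_alloc(c->slab_cache, gfp_mask);
	}

	/* power-of-two blocks under opportunistic allocation: page allocator */
	if (c->block_size <= KMALLOC_MAX_SIZE &&
	    gfp_mask & __GFP_NORETRY) {
		*data_mode = DATA_MODE_GET_FREE_PAGES;
		return (void *)__get_free_pages(gfp_mask,
			c->sectors_per_block_bits - (PAGE_SHIFT - SECTOR_SHIFT));
	}

	/* last resort: virtually contiguous memory */
	*data_mode = DATA_MODE_VMALLOC;
	return __vmalloc(c->block_size, gfp_mask);
}

static void free_buffer_data(struct dm_bufio_client *c,
			     void *data, unsigned char data_mode)
{
	switch (data_mode) {
	case DATA_MODE_SLAB:
		kmem_cache_free(c->slab_cache, data);
		break;
	case DATA_MODE_GET_FREE_PAGES:
		free_pages((unsigned long)data,
			c->sectors_per_block_bits - (PAGE_SHIFT - SECTOR_SHIFT));
		break;
	case DATA_MODE_VMALLOC:
		vfree(data);
		break;
	/* upstream also has a default: branch that BUG()s; elided here */
	}
}
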
1230 static struct dm_buffer *alloc_buffer(struct dm_bufio_client *c, gfp_t gfp_mask) in alloc_buffer() argument
1232 struct dm_buffer *b = kmem_cache_alloc(c->slab_buffer, gfp_mask); in alloc_buffer()
1237 b->c = c; in alloc_buffer()
1239 b->data = alloc_buffer_data(c, gfp_mask, &b->data_mode); in alloc_buffer()
1241 kmem_cache_free(c->slab_buffer, b); in alloc_buffer()
1257 struct dm_bufio_client *c = b->c; in free_buffer() local
1260 free_buffer_data(c, b->data, b->data_mode); in free_buffer()
1261 kmem_cache_free(c->slab_buffer, b); in free_buffer()
1304 .client = b->c->dm_io, in use_dmio()
1307 .bdev = b->c->bdev, in use_dmio()
1348 bio_init(bio, b->c->bdev, bio->bi_inline_vecs, 1, op); in use_bio()
1362 static inline sector_t block_to_sector(struct dm_bufio_client *c, sector_t block) in block_to_sector() argument
1366 if (likely(c->sectors_per_block_bits >= 0)) in block_to_sector()
1367 sector = block << c->sectors_per_block_bits; in block_to_sector()
1369 sector = block * (c->block_size >> SECTOR_SHIFT); in block_to_sector()
1370 sector += c->start; in block_to_sector()
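
block_to_sector() converts a client block number into an absolute device sector: a cheap shift when the block size is a power of two (sectors_per_block_bits >= 0), a multiply otherwise, plus the per-client start offset. Reconstructed from the matched lines:

static inline sector_t block_to_sector(struct dm_bufio_client *c, sector_t block)
{
	sector_t sector;

	if (likely(c->sectors_per_block_bits >= 0))
		/* power-of-two block size: shift is enough */
		sector = block << c->sectors_per_block_bits;
	else
		/* arbitrary block size: fall back to a multiply */
		sector = block * (c->block_size >> SECTOR_SHIFT);

	sector += c->start;	/* per-client sector offset */
	return sector;
}
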
1384 sector = block_to_sector(b->c, b->block); in submit_io()
1387 n_sectors = b->c->block_size >> SECTOR_SHIFT; in submit_io()
1390 if (b->c->write_callback) in submit_io()
1391 b->c->write_callback(b); in submit_io()
1397 if (unlikely(end > b->c->block_size)) in submit_io()
1398 end = b->c->block_size; in submit_io()
1426 struct dm_bufio_client *c = b->c; in write_endio() local
1428 (void)cmpxchg(&c->async_write_error, 0, in write_endio()
1503 struct dm_bufio_client *c = context; in is_clean() local
1513 if (static_branch_unlikely(&no_sleep_enabled) && c->no_sleep && in is_clean()
1535 static struct dm_buffer *__get_unclaimed_buffer(struct dm_bufio_client *c) in __get_unclaimed_buffer() argument
1539 b = cache_evict(&c->cache, LIST_CLEAN, is_clean, c); in __get_unclaimed_buffer()
1546 if (static_branch_unlikely(&no_sleep_enabled) && c->no_sleep) in __get_unclaimed_buffer()
1549 b = cache_evict(&c->cache, LIST_DIRTY, is_dirty, NULL); in __get_unclaimed_buffer()
1565 static void __wait_for_free_buffer(struct dm_bufio_client *c) in __wait_for_free_buffer() argument
1569 add_wait_queue(&c->free_buffer_wait, &wait); in __wait_for_free_buffer()
1571 dm_bufio_unlock(c); in __wait_for_free_buffer()
1580 remove_wait_queue(&c->free_buffer_wait, &wait); in __wait_for_free_buffer()
1582 dm_bufio_lock(c); in __wait_for_free_buffer()
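
__wait_for_free_buffer() is the classic open-coded sleep: register on the wait queue, set the task state, drop the client lock so another thread can actually free a buffer, schedule, then re-acquire. A reconstruction; the set_current_state()/io_schedule() lines between the matched fragments are inferred from the upstream source:

static void __wait_for_free_buffer(struct dm_bufio_client *c)
{
	DECLARE_WAITQUEUE(wait, current);

	add_wait_queue(&c->free_buffer_wait, &wait);
	set_current_state(TASK_UNINTERRUPTIBLE);
	/* drop the lock so a concurrent release can free a buffer */
	dm_bufio_unlock(c);

	io_schedule();

	remove_wait_queue(&c->free_buffer_wait, &wait);
	dm_bufio_lock(c);
}
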
1598 static struct dm_buffer *__alloc_buffer_wait_no_callback(struct dm_bufio_client *c, enum new_flag nf) in __alloc_buffer_wait_no_callback() argument
1618 b = alloc_buffer(c, GFP_NOWAIT | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN); in __alloc_buffer_wait_no_callback()
1627 dm_bufio_unlock(c); in __alloc_buffer_wait_no_callback()
1628 b = alloc_buffer(c, GFP_NOIO | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN); in __alloc_buffer_wait_no_callback()
1629 dm_bufio_lock(c); in __alloc_buffer_wait_no_callback()
1635 if (!list_empty(&c->reserved_buffers)) { in __alloc_buffer_wait_no_callback()
1636 b = list_to_buffer(c->reserved_buffers.next); in __alloc_buffer_wait_no_callback()
1638 c->need_reserved_buffers++; in __alloc_buffer_wait_no_callback()
1643 b = __get_unclaimed_buffer(c); in __alloc_buffer_wait_no_callback()
1647 __wait_for_free_buffer(c); in __alloc_buffer_wait_no_callback()
1651 static struct dm_buffer *__alloc_buffer_wait(struct dm_bufio_client *c, enum new_flag nf) in __alloc_buffer_wait() argument
1653 struct dm_buffer *b = __alloc_buffer_wait_no_callback(c, nf); in __alloc_buffer_wait()
1658 if (c->alloc_callback) in __alloc_buffer_wait()
1659 c->alloc_callback(b); in __alloc_buffer_wait()
1669 struct dm_bufio_client *c = b->c; in __free_buffer_wake() local
1672 if (!c->need_reserved_buffers) in __free_buffer_wake()
1675 list_add(&b->lru.list, &c->reserved_buffers); in __free_buffer_wake()
1676 c->need_reserved_buffers--; in __free_buffer_wake()
1683 if (unlikely(waitqueue_active(&c->free_buffer_wait))) in __free_buffer_wake()
1684 wake_up(&c->free_buffer_wait); in __free_buffer_wake()
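
__free_buffer_wake() is what keeps the reserved-buffer pool topped up: while the client still owes itself reserved buffers (need_reserved_buffers != 0), a freed buffer is parked on reserved_buffers instead of being returned to the allocator, which guarantees forward progress under memory pressure; either way, waiters in __wait_for_free_buffer() are woken. A sketch, with the branch structure inferred from upstream:

static void __free_buffer_wake(struct dm_buffer *b)
{
	struct dm_bufio_client *c = b->c;

	if (!c->need_reserved_buffers)
		free_buffer(b);
	else {
		/* refill the emergency pool before giving memory back */
		list_add(&b->lru.list, &c->reserved_buffers);
		c->need_reserved_buffers--;
	}

	/* the bufio lock is held, so the queue cannot gain new waiters here */
	if (unlikely(waitqueue_active(&c->free_buffer_wait)))
		wake_up(&c->free_buffer_wait);
}
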
1698 static void __move_clean_buffers(struct dm_bufio_client *c) in __move_clean_buffers() argument
1700 cache_mark_many(&c->cache, LIST_DIRTY, LIST_CLEAN, cleaned, NULL); in __move_clean_buffers()
1719 static void __write_dirty_buffers_async(struct dm_bufio_client *c, int no_wait, in __write_dirty_buffers_async() argument
1724 __move_clean_buffers(c); in __write_dirty_buffers_async()
1725 cache_iterate(&c->cache, LIST_DIRTY, write_one, &wc); in __write_dirty_buffers_async()
1733 static void __check_watermark(struct dm_bufio_client *c, in __check_watermark() argument
1736 if (cache_count(&c->cache, LIST_DIRTY) > in __check_watermark()
1737 cache_count(&c->cache, LIST_CLEAN) * DM_BUFIO_WRITEBACK_RATIO) in __check_watermark()
1738 __write_dirty_buffers_async(c, 1, write_list); in __check_watermark()
1747 static void cache_put_and_wake(struct dm_bufio_client *c, struct dm_buffer *b) in cache_put_and_wake() argument
1753 if (cache_put(&c->cache, b) && in cache_put_and_wake()
1754 unlikely(waitqueue_active(&c->free_buffer_wait))) in cache_put_and_wake()
1755 wake_up(&c->free_buffer_wait); in cache_put_and_wake()
1762 static struct dm_buffer *__bufio_new(struct dm_bufio_client *c, sector_t block, in __bufio_new() argument
1774 new_b = __alloc_buffer_wait(c, nf); in __bufio_new()
1782 b = cache_get(&c->cache, block); in __bufio_new()
1788 __check_watermark(c, write_list); in __bufio_new()
1810 cache_insert(&c->cache, b); in __bufio_new()
1816 cache_put_and_wake(c, b); in __bufio_new()
1828 cache_put_and_wake(c, b); in __bufio_new()
1858 static void *new_read(struct dm_bufio_client *c, sector_t block, in new_read() argument
1873 b = cache_get(&c->cache, block); in new_read()
1876 cache_put_and_wake(c, b); in new_read()
1888 cache_put_and_wake(c, b); in new_read()
1897 dm_bufio_lock(c); in new_read()
1898 b = __bufio_new(c, block, nf, &need_submit, &write_list); in new_read()
1899 dm_bufio_unlock(c); in new_read()
1931 void *dm_bufio_get(struct dm_bufio_client *c, sector_t block, in dm_bufio_get() argument
1934 return new_read(c, block, NF_GET, bp, IOPRIO_DEFAULT); in dm_bufio_get()
1938 static void *__dm_bufio_read(struct dm_bufio_client *c, sector_t block, in __dm_bufio_read() argument
1944 return new_read(c, block, NF_READ, bp, ioprio); in __dm_bufio_read()
1947 void *dm_bufio_read(struct dm_bufio_client *c, sector_t block, in dm_bufio_read() argument
1950 return __dm_bufio_read(c, block, bp, IOPRIO_DEFAULT); in dm_bufio_read()
1954 void *dm_bufio_read_with_ioprio(struct dm_bufio_client *c, sector_t block, in dm_bufio_read_with_ioprio() argument
1957 return __dm_bufio_read(c, block, bp, ioprio); in dm_bufio_read_with_ioprio()
1961 void *dm_bufio_new(struct dm_bufio_client *c, sector_t block, in dm_bufio_new() argument
1967 return new_read(c, block, NF_FRESH, bp, IOPRIO_DEFAULT); in dm_bufio_new()
1971 static void __dm_bufio_prefetch(struct dm_bufio_client *c, in __dm_bufio_prefetch() argument
1988 b = cache_get(&c->cache, block); in __dm_bufio_prefetch()
1991 cache_put_and_wake(c, b); in __dm_bufio_prefetch()
1995 dm_bufio_lock(c); in __dm_bufio_prefetch()
1996 b = __bufio_new(c, block, NF_PREFETCH, &need_submit, in __dm_bufio_prefetch()
1999 dm_bufio_unlock(c); in __dm_bufio_prefetch()
2003 dm_bufio_lock(c); in __dm_bufio_prefetch()
2006 dm_bufio_unlock(c); in __dm_bufio_prefetch()
2016 dm_bufio_lock(c); in __dm_bufio_prefetch()
2018 dm_bufio_unlock(c); in __dm_bufio_prefetch()
2025 void dm_bufio_prefetch(struct dm_bufio_client *c, sector_t block, unsigned int n_blocks) in dm_bufio_prefetch() argument
2027 return __dm_bufio_prefetch(c, block, n_blocks, IOPRIO_DEFAULT); in dm_bufio_prefetch()
2031 void dm_bufio_prefetch_with_ioprio(struct dm_bufio_client *c, sector_t block, in dm_bufio_prefetch_with_ioprio() argument
2034 return __dm_bufio_prefetch(c, block, n_blocks, ioprio); in dm_bufio_prefetch_with_ioprio()
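
Taken together, dm_bufio_get/read/new/prefetch plus dm_bufio_release and the dirty-buffer calls form the whole client-facing API. A hypothetical caller sketching the usual read-modify-write flow; the 4096-byte block size, the reserved count, and the update itself are illustrative, not from this file:

#include <linux/dm-bufio.h>

static int update_block(struct block_device *bdev, sector_t block)
{
	struct dm_bufio_client *c;
	struct dm_buffer *bp;
	u8 *data;
	int r;

	/* 4096-byte blocks, 1 reserved buffer, no aux data or callbacks */
	c = dm_bufio_client_create(bdev, 4096, 1, 0, NULL, NULL, 0);
	if (IS_ERR(c))
		return PTR_ERR(c);

	data = dm_bufio_read(c, block, &bp);	/* reads and pins the block */
	if (IS_ERR(data)) {
		r = PTR_ERR(data);
		goto out;
	}

	data[0] ^= 0xff;			/* illustrative modification */
	dm_bufio_mark_buffer_dirty(bp);
	dm_bufio_release(bp);			/* unpin; data stays cached */

	r = dm_bufio_write_dirty_buffers(c);	/* write back and flush */
out:
	dm_bufio_client_destroy(c);
	return r;
}
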
2040 struct dm_bufio_client *c = b->c; in dm_bufio_release() local
2051 dm_bufio_lock(c); in dm_bufio_release()
2054 if (cache_remove(&c->cache, b)) { in dm_bufio_release()
2056 dm_bufio_unlock(c); in dm_bufio_release()
2060 dm_bufio_unlock(c); in dm_bufio_release()
2063 cache_put_and_wake(c, b); in dm_bufio_release()
2070 struct dm_bufio_client *c = b->c; in dm_bufio_mark_partial_buffer_dirty() local
2073 BUG_ON(end > b->c->block_size); in dm_bufio_mark_partial_buffer_dirty()
2075 dm_bufio_lock(c); in dm_bufio_mark_partial_buffer_dirty()
2082 cache_mark(&c->cache, b, LIST_DIRTY); in dm_bufio_mark_partial_buffer_dirty()
2090 dm_bufio_unlock(c); in dm_bufio_mark_partial_buffer_dirty()
2096 dm_bufio_mark_partial_buffer_dirty(b, 0, b->c->block_size); in dm_bufio_mark_buffer_dirty()
2100 void dm_bufio_write_dirty_buffers_async(struct dm_bufio_client *c) in dm_bufio_write_dirty_buffers_async() argument
2107 dm_bufio_lock(c); in dm_bufio_write_dirty_buffers_async()
2108 __write_dirty_buffers_async(c, 0, &write_list); in dm_bufio_write_dirty_buffers_async()
2109 dm_bufio_unlock(c); in dm_bufio_write_dirty_buffers_async()
2128 int dm_bufio_write_dirty_buffers(struct dm_bufio_client *c) in dm_bufio_write_dirty_buffers() argument
2137 dm_bufio_lock(c); in dm_bufio_write_dirty_buffers()
2138 __write_dirty_buffers_async(c, 0, &write_list); in dm_bufio_write_dirty_buffers()
2139 dm_bufio_unlock(c); in dm_bufio_write_dirty_buffers()
2141 dm_bufio_lock(c); in dm_bufio_write_dirty_buffers()
2143 nr_buffers = cache_count(&c->cache, LIST_DIRTY); in dm_bufio_write_dirty_buffers()
2144 lru_iter_begin(&c->cache.lru[LIST_DIRTY], &it); in dm_bufio_write_dirty_buffers()
2145 while ((e = lru_iter_next(&it, is_writing, c))) { in dm_bufio_write_dirty_buffers()
2153 dm_bufio_unlock(c); in dm_bufio_write_dirty_buffers()
2155 dm_bufio_lock(c); in dm_bufio_write_dirty_buffers()
2161 cache_mark(&c->cache, b, LIST_CLEAN); in dm_bufio_write_dirty_buffers()
2163 cache_put_and_wake(c, b); in dm_bufio_write_dirty_buffers()
2169 wake_up(&c->free_buffer_wait); in dm_bufio_write_dirty_buffers()
2170 dm_bufio_unlock(c); in dm_bufio_write_dirty_buffers()
2172 a = xchg(&c->async_write_error, 0); in dm_bufio_write_dirty_buffers()
2173 f = dm_bufio_issue_flush(c); in dm_bufio_write_dirty_buffers()
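
Note the error plumbing at the end of dm_bufio_write_dirty_buffers(): write_endio() records the first failure into async_write_error with cmpxchg (line 1428 above), and the xchg at line 2172 consumes it atomically before a flush is issued, with the write error taking precedence over the flush result. From a caller's perspective a single call both writes back and flushes; a hypothetical commit helper:

/* Hypothetical helper: persist all dirty metadata buffers. The name and
 * the pr_err() logging are illustrative, not part of dm-bufio itself.
 */
static int commit_metadata(struct dm_bufio_client *c)
{
	int r = dm_bufio_write_dirty_buffers(c);	/* writes back + flushes */

	if (r)
		pr_err("dm-bufio commit failed: %d\n", r);
	return r;
}
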
2184 int dm_bufio_issue_flush(struct dm_bufio_client *c) in dm_bufio_issue_flush() argument
2190 .client = c->dm_io, in dm_bufio_issue_flush()
2193 .bdev = c->bdev, in dm_bufio_issue_flush()
2208 int dm_bufio_issue_discard(struct dm_bufio_client *c, sector_t block, sector_t count) in dm_bufio_issue_discard() argument
2214 .client = c->dm_io, in dm_bufio_issue_discard()
2217 .bdev = c->bdev, in dm_bufio_issue_discard()
2218 .sector = block_to_sector(c, block), in dm_bufio_issue_discard()
2219 .count = block_to_sector(c, count), in dm_bufio_issue_discard()
2229 static bool forget_buffer(struct dm_bufio_client *c, sector_t block) in forget_buffer() argument
2233 b = cache_get(&c->cache, block); in forget_buffer()
2236 if (cache_remove(&c->cache, b)) in forget_buffer()
2239 cache_put_and_wake(c, b); in forget_buffer()
2241 cache_put_and_wake(c, b); in forget_buffer()
2254 void dm_bufio_forget(struct dm_bufio_client *c, sector_t block) in dm_bufio_forget() argument
2256 dm_bufio_lock(c); in dm_bufio_forget()
2257 forget_buffer(c, block); in dm_bufio_forget()
2258 dm_bufio_unlock(c); in dm_bufio_forget()
2267 void dm_bufio_forget_buffers(struct dm_bufio_client *c, sector_t block, sector_t n_blocks) in dm_bufio_forget_buffers() argument
2269 dm_bufio_lock(c); in dm_bufio_forget_buffers()
2270 cache_remove_range(&c->cache, block, block + n_blocks, idle, __free_buffer_wake); in dm_bufio_forget_buffers()
2271 dm_bufio_unlock(c); in dm_bufio_forget_buffers()
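
dm_bufio_forget() and dm_bufio_forget_buffers() drop cached blocks without writing them back, which pairs naturally with discard. A hypothetical helper; the name and error handling are illustrative:

static int discard_and_forget(struct dm_bufio_client *c,
			      sector_t block, sector_t n_blocks)
{
	/* discard the range on the device ... */
	int r = dm_bufio_issue_discard(c, block, n_blocks);

	/* ... and invalidate any cached copies so stale data is not reused */
	dm_bufio_forget_buffers(c, block, n_blocks);
	return r;
}
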
2275 void dm_bufio_set_minimum_buffers(struct dm_bufio_client *c, unsigned int n) in dm_bufio_set_minimum_buffers() argument
2277 c->minimum_buffers = n; in dm_bufio_set_minimum_buffers()
2281 unsigned int dm_bufio_get_block_size(struct dm_bufio_client *c) in dm_bufio_get_block_size() argument
2283 return c->block_size; in dm_bufio_get_block_size()
2287 sector_t dm_bufio_get_device_size(struct dm_bufio_client *c) in dm_bufio_get_device_size() argument
2289 sector_t s = bdev_nr_sectors(c->bdev); in dm_bufio_get_device_size()
2291 if (s >= c->start) in dm_bufio_get_device_size()
2292 s -= c->start; in dm_bufio_get_device_size()
2295 if (likely(c->sectors_per_block_bits >= 0)) in dm_bufio_get_device_size()
2296 s >>= c->sectors_per_block_bits; in dm_bufio_get_device_size()
2298 sector_div(s, c->block_size >> SECTOR_SHIFT); in dm_bufio_get_device_size()
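
dm_bufio_get_device_size() reports the device size in client blocks: raw sectors minus the start offset, then divided by the block size, again using the shift fast path when the block size is a power of two. Reconstructed (the s = 0 underflow branch is inferred from upstream):

sector_t dm_bufio_get_device_size(struct dm_bufio_client *c)
{
	sector_t s = bdev_nr_sectors(c->bdev);

	if (s >= c->start)
		s -= c->start;	/* exclude the per-client offset */
	else
		s = 0;

	if (likely(c->sectors_per_block_bits >= 0))
		s >>= c->sectors_per_block_bits;
	else
		sector_div(s, c->block_size >> SECTOR_SHIFT);
	return s;
}
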
2303 struct dm_io_client *dm_bufio_get_dm_io_client(struct dm_bufio_client *c) in dm_bufio_get_dm_io_client() argument
2305 return c->dm_io; in dm_bufio_get_dm_io_client()
2329 return b->c; in dm_bufio_get_client()
2349 static void drop_buffers(struct dm_bufio_client *c) in drop_buffers() argument
2360 dm_bufio_write_dirty_buffers_async(c); in drop_buffers()
2362 dm_bufio_lock(c); in drop_buffers()
2364 while ((b = __get_unclaimed_buffer(c))) in drop_buffers()
2370 cache_iterate(&c->cache, i, warn_leak, &warned); in drop_buffers()
2374 while ((b = __get_unclaimed_buffer(c))) in drop_buffers()
2379 WARN_ON(cache_count(&c->cache, i)); in drop_buffers()
2381 dm_bufio_unlock(c); in drop_buffers()
2384 static unsigned long get_retain_buffers(struct dm_bufio_client *c) in get_retain_buffers() argument
2388 if (likely(c->sectors_per_block_bits >= 0)) in get_retain_buffers()
2389 retain_bytes >>= c->sectors_per_block_bits + SECTOR_SHIFT; in get_retain_buffers()
2391 retain_bytes /= c->block_size; in get_retain_buffers()
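
get_retain_buffers() converts the module-wide dm_bufio_retain_bytes tunable into a per-client buffer count, once more branching on whether the block size is a power of two. Reconstructed; the READ_ONCE() of the tunable follows upstream:

static unsigned long get_retain_buffers(struct dm_bufio_client *c)
{
	unsigned long retain_bytes = READ_ONCE(dm_bufio_retain_bytes);

	if (likely(c->sectors_per_block_bits >= 0))
		retain_bytes >>= c->sectors_per_block_bits + SECTOR_SHIFT;
	else
		retain_bytes /= c->block_size;

	return retain_bytes;
}
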
2396 static void __scan(struct dm_bufio_client *c) in __scan() argument
2401 unsigned long retain_target = get_retain_buffers(c); in __scan()
2402 unsigned long count = cache_total(&c->cache); in __scan()
2407 atomic_long_set(&c->need_shrink, 0); in __scan()
2408 if (!atomic_long_read(&c->need_shrink)) in __scan()
2411 b = cache_evict(&c->cache, l, in __scan()
2412 l == LIST_CLEAN ? is_clean : is_dirty, c); in __scan()
2419 atomic_long_dec(&c->need_shrink); in __scan()
2428 struct dm_bufio_client *c = container_of(w, struct dm_bufio_client, shrink_work); in shrink_work() local
2430 dm_bufio_lock(c); in shrink_work()
2431 __scan(c); in shrink_work()
2432 dm_bufio_unlock(c); in shrink_work()
2437 struct dm_bufio_client *c; in dm_bufio_shrink_scan() local
2446 c = container_of(shrink, struct dm_bufio_client, shrinker); in dm_bufio_shrink_scan()
2447 atomic_long_add(sc->nr_to_scan, &c->need_shrink); in dm_bufio_shrink_scan()
2448 queue_work(dm_bufio_wq, &c->shrink_work); in dm_bufio_shrink_scan()
2455 struct dm_bufio_client *c = container_of(shrink, struct dm_bufio_client, shrinker); in dm_bufio_shrink_count() local
2456 unsigned long count = cache_total(&c->cache); in dm_bufio_shrink_count()
2457 unsigned long retain_target = get_retain_buffers(c); in dm_bufio_shrink_count()
2458 unsigned long queued_for_cleanup = atomic_long_read(&c->need_shrink); in dm_bufio_shrink_count()
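
The shrinker never evicts directly: dm_bufio_shrink_scan() only records the request in need_shrink and queues shrink_work, because dm_bufio_lock() may sleep while shrinkers can run in atomic reclaim context. dm_bufio_shrink_count() then reports what is actually reclaimable: total buffers minus the retain target, minus work already queued. A reconstruction of the count side; the clamping order follows upstream:

static unsigned long dm_bufio_shrink_count(struct shrinker *shrink,
					   struct shrink_control *sc)
{
	struct dm_bufio_client *c =
		container_of(shrink, struct dm_bufio_client, shrinker);
	unsigned long count = cache_total(&c->cache);
	unsigned long retain_target = get_retain_buffers(c);
	unsigned long queued_for_cleanup = atomic_long_read(&c->need_shrink);

	/* never report buffers below the retain watermark */
	if (count < retain_target)
		count = 0;
	else
		count -= retain_target;

	/* don't double-count work that shrink_work will already do */
	if (count < queued_for_cleanup)
		count = 0;
	else
		count -= queued_for_cleanup;

	return count;
}
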
2484 struct dm_bufio_client *c; in dm_bufio_client_create() local
2494 c = kzalloc(sizeof(*c) + (num_locks * sizeof(struct buffer_tree)), GFP_KERNEL); in dm_bufio_client_create()
2495 if (!c) { in dm_bufio_client_create()
2499 cache_init(&c->cache, num_locks, (flags & DM_BUFIO_CLIENT_NO_SLEEP) != 0); in dm_bufio_client_create()
2501 c->bdev = bdev; in dm_bufio_client_create()
2502 c->block_size = block_size; in dm_bufio_client_create()
2504 c->sectors_per_block_bits = __ffs(block_size) - SECTOR_SHIFT; in dm_bufio_client_create()
2506 c->sectors_per_block_bits = -1; in dm_bufio_client_create()
2508 c->alloc_callback = alloc_callback; in dm_bufio_client_create()
2509 c->write_callback = write_callback; in dm_bufio_client_create()
2512 c->no_sleep = true; in dm_bufio_client_create()
2516 mutex_init(&c->lock); in dm_bufio_client_create()
2517 spin_lock_init(&c->spinlock); in dm_bufio_client_create()
2518 INIT_LIST_HEAD(&c->reserved_buffers); in dm_bufio_client_create()
2519 c->need_reserved_buffers = reserved_buffers; in dm_bufio_client_create()
2521 dm_bufio_set_minimum_buffers(c, DM_BUFIO_MIN_BUFFERS); in dm_bufio_client_create()
2523 init_waitqueue_head(&c->free_buffer_wait); in dm_bufio_client_create()
2524 c->async_write_error = 0; in dm_bufio_client_create()
2526 c->dm_io = dm_io_client_create(); in dm_bufio_client_create()
2527 if (IS_ERR(c->dm_io)) { in dm_bufio_client_create()
2528 r = PTR_ERR(c->dm_io); in dm_bufio_client_create()
2537 c->slab_cache = kmem_cache_create(slab_name, block_size, align, in dm_bufio_client_create()
2539 if (!c->slab_cache) { in dm_bufio_client_create()
2548 c->slab_buffer = kmem_cache_create(slab_name, sizeof(struct dm_buffer) + aux_size, in dm_bufio_client_create()
2550 if (!c->slab_buffer) { in dm_bufio_client_create()
2555 while (c->need_reserved_buffers) { in dm_bufio_client_create()
2556 struct dm_buffer *b = alloc_buffer(c, GFP_KERNEL); in dm_bufio_client_create()
2565 INIT_WORK(&c->shrink_work, shrink_work); in dm_bufio_client_create()
2566 atomic_long_set(&c->need_shrink, 0); in dm_bufio_client_create()
2568 c->shrinker.count_objects = dm_bufio_shrink_count; in dm_bufio_client_create()
2569 c->shrinker.scan_objects = dm_bufio_shrink_scan; in dm_bufio_client_create()
2570 c->shrinker.seeks = 1; in dm_bufio_client_create()
2571 c->shrinker.batch = 0; in dm_bufio_client_create()
2572 r = register_shrinker(&c->shrinker, "dm-bufio:(%u:%u)", in dm_bufio_client_create()
2579 list_add(&c->client_list, &dm_bufio_all_clients); in dm_bufio_client_create()
2583 return c; in dm_bufio_client_create()
2586 while (!list_empty(&c->reserved_buffers)) { in dm_bufio_client_create()
2587 struct dm_buffer *b = list_to_buffer(c->reserved_buffers.next); in dm_bufio_client_create()
2592 kmem_cache_destroy(c->slab_cache); in dm_bufio_client_create()
2593 kmem_cache_destroy(c->slab_buffer); in dm_bufio_client_create()
2594 dm_io_client_destroy(c->dm_io); in dm_bufio_client_create()
2596 mutex_destroy(&c->lock); in dm_bufio_client_create()
2597 if (c->no_sleep) in dm_bufio_client_create()
2599 kfree(c); in dm_bufio_client_create()
2609 void dm_bufio_client_destroy(struct dm_bufio_client *c) in dm_bufio_client_destroy() argument
2613 drop_buffers(c); in dm_bufio_client_destroy()
2615 unregister_shrinker(&c->shrinker); in dm_bufio_client_destroy()
2616 flush_work(&c->shrink_work); in dm_bufio_client_destroy()
2620 list_del(&c->client_list); in dm_bufio_client_destroy()
2626 WARN_ON(c->need_reserved_buffers); in dm_bufio_client_destroy()
2628 while (!list_empty(&c->reserved_buffers)) { in dm_bufio_client_destroy()
2629 struct dm_buffer *b = list_to_buffer(c->reserved_buffers.next); in dm_bufio_client_destroy()
2636 if (cache_count(&c->cache, i)) in dm_bufio_client_destroy()
2637 DMERR("leaked buffer count %d: %lu", i, cache_count(&c->cache, i)); in dm_bufio_client_destroy()
2640 WARN_ON(cache_count(&c->cache, i)); in dm_bufio_client_destroy()
2642 cache_destroy(&c->cache); in dm_bufio_client_destroy()
2643 kmem_cache_destroy(c->slab_cache); in dm_bufio_client_destroy()
2644 kmem_cache_destroy(c->slab_buffer); in dm_bufio_client_destroy()
2645 dm_io_client_destroy(c->dm_io); in dm_bufio_client_destroy()
2646 mutex_destroy(&c->lock); in dm_bufio_client_destroy()
2647 if (c->no_sleep) in dm_bufio_client_destroy()
2649 kfree(c); in dm_bufio_client_destroy()
2653 void dm_bufio_client_reset(struct dm_bufio_client *c) in dm_bufio_client_reset() argument
2655 drop_buffers(c); in dm_bufio_client_reset()
2656 flush_work(&c->shrink_work); in dm_bufio_client_reset()
2660 void dm_bufio_set_sector_offset(struct dm_bufio_client *c, sector_t start) in dm_bufio_set_sector_offset() argument
2662 c->start = start; in dm_bufio_set_sector_offset()
2708 (static_branch_unlikely(&no_sleep_enabled) && b->c->no_sleep)) { in select_for_evict()
2718 static unsigned long __evict_many(struct dm_bufio_client *c, in __evict_many() argument
2727 b = cache_evict(&c->cache, list_mode, select_for_evict, params); in __evict_many()
2744 static void evict_old_buffers(struct dm_bufio_client *c, unsigned long age_hz) in evict_old_buffers() argument
2747 unsigned long retain = get_retain_buffers(c); in evict_old_buffers()
2751 dm_bufio_lock(c); in evict_old_buffers()
2753 __check_watermark(c, &write_list); in evict_old_buffers()
2755 dm_bufio_unlock(c); in evict_old_buffers()
2757 dm_bufio_lock(c); in evict_old_buffers()
2760 count = cache_total(&c->cache); in evict_old_buffers()
2762 __evict_many(c, &params, LIST_CLEAN, count - retain); in evict_old_buffers()
2764 dm_bufio_unlock(c); in evict_old_buffers()
2770 struct dm_bufio_client *c; in cleanup_old_buffers() local
2784 list_for_each_entry(c, &dm_bufio_all_clients, client_list) in cleanup_old_buffers()
2785 evict_old_buffers(c, max_age_hz); in cleanup_old_buffers()
2824 struct dm_bufio_client *c; in __insert_client() local
2828 c = container_of(h, struct dm_bufio_client, client_list); in __insert_client()
2829 if (time_after_eq(c->oldest_buffer, new_client->oldest_buffer)) in __insert_client()
2840 struct dm_bufio_client *c; in __evict_a_few() local
2848 c = __pop_client(); in __evict_a_few()
2849 if (!c) in __evict_a_few()
2852 dm_bufio_lock(c); in __evict_a_few()
2853 count = __evict_many(c, &params, LIST_CLEAN, nr_buffers); in __evict_a_few()
2854 dm_bufio_unlock(c); in __evict_a_few()
2857 c->oldest_buffer = params.last_accessed; in __evict_a_few()
2858 __insert_client(c); in __evict_a_few()
2866 struct dm_bufio_client *c; in check_watermarks() local
2869 list_for_each_entry(c, &dm_bufio_all_clients, client_list) { in check_watermarks()
2870 dm_bufio_lock(c); in check_watermarks()
2871 __check_watermark(c, &write_list); in check_watermarks()
2872 dm_bufio_unlock(c); in check_watermarks()