Lines matching refs: c (in drivers/md/dm-bufio.c)

162 struct dm_bufio_client *c; member
178 static inline int dm_bufio_cache_index(struct dm_bufio_client *c) in dm_bufio_cache_index() argument
180 unsigned ret = c->blocks_per_page_bits - 1; in dm_bufio_cache_index()
187 #define DM_BUFIO_CACHE(c) (dm_bufio_caches[dm_bufio_cache_index(c)]) argument
188 #define DM_BUFIO_CACHE_NAME(c) (dm_bufio_cache_names[dm_bufio_cache_index(c)]) argument
192 static void dm_bufio_lock(struct dm_bufio_client *c) in dm_bufio_lock() argument
194 mutex_lock_nested(&c->lock, dm_bufio_in_request()); in dm_bufio_lock()
197 static int dm_bufio_trylock(struct dm_bufio_client *c) in dm_bufio_trylock() argument
199 return mutex_trylock(&c->lock); in dm_bufio_trylock()
202 static void dm_bufio_unlock(struct dm_bufio_client *c) in dm_bufio_unlock() argument
204 mutex_unlock(&c->lock); in dm_bufio_unlock()
276 static struct dm_buffer *__find(struct dm_bufio_client *c, sector_t block) in __find() argument
278 struct rb_node *n = c->buffer_tree.rb_node; in __find()
293 static void __insert(struct dm_bufio_client *c, struct dm_buffer *b) in __insert() argument
295 struct rb_node **new = &c->buffer_tree.rb_node, *parent = NULL; in __insert()
312 rb_insert_color(&b->node, &c->buffer_tree); in __insert()
315 static void __remove(struct dm_bufio_client *c, struct dm_buffer *b) in __remove() argument
317 rb_erase(&b->node, &c->buffer_tree); in __remove()
386 static void *alloc_buffer_data(struct dm_bufio_client *c, gfp_t gfp_mask, in alloc_buffer_data() argument
389 if (c->block_size <= DM_BUFIO_BLOCK_SIZE_SLAB_LIMIT) { in alloc_buffer_data()
391 return kmem_cache_alloc(DM_BUFIO_CACHE(c), gfp_mask); in alloc_buffer_data()
394 if (c->block_size <= DM_BUFIO_BLOCK_SIZE_GFP_LIMIT && in alloc_buffer_data()
398 c->pages_per_block_bits); in alloc_buffer_data()
414 void *ptr = __vmalloc(c->block_size, gfp_mask, PAGE_KERNEL); in alloc_buffer_data()
420 return __vmalloc(c->block_size, gfp_mask, PAGE_KERNEL); in alloc_buffer_data()
426 static void free_buffer_data(struct dm_bufio_client *c, in free_buffer_data() argument
431 kmem_cache_free(DM_BUFIO_CACHE(c), data); in free_buffer_data()
435 free_pages((unsigned long)data, c->pages_per_block_bits); in free_buffer_data()
452 static struct dm_buffer *alloc_buffer(struct dm_bufio_client *c, gfp_t gfp_mask) in alloc_buffer() argument
454 struct dm_buffer *b = kmalloc(sizeof(struct dm_buffer) + c->aux_size, in alloc_buffer()
460 b->c = c; in alloc_buffer()
462 b->data = alloc_buffer_data(c, gfp_mask, &b->data_mode); in alloc_buffer()
468 adjust_total_allocated(b->data_mode, (long)c->block_size); in alloc_buffer()
481 struct dm_bufio_client *c = b->c; in free_buffer() local
483 adjust_total_allocated(b->data_mode, -(long)c->block_size); in free_buffer()
485 free_buffer_data(c, b->data, b->data_mode); in free_buffer()
494 struct dm_bufio_client *c = b->c; in __link_buffer() local
496 c->n_buffers[dirty]++; in __link_buffer()
499 list_add(&b->lru_list, &c->lru[dirty]); in __link_buffer()
500 __insert(b->c, b); in __link_buffer()
509 struct dm_bufio_client *c = b->c; in __unlink_buffer() local
511 BUG_ON(!c->n_buffers[b->list_mode]); in __unlink_buffer()
513 c->n_buffers[b->list_mode]--; in __unlink_buffer()
514 __remove(b->c, b); in __unlink_buffer()
523 struct dm_bufio_client *c = b->c; in __relink_lru() local
525 BUG_ON(!c->n_buffers[b->list_mode]); in __relink_lru()
527 c->n_buffers[b->list_mode]--; in __relink_lru()
528 c->n_buffers[dirty]++; in __relink_lru()
530 list_move(&b->lru_list, &c->lru[dirty]); in __relink_lru()
577 .client = b->c->dm_io, in use_dmio()
580 .bdev = b->c->bdev, in use_dmio()
625 bio_set_dev(&b->bio, b->c->bdev); in use_inline_bio()
641 BUG_ON(b->c->block_size <= PAGE_SIZE); in use_inline_bio()
659 sector = (b->block << b->c->sectors_per_block_bits) + b->c->start; in submit_io()
662 n_sectors = 1 << b->c->sectors_per_block_bits; in submit_io()
665 if (b->c->write_callback) in submit_io()
666 b->c->write_callback(b); in submit_io()
672 if (unlikely(end > b->c->block_size)) in submit_io()
673 end = b->c->block_size; in submit_io()
702 struct dm_bufio_client *c = b->c; in write_endio() local
704 (void)cmpxchg(&c->async_write_error, 0, in write_endio()
779 static struct dm_buffer *__get_unclaimed_buffer(struct dm_bufio_client *c) in __get_unclaimed_buffer() argument
783 list_for_each_entry_reverse(b, &c->lru[LIST_CLEAN], lru_list) { in __get_unclaimed_buffer()
795 list_for_each_entry_reverse(b, &c->lru[LIST_DIRTY], lru_list) { in __get_unclaimed_buffer()
816 static void __wait_for_free_buffer(struct dm_bufio_client *c) in __wait_for_free_buffer() argument
820 add_wait_queue(&c->free_buffer_wait, &wait); in __wait_for_free_buffer()
822 dm_bufio_unlock(c); in __wait_for_free_buffer()
826 remove_wait_queue(&c->free_buffer_wait, &wait); in __wait_for_free_buffer()
828 dm_bufio_lock(c); in __wait_for_free_buffer()
844 static struct dm_buffer *__alloc_buffer_wait_no_callback(struct dm_bufio_client *c, enum new_flag n… in __alloc_buffer_wait_no_callback() argument
864 b = alloc_buffer(c, GFP_NOWAIT | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN); in __alloc_buffer_wait_no_callback()
873 dm_bufio_unlock(c); in __alloc_buffer_wait_no_callback()
874 b = alloc_buffer(c, GFP_NOIO | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN); in __alloc_buffer_wait_no_callback()
875 dm_bufio_lock(c); in __alloc_buffer_wait_no_callback()
881 if (!list_empty(&c->reserved_buffers)) { in __alloc_buffer_wait_no_callback()
882 b = list_entry(c->reserved_buffers.next, in __alloc_buffer_wait_no_callback()
885 c->need_reserved_buffers++; in __alloc_buffer_wait_no_callback()
890 b = __get_unclaimed_buffer(c); in __alloc_buffer_wait_no_callback()
894 __wait_for_free_buffer(c); in __alloc_buffer_wait_no_callback()
898 static struct dm_buffer *__alloc_buffer_wait(struct dm_bufio_client *c, enum new_flag nf) in __alloc_buffer_wait() argument
900 struct dm_buffer *b = __alloc_buffer_wait_no_callback(c, nf); in __alloc_buffer_wait()
905 if (c->alloc_callback) in __alloc_buffer_wait()
906 c->alloc_callback(b); in __alloc_buffer_wait()
916 struct dm_bufio_client *c = b->c; in __free_buffer_wake() local
918 if (!c->need_reserved_buffers) in __free_buffer_wake()
921 list_add(&b->lru_list, &c->reserved_buffers); in __free_buffer_wake()
922 c->need_reserved_buffers--; in __free_buffer_wake()
925 wake_up(&c->free_buffer_wait); in __free_buffer_wake()
928 static void __write_dirty_buffers_async(struct dm_bufio_client *c, int no_wait, in __write_dirty_buffers_async() argument
933 list_for_each_entry_safe_reverse(b, tmp, &c->lru[LIST_DIRTY], lru_list) { in __write_dirty_buffers_async()
953 static void __get_memory_limit(struct dm_bufio_client *c, in __get_memory_limit() argument
967 (c->sectors_per_block_bits + SECTOR_SHIFT); in __get_memory_limit()
969 if (buffers < c->minimum_buffers) in __get_memory_limit()
970 buffers = c->minimum_buffers; in __get_memory_limit()
982 static void __check_watermark(struct dm_bufio_client *c, in __check_watermark() argument
987 __get_memory_limit(c, &threshold_buffers, &limit_buffers); in __check_watermark()
989 while (c->n_buffers[LIST_CLEAN] + c->n_buffers[LIST_DIRTY] > in __check_watermark()
992 struct dm_buffer *b = __get_unclaimed_buffer(c); in __check_watermark()
1001 if (c->n_buffers[LIST_DIRTY] > threshold_buffers) in __check_watermark()
1002 __write_dirty_buffers_async(c, 1, write_list); in __check_watermark()
1009 static struct dm_buffer *__bufio_new(struct dm_bufio_client *c, sector_t block, in __bufio_new() argument
1017 b = __find(c, block); in __bufio_new()
1024 new_b = __alloc_buffer_wait(c, nf); in __bufio_new()
1032 b = __find(c, block); in __bufio_new()
1038 __check_watermark(c, write_list); in __bufio_new()
1100 static void *new_read(struct dm_bufio_client *c, sector_t block, in new_read() argument
1108 dm_bufio_lock(c); in new_read()
1109 b = __bufio_new(c, block, nf, &need_submit, &write_list); in new_read()
1114 dm_bufio_unlock(c); in new_read()
1139 void *dm_bufio_get(struct dm_bufio_client *c, sector_t block, in dm_bufio_get() argument
1142 return new_read(c, block, NF_GET, bp); in dm_bufio_get()
1146 void *dm_bufio_read(struct dm_bufio_client *c, sector_t block, in dm_bufio_read() argument
1151 return new_read(c, block, NF_READ, bp); in dm_bufio_read()
1155 void *dm_bufio_new(struct dm_bufio_client *c, sector_t block, in dm_bufio_new() argument
1160 return new_read(c, block, NF_FRESH, bp); in dm_bufio_new()
1164 void dm_bufio_prefetch(struct dm_bufio_client *c, in dm_bufio_prefetch() argument
1174 dm_bufio_lock(c); in dm_bufio_prefetch()
1179 b = __bufio_new(c, block, NF_PREFETCH, &need_submit, in dm_bufio_prefetch()
1182 dm_bufio_unlock(c); in dm_bufio_prefetch()
1186 dm_bufio_lock(c); in dm_bufio_prefetch()
1189 dm_bufio_unlock(c); in dm_bufio_prefetch()
1199 dm_bufio_lock(c); in dm_bufio_prefetch()
1203 dm_bufio_unlock(c); in dm_bufio_prefetch()
1212 struct dm_bufio_client *c = b->c; in dm_bufio_release() local
1214 dm_bufio_lock(c); in dm_bufio_release()
1220 wake_up(&c->free_buffer_wait); in dm_bufio_release()
1236 dm_bufio_unlock(c); in dm_bufio_release()
1243 struct dm_bufio_client *c = b->c; in dm_bufio_mark_partial_buffer_dirty() local
1246 BUG_ON(end > b->c->block_size); in dm_bufio_mark_partial_buffer_dirty()
1248 dm_bufio_lock(c); in dm_bufio_mark_partial_buffer_dirty()
1263 dm_bufio_unlock(c); in dm_bufio_mark_partial_buffer_dirty()
1269 dm_bufio_mark_partial_buffer_dirty(b, 0, b->c->block_size); in dm_bufio_mark_buffer_dirty()
1273 void dm_bufio_write_dirty_buffers_async(struct dm_bufio_client *c) in dm_bufio_write_dirty_buffers_async() argument
1279 dm_bufio_lock(c); in dm_bufio_write_dirty_buffers_async()
1280 __write_dirty_buffers_async(c, 0, &write_list); in dm_bufio_write_dirty_buffers_async()
1281 dm_bufio_unlock(c); in dm_bufio_write_dirty_buffers_async()
1293 int dm_bufio_write_dirty_buffers(struct dm_bufio_client *c) in dm_bufio_write_dirty_buffers() argument
1301 dm_bufio_lock(c); in dm_bufio_write_dirty_buffers()
1302 __write_dirty_buffers_async(c, 0, &write_list); in dm_bufio_write_dirty_buffers()
1303 dm_bufio_unlock(c); in dm_bufio_write_dirty_buffers()
1305 dm_bufio_lock(c); in dm_bufio_write_dirty_buffers()
1308 list_for_each_entry_safe_reverse(b, tmp, &c->lru[LIST_DIRTY], lru_list) { in dm_bufio_write_dirty_buffers()
1311 if (buffers_processed < c->n_buffers[LIST_DIRTY]) in dm_bufio_write_dirty_buffers()
1317 if (buffers_processed < c->n_buffers[LIST_DIRTY]) { in dm_bufio_write_dirty_buffers()
1320 dm_bufio_unlock(c); in dm_bufio_write_dirty_buffers()
1323 dm_bufio_lock(c); in dm_bufio_write_dirty_buffers()
1353 wake_up(&c->free_buffer_wait); in dm_bufio_write_dirty_buffers()
1354 dm_bufio_unlock(c); in dm_bufio_write_dirty_buffers()
1356 a = xchg(&c->async_write_error, 0); in dm_bufio_write_dirty_buffers()
1357 f = dm_bufio_issue_flush(c); in dm_bufio_write_dirty_buffers()
1368 int dm_bufio_issue_flush(struct dm_bufio_client *c) in dm_bufio_issue_flush() argument
1375 .client = c->dm_io, in dm_bufio_issue_flush()
1378 .bdev = c->bdev, in dm_bufio_issue_flush()
1403 struct dm_bufio_client *c = b->c; in dm_bufio_release_move() local
1408 dm_bufio_lock(c); in dm_bufio_release_move()
1411 new = __find(c, new_block); in dm_bufio_release_move()
1414 __wait_for_free_buffer(c); in dm_bufio_release_move()
1436 b->dirty_end = c->block_size; in dm_bufio_release_move()
1460 dm_bufio_unlock(c); in dm_bufio_release_move()
1471 void dm_bufio_forget(struct dm_bufio_client *c, sector_t block) in dm_bufio_forget() argument
1475 dm_bufio_lock(c); in dm_bufio_forget()
1477 b = __find(c, block); in dm_bufio_forget()
1483 dm_bufio_unlock(c); in dm_bufio_forget()
1487 void dm_bufio_set_minimum_buffers(struct dm_bufio_client *c, unsigned n) in dm_bufio_set_minimum_buffers() argument
1489 c->minimum_buffers = n; in dm_bufio_set_minimum_buffers()
1493 unsigned dm_bufio_get_block_size(struct dm_bufio_client *c) in dm_bufio_get_block_size() argument
1495 return c->block_size; in dm_bufio_get_block_size()
1499 sector_t dm_bufio_get_device_size(struct dm_bufio_client *c) in dm_bufio_get_device_size() argument
1501 return i_size_read(c->bdev->bd_inode) >> in dm_bufio_get_device_size()
1502 (SECTOR_SHIFT + c->sectors_per_block_bits); in dm_bufio_get_device_size()
1526 return b->c; in dm_bufio_get_client()
1530 static void drop_buffers(struct dm_bufio_client *c) in drop_buffers() argument
1541 dm_bufio_write_dirty_buffers_async(c); in drop_buffers()
1543 dm_bufio_lock(c); in drop_buffers()
1545 while ((b = __get_unclaimed_buffer(c))) in drop_buffers()
1549 list_for_each_entry(b, &c->lru[i], lru_list) { in drop_buffers()
1561 while ((b = __get_unclaimed_buffer(c))) in drop_buffers()
1566 BUG_ON(!list_empty(&c->lru[i])); in drop_buffers()
1568 dm_bufio_unlock(c); in drop_buffers()
1598 static unsigned long get_retain_buffers(struct dm_bufio_client *c) in get_retain_buffers() argument
1601 return retain_bytes >> (c->sectors_per_block_bits + SECTOR_SHIFT); in get_retain_buffers()
1604 static unsigned long __scan(struct dm_bufio_client *c, unsigned long nr_to_scan, in __scan() argument
1610 unsigned long count = c->n_buffers[LIST_CLEAN] + in __scan()
1611 c->n_buffers[LIST_DIRTY]; in __scan()
1612 unsigned long retain_target = get_retain_buffers(c); in __scan()
1615 list_for_each_entry_safe_reverse(b, tmp, &c->lru[l], lru_list) { in __scan()
1629 struct dm_bufio_client *c; in dm_bufio_shrink_scan() local
1632 c = container_of(shrink, struct dm_bufio_client, shrinker); in dm_bufio_shrink_scan()
1634 dm_bufio_lock(c); in dm_bufio_shrink_scan()
1635 else if (!dm_bufio_trylock(c)) in dm_bufio_shrink_scan()
1638 freed = __scan(c, sc->nr_to_scan, sc->gfp_mask); in dm_bufio_shrink_scan()
1639 dm_bufio_unlock(c); in dm_bufio_shrink_scan()
1646 struct dm_bufio_client *c = container_of(shrink, struct dm_bufio_client, shrinker); in dm_bufio_shrink_count() local
1647 unsigned long count = ACCESS_ONCE(c->n_buffers[LIST_CLEAN]) + in dm_bufio_shrink_count()
1648 ACCESS_ONCE(c->n_buffers[LIST_DIRTY]); in dm_bufio_shrink_count()
1649 unsigned long retain_target = get_retain_buffers(c); in dm_bufio_shrink_count()
1663 struct dm_bufio_client *c; in dm_bufio_client_create() local
1669 c = kzalloc(sizeof(*c), GFP_KERNEL); in dm_bufio_client_create()
1670 if (!c) { in dm_bufio_client_create()
1674 c->buffer_tree = RB_ROOT; in dm_bufio_client_create()
1676 c->bdev = bdev; in dm_bufio_client_create()
1677 c->block_size = block_size; in dm_bufio_client_create()
1678 c->sectors_per_block_bits = __ffs(block_size) - SECTOR_SHIFT; in dm_bufio_client_create()
1679 c->pages_per_block_bits = (__ffs(block_size) >= PAGE_SHIFT) ? in dm_bufio_client_create()
1681 c->blocks_per_page_bits = (__ffs(block_size) < PAGE_SHIFT ? in dm_bufio_client_create()
1684 c->aux_size = aux_size; in dm_bufio_client_create()
1685 c->alloc_callback = alloc_callback; in dm_bufio_client_create()
1686 c->write_callback = write_callback; in dm_bufio_client_create()
1689 INIT_LIST_HEAD(&c->lru[i]); in dm_bufio_client_create()
1690 c->n_buffers[i] = 0; in dm_bufio_client_create()
1693 mutex_init(&c->lock); in dm_bufio_client_create()
1694 INIT_LIST_HEAD(&c->reserved_buffers); in dm_bufio_client_create()
1695 c->need_reserved_buffers = reserved_buffers; in dm_bufio_client_create()
1697 c->minimum_buffers = DM_BUFIO_MIN_BUFFERS; in dm_bufio_client_create()
1699 init_waitqueue_head(&c->free_buffer_wait); in dm_bufio_client_create()
1700 c->async_write_error = 0; in dm_bufio_client_create()
1702 c->dm_io = dm_io_client_create(); in dm_bufio_client_create()
1703 if (IS_ERR(c->dm_io)) { in dm_bufio_client_create()
1704 r = PTR_ERR(c->dm_io); in dm_bufio_client_create()
1709 if (c->blocks_per_page_bits) { in dm_bufio_client_create()
1710 if (!DM_BUFIO_CACHE_NAME(c)) { in dm_bufio_client_create()
1711 DM_BUFIO_CACHE_NAME(c) = kasprintf(GFP_KERNEL, "dm_bufio_cache-%u", c->block_size); in dm_bufio_client_create()
1712 if (!DM_BUFIO_CACHE_NAME(c)) { in dm_bufio_client_create()
1719 if (!DM_BUFIO_CACHE(c)) { in dm_bufio_client_create()
1720 DM_BUFIO_CACHE(c) = kmem_cache_create(DM_BUFIO_CACHE_NAME(c), in dm_bufio_client_create()
1721 c->block_size, in dm_bufio_client_create()
1722 c->block_size, 0, NULL); in dm_bufio_client_create()
1723 if (!DM_BUFIO_CACHE(c)) { in dm_bufio_client_create()
1732 while (c->need_reserved_buffers) { in dm_bufio_client_create()
1733 struct dm_buffer *b = alloc_buffer(c, GFP_KERNEL); in dm_bufio_client_create()
1744 list_add(&c->client_list, &dm_bufio_all_clients); in dm_bufio_client_create()
1748 c->shrinker.count_objects = dm_bufio_shrink_count; in dm_bufio_client_create()
1749 c->shrinker.scan_objects = dm_bufio_shrink_scan; in dm_bufio_client_create()
1750 c->shrinker.seeks = 1; in dm_bufio_client_create()
1751 c->shrinker.batch = 0; in dm_bufio_client_create()
1752 register_shrinker(&c->shrinker); in dm_bufio_client_create()
1754 return c; in dm_bufio_client_create()
1758 while (!list_empty(&c->reserved_buffers)) { in dm_bufio_client_create()
1759 struct dm_buffer *b = list_entry(c->reserved_buffers.next, in dm_bufio_client_create()
1764 dm_io_client_destroy(c->dm_io); in dm_bufio_client_create()
1766 kfree(c); in dm_bufio_client_create()
1776 void dm_bufio_client_destroy(struct dm_bufio_client *c) in dm_bufio_client_destroy() argument
1780 drop_buffers(c); in dm_bufio_client_destroy()
1782 unregister_shrinker(&c->shrinker); in dm_bufio_client_destroy()
1786 list_del(&c->client_list); in dm_bufio_client_destroy()
1792 BUG_ON(!RB_EMPTY_ROOT(&c->buffer_tree)); in dm_bufio_client_destroy()
1793 BUG_ON(c->need_reserved_buffers); in dm_bufio_client_destroy()
1795 while (!list_empty(&c->reserved_buffers)) { in dm_bufio_client_destroy()
1796 struct dm_buffer *b = list_entry(c->reserved_buffers.next, in dm_bufio_client_destroy()
1803 if (c->n_buffers[i]) in dm_bufio_client_destroy()
1804 DMERR("leaked buffer count %d: %ld", i, c->n_buffers[i]); in dm_bufio_client_destroy()
1807 BUG_ON(c->n_buffers[i]); in dm_bufio_client_destroy()
1809 dm_io_client_destroy(c->dm_io); in dm_bufio_client_destroy()
1810 kfree(c); in dm_bufio_client_destroy()
1814 void dm_bufio_set_sector_offset(struct dm_bufio_client *c, sector_t start) in dm_bufio_set_sector_offset() argument
1816 c->start = start; in dm_bufio_set_sector_offset()
1835 static void __evict_old_buffers(struct dm_bufio_client *c, unsigned long age_hz) in __evict_old_buffers() argument
1838 unsigned long retain_target = get_retain_buffers(c); in __evict_old_buffers()
1842 dm_bufio_lock(c); in __evict_old_buffers()
1844 __check_watermark(c, &write_list); in __evict_old_buffers()
1846 dm_bufio_unlock(c); in __evict_old_buffers()
1848 dm_bufio_lock(c); in __evict_old_buffers()
1851 count = c->n_buffers[LIST_CLEAN] + c->n_buffers[LIST_DIRTY]; in __evict_old_buffers()
1852 list_for_each_entry_safe_reverse(b, tmp, &c->lru[LIST_CLEAN], lru_list) { in __evict_old_buffers()
1865 dm_bufio_unlock(c); in __evict_old_buffers()
1871 struct dm_bufio_client *c; in cleanup_old_buffers() local
1877 list_for_each_entry(c, &dm_bufio_all_clients, client_list) in cleanup_old_buffers()
1878 __evict_old_buffers(c, max_age_hz); in cleanup_old_buffers()
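
For orientation, the sketch below shows how a device-mapper target typically drives the dm_bufio_client whose references to c are listed above: create a client, read a block through the cache, dirty it, flush, and tear down. It is an illustrative sketch only, assuming the dm-bufio declarations of this kernel era (drivers/md/dm-bufio.h); the function name example_use_bufio, the my_bdev parameter, and the 4096-byte block size are placeholders for the example, not values taken from the listing.

/*
 * Illustrative sketch (not part of the cross-reference above): a minimal
 * consumer of the dm-bufio client API. The caller, device, and block size
 * are assumptions made for the example.
 */
#include <linux/err.h>
#include <linux/string.h>
#include "dm-bufio.h"	/* moved to <linux/dm-bufio.h> in later kernels */

static int example_use_bufio(struct block_device *my_bdev)
{
	struct dm_bufio_client *c;
	struct dm_buffer *bp;
	void *data;
	int r;

	/* One client per device: 4096-byte blocks, 1 reserved buffer,
	 * no per-buffer aux data, no alloc/write callbacks. */
	c = dm_bufio_client_create(my_bdev, 4096, 1, 0, NULL, NULL);
	if (IS_ERR(c))
		return PTR_ERR(c);

	/* Read block 0 through the cache; bp pins the buffer. */
	data = dm_bufio_read(c, 0, &bp);
	if (IS_ERR(data)) {
		r = PTR_ERR(data);
		goto out;
	}

	/* Modify the cached copy and mark it dirty ... */
	memset(data, 0, dm_bufio_get_block_size(c));
	dm_bufio_mark_buffer_dirty(bp);

	/* ... then drop the pin and flush dirty buffers to disk. */
	dm_bufio_release(bp);
	r = dm_bufio_write_dirty_buffers(c);

out:
	dm_bufio_client_destroy(c);
	return r;
}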