Lines matching refs: b (drivers/md/dm-bufio.c)
250 static void buffer_record_stack(struct dm_buffer *b) in buffer_record_stack() argument
252 b->stack_trace.nr_entries = 0; in buffer_record_stack()
253 b->stack_trace.max_entries = MAX_STACK; in buffer_record_stack()
254 b->stack_trace.entries = b->stack_entries; in buffer_record_stack()
255 b->stack_trace.skip = 2; in buffer_record_stack()
256 save_stack_trace(&b->stack_trace); in buffer_record_stack()
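
The block above only fires when the driver is built with CONFIG_DM_DEBUG_BLOCK_STACK_TRACING: each buffer records the call stack that acquired it (skip = 2 drops the two innermost frames), so drop_buffers() can print the culprit for any buffer still held at client destruction (see line 1502 below). A minimal userspace analogue using glibc's backtrace(3); the struct and names are illustrative, not the kernel's:

    #include <execinfo.h>

    #define MAX_STACK 10

    struct trace {
        void *entries[MAX_STACK];
        int nr_entries;
    };

    static void record_stack(struct trace *t)
    {
        /* like save_stack_trace(), but without the skip: glibc keeps
         * record_stack() itself as the innermost frame */
        t->nr_entries = backtrace(t->entries, MAX_STACK);
    }

    static void print_stack(const struct trace *t)
    {
        /* counterpart of print_stack_trace() in drop_buffers() */
        backtrace_symbols_fd(t->entries, t->nr_entries, 2 /* stderr */);
    }
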
266 struct dm_buffer *b; in __find() local
269 b = container_of(n, struct dm_buffer, node); in __find()
271 if (b->block == block) in __find()
272 return b; in __find()
274 n = (b->block < block) ? n->rb_right : n->rb_left; in __find()
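
__find() is a plain binary-search descent over the client's red-black tree keyed by block number: an equal key returns the buffer, and a node whose key is smaller than the target sends the search right (matching the insertion rule in __insert() below). A minimal userspace sketch of the same descent, with a plain binary tree standing in for the kernel rbtree:

    #include <stddef.h>

    struct buf {
        unsigned long long block;       /* stands in for sector_t */
        struct buf *rb_left, *rb_right;
    };

    static struct buf *find_buf(struct buf *root, unsigned long long block)
    {
        struct buf *b = root;

        while (b) {
            if (b->block == block)
                return b;               /* cache hit */
            /* smaller keys live in the left subtree, so a node below
             * the target sends the search right */
            b = (b->block < block) ? b->rb_right : b->rb_left;
        }
        return NULL;                    /* not cached */
    }
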
280 static void __insert(struct dm_bufio_client *c, struct dm_buffer *b) in __insert() argument
288 if (found->block == b->block) { in __insert()
289 BUG_ON(found != b); in __insert()
294 new = (found->block < b->block) ? in __insert()
298 rb_link_node(&b->node, parent, new); in __insert()
299 rb_insert_color(&b->node, &c->buffer_tree); in __insert()
302 static void __remove(struct dm_bufio_client *c, struct dm_buffer *b) in __remove() argument
304 rb_erase(&b->node, &c->buffer_tree); in __remove()
445 struct dm_buffer *b = kmalloc(sizeof(struct dm_buffer) + c->aux_size, in alloc_buffer() local
448 if (!b) in alloc_buffer()
451 b->c = c; in alloc_buffer()
453 b->data = alloc_buffer_data(c, gfp_mask, &b->data_mode); in alloc_buffer()
454 if (!b->data) { in alloc_buffer()
455 kfree(b); in alloc_buffer()
459 adjust_total_allocated(b->data_mode, (long)c->block_size); in alloc_buffer()
462 memset(&b->stack_trace, 0, sizeof(b->stack_trace)); in alloc_buffer()
464 return b; in alloc_buffer()
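
alloc_buffer() sizes the kmalloc as the header plus a client-chosen aux area in one allocation; dm_bufio_get_aux_data() (line 1467 below) hands callers the region that starts immediately after the struct. A userspace sketch of the same layout, names hypothetical:

    #include <stdlib.h>
    #include <string.h>

    struct buffer_hdr {
        void *data;
        /* client aux area of aux_size bytes follows the header */
    };

    static struct buffer_hdr *alloc_with_aux(size_t aux_size)
    {
        struct buffer_hdr *b = malloc(sizeof(*b) + aux_size);

        if (!b)
            return NULL;
        memset(b + 1, 0, aux_size);     /* aux area == (void *)(b + 1) */
        return b;
    }

    static void *get_aux(struct buffer_hdr *b)
    {
        return b + 1;                   /* mirrors dm_bufio_get_aux_data() */
    }
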
470 static void free_buffer(struct dm_buffer *b) in free_buffer() argument
472 struct dm_bufio_client *c = b->c; in free_buffer()
474 adjust_total_allocated(b->data_mode, -(long)c->block_size); in free_buffer()
476 free_buffer_data(c, b->data, b->data_mode); in free_buffer()
477 kfree(b); in free_buffer()
483 static void __link_buffer(struct dm_buffer *b, sector_t block, int dirty) in __link_buffer() argument
485 struct dm_bufio_client *c = b->c; in __link_buffer()
488 b->block = block; in __link_buffer()
489 b->list_mode = dirty; in __link_buffer()
490 list_add(&b->lru_list, &c->lru[dirty]); in __link_buffer()
491 __insert(b->c, b); in __link_buffer()
492 b->last_accessed = jiffies; in __link_buffer()
498 static void __unlink_buffer(struct dm_buffer *b) in __unlink_buffer() argument
500 struct dm_bufio_client *c = b->c; in __unlink_buffer()
502 BUG_ON(!c->n_buffers[b->list_mode]); in __unlink_buffer()
504 c->n_buffers[b->list_mode]--; in __unlink_buffer()
505 __remove(b->c, b); in __unlink_buffer()
506 list_del(&b->lru_list); in __unlink_buffer()
512 static void __relink_lru(struct dm_buffer *b, int dirty) in __relink_lru() argument
514 struct dm_bufio_client *c = b->c; in __relink_lru()
516 BUG_ON(!c->n_buffers[b->list_mode]); in __relink_lru()
518 c->n_buffers[b->list_mode]--; in __relink_lru()
520 b->list_mode = dirty; in __relink_lru()
521 list_move(&b->lru_list, &c->lru[dirty]); in __relink_lru()
522 b->last_accessed = jiffies; in __relink_lru()
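
__unlink_buffer() and __relink_lru() maintain two LRU lists (LIST_CLEAN and LIST_DIRTY) plus per-list counters; list_add()/list_move() place a buffer at the head, so the tail is the least-recently-used end that the reverse walks below start from. A sketch of the same bookkeeping with BSD TAILQ standing in for list_head:

    #include <sys/queue.h>
    #include <time.h>

    enum { LIST_CLEAN, LIST_DIRTY };

    struct buf {
        int list_mode;
        time_t last_accessed;           /* stands in for jiffies */
        TAILQ_ENTRY(buf) lru_list;
    };

    struct client {
        TAILQ_HEAD(lru_head, buf) lru[2];
        unsigned long n_buffers[2];
    };

    static void relink_lru(struct client *c, struct buf *b, int dirty)
    {
        c->n_buffers[b->list_mode]--;
        c->n_buffers[dirty]++;
        /* remove from the old list before list_mode changes */
        TAILQ_REMOVE(&c->lru[b->list_mode], b, lru_list);
        b->list_mode = dirty;
        TAILQ_INSERT_HEAD(&c->lru[dirty], b, lru_list);  /* MRU at head */
        b->last_accessed = time(NULL);
    }
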
553 struct dm_buffer *b = context; in dmio_complete() local
555 b->bio.bi_error = error ? -EIO : 0; in dmio_complete()
556 b->bio.bi_end_io(&b->bio); in dmio_complete()
559 static void use_dmio(struct dm_buffer *b, int rw, sector_t block, in use_dmio() argument
567 .notify.context = b, in use_dmio()
568 .client = b->c->dm_io, in use_dmio()
571 .bdev = b->c->bdev, in use_dmio()
572 .sector = block << b->c->sectors_per_block_bits, in use_dmio()
573 .count = b->c->block_size >> SECTOR_SHIFT, in use_dmio()
576 if (b->data_mode != DATA_MODE_VMALLOC) { in use_dmio()
578 io_req.mem.ptr.addr = b->data; in use_dmio()
581 io_req.mem.ptr.vma = b->data; in use_dmio()
584 b->bio.bi_end_io = end_io; in use_dmio()
588 b->bio.bi_error = r; in use_dmio()
589 end_io(&b->bio); in use_dmio()
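
The .sector and .count fields above encode the usual power-of-two arithmetic: sectors_per_block_bits is log2 of the block size in 512-byte sectors, so a block number shifts straight to its starting sector. A runnable check of the arithmetic for a hypothetical 4 KiB block size:

    #include <stdio.h>

    int main(void)
    {
        const unsigned sector_shift = 9;            /* 512-byte sectors */
        unsigned block_size = 4096;                 /* bytes, power of two */
        unsigned sectors_per_block_bits =
            __builtin_ctz(block_size) - sector_shift;   /* 12 - 9 = 3 */
        unsigned long long block = 10;

        printf("block %llu -> start sector %llu, %u sectors long\n",
               block,
               block << sectors_per_block_bits,         /* 80 */
               block_size >> sector_shift);             /* 8 */
        return 0;
    }
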
608 static void use_inline_bio(struct dm_buffer *b, int rw, sector_t block, in use_inline_bio() argument
614 bio_init(&b->bio); in use_inline_bio()
615 b->bio.bi_io_vec = b->bio_vec; in use_inline_bio()
616 b->bio.bi_max_vecs = DM_BUFIO_INLINE_VECS; in use_inline_bio()
617 b->bio.bi_iter.bi_sector = block << b->c->sectors_per_block_bits; in use_inline_bio()
618 b->bio.bi_bdev = b->c->bdev; in use_inline_bio()
619 b->bio.bi_end_io = inline_endio; in use_inline_bio()
624 b->bio.bi_private = end_io; in use_inline_bio()
625 bio_set_op_attrs(&b->bio, rw, 0); in use_inline_bio()
631 ptr = b->data; in use_inline_bio()
632 len = b->c->block_size; in use_inline_bio()
640 if (!bio_add_page(&b->bio, virt_to_page(ptr), in use_inline_bio()
643 BUG_ON(b->c->block_size <= PAGE_SIZE); in use_inline_bio()
644 use_dmio(b, rw, block, end_io); in use_inline_bio()
652 submit_bio(&b->bio); in use_inline_bio()
655 static void submit_io(struct dm_buffer *b, int rw, sector_t block, in submit_io() argument
658 if (rw == WRITE && b->c->write_callback) in submit_io()
659 b->c->write_callback(b); in submit_io()
661 if (b->c->block_size <= DM_BUFIO_INLINE_VECS * PAGE_SIZE && in submit_io()
662 b->data_mode != DATA_MODE_VMALLOC) in submit_io()
663 use_inline_bio(b, rw, block, end_io); in submit_io()
665 use_dmio(b, rw, block, end_io); in submit_io()
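
submit_io() takes the cheap path when it can: data that fits in DM_BUFIO_INLINE_VECS pages and was not vmalloc'd (so it is physically addressable page by page) goes through the bio embedded in the buffer; everything else falls back to the dm-io service, which handles vmalloc memory via DM_IO_VMA above. The predicate, pulled out as a sketch with illustrative constants:

    #include <stdbool.h>

    #define PAGE_SIZE            4096u  /* illustrative */
    #define DM_BUFIO_INLINE_VECS 16u

    enum data_mode { DATA_MODE_SLAB, DATA_MODE_GET_FREE_PAGES,
                     DATA_MODE_VMALLOC };

    /* mirrors the condition in submit_io(): true -> use_inline_bio() */
    static bool can_use_inline_bio(unsigned block_size, enum data_mode mode)
    {
        return block_size <= DM_BUFIO_INLINE_VECS * PAGE_SIZE &&
               mode != DATA_MODE_VMALLOC;
    }
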
680 struct dm_buffer *b = container_of(bio, struct dm_buffer, bio); in write_endio() local
682 b->write_error = bio->bi_error; in write_endio()
684 struct dm_bufio_client *c = b->c; in write_endio()
689 BUG_ON(!test_bit(B_WRITING, &b->state)); in write_endio()
692 clear_bit(B_WRITING, &b->state); in write_endio()
695 wake_up_bit(&b->state, B_WRITING); in write_endio()
707 static void __write_dirty_buffer(struct dm_buffer *b, in __write_dirty_buffer() argument
710 if (!test_bit(B_DIRTY, &b->state)) in __write_dirty_buffer()
713 clear_bit(B_DIRTY, &b->state); in __write_dirty_buffer()
714 wait_on_bit_lock_io(&b->state, B_WRITING, TASK_UNINTERRUPTIBLE); in __write_dirty_buffer()
717 submit_io(b, WRITE, b->block, write_endio); in __write_dirty_buffer()
719 list_add_tail(&b->write_list, write_list); in __write_dirty_buffer()
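
The write path serializes on two state bits: B_DIRTY marks data newer than disk, and B_WRITING acts as a lock bit (wait_on_bit_lock_io() takes it, write_endio() clears it and wakes waiters), so at most one write per buffer is in flight. A C11-atomics sketch of the bit protocol, with yielding standing in for the kernel's waitqueues:

    #include <stdatomic.h>
    #include <sched.h>

    enum { B_READING, B_DIRTY, B_WRITING };

    static int test_and_set_bit(int bit, _Atomic unsigned long *state)
    {
        unsigned long old = atomic_fetch_or(state, 1UL << bit);
        return (old >> bit) & 1;
    }

    static void clear_bit(int bit, _Atomic unsigned long *state)
    {
        atomic_fetch_and(state, ~(1UL << bit));
    }

    /* wait_on_bit_lock_io() analogue: spin until we take B_WRITING */
    static void lock_bit(int bit, _Atomic unsigned long *state)
    {
        while (test_and_set_bit(bit, state))
            sched_yield();      /* the kernel sleeps on a waitqueue here */
    }
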
727 struct dm_buffer *b = in __flush_write_list() local
729 list_del(&b->write_list); in __flush_write_list()
730 submit_io(b, WRITE, b->block, write_endio); in __flush_write_list()
741 static void __make_buffer_clean(struct dm_buffer *b) in __make_buffer_clean() argument
743 BUG_ON(b->hold_count); in __make_buffer_clean()
745 if (!b->state) /* fast case */ in __make_buffer_clean()
748 wait_on_bit_io(&b->state, B_READING, TASK_UNINTERRUPTIBLE); in __make_buffer_clean()
749 __write_dirty_buffer(b, NULL); in __make_buffer_clean()
750 wait_on_bit_io(&b->state, B_WRITING, TASK_UNINTERRUPTIBLE); in __make_buffer_clean()
759 struct dm_buffer *b; in __get_unclaimed_buffer() local
761 list_for_each_entry_reverse(b, &c->lru[LIST_CLEAN], lru_list) { in __get_unclaimed_buffer()
762 BUG_ON(test_bit(B_WRITING, &b->state)); in __get_unclaimed_buffer()
763 BUG_ON(test_bit(B_DIRTY, &b->state)); in __get_unclaimed_buffer()
765 if (!b->hold_count) { in __get_unclaimed_buffer()
766 __make_buffer_clean(b); in __get_unclaimed_buffer()
767 __unlink_buffer(b); in __get_unclaimed_buffer()
768 return b; in __get_unclaimed_buffer()
773 list_for_each_entry_reverse(b, &c->lru[LIST_DIRTY], lru_list) { in __get_unclaimed_buffer()
774 BUG_ON(test_bit(B_READING, &b->state)); in __get_unclaimed_buffer()
776 if (!b->hold_count) { in __get_unclaimed_buffer()
777 __make_buffer_clean(b); in __get_unclaimed_buffer()
778 __unlink_buffer(b); in __get_unclaimed_buffer()
779 return b; in __get_unclaimed_buffer()
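
__get_unclaimed_buffer() walks both lists tail-first (least recently used first) and tries the clean list before the dirty one, because an unheld clean buffer can be dropped for free while a dirty one must be written out by __make_buffer_clean() first. The scan order in miniature, self-contained TAILQ version:

    #include <sys/queue.h>

    enum { LIST_CLEAN, LIST_DIRTY };

    struct buf {
        unsigned hold_count;
        TAILQ_ENTRY(buf) lru_list;
    };

    TAILQ_HEAD(lru_head, buf);

    static struct buf *get_unclaimed(struct lru_head lru[2])
    {
        struct buf *b;
        int l;

        /* clean list first: no writeback needed to reclaim it */
        for (l = LIST_CLEAN; l <= LIST_DIRTY; l++) {
            /* tail end of each list is the least recently used */
            TAILQ_FOREACH_REVERSE(b, &lru[l], lru_head, lru_list) {
                if (b->hold_count)
                    continue;           /* pinned by a caller, skip */
                TAILQ_REMOVE(&lru[l], b, lru_list);
                return b;               /* real code also cleans it here */
            }
        }
        return NULL;                    /* everything is held */
    }
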
824 struct dm_buffer *b; in __alloc_buffer_wait_no_callback() local
840 b = alloc_buffer(c, GFP_NOIO | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN); in __alloc_buffer_wait_no_callback()
841 if (b) in __alloc_buffer_wait_no_callback()
842 return b; in __alloc_buffer_wait_no_callback()
849 b = list_entry(c->reserved_buffers.next, in __alloc_buffer_wait_no_callback()
851 list_del(&b->lru_list); in __alloc_buffer_wait_no_callback()
854 return b; in __alloc_buffer_wait_no_callback()
857 b = __get_unclaimed_buffer(c); in __alloc_buffer_wait_no_callback()
858 if (b) in __alloc_buffer_wait_no_callback()
859 return b; in __alloc_buffer_wait_no_callback()
867 struct dm_buffer *b = __alloc_buffer_wait_no_callback(c, nf); in __alloc_buffer_wait() local
869 if (!b) in __alloc_buffer_wait()
873 c->alloc_callback(b); in __alloc_buffer_wait()
875 return b; in __alloc_buffer_wait()
881 static void __free_buffer_wake(struct dm_buffer *b) in __free_buffer_wake() argument
883 struct dm_bufio_client *c = b->c; in __free_buffer_wake()
886 free_buffer(b); in __free_buffer_wake()
888 list_add(&b->lru_list, &c->reserved_buffers); in __free_buffer_wake()
898 struct dm_buffer *b, *tmp; in __write_dirty_buffers_async() local
900 list_for_each_entry_safe_reverse(b, tmp, &c->lru[LIST_DIRTY], lru_list) { in __write_dirty_buffers_async()
901 BUG_ON(test_bit(B_READING, &b->state)); in __write_dirty_buffers_async()
903 if (!test_bit(B_DIRTY, &b->state) && in __write_dirty_buffers_async()
904 !test_bit(B_WRITING, &b->state)) { in __write_dirty_buffers_async()
905 __relink_lru(b, LIST_CLEAN); in __write_dirty_buffers_async()
909 if (no_wait && test_bit(B_WRITING, &b->state)) in __write_dirty_buffers_async()
912 __write_dirty_buffer(b, write_list); in __write_dirty_buffers_async()
959 struct dm_buffer *b = __get_unclaimed_buffer(c); in __check_watermark() local
961 if (!b) in __check_watermark()
964 __free_buffer_wake(b); in __check_watermark()
980 struct dm_buffer *b, *new_b = NULL; in __bufio_new() local
984 b = __find(c, block); in __bufio_new()
985 if (b) in __bufio_new()
999 b = __find(c, block); in __bufio_new()
1000 if (b) { in __bufio_new()
1007 b = new_b; in __bufio_new()
1008 b->hold_count = 1; in __bufio_new()
1009 b->read_error = 0; in __bufio_new()
1010 b->write_error = 0; in __bufio_new()
1011 __link_buffer(b, block, LIST_CLEAN); in __bufio_new()
1014 b->state = 0; in __bufio_new()
1015 return b; in __bufio_new()
1018 b->state = 1 << B_READING; in __bufio_new()
1021 return b; in __bufio_new()
1033 if (nf == NF_GET && unlikely(test_bit(B_READING, &b->state))) in __bufio_new()
1036 b->hold_count++; in __bufio_new()
1037 __relink_lru(b, test_bit(B_DIRTY, &b->state) || in __bufio_new()
1038 test_bit(B_WRITING, &b->state)); in __bufio_new()
1039 return b; in __bufio_new()
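
Note the double __find() in __bufio_new() (lines 984 and 999): __alloc_buffer_wait() can drop the client mutex while it sleeps for memory, so by the time it returns another thread may already have inserted the same block, and the freshly allocated buffer is handed back via __free_buffer_wake(). A sketch of that allocate-then-recheck pattern with a pthread mutex; all helpers are assumed, hypothetical names:

    #include <pthread.h>
    #include <stddef.h>

    struct buf;
    struct cache;

    /* assumed helpers: tree lookup, blocking allocation, insert, free */
    struct buf *cache_find(struct cache *c, unsigned long long block);
    struct buf *alloc_may_unlock(struct cache *c, pthread_mutex_t *m);
    void cache_insert(struct cache *c, struct buf *b, unsigned long long blk);
    void buf_free(struct buf *b);

    static struct buf *lookup_or_create(struct cache *c, pthread_mutex_t *m,
                                        unsigned long long block)
    {
        struct buf *b, *new_b;

        pthread_mutex_lock(m);
        b = cache_find(c, block);
        if (b)
            goto out;

        new_b = alloc_may_unlock(c, m);     /* may drop and retake *m */

        b = cache_find(c, block);           /* recheck: lock was dropped */
        if (b) {
            buf_free(new_b);                /* lost the race, discard ours */
            goto out;
        }
        cache_insert(c, new_b, block);
        b = new_b;
    out:
        pthread_mutex_unlock(m);
        return b;
    }
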
1048 struct dm_buffer *b = container_of(bio, struct dm_buffer, bio); in read_endio() local
1050 b->read_error = bio->bi_error; in read_endio()
1052 BUG_ON(!test_bit(B_READING, &b->state)); in read_endio()
1055 clear_bit(B_READING, &b->state); in read_endio()
1058 wake_up_bit(&b->state, B_READING); in read_endio()
1071 struct dm_buffer *b; in new_read() local
1076 b = __bufio_new(c, block, nf, &need_submit, &write_list); in new_read()
1078 if (b && b->hold_count == 1) in new_read()
1079 buffer_record_stack(b); in new_read()
1085 if (!b) in new_read()
1089 submit_io(b, READ, b->block, read_endio); in new_read()
1091 wait_on_bit_io(&b->state, B_READING, TASK_UNINTERRUPTIBLE); in new_read()
1093 if (b->read_error) { in new_read()
1094 int error = b->read_error; in new_read()
1096 dm_bufio_release(b); in new_read()
1101 *bp = b; in new_read()
1103 return b->data; in new_read()
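
new_read() is the common backend for dm_bufio_get(), dm_bufio_read() and dm_bufio_new(); on success the caller owns one hold_count reference and must pair it with dm_bufio_release(). A typical caller, sketched from the exported API (kernel context, error handling trimmed, "c" and "block" assumed from context):

    struct dm_buffer *bp;
    void *data;

    data = dm_bufio_read(c, block, &bp);    /* read (if needed) and pin */
    if (IS_ERR(data))
            return PTR_ERR(data);

    /* ... inspect or modify the block_size bytes at data ... */
    dm_bufio_mark_buffer_dirty(bp);         /* only if we modified it */

    dm_bufio_release(bp);                   /* drop our hold_count */
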
1145 struct dm_buffer *b; in dm_bufio_prefetch() local
1146 b = __bufio_new(c, block, NF_PREFETCH, &need_submit, in dm_bufio_prefetch()
1155 if (unlikely(b != NULL)) { in dm_bufio_prefetch()
1159 submit_io(b, READ, b->block, read_endio); in dm_bufio_prefetch()
1160 dm_bufio_release(b); in dm_bufio_prefetch()
1177 void dm_bufio_release(struct dm_buffer *b) in dm_bufio_release() argument
1179 struct dm_bufio_client *c = b->c; in dm_bufio_release()
1183 BUG_ON(!b->hold_count); in dm_bufio_release()
1185 b->hold_count--; in dm_bufio_release()
1186 if (!b->hold_count) { in dm_bufio_release()
1194 if ((b->read_error || b->write_error) && in dm_bufio_release()
1195 !test_bit(B_READING, &b->state) && in dm_bufio_release()
1196 !test_bit(B_WRITING, &b->state) && in dm_bufio_release()
1197 !test_bit(B_DIRTY, &b->state)) { in dm_bufio_release()
1198 __unlink_buffer(b); in dm_bufio_release()
1199 __free_buffer_wake(b); in dm_bufio_release()
1207 void dm_bufio_mark_buffer_dirty(struct dm_buffer *b) in dm_bufio_mark_buffer_dirty() argument
1209 struct dm_bufio_client *c = b->c; in dm_bufio_mark_buffer_dirty()
1213 BUG_ON(test_bit(B_READING, &b->state)); in dm_bufio_mark_buffer_dirty()
1215 if (!test_and_set_bit(B_DIRTY, &b->state)) in dm_bufio_mark_buffer_dirty()
1216 __relink_lru(b, LIST_DIRTY); in dm_bufio_mark_buffer_dirty()
1246 struct dm_buffer *b, *tmp; in dm_bufio_write_dirty_buffers() local
1257 list_for_each_entry_safe_reverse(b, tmp, &c->lru[LIST_DIRTY], lru_list) { in dm_bufio_write_dirty_buffers()
1263 BUG_ON(test_bit(B_READING, &b->state)); in dm_bufio_write_dirty_buffers()
1265 if (test_bit(B_WRITING, &b->state)) { in dm_bufio_write_dirty_buffers()
1268 b->hold_count++; in dm_bufio_write_dirty_buffers()
1270 wait_on_bit_io(&b->state, B_WRITING, in dm_bufio_write_dirty_buffers()
1273 b->hold_count--; in dm_bufio_write_dirty_buffers()
1275 wait_on_bit_io(&b->state, B_WRITING, in dm_bufio_write_dirty_buffers()
1279 if (!test_bit(B_DIRTY, &b->state) && in dm_bufio_write_dirty_buffers()
1280 !test_bit(B_WRITING, &b->state)) in dm_bufio_write_dirty_buffers()
1281 __relink_lru(b, LIST_CLEAN); in dm_bufio_write_dirty_buffers()
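
In the wait loop above, dm_bufio_write_dirty_buffers() bumps hold_count before dropping the client lock to sleep on B_WRITING, so the buffer cannot be reclaimed or freed out from under the sleeper, then drops the pin after relocking. The pin-while-unlocked pattern in miniature, pthread version with hypothetical names:

    #include <pthread.h>

    struct buf {
        unsigned hold_count;    /* reclaim skips buffers with holds */
    };

    static void wait_unlocked(pthread_mutex_t *lock, struct buf *b,
                              void (*wait_for_io)(struct buf *))
    {
        b->hold_count++;            /* pin before letting go of the lock */
        pthread_mutex_unlock(lock);
        wait_for_io(b);             /* sleep without the client lock held */
        pthread_mutex_lock(lock);
        b->hold_count--;            /* unpin; the list may have changed */
    }
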
1350 void dm_bufio_release_move(struct dm_buffer *b, sector_t new_block) in dm_bufio_release_move() argument
1352 struct dm_bufio_client *c = b->c; in dm_bufio_release_move()
1376 BUG_ON(!b->hold_count); in dm_bufio_release_move()
1377 BUG_ON(test_bit(B_READING, &b->state)); in dm_bufio_release_move()
1379 __write_dirty_buffer(b, NULL); in dm_bufio_release_move()
1380 if (b->hold_count == 1) { in dm_bufio_release_move()
1381 wait_on_bit_io(&b->state, B_WRITING, in dm_bufio_release_move()
1383 set_bit(B_DIRTY, &b->state); in dm_bufio_release_move()
1384 __unlink_buffer(b); in dm_bufio_release_move()
1385 __link_buffer(b, new_block, LIST_DIRTY); in dm_bufio_release_move()
1388 wait_on_bit_lock_io(&b->state, B_WRITING, in dm_bufio_release_move()
1397 old_block = b->block; in dm_bufio_release_move()
1398 __unlink_buffer(b); in dm_bufio_release_move()
1399 __link_buffer(b, new_block, b->list_mode); in dm_bufio_release_move()
1400 submit_io(b, WRITE, new_block, write_endio); in dm_bufio_release_move()
1401 wait_on_bit_io(&b->state, B_WRITING, in dm_bufio_release_move()
1403 __unlink_buffer(b); in dm_bufio_release_move()
1404 __link_buffer(b, old_block, b->list_mode); in dm_bufio_release_move()
1408 dm_bufio_release(b); in dm_bufio_release_move()
1420 struct dm_buffer *b; in dm_bufio_forget() local
1424 b = __find(c, block); in dm_bufio_forget()
1425 if (b && likely(!b->hold_count) && likely(!b->state)) { in dm_bufio_forget()
1426 __unlink_buffer(b); in dm_bufio_forget()
1427 __free_buffer_wake(b); in dm_bufio_forget()
1453 sector_t dm_bufio_get_block_number(struct dm_buffer *b) in dm_bufio_get_block_number() argument
1455 return b->block; in dm_bufio_get_block_number()
1459 void *dm_bufio_get_block_data(struct dm_buffer *b) in dm_bufio_get_block_data() argument
1461 return b->data; in dm_bufio_get_block_data()
1465 void *dm_bufio_get_aux_data(struct dm_buffer *b) in dm_bufio_get_aux_data() argument
1467 return b + 1; in dm_bufio_get_aux_data()
1471 struct dm_bufio_client *dm_bufio_get_client(struct dm_buffer *b) in dm_bufio_get_client() argument
1473 return b->c; in dm_bufio_get_client()
1479 struct dm_buffer *b; in drop_buffers() local
1492 while ((b = __get_unclaimed_buffer(c))) in drop_buffers()
1493 __free_buffer_wake(b); in drop_buffers()
1496 list_for_each_entry(b, &c->lru[i], lru_list) { in drop_buffers()
1500 (unsigned long long)b->block, b->hold_count, i); in drop_buffers()
1502 print_stack_trace(&b->stack_trace, 1); in drop_buffers()
1503 b->hold_count = 0; /* mark unclaimed to avoid BUG_ON below */ in drop_buffers()
1508 while ((b = __get_unclaimed_buffer(c))) in drop_buffers()
1509 __free_buffer_wake(b); in drop_buffers()
1526 static bool __try_evict_buffer(struct dm_buffer *b, gfp_t gfp) in __try_evict_buffer() argument
1529 if (test_bit(B_READING, &b->state) || in __try_evict_buffer()
1530 test_bit(B_WRITING, &b->state) || in __try_evict_buffer()
1531 test_bit(B_DIRTY, &b->state)) in __try_evict_buffer()
1535 if (b->hold_count) in __try_evict_buffer()
1538 __make_buffer_clean(b); in __try_evict_buffer()
1539 __unlink_buffer(b); in __try_evict_buffer()
1540 __free_buffer_wake(b); in __try_evict_buffer()
1555 struct dm_buffer *b, *tmp; in __scan() local
1562 list_for_each_entry_safe_reverse(b, tmp, &c->lru[l], lru_list) { in __scan()
1563 if (__try_evict_buffer(b, gfp_mask)) in __scan()
1688 struct dm_buffer *b = alloc_buffer(c, GFP_KERNEL); in dm_bufio_client_create() local
1690 if (!b) { in dm_bufio_client_create()
1694 __free_buffer_wake(b); in dm_bufio_client_create()
1714 struct dm_buffer *b = list_entry(c->reserved_buffers.next, in dm_bufio_client_create() local
1716 list_del(&b->lru_list); in dm_bufio_client_create()
1717 free_buffer(b); in dm_bufio_client_create()
1751 struct dm_buffer *b = list_entry(c->reserved_buffers.next, in dm_bufio_client_destroy() local
1753 list_del(&b->lru_list); in dm_bufio_client_destroy()
1754 free_buffer(b); in dm_bufio_client_destroy()
1779 static bool older_than(struct dm_buffer *b, unsigned long age_hz) in older_than() argument
1781 return time_after_eq(jiffies, b->last_accessed + age_hz); in older_than()
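
older_than() relies on time_after_eq(), which stays correct across jiffies wraparound by comparing through a signed subtraction rather than a plain >=. A runnable demonstration of the trick:

    #include <stdio.h>

    /* same trick as the kernel's time_after_eq(): true iff a is at or
     * past b, even when the counter has wrapped around zero */
    static int after_eq(unsigned long a, unsigned long b)
    {
        return (long)(a - b) >= 0;
    }

    int main(void)
    {
        unsigned long now = 5;                     /* jiffies just wrapped */
        unsigned long stamp = (unsigned long)-10;  /* recorded before wrap */

        printf("%d\n", after_eq(now, stamp + 20)); /* 0: not old enough */
        printf("%d\n", after_eq(now, stamp + 10)); /* 1: exactly aged out */
        return 0;
    }
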
1786 struct dm_buffer *b, *tmp; in __evict_old_buffers() local
1801 list_for_each_entry_safe_reverse(b, tmp, &c->lru[LIST_CLEAN], lru_list) { in __evict_old_buffers()
1805 if (!older_than(b, age_hz)) in __evict_old_buffers()
1808 if (__try_evict_buffer(b, 0)) in __evict_old_buffers()