Lines matching references to b (the struct dm_buffer pointer) in the dm-bufio buffer cache code. Each entry shows the source line number, the matching line, and the enclosing function; "argument" and "local" mark the lines where b is declared.

242 static void buffer_record_stack(struct dm_buffer *b)  in buffer_record_stack()  argument
244 b->stack_len = stack_trace_save(b->stack_entries, MAX_STACK, 2); in buffer_record_stack()
254 struct dm_buffer *b; in __find() local
257 b = container_of(n, struct dm_buffer, node); in __find()
259 if (b->block == block) in __find()
260 return b; in __find()
262 n = block < b->block ? n->rb_left : n->rb_right; in __find()
271 struct dm_buffer *b; in __find_next() local
275 b = container_of(n, struct dm_buffer, node); in __find_next()
277 if (b->block == block) in __find_next()
278 return b; in __find_next()
280 if (block <= b->block) { in __find_next()
282 best = b; in __find_next()
291 static void __insert(struct dm_bufio_client *c, struct dm_buffer *b) in __insert() argument
299 if (found->block == b->block) { in __insert()
300 BUG_ON(found != b); in __insert()
305 new = b->block < found->block ? in __insert()
309 rb_link_node(&b->node, parent, new); in __insert()
310 rb_insert_color(&b->node, &c->buffer_tree); in __insert()
313 static void __remove(struct dm_bufio_client *c, struct dm_buffer *b) in __remove() argument
315 rb_erase(&b->node, &c->buffer_tree); in __remove()
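The __find()/__find_next()/__insert() fragments above show the per-client buffer index: a tree keyed by block number, descended left or right on comparison, with new nodes linked at the point where the search fell off. Below is a minimal stand-alone sketch of that descend-and-link pattern; it uses a plain (unbalanced) binary search tree and illustrative names (struct buf, find(), insert()) rather than the kernel's rb_node/rb_link_node()/rb_insert_color() API, so it compiles and runs in userspace.

/*
 * Userspace sketch only -- an analogue of the ordered index used by
 * __find()/__insert() above, not kernel code.
 */
#include <stdio.h>
#include <stdlib.h>

struct buf {
	unsigned long block;		/* key, like dm_buffer.block */
	struct buf *left, *right;
};

static struct buf *find(struct buf *root, unsigned long block)
{
	struct buf *n = root;

	while (n) {
		if (n->block == block)
			return n;
		n = block < n->block ? n->left : n->right;
	}
	return NULL;
}

static void insert(struct buf **root, struct buf *b)
{
	struct buf **link = root;

	while (*link) {
		struct buf *found = *link;

		if (found->block == b->block)
			abort();	/* inserting a duplicate block is a bug */
		link = b->block < found->block ? &found->left : &found->right;
	}
	b->left = b->right = NULL;
	*link = b;			/* link the new node where the search ended */
}

int main(void)
{
	struct buf *root = NULL;
	struct buf bufs[3] = { { .block = 5 }, { .block = 2 }, { .block = 9 } };

	for (int i = 0; i < 3; i++)
		insert(&root, &bufs[i]);

	printf("block 9 %s\n", find(root, 9) ? "found" : "missing");
	printf("block 7 %s\n", find(root, 7) ? "found" : "missing");
	return 0;
}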
320 static void adjust_total_allocated(struct dm_buffer *b, bool unlink) in adjust_total_allocated() argument
331 data_mode = b->data_mode; in adjust_total_allocated()
332 diff = (long)b->c->block_size; in adjust_total_allocated()
345 b->accessed = 1; in adjust_total_allocated()
348 list_add(&b->global_list, &global_queue); in adjust_total_allocated()
353 list_del(&b->global_list); in adjust_total_allocated()
470 struct dm_buffer *b = kmem_cache_alloc(c->slab_buffer, gfp_mask); in alloc_buffer() local
472 if (!b) in alloc_buffer()
475 b->c = c; in alloc_buffer()
477 b->data = alloc_buffer_data(c, gfp_mask, &b->data_mode); in alloc_buffer()
478 if (!b->data) { in alloc_buffer()
479 kmem_cache_free(c->slab_buffer, b); in alloc_buffer()
484 b->stack_len = 0; in alloc_buffer()
486 return b; in alloc_buffer()
492 static void free_buffer(struct dm_buffer *b) in free_buffer() argument
494 struct dm_bufio_client *c = b->c; in free_buffer()
496 free_buffer_data(c, b->data, b->data_mode); in free_buffer()
497 kmem_cache_free(c->slab_buffer, b); in free_buffer()
503 static void __link_buffer(struct dm_buffer *b, sector_t block, int dirty) in __link_buffer() argument
505 struct dm_bufio_client *c = b->c; in __link_buffer()
508 b->block = block; in __link_buffer()
509 b->list_mode = dirty; in __link_buffer()
510 list_add(&b->lru_list, &c->lru[dirty]); in __link_buffer()
511 __insert(b->c, b); in __link_buffer()
512 b->last_accessed = jiffies; in __link_buffer()
514 adjust_total_allocated(b, false); in __link_buffer()
520 static void __unlink_buffer(struct dm_buffer *b) in __unlink_buffer() argument
522 struct dm_bufio_client *c = b->c; in __unlink_buffer()
524 BUG_ON(!c->n_buffers[b->list_mode]); in __unlink_buffer()
526 c->n_buffers[b->list_mode]--; in __unlink_buffer()
527 __remove(b->c, b); in __unlink_buffer()
528 list_del(&b->lru_list); in __unlink_buffer()
530 adjust_total_allocated(b, true); in __unlink_buffer()
536 static void __relink_lru(struct dm_buffer *b, int dirty) in __relink_lru() argument
538 struct dm_bufio_client *c = b->c; in __relink_lru()
540 b->accessed = 1; in __relink_lru()
542 BUG_ON(!c->n_buffers[b->list_mode]); in __relink_lru()
544 c->n_buffers[b->list_mode]--; in __relink_lru()
546 b->list_mode = dirty; in __relink_lru()
547 list_move(&b->lru_list, &c->lru[dirty]); in __relink_lru()
548 b->last_accessed = jiffies; in __relink_lru()
575 struct dm_buffer *b = context; in dmio_complete() local
577 b->end_io(b, unlikely(error != 0) ? BLK_STS_IOERR : 0); in dmio_complete()
580 static void use_dmio(struct dm_buffer *b, int rw, sector_t sector, in use_dmio() argument
588 .notify.context = b, in use_dmio()
589 .client = b->c->dm_io, in use_dmio()
592 .bdev = b->c->bdev, in use_dmio()
597 if (b->data_mode != DATA_MODE_VMALLOC) { in use_dmio()
599 io_req.mem.ptr.addr = (char *)b->data + offset; in use_dmio()
602 io_req.mem.ptr.vma = (char *)b->data + offset; in use_dmio()
607 b->end_io(b, errno_to_blk_status(r)); in use_dmio()
612 struct dm_buffer *b = bio->bi_private; in bio_complete() local
615 b->end_io(b, status); in bio_complete()
618 static void use_bio(struct dm_buffer *b, int rw, sector_t sector, in use_bio() argument
625 vec_size = b->c->block_size >> PAGE_SHIFT; in use_bio()
626 if (unlikely(b->c->sectors_per_block_bits < PAGE_SHIFT - SECTOR_SHIFT)) in use_bio()
632 use_dmio(b, rw, sector, n_sectors, offset); in use_bio()
637 bio_set_dev(bio, b->c->bdev); in use_bio()
640 bio->bi_private = b; in use_bio()
642 ptr = (char *)b->data + offset; in use_bio()
673 static void submit_io(struct dm_buffer *b, int rw, void (*end_io)(struct dm_buffer *, blk_status_t)) in submit_io() argument
679 b->end_io = end_io; in submit_io()
681 sector = block_to_sector(b->c, b->block); in submit_io()
684 n_sectors = b->c->block_size >> SECTOR_SHIFT; in submit_io()
687 if (b->c->write_callback) in submit_io()
688 b->c->write_callback(b); in submit_io()
689 offset = b->write_start; in submit_io()
690 end = b->write_end; in submit_io()
694 if (unlikely(end > b->c->block_size)) in submit_io()
695 end = b->c->block_size; in submit_io()
701 if (b->data_mode != DATA_MODE_VMALLOC) in submit_io()
702 use_bio(b, rw, sector, n_sectors, offset); in submit_io()
704 use_dmio(b, rw, sector, n_sectors, offset); in submit_io()
717 static void write_endio(struct dm_buffer *b, blk_status_t status) in write_endio() argument
719 b->write_error = status; in write_endio()
721 struct dm_bufio_client *c = b->c; in write_endio()
727 BUG_ON(!test_bit(B_WRITING, &b->state)); in write_endio()
730 clear_bit(B_WRITING, &b->state); in write_endio()
733 wake_up_bit(&b->state, B_WRITING); in write_endio()
745 static void __write_dirty_buffer(struct dm_buffer *b, in __write_dirty_buffer() argument
748 if (!test_bit(B_DIRTY, &b->state)) in __write_dirty_buffer()
751 clear_bit(B_DIRTY, &b->state); in __write_dirty_buffer()
752 wait_on_bit_lock_io(&b->state, B_WRITING, TASK_UNINTERRUPTIBLE); in __write_dirty_buffer()
754 b->write_start = b->dirty_start; in __write_dirty_buffer()
755 b->write_end = b->dirty_end; in __write_dirty_buffer()
758 submit_io(b, REQ_OP_WRITE, write_endio); in __write_dirty_buffer()
760 list_add_tail(&b->write_list, write_list); in __write_dirty_buffer()
768 struct dm_buffer *b = in __flush_write_list() local
770 list_del(&b->write_list); in __flush_write_list()
771 submit_io(b, REQ_OP_WRITE, write_endio); in __flush_write_list()
782 static void __make_buffer_clean(struct dm_buffer *b) in __make_buffer_clean() argument
784 BUG_ON(b->hold_count); in __make_buffer_clean()
786 if (!b->state) /* fast case */ in __make_buffer_clean()
789 wait_on_bit_io(&b->state, B_READING, TASK_UNINTERRUPTIBLE); in __make_buffer_clean()
790 __write_dirty_buffer(b, NULL); in __make_buffer_clean()
791 wait_on_bit_io(&b->state, B_WRITING, TASK_UNINTERRUPTIBLE); in __make_buffer_clean()
800 struct dm_buffer *b; in __get_unclaimed_buffer() local
802 list_for_each_entry_reverse(b, &c->lru[LIST_CLEAN], lru_list) { in __get_unclaimed_buffer()
803 BUG_ON(test_bit(B_WRITING, &b->state)); in __get_unclaimed_buffer()
804 BUG_ON(test_bit(B_DIRTY, &b->state)); in __get_unclaimed_buffer()
806 if (!b->hold_count) { in __get_unclaimed_buffer()
807 __make_buffer_clean(b); in __get_unclaimed_buffer()
808 __unlink_buffer(b); in __get_unclaimed_buffer()
809 return b; in __get_unclaimed_buffer()
814 list_for_each_entry_reverse(b, &c->lru[LIST_DIRTY], lru_list) { in __get_unclaimed_buffer()
815 BUG_ON(test_bit(B_READING, &b->state)); in __get_unclaimed_buffer()
817 if (!b->hold_count) { in __get_unclaimed_buffer()
818 __make_buffer_clean(b); in __get_unclaimed_buffer()
819 __unlink_buffer(b); in __get_unclaimed_buffer()
820 return b; in __get_unclaimed_buffer()
865 struct dm_buffer *b; in __alloc_buffer_wait_no_callback() local
883 b = alloc_buffer(c, GFP_NOWAIT | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN); in __alloc_buffer_wait_no_callback()
884 if (b) in __alloc_buffer_wait_no_callback()
885 return b; in __alloc_buffer_wait_no_callback()
893 b = alloc_buffer(c, GFP_NOIO | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN); in __alloc_buffer_wait_no_callback()
895 if (b) in __alloc_buffer_wait_no_callback()
896 return b; in __alloc_buffer_wait_no_callback()
901 b = list_entry(c->reserved_buffers.next, in __alloc_buffer_wait_no_callback()
903 list_del(&b->lru_list); in __alloc_buffer_wait_no_callback()
906 return b; in __alloc_buffer_wait_no_callback()
909 b = __get_unclaimed_buffer(c); in __alloc_buffer_wait_no_callback()
910 if (b) in __alloc_buffer_wait_no_callback()
911 return b; in __alloc_buffer_wait_no_callback()
919 struct dm_buffer *b = __alloc_buffer_wait_no_callback(c, nf); in __alloc_buffer_wait() local
921 if (!b) in __alloc_buffer_wait()
925 c->alloc_callback(b); in __alloc_buffer_wait()
927 return b; in __alloc_buffer_wait()
933 static void __free_buffer_wake(struct dm_buffer *b) in __free_buffer_wake() argument
935 struct dm_bufio_client *c = b->c; in __free_buffer_wake()
938 free_buffer(b); in __free_buffer_wake()
940 list_add(&b->lru_list, &c->reserved_buffers); in __free_buffer_wake()
950 struct dm_buffer *b, *tmp; in __write_dirty_buffers_async() local
952 list_for_each_entry_safe_reverse(b, tmp, &c->lru[LIST_DIRTY], lru_list) { in __write_dirty_buffers_async()
953 BUG_ON(test_bit(B_READING, &b->state)); in __write_dirty_buffers_async()
955 if (!test_bit(B_DIRTY, &b->state) && in __write_dirty_buffers_async()
956 !test_bit(B_WRITING, &b->state)) { in __write_dirty_buffers_async()
957 __relink_lru(b, LIST_CLEAN); in __write_dirty_buffers_async()
961 if (no_wait && test_bit(B_WRITING, &b->state)) in __write_dirty_buffers_async()
964 __write_dirty_buffer(b, write_list); in __write_dirty_buffers_async()
989 struct dm_buffer *b, *new_b = NULL; in __bufio_new() local
993 b = __find(c, block); in __bufio_new()
994 if (b) in __bufio_new()
1008 b = __find(c, block); in __bufio_new()
1009 if (b) { in __bufio_new()
1016 b = new_b; in __bufio_new()
1017 b->hold_count = 1; in __bufio_new()
1018 b->read_error = 0; in __bufio_new()
1019 b->write_error = 0; in __bufio_new()
1020 __link_buffer(b, block, LIST_CLEAN); in __bufio_new()
1023 b->state = 0; in __bufio_new()
1024 return b; in __bufio_new()
1027 b->state = 1 << B_READING; in __bufio_new()
1030 return b; in __bufio_new()
1042 if (nf == NF_GET && unlikely(test_bit(B_READING, &b->state))) in __bufio_new()
1045 b->hold_count++; in __bufio_new()
1046 __relink_lru(b, test_bit(B_DIRTY, &b->state) || in __bufio_new()
1047 test_bit(B_WRITING, &b->state)); in __bufio_new()
1048 return b; in __bufio_new()
1055 static void read_endio(struct dm_buffer *b, blk_status_t status) in read_endio() argument
1057 b->read_error = status; in read_endio()
1059 BUG_ON(!test_bit(B_READING, &b->state)); in read_endio()
1062 clear_bit(B_READING, &b->state); in read_endio()
1065 wake_up_bit(&b->state, B_READING); in read_endio()
1078 struct dm_buffer *b; in new_read() local
1083 b = __bufio_new(c, block, nf, &need_submit, &write_list); in new_read()
1085 if (b && b->hold_count == 1) in new_read()
1086 buffer_record_stack(b); in new_read()
1092 if (!b) in new_read()
1096 submit_io(b, REQ_OP_READ, read_endio); in new_read()
1098 wait_on_bit_io(&b->state, B_READING, TASK_UNINTERRUPTIBLE); in new_read()
1100 if (b->read_error) { in new_read()
1101 int error = blk_status_to_errno(b->read_error); in new_read()
1103 dm_bufio_release(b); in new_read()
1108 *bp = b; in new_read()
1110 return b->data; in new_read()
1152 struct dm_buffer *b; in dm_bufio_prefetch() local
1153 b = __bufio_new(c, block, NF_PREFETCH, &need_submit, in dm_bufio_prefetch()
1162 if (unlikely(b != NULL)) { in dm_bufio_prefetch()
1166 submit_io(b, REQ_OP_READ, read_endio); in dm_bufio_prefetch()
1167 dm_bufio_release(b); in dm_bufio_prefetch()
1184 void dm_bufio_release(struct dm_buffer *b) in dm_bufio_release() argument
1186 struct dm_bufio_client *c = b->c; in dm_bufio_release()
1190 BUG_ON(!b->hold_count); in dm_bufio_release()
1192 b->hold_count--; in dm_bufio_release()
1193 if (!b->hold_count) { in dm_bufio_release()
1201 if ((b->read_error || b->write_error) && in dm_bufio_release()
1202 !test_bit(B_READING, &b->state) && in dm_bufio_release()
1203 !test_bit(B_WRITING, &b->state) && in dm_bufio_release()
1204 !test_bit(B_DIRTY, &b->state)) { in dm_bufio_release()
1205 __unlink_buffer(b); in dm_bufio_release()
1206 __free_buffer_wake(b); in dm_bufio_release()
1214 void dm_bufio_mark_partial_buffer_dirty(struct dm_buffer *b, in dm_bufio_mark_partial_buffer_dirty() argument
1217 struct dm_bufio_client *c = b->c; in dm_bufio_mark_partial_buffer_dirty()
1220 BUG_ON(end > b->c->block_size); in dm_bufio_mark_partial_buffer_dirty()
1224 BUG_ON(test_bit(B_READING, &b->state)); in dm_bufio_mark_partial_buffer_dirty()
1226 if (!test_and_set_bit(B_DIRTY, &b->state)) { in dm_bufio_mark_partial_buffer_dirty()
1227 b->dirty_start = start; in dm_bufio_mark_partial_buffer_dirty()
1228 b->dirty_end = end; in dm_bufio_mark_partial_buffer_dirty()
1229 __relink_lru(b, LIST_DIRTY); in dm_bufio_mark_partial_buffer_dirty()
1231 if (start < b->dirty_start) in dm_bufio_mark_partial_buffer_dirty()
1232 b->dirty_start = start; in dm_bufio_mark_partial_buffer_dirty()
1233 if (end > b->dirty_end) in dm_bufio_mark_partial_buffer_dirty()
1234 b->dirty_end = end; in dm_bufio_mark_partial_buffer_dirty()
1241 void dm_bufio_mark_buffer_dirty(struct dm_buffer *b) in dm_bufio_mark_buffer_dirty() argument
1243 dm_bufio_mark_partial_buffer_dirty(b, 0, b->c->block_size); in dm_bufio_mark_buffer_dirty()
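The dm_bufio_mark_partial_buffer_dirty() fragments above keep a single dirty byte range per buffer: the first mark records [start, end), later marks only widen the existing range, and dm_bufio_mark_buffer_dirty() simply marks the whole block. A small sketch of that range-merging logic, with illustrative names and a plain bool standing in for the B_DIRTY state bit:

/* Userspace sketch only -- the struct and helper names are illustrative. */
#include <assert.h>
#include <stdbool.h>

struct fake_buffer {
	bool dirty;				/* stands in for the B_DIRTY bit */
	unsigned dirty_start, dirty_end;	/* dirty byte range within the block */
};

static void mark_partial_dirty(struct fake_buffer *b, unsigned start, unsigned end)
{
	if (!b->dirty) {
		/* first mark: record the range as-is */
		b->dirty = true;
		b->dirty_start = start;
		b->dirty_end = end;
	} else {
		/* later marks: only widen the existing range */
		if (start < b->dirty_start)
			b->dirty_start = start;
		if (end > b->dirty_end)
			b->dirty_end = end;
	}
}

int main(void)
{
	struct fake_buffer b = { 0 };

	mark_partial_dirty(&b, 512, 1024);
	mark_partial_dirty(&b, 0, 256);
	assert(b.dirty_start == 0 && b.dirty_end == 1024);
	return 0;
}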
1271 struct dm_buffer *b, *tmp; in dm_bufio_write_dirty_buffers() local
1282 list_for_each_entry_safe_reverse(b, tmp, &c->lru[LIST_DIRTY], lru_list) { in dm_bufio_write_dirty_buffers()
1288 BUG_ON(test_bit(B_READING, &b->state)); in dm_bufio_write_dirty_buffers()
1290 if (test_bit(B_WRITING, &b->state)) { in dm_bufio_write_dirty_buffers()
1293 b->hold_count++; in dm_bufio_write_dirty_buffers()
1295 wait_on_bit_io(&b->state, B_WRITING, in dm_bufio_write_dirty_buffers()
1298 b->hold_count--; in dm_bufio_write_dirty_buffers()
1300 wait_on_bit_io(&b->state, B_WRITING, in dm_bufio_write_dirty_buffers()
1304 if (!test_bit(B_DIRTY, &b->state) && in dm_bufio_write_dirty_buffers()
1305 !test_bit(B_WRITING, &b->state)) in dm_bufio_write_dirty_buffers()
1306 __relink_lru(b, LIST_CLEAN); in dm_bufio_write_dirty_buffers()
1399 void dm_bufio_release_move(struct dm_buffer *b, sector_t new_block) in dm_bufio_release_move() argument
1401 struct dm_bufio_client *c = b->c; in dm_bufio_release_move()
1425 BUG_ON(!b->hold_count); in dm_bufio_release_move()
1426 BUG_ON(test_bit(B_READING, &b->state)); in dm_bufio_release_move()
1428 __write_dirty_buffer(b, NULL); in dm_bufio_release_move()
1429 if (b->hold_count == 1) { in dm_bufio_release_move()
1430 wait_on_bit_io(&b->state, B_WRITING, in dm_bufio_release_move()
1432 set_bit(B_DIRTY, &b->state); in dm_bufio_release_move()
1433 b->dirty_start = 0; in dm_bufio_release_move()
1434 b->dirty_end = c->block_size; in dm_bufio_release_move()
1435 __unlink_buffer(b); in dm_bufio_release_move()
1436 __link_buffer(b, new_block, LIST_DIRTY); in dm_bufio_release_move()
1439 wait_on_bit_lock_io(&b->state, B_WRITING, in dm_bufio_release_move()
1448 old_block = b->block; in dm_bufio_release_move()
1449 __unlink_buffer(b); in dm_bufio_release_move()
1450 __link_buffer(b, new_block, b->list_mode); in dm_bufio_release_move()
1451 submit_io(b, REQ_OP_WRITE, write_endio); in dm_bufio_release_move()
1452 wait_on_bit_io(&b->state, B_WRITING, in dm_bufio_release_move()
1454 __unlink_buffer(b); in dm_bufio_release_move()
1455 __link_buffer(b, old_block, b->list_mode); in dm_bufio_release_move()
1459 dm_bufio_release(b); in dm_bufio_release_move()
1463 static void forget_buffer_locked(struct dm_buffer *b) in forget_buffer_locked() argument
1465 if (likely(!b->hold_count) && likely(!b->state)) { in forget_buffer_locked()
1466 __unlink_buffer(b); in forget_buffer_locked()
1467 __free_buffer_wake(b); in forget_buffer_locked()
1479 struct dm_buffer *b; in dm_bufio_forget() local
1483 b = __find(c, block); in dm_bufio_forget()
1484 if (b) in dm_bufio_forget()
1485 forget_buffer_locked(b); in dm_bufio_forget()
1493 struct dm_buffer *b; in dm_bufio_forget_buffers() local
1499 b = __find_next(c, block); in dm_bufio_forget_buffers()
1500 if (b) { in dm_bufio_forget_buffers()
1501 block = b->block + 1; in dm_bufio_forget_buffers()
1502 forget_buffer_locked(b); in dm_bufio_forget_buffers()
1507 if (!b) in dm_bufio_forget_buffers()
1547 sector_t dm_bufio_get_block_number(struct dm_buffer *b) in dm_bufio_get_block_number() argument
1549 return b->block; in dm_bufio_get_block_number()
1553 void *dm_bufio_get_block_data(struct dm_buffer *b) in dm_bufio_get_block_data() argument
1555 return b->data; in dm_bufio_get_block_data()
1559 void *dm_bufio_get_aux_data(struct dm_buffer *b) in dm_bufio_get_aux_data() argument
1561 return b + 1; in dm_bufio_get_aux_data()
1565 struct dm_bufio_client *dm_bufio_get_client(struct dm_buffer *b) in dm_bufio_get_client() argument
1567 return b->c; in dm_bufio_get_client()
1573 struct dm_buffer *b; in drop_buffers() local
1586 while ((b = __get_unclaimed_buffer(c))) in drop_buffers()
1587 __free_buffer_wake(b); in drop_buffers()
1590 list_for_each_entry(b, &c->lru[i], lru_list) { in drop_buffers()
1594 (unsigned long long)b->block, b->hold_count, i); in drop_buffers()
1596 stack_trace_print(b->stack_entries, b->stack_len, 1); in drop_buffers()
1598 b->hold_count = 0; in drop_buffers()
1603 while ((b = __get_unclaimed_buffer(c))) in drop_buffers()
1604 __free_buffer_wake(b); in drop_buffers()
1621 static bool __try_evict_buffer(struct dm_buffer *b, gfp_t gfp) in __try_evict_buffer() argument
1624 if (test_bit(B_READING, &b->state) || in __try_evict_buffer()
1625 test_bit(B_WRITING, &b->state) || in __try_evict_buffer()
1626 test_bit(B_DIRTY, &b->state)) in __try_evict_buffer()
1630 if (b->hold_count) in __try_evict_buffer()
1633 __make_buffer_clean(b); in __try_evict_buffer()
1634 __unlink_buffer(b); in __try_evict_buffer()
1635 __free_buffer_wake(b); in __try_evict_buffer()
1653 struct dm_buffer *b, *tmp; in __scan() local
1660 list_for_each_entry_safe_reverse(b, tmp, &c->lru[l], lru_list) { in __scan()
1665 if (__try_evict_buffer(b, GFP_KERNEL)) { in __scan()
1794 struct dm_buffer *b = alloc_buffer(c, GFP_KERNEL); in dm_bufio_client_create() local
1796 if (!b) { in dm_bufio_client_create()
1800 __free_buffer_wake(b); in dm_bufio_client_create()
1824 struct dm_buffer *b = list_entry(c->reserved_buffers.next, in dm_bufio_client_create() local
1826 list_del(&b->lru_list); in dm_bufio_client_create()
1827 free_buffer(b); in dm_bufio_client_create()
1865 struct dm_buffer *b = list_entry(c->reserved_buffers.next, in dm_bufio_client_destroy() local
1867 list_del(&b->lru_list); in dm_bufio_client_destroy()
1868 free_buffer(b); in dm_bufio_client_destroy()
1902 static bool older_than(struct dm_buffer *b, unsigned long age_hz) in older_than() argument
1904 return time_after_eq(jiffies, b->last_accessed + age_hz); in older_than()
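older_than() above uses time_after_eq() to compare jiffies timestamps; the comparison is made on the signed difference, which keeps it correct across counter wraparound. A minimal userspace analogue of that comparison (not the kernel macro itself):

/* Userspace sketch only -- mimics the signed-difference trick of time_after_eq(). */
#include <assert.h>

static int time_after_eq_sketch(unsigned long a, unsigned long b)
{
	return (long)(a - b) >= 0;	/* true if a is at or after b, even across wrap */
}

int main(void)
{
	unsigned long last_accessed = (unsigned long)-10;	/* just before the counter wraps */
	unsigned long age = 100;
	unsigned long now = 95;					/* after the wrap */

	/* 105 ticks have elapsed since last_accessed, so the buffer counts as old. */
	assert(time_after_eq_sketch(now, last_accessed + age));
	return 0;
}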
1909 struct dm_buffer *b, *tmp; in __evict_old_buffers() local
1924 list_for_each_entry_safe_reverse(b, tmp, &c->lru[LIST_CLEAN], lru_list) { in __evict_old_buffers()
1928 if (!older_than(b, age_hz)) in __evict_old_buffers()
1931 if (__try_evict_buffer(b, 0)) in __evict_old_buffers()
1944 struct dm_buffer *b; in do_global_cleanup() local
1965 b = list_entry(global_queue.prev, struct dm_buffer, global_list); in do_global_cleanup()
1967 if (b->accessed) { in do_global_cleanup()
1968 b->accessed = 0; in do_global_cleanup()
1969 list_move(&b->global_list, &global_queue); in do_global_cleanup()
1976 current_client = b->c; in do_global_cleanup()
1993 if (unlikely(!__try_evict_buffer(b, GFP_KERNEL))) { in do_global_cleanup()
1995 list_move(&b->global_list, &global_queue); in do_global_cleanup()