Lines Matching refs:bc
171 static int split_range(struct bow_context *bc, struct bow_range **br, in split_range() argument
190 list_add(&leading_br->trimmed_list, &bc->trimmed_list); in split_range()
192 add_before(&bc->ranges, leading_br, *br); in split_range()
209 add_before(&bc->ranges, new_br, *br); in split_range()
220 static void set_type(struct bow_context *bc, struct bow_range **br, int type) in set_type() argument
228 bc->trims_total -= range_size(*br); in set_type()
233 bc->trims_total += range_size(*br); in set_type()
234 list_add(&(*br)->trimmed_list, &bc->trimmed_list); in set_type()
242 rb_erase(&next->node, &bc->ranges); in set_type()
249 rb_erase(&(*br)->node, &bc->ranges); in set_type()
256 static struct bow_range *find_free_range(struct bow_context *bc) in find_free_range() argument
258 if (list_empty(&bc->trimmed_list)) { in find_free_range()
263 return list_first_entry(&bc->trimmed_list, struct bow_range, in find_free_range()
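The find_free_range() matches above cover almost the whole helper: the pool of backup space is simply the list of TRIMMED ranges hanging off bc->trimmed_list. A reconstruction under those assumptions (the error message and any details beyond the matches are illustrative, not copied from the file):

	/* Sketch: pick the first TRIMMED range as backup space; field names are
	 * assumed from the matches above.
	 */
	static struct bow_range *find_free_range(struct bow_context *bc)
	{
		if (list_empty(&bc->trimmed_list)) {
			DMERR("no trimmed space available to back up to");
			return NULL;
		}

		return list_first_entry(&bc->trimmed_list, struct bow_range,
					trimmed_list);
	}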
267 static sector_t sector_to_page(struct bow_context const *bc, sector_t sector) in sector_to_page() argument
269 WARN_ON((sector & (((sector_t)1 << (bc->block_shift - SECTOR_SHIFT)) - 1)) in sector_to_page()
271 return sector >> (bc->block_shift - SECTOR_SHIFT); in sector_to_page()
274 static int copy_data(struct bow_context const *bc, in copy_data() argument
286 *checksum = sector_to_page(bc, source->sector); in copy_data()
288 for (i = 0; i < range_size(source) >> bc->block_shift; ++i) { in copy_data()
291 sector_t page = sector_to_page(bc, source->sector) + i; in copy_data()
293 read = dm_bufio_read(bc->bufio, page, &read_buffer); in copy_data()
301 *checksum = crc32(*checksum, read, bc->block_size); in copy_data()
303 write = dm_bufio_new(bc->bufio, in copy_data()
304 sector_to_page(bc, dest->sector) + i, in copy_data()
312 memcpy(write, read, bc->block_size); in copy_data()
319 dm_bufio_write_dirty_buffers(bc->bufio); in copy_data()
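The copy_data() matches show the dm-bufio copy loop: each source block is read, optionally folded into a CRC32 checksum, copied into a freshly created buffer at the destination block, and dirty buffers are flushed at the end. A minimal sketch of that pattern (helper name, parameters and error handling are illustrative, not taken from the file):

	#include <linux/dm-bufio.h>	/* drivers/md/dm-bufio.h on older trees */
	#include <linux/crc32.h>
	#include <linux/string.h>

	/*
	 * Copy nr_blocks blocks from src_block to dst_block through a dm-bufio
	 * client whose buffer size is block_size. If checksum is non-NULL,
	 * accumulate a CRC32 of the source data into it.
	 */
	static int copy_blocks(struct dm_bufio_client *bufio, sector_t src_block,
			       sector_t dst_block, unsigned int nr_blocks,
			       unsigned int block_size, u32 *checksum)
	{
		unsigned int i;

		for (i = 0; i < nr_blocks; ++i) {
			struct dm_buffer *read_buffer, *write_buffer;
			u8 *read, *write;

			read = dm_bufio_read(bufio, src_block + i, &read_buffer);
			if (IS_ERR(read))
				return PTR_ERR(read);

			if (checksum)
				*checksum = crc32(*checksum, read, block_size);

			write = dm_bufio_new(bufio, dst_block + i, &write_buffer);
			if (IS_ERR(write)) {
				dm_bufio_release(read_buffer);
				return PTR_ERR(write);
			}

			memcpy(write, read, block_size);
			dm_bufio_mark_buffer_dirty(write_buffer);
			dm_bufio_release(write_buffer);
			dm_bufio_release(read_buffer);
		}

		return dm_bufio_write_dirty_buffers(bufio);
	}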
325 static int add_log_entry(struct bow_context *bc, sector_t source, sector_t dest,
328 static int backup_log_sector(struct bow_context *bc) in backup_log_sector() argument
335 first_br = container_of(rb_first(&bc->ranges), struct bow_range, node); in backup_log_sector()
342 if (range_size(first_br) != bc->block_size) { in backup_log_sector()
347 free_br = find_free_range(bc); in backup_log_sector()
352 bi_iter.bi_size = bc->block_size; in backup_log_sector()
353 ret = split_range(bc, &free_br, &bi_iter); in backup_log_sector()
356 if (bi_iter.bi_size != bc->block_size) { in backup_log_sector()
361 ret = copy_data(bc, first_br, free_br, &checksum); in backup_log_sector()
365 bc->log_sector->count = 0; in backup_log_sector()
366 bc->log_sector->sequence++; in backup_log_sector()
367 ret = add_log_entry(bc, first_br->sector, free_br->sector, in backup_log_sector()
372 set_type(bc, &free_br, BACKUP); in backup_log_sector()
376 static int add_log_entry(struct bow_context *bc, sector_t source, sector_t dest, in add_log_entry() argument
383 + sizeof(struct log_entry) * (bc->log_sector->count + 1) in add_log_entry()
384 > bc->block_size) { in add_log_entry()
385 int ret = backup_log_sector(bc); in add_log_entry()
391 sector = dm_bufio_new(bc->bufio, 0, &sector_buffer); in add_log_entry()
398 bc->log_sector->entries[bc->log_sector->count].source = source; in add_log_entry()
399 bc->log_sector->entries[bc->log_sector->count].dest = dest; in add_log_entry()
400 bc->log_sector->entries[bc->log_sector->count].size = size; in add_log_entry()
401 bc->log_sector->entries[bc->log_sector->count].checksum = checksum; in add_log_entry()
402 bc->log_sector->count++; in add_log_entry()
404 memcpy(sector, bc->log_sector, bc->block_size); in add_log_entry()
407 dm_bufio_write_dirty_buffers(bc->bufio); in add_log_entry()
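Together, the add_log_entry() matches outline the append step: if one more log_entry would overflow the block, backup_log_sector() relocates the log first; block 0 is then re-created through dm_bufio, the in-memory bc->log_sector image gets the new entry, and the whole block is rewritten and flushed. A sketch of the persist step alone, assuming bc->log_sector mirrors the on-disk block with entries[] at its tail (field types are guesses):

	/* Sketch: append one entry to the in-memory log sector and rewrite block 0.
	 * The bow_context/log_sector layouts are assumptions based on the matches.
	 */
	static int append_and_persist(struct bow_context *bc, sector_t source,
				      sector_t dest, unsigned int size, u32 checksum)
	{
		struct dm_buffer *sector_buffer;
		void *sector;
		u32 n = bc->log_sector->count;

		/* Block 0 of the backing device always holds the current log sector. */
		sector = dm_bufio_new(bc->bufio, 0, &sector_buffer);
		if (IS_ERR(sector))
			return PTR_ERR(sector);

		bc->log_sector->entries[n].source = source;
		bc->log_sector->entries[n].dest = dest;
		bc->log_sector->entries[n].size = size;
		bc->log_sector->entries[n].checksum = checksum;
		bc->log_sector->count = n + 1;

		/* The whole block image is kept in memory, so write it verbatim. */
		memcpy(sector, bc->log_sector, bc->block_size);
		dm_bufio_mark_buffer_dirty(sector_buffer);
		dm_bufio_release(sector_buffer);
		return dm_bufio_write_dirty_buffers(bc->bufio);
	}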
411 static int prepare_log(struct bow_context *bc) in prepare_log() argument
419 first_br = container_of(rb_first(&bc->ranges), struct bow_range, node); in prepare_log()
425 if (range_size(first_br) < bc->block_size) { in prepare_log()
430 bi_iter.bi_size = bc->block_size; in prepare_log()
431 ret = split_range(bc, &first_br, &bi_iter); in prepare_log()
435 if (range_size(first_br) != bc->block_size) { in prepare_log()
441 free_br = find_free_range(bc); in prepare_log()
445 bi_iter.bi_size = bc->block_size; in prepare_log()
446 ret = split_range(bc, &free_br, &bi_iter); in prepare_log()
451 ret = copy_data(bc, first_br, free_br, NULL); in prepare_log()
455 bc->log_sector->sector0 = free_br->sector; in prepare_log()
457 set_type(bc, &free_br, SECTOR0_CURRENT); in prepare_log()
460 free_br = find_free_range(bc); in prepare_log()
464 bi_iter.bi_size = bc->block_size; in prepare_log()
465 ret = split_range(bc, &free_br, &bi_iter); in prepare_log()
470 ret = copy_data(bc, first_br, free_br, &checksum); in prepare_log()
478 bc->log_sector->magic = MAGIC; in prepare_log()
479 bc->log_sector->header_version = HEADER_VERSION; in prepare_log()
480 bc->log_sector->header_size = sizeof(*bc->log_sector); in prepare_log()
481 bc->log_sector->block_size = bc->block_size; in prepare_log()
482 bc->log_sector->count = 0; in prepare_log()
483 bc->log_sector->sequence = 0; in prepare_log()
486 ret = add_log_entry(bc, first_br->sector, free_br->sector, in prepare_log()
491 set_type(bc, &free_br, BACKUP); in prepare_log()
495 static struct bow_range *find_sector0_current(struct bow_context *bc) in find_sector0_current() argument
499 bi_iter.bi_sector = bc->log_sector->sector0; in find_sector0_current()
500 bi_iter.bi_size = bc->block_size; in find_sector0_current()
501 return find_first_overlapping_range(&bc->ranges, &bi_iter); in find_sector0_current()
509 struct bow_context *bc = container_of(kobj, struct bow_context, in state_show() local
512 return scnprintf(buf, PAGE_SIZE, "%d\n", atomic_read(&bc->state)); in state_show()
518 struct bow_context *bc = container_of(kobj, struct bow_context, in state_store() local
529 mutex_lock(&bc->ranges_lock); in state_store()
530 original_state = atomic_read(&bc->state); in state_store()
542 ret = prepare_log(bc); in state_store()
548 struct bow_range *br = find_sector0_current(bc); in state_store()
550 container_of(rb_first(&bc->ranges), struct bow_range, in state_store()
553 ret = copy_data(bc, br, sector0_br, 0); in state_store()
559 atomic_inc(&bc->state); in state_store()
563 mutex_unlock(&bc->ranges_lock); in state_store()
570 struct bow_context *bc = container_of(kobj, struct bow_context, in free_show() local
574 mutex_lock(&bc->ranges_lock); in free_show()
575 trims_total = bc->trims_total; in free_show()
576 mutex_unlock(&bc->ranges_lock); in free_show()
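state_show(), state_store() and free_show() follow the embedded-kobject pattern: the kobject lives inside the context (kobj_holder.kobj), so each callback recovers its bow_context with container_of() and takes ranges_lock before touching shared state. A minimal sketch of the read side, assuming kobj_attribute-style callbacks and a 64-bit trims_total (both assumptions):

	/* Sketch: sysfs read callback recovering its owning bow_context. */
	static ssize_t free_show(struct kobject *kobj, struct kobj_attribute *attr,
				 char *buf)
	{
		struct bow_context *bc = container_of(kobj, struct bow_context,
						      kobj_holder.kobj);
		u64 trims_total;

		mutex_lock(&bc->ranges_lock);
		trims_total = bc->trims_total;
		mutex_unlock(&bc->ranges_lock);

		return scnprintf(buf, PAGE_SIZE, "%llu\n", trims_total);
	}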
600 struct bow_context *bc = (struct bow_context *) ti->private; in dm_bow_dtr() local
603 if (bc->workqueue) in dm_bow_dtr()
604 destroy_workqueue(bc->workqueue); in dm_bow_dtr()
605 if (bc->bufio) in dm_bow_dtr()
606 dm_bufio_client_destroy(bc->bufio); in dm_bow_dtr()
608 kobj = &bc->kobj_holder.kobj; in dm_bow_dtr()
614 mutex_lock(&bc->ranges_lock); in dm_bow_dtr()
615 while (rb_first(&bc->ranges)) { in dm_bow_dtr()
616 struct bow_range *br = container_of(rb_first(&bc->ranges), in dm_bow_dtr()
619 rb_erase(&br->node, &bc->ranges); in dm_bow_dtr()
622 mutex_unlock(&bc->ranges_lock); in dm_bow_dtr()
624 mutex_destroy(&bc->ranges_lock); in dm_bow_dtr()
625 kfree(bc->log_sector); in dm_bow_dtr()
626 kfree(bc); in dm_bow_dtr()
631 struct bow_context *bc = ti->private; in dm_bow_io_hints() local
632 const unsigned int block_size = bc->block_size; in dm_bow_io_hints()
644 bc->forward_trims = false; in dm_bow_io_hints()
647 bc->forward_trims = true; in dm_bow_io_hints()
653 struct bow_context *bc = ti->private; in dm_bow_ctr_optional() local
678 &bc->block_size, &dummy) == 1) { in dm_bow_ctr_optional()
679 if (bc->block_size < SECTOR_SIZE || in dm_bow_ctr_optional()
680 bc->block_size > 4096 || in dm_bow_ctr_optional()
681 !is_power_of_2(bc->block_size)) { in dm_bow_ctr_optional()
696 struct bow_context *bc; in dm_bow_ctr() local
705 bc = kzalloc(sizeof(*bc), GFP_KERNEL); in dm_bow_ctr()
706 if (!bc) { in dm_bow_ctr()
714 ti->private = bc; in dm_bow_ctr()
717 &bc->dev); in dm_bow_ctr()
723 bc->block_size = in dm_bow_ctr()
724 bdev_get_queue(bc->dev->bdev)->limits.logical_block_size; in dm_bow_ctr()
731 bc->block_shift = ilog2(bc->block_size); in dm_bow_ctr()
732 bc->log_sector = kzalloc(bc->block_size, GFP_KERNEL); in dm_bow_ctr()
733 if (!bc->log_sector) { in dm_bow_ctr()
738 init_completion(&bc->kobj_holder.completion); in dm_bow_ctr()
739 mutex_init(&bc->ranges_lock); in dm_bow_ctr()
740 bc->ranges = RB_ROOT; in dm_bow_ctr()
741 bc->bufio = dm_bufio_client_create(bc->dev->bdev, bc->block_size, 1, 0, in dm_bow_ctr()
743 if (IS_ERR(bc->bufio)) { in dm_bow_ctr()
745 ret = PTR_ERR(bc->bufio); in dm_bow_ctr()
746 bc->bufio = NULL; in dm_bow_ctr()
750 bc->workqueue = alloc_workqueue("dm-bow", in dm_bow_ctr()
753 if (!bc->workqueue) { in dm_bow_ctr()
759 INIT_LIST_HEAD(&bc->trimmed_list); in dm_bow_ctr()
770 rb_link_node(&br->node, NULL, &bc->ranges.rb_node); in dm_bow_ctr()
771 rb_insert_color(&br->node, &bc->ranges); in dm_bow_ctr()
782 rb_link_node(&br->node, bc->ranges.rb_node, in dm_bow_ctr()
783 &bc->ranges.rb_node->rb_left); in dm_bow_ctr()
784 rb_insert_color(&br->node, &bc->ranges); in dm_bow_ctr()
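The constructor seeds bc->ranges with exactly two nodes by hand: the first rb_link_node() attaches a node at the empty root, the second attaches one as the root's left child, and rb_insert_color() rebalances after each. For comparison, the kernel's generic search-then-link insertion for a tree keyed on the range's starting sector would look like this (a generic sketch, not necessarily what add_before() in this file does):

	#include <linux/rbtree.h>

	/* Sketch: standard rbtree insertion keyed on bow_range->sector, where
	 * 'sector' is assumed to be the range's start, as the other matches suggest.
	 */
	static void insert_range(struct rb_root *root, struct bow_range *new_br)
	{
		struct rb_node **link = &root->rb_node;
		struct rb_node *parent = NULL;

		while (*link) {
			struct bow_range *br = container_of(*link, struct bow_range,
							    node);

			parent = *link;
			if (new_br->sector < br->sector)
				link = &(*link)->rb_left;
			else
				link = &(*link)->rb_right;
		}

		rb_link_node(&new_br->node, parent, link);
		rb_insert_color(&new_br->node, root);
	}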
798 struct bow_context *bc = ti->private; in dm_bow_resume() local
801 if (bc->kobj_holder.kobj.state_initialized) in dm_bow_resume()
804 ret = kobject_init_and_add(&bc->kobj_holder.kobj, &bow_ktype, in dm_bow_resume()
813 static int prepare_unchanged_range(struct bow_context *bc, struct bow_range *br, in prepare_unchanged_range() argument
827 backup_br = find_free_range(bc); in prepare_unchanged_range()
834 ret = split_range(bc, &backup_br, &backup_bi); in prepare_unchanged_range()
843 ret = split_range(bc, &br, bi_iter); in prepare_unchanged_range()
853 ret = copy_data(bc, br, backup_br, record_checksum ? &checksum : NULL); in prepare_unchanged_range()
869 bc->trims_total -= range_size(backup_br); in prepare_unchanged_range()
875 set_type(bc, &backup_br, backup_br->type); in prepare_unchanged_range()
881 ret = add_log_entry(bc, log_source, log_dest, log_size, checksum); in prepare_unchanged_range()
889 bc->log_sector->sector0 = sector0; in prepare_unchanged_range()
891 set_type(bc, &br, br->type); in prepare_unchanged_range()
895 static int prepare_free_range(struct bow_context *bc, struct bow_range *br, in prepare_free_range() argument
900 ret = split_range(bc, &br, bi_iter); in prepare_free_range()
903 set_type(bc, &br, CHANGED); in prepare_free_range()
907 static int prepare_changed_range(struct bow_context *bc, struct bow_range *br, in prepare_changed_range() argument
914 static int prepare_one_range(struct bow_context *bc, in prepare_one_range() argument
917 struct bow_range *br = find_first_overlapping_range(&bc->ranges, in prepare_one_range()
921 return prepare_changed_range(bc, br, bi_iter); in prepare_one_range()
924 return prepare_free_range(bc, br, bi_iter); in prepare_one_range()
928 return prepare_unchanged_range(bc, br, bi_iter, true); in prepare_one_range()
935 return prepare_unchanged_range(bc, br, bi_iter, false); in prepare_one_range()
947 struct bow_context *bc; member
954 struct bow_context *bc = ww->bc; in bow_write() local
961 mutex_lock(&bc->ranges_lock); in bow_write()
963 ret = prepare_one_range(bc, &bi_iter); in bow_write()
970 mutex_unlock(&bc->ranges_lock); in bow_write()
973 bio_set_dev(bio, bc->dev->bdev); in bow_write()
982 static int queue_write(struct bow_context *bc, struct bio *bio) in queue_write() argument
992 ww->bc = bc; in queue_write()
994 queue_work(bc->workqueue, &ww->work); in queue_write()
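bow_write() and queue_write() show the deferral scheme: a write arriving in a non-committed state is wrapped in a small work item and pushed onto bc->workqueue; the worker then takes ranges_lock, prepares every overlapping range, and remaps the bio to the backing device. A sketch of the wrapping side, with the work-item struct assumed from the "member" matches above and bow_write() taken to be the work callback:

	/* Sketch: defer a write bio to the workqueue. DM_MAPIO_SUBMITTED tells the
	 * device-mapper core that the target now owns the bio.
	 */
	struct write_work {
		struct work_struct work;
		struct bow_context *bc;
		struct bio *bio;
	};

	static int queue_write(struct bow_context *bc, struct bio *bio)
	{
		struct write_work *ww;

		ww = kmalloc(sizeof(*ww), GFP_NOIO);
		if (!ww) {
			bio->bi_status = BLK_STS_RESOURCE;
			bio_endio(bio);
			return DM_MAPIO_SUBMITTED;
		}

		INIT_WORK(&ww->work, bow_write);	/* bow_write() above is the worker */
		ww->bc = bc;
		ww->bio = bio;
		queue_work(bc->workqueue, &ww->work);
		return DM_MAPIO_SUBMITTED;
	}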
998 static int handle_sector0(struct bow_context *bc, struct bio *bio) in handle_sector0() argument
1002 if (bio->bi_iter.bi_size > bc->block_size) { in handle_sector0()
1004 bc->block_size >> SECTOR_SHIFT, in handle_sector0()
1015 split->bi_iter.bi_sector = bc->log_sector->sector0; in handle_sector0()
1016 bio_set_dev(split, bc->dev->bdev); in handle_sector0()
1020 ret = queue_write(bc, bio); in handle_sector0()
1022 bio->bi_iter.bi_sector = bc->log_sector->sector0; in handle_sector0()
1028 static int add_trim(struct bow_context *bc, struct bio *bio) in add_trim() argument
1038 br = find_first_overlapping_range(&bc->ranges, &bi_iter); in add_trim()
1042 if (!split_range(bc, &br, &bi_iter)) in add_trim()
1043 set_type(bc, &br, TRIMMED); in add_trim()
1067 static int remove_trim(struct bow_context *bc, struct bio *bio) in remove_trim() argument
1077 br = find_first_overlapping_range(&bc->ranges, &bi_iter); in remove_trim()
1085 if (!split_range(bc, &br, &bi_iter)) in remove_trim()
1086 set_type(bc, &br, UNCHANGED); in remove_trim()
1105 int remap_unless_illegal_trim(struct bow_context *bc, struct bio *bio) in remap_unless_illegal_trim() argument
1107 if (!bc->forward_trims && bio_op(bio) == REQ_OP_DISCARD) { in remap_unless_illegal_trim()
1112 bio_set_dev(bio, bc->dev->bdev); in remap_unless_illegal_trim()
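remap_unless_illegal_trim() is the common exit path: discards are failed early when the backing device cannot take them (forward_trims is false), and everything else is redirected to the backing device. A reconstruction under those assumptions (not a verbatim copy of the file):

	/* Sketch: fail discards we must not forward, remap everything else. */
	static int remap_unless_illegal_trim(struct bow_context *bc, struct bio *bio)
	{
		if (!bc->forward_trims && bio_op(bio) == REQ_OP_DISCARD) {
			bio->bi_status = BLK_STS_NOTSUPP;
			bio_endio(bio);
			return DM_MAPIO_SUBMITTED;
		}

		bio_set_dev(bio, bc->dev->bdev);
		return DM_MAPIO_REMAPPED;
	}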
1122 struct bow_context *bc = ti->private; in dm_bow_map() local
1124 if (likely(bc->state.counter == COMMITTED)) in dm_bow_map()
1125 return remap_unless_illegal_trim(bc, bio); in dm_bow_map()
1128 return remap_unless_illegal_trim(bc, bio); in dm_bow_map()
1130 if (atomic_read(&bc->state) != COMMITTED) { in dm_bow_map()
1133 mutex_lock(&bc->ranges_lock); in dm_bow_map()
1134 state = atomic_read(&bc->state); in dm_bow_map()
1137 ret = add_trim(bc, bio); in dm_bow_map()
1139 ret = remove_trim(bc, bio); in dm_bow_map()
1144 ret = handle_sector0(bc, bio); in dm_bow_map()
1146 ret = queue_write(bc, bio); in dm_bow_map()
1152 mutex_unlock(&bc->ranges_lock); in dm_bow_map()
1156 return remap_unless_illegal_trim(bc, bio); in dm_bow_map()
1165 struct bow_context *bc = ti->private; in dm_bow_tablestatus() local
1175 list_for_each_entry(br, &bc->trimmed_list, trimmed_list) in dm_bow_tablestatus()
1184 if (!rb_first(&bc->ranges)) { in dm_bow_tablestatus()
1189 if (container_of(rb_first(&bc->ranges), struct bow_range, node) in dm_bow_tablestatus()
1196 mutex_lock(&bc->ranges_lock); in dm_bow_tablestatus()
1197 for (i = rb_first(&bc->ranges); i; i = rb_next(i)) { in dm_bow_tablestatus()
1219 if (&br->node != rb_last(&bc->ranges)) { in dm_bow_tablestatus()
1245 mutex_unlock(&bc->ranges_lock); in dm_bow_tablestatus()
1267 struct bow_context *bc = ti->private; in dm_bow_prepare_ioctl() local
1268 struct dm_dev *dev = bc->dev; in dm_bow_prepare_ioctl()
1278 struct bow_context *bc = ti->private; in dm_bow_iterate_devices() local
1280 return fn(ti, bc->dev, 0, ti->len, data); in dm_bow_iterate_devices()