Lines matching refs:era: cross-references to the identifier 'era' in the dm-era device-mapper target (drivers/md/dm-era-target.c). Each entry gives the source line number, the matching fragment, and the enclosing function or declaration kind.
669 uint32_t era; member
685 uint64_t key = d->era; in metadata_digest_remove_writeset()
746 d->era = key; in metadata_digest_lookup_writeset()
1126 uint32_t era; member
1150 s->era = md->current_era; in metadata_get_stats()
1157 struct era { struct
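From the field accesses scattered through this listing, the target context can be sketched as below. This is an inference, not the verbatim kernel definition; field ordering and anything not referenced in a matching line are assumptions:

    struct era {
            struct dm_target *ti;
            struct dm_dev *metadata_dev;        /* argv[0] in era_ctr() */
            struct dm_dev *origin_dev;          /* argv[1] in era_ctr() */

            dm_block_t nr_blocks;               /* compared against calc_nr_blocks() on resume */
            uint32_t sectors_per_block;
            int sectors_per_block_shift;        /* -1 when block size is not a power of two */
            struct era_metadata *md;

            struct workqueue_struct *wq;        /* ordered workqueue, WQ_MEM_RECLAIM */
            struct work_struct worker;

            spinlock_t deferred_lock;
            struct bio_list deferred_bios;

            spinlock_t rpc_lock;
            struct list_head rpc_calls;

            struct digest digest;               /* incremental digestion of archived writesets */
            atomic_t suspended;                 /* gates wake_worker() */
    };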
1195 static bool block_size_is_power_of_two(struct era *era) in block_size_is_power_of_two() argument
1197 return era->sectors_per_block_shift >= 0; in block_size_is_power_of_two()
1200 static dm_block_t get_block(struct era *era, struct bio *bio) in get_block() argument
1204 if (!block_size_is_power_of_two(era)) in get_block()
1205 (void) sector_div(block_nr, era->sectors_per_block); in get_block()
1207 block_nr >>= era->sectors_per_block_shift; in get_block()
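Filling in the lines elided between the fragments, get_block() plausibly reads as follows; the initialisation from bio->bi_iter.bi_sector is an assumption based on standard bio handling:

    static dm_block_t get_block(struct era *era, struct bio *bio)
    {
            sector_t block_nr = bio->bi_iter.bi_sector;

            if (!block_size_is_power_of_two(era))
                    (void) sector_div(block_nr, era->sectors_per_block);
            else
                    block_nr >>= era->sectors_per_block_shift;

            return block_nr;
    }

sector_div() is used on the slow path because plain 64-bit division is expensive on 32-bit architectures; the power-of-two case avoids it entirely with a shift.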
1212 static void remap_to_origin(struct era *era, struct bio *bio) in remap_to_origin() argument
1214 bio_set_dev(bio, era->origin_dev->bdev); in remap_to_origin()
1220 static void wake_worker(struct era *era) in wake_worker() argument
1222 if (!atomic_read(&era->suspended)) in wake_worker()
1223 queue_work(era->wq, &era->worker); in wake_worker()
1226 static void process_old_eras(struct era *era) in process_old_eras() argument
1230 if (!era->digest.step) in process_old_eras()
1233 r = era->digest.step(era->md, &era->digest); in process_old_eras()
1236 era->digest.step = NULL; in process_old_eras()
1238 } else if (era->digest.step) in process_old_eras()
1239 wake_worker(era); in process_old_eras()
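The digest step function is invoked one chunk at a time from the worker. A plausible reconstruction of process_old_eras() from the fragments, with the error message on the r < 0 path assumed:

    static void process_old_eras(struct era *era)
    {
            int r;

            if (!era->digest.step)
                    return;

            r = era->digest.step(era->md, &era->digest);
            if (r < 0) {
                    DMERR("%s: digest step failed, stopping digestion", __func__);
                    era->digest.step = NULL;
            } else if (era->digest.step)
                    wake_worker(era);
    }

Because each call does a bounded amount of work and requeues itself via wake_worker(), digesting archived writesets never monopolises the worker thread.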
1242 static void process_deferred_bios(struct era *era) in process_deferred_bios() argument
1250 struct writeset *ws = era->md->current_writeset; in process_deferred_bios()
1255 spin_lock(&era->deferred_lock); in process_deferred_bios()
1256 bio_list_merge(&deferred_bios, &era->deferred_bios); in process_deferred_bios()
1257 bio_list_init(&era->deferred_bios); in process_deferred_bios()
1258 spin_unlock(&era->deferred_lock); in process_deferred_bios()
1264 r = writeset_test_and_set(&era->md->bitset_info, ws, in process_deferred_bios()
1265 get_block(era, bio)); in process_deferred_bios()
1279 r = metadata_commit(era->md); in process_deferred_bios()
1295 set_bit(get_block(era, bio), ws->bits); in process_deferred_bios()
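Only fragments of process_deferred_bios() match refs:era, so the sketch below reconstructs the likely overall shape. The failure handling, the marked_bios staging list and the submit_bio_noacct() resubmission are inferences, not verbatim code:

    static void process_deferred_bios(struct era *era)
    {
            int r;
            struct bio_list deferred_bios, marked_bios;
            struct bio *bio;
            bool commit_needed = false;
            bool failed = false;
            struct writeset *ws = era->md->current_writeset;

            bio_list_init(&deferred_bios);
            bio_list_init(&marked_bios);

            /* Take the deferred bios under the lock, then work on a private list. */
            spin_lock(&era->deferred_lock);
            bio_list_merge(&deferred_bios, &era->deferred_bios);
            bio_list_init(&era->deferred_bios);
            spin_unlock(&era->deferred_lock);

            while ((bio = bio_list_pop(&deferred_bios))) {
                    r = writeset_test_and_set(&era->md->bitset_info, ws,
                                              get_block(era, bio));
                    if (r < 0)
                            failed = true;
                    else if (r == 0)
                            commit_needed = true;    /* newly marked; needs a commit */

                    bio_list_add(&marked_bios, bio);
            }

            /* Persist the writeset before letting any marked write through. */
            if (commit_needed) {
                    r = metadata_commit(era->md);
                    if (r)
                            failed = true;
            }

            while ((bio = bio_list_pop(&marked_bios))) {
                    if (failed) {
                            bio_io_error(bio);
                            continue;
                    }
                    /* Update the in-core bitset only once the commit succeeded. */
                    if (commit_needed)
                            set_bit(get_block(era, bio), ws->bits);
                    submit_bio_noacct(bio);
            }
    }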
1302 static void process_rpc_calls(struct era *era) in process_rpc_calls() argument
1310 spin_lock(&era->rpc_lock); in process_rpc_calls()
1311 list_splice_init(&era->rpc_calls, &calls); in process_rpc_calls()
1312 spin_unlock(&era->rpc_lock); in process_rpc_calls()
1315 rpc->result = rpc->fn0 ? rpc->fn0(era->md) : rpc->fn1(era->md, rpc->arg); in process_rpc_calls()
1320 r = metadata_commit(era->md); in process_rpc_calls()
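The RPC list is drained the same way. A reconstruction of process_rpc_calls(); the completion signalling and error propagation are assumptions consistent with the struct rpc sketch further down:

    static void process_rpc_calls(struct era *era)
    {
            int r;
            bool need_commit = false;
            struct list_head calls;
            struct rpc *rpc, *tmp;

            INIT_LIST_HEAD(&calls);
            spin_lock(&era->rpc_lock);
            list_splice_init(&era->rpc_calls, &calls);
            spin_unlock(&era->rpc_lock);

            /* Run every queued call against the metadata. */
            list_for_each_entry_safe(rpc, tmp, &calls, list) {
                    rpc->result = rpc->fn0 ? rpc->fn0(era->md) : rpc->fn1(era->md, rpc->arg);
                    need_commit = true;
            }

            /* One commit covers the whole batch; on failure, fail the batch. */
            if (need_commit) {
                    r = metadata_commit(era->md);
                    if (r)
                            list_for_each_entry_safe(rpc, tmp, &calls, list)
                                    rpc->result = r;
            }

            list_for_each_entry_safe(rpc, tmp, &calls, list)
                    complete(&rpc->complete);
    }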
1330 static void kick_off_digest(struct era *era) in kick_off_digest() argument
1332 if (era->md->archived_writesets) { in kick_off_digest()
1333 era->md->archived_writesets = false; in kick_off_digest()
1334 metadata_digest_start(era->md, &era->digest); in kick_off_digest()
1340 struct era *era = container_of(ws, struct era, worker); in do_work() local
1342 kick_off_digest(era); in do_work()
1343 process_old_eras(era); in do_work()
1344 process_deferred_bios(era); in do_work()
1345 process_rpc_calls(era); in do_work()
1348 static void defer_bio(struct era *era, struct bio *bio) in defer_bio() argument
1350 spin_lock(&era->deferred_lock); in defer_bio()
1351 bio_list_add(&era->deferred_bios, bio); in defer_bio()
1352 spin_unlock(&era->deferred_lock); in defer_bio()
1354 wake_worker(era); in defer_bio()
1360 static int perform_rpc(struct era *era, struct rpc *rpc) in perform_rpc() argument
1365 spin_lock(&era->rpc_lock); in perform_rpc()
1366 list_add(&rpc->list, &era->rpc_calls); in perform_rpc()
1367 spin_unlock(&era->rpc_lock); in perform_rpc()
1369 wake_worker(era); in perform_rpc()
1375 static int in_worker0(struct era *era, int (*fn)(struct era_metadata *)) in in_worker0() argument
1381 return perform_rpc(era, &rpc); in in_worker0()
1384 static int in_worker1(struct era *era, in in_worker1() argument
1392 return perform_rpc(era, &rpc); in in_worker1()
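perform_rpc() and the in_worker*() wrappers form a small synchronous RPC into the worker thread, so every metadata operation runs in one serialised context. A sketch, with struct rpc reconstructed from its uses (fn0/fn1/arg appear in process_rpc_calls() above; the completion member is an assumption):

    struct rpc {
            struct list_head list;

            int (*fn0)(struct era_metadata *);
            int (*fn1)(struct era_metadata *, void *);
            void *arg;
            int result;

            struct completion complete;
    };

    static int perform_rpc(struct era *era, struct rpc *rpc)
    {
            rpc->result = 0;
            init_completion(&rpc->complete);

            spin_lock(&era->rpc_lock);
            list_add(&rpc->list, &era->rpc_calls);
            spin_unlock(&era->rpc_lock);

            wake_worker(era);
            wait_for_completion(&rpc->complete);

            return rpc->result;
    }

    static int in_worker0(struct era *era, int (*fn)(struct era_metadata *))
    {
            struct rpc rpc;

            rpc.fn0 = fn;
            rpc.fn1 = NULL;

            return perform_rpc(era, &rpc);
    }

in_worker1() presumably does the same with fn1 and an arg pointer, which is how metadata_get_stats() receives its output buffer in era_status().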
1395 static void start_worker(struct era *era) in start_worker() argument
1397 atomic_set(&era->suspended, 0); in start_worker()
1400 static void stop_worker(struct era *era) in stop_worker() argument
1402 atomic_set(&era->suspended, 1); in stop_worker()
1403 drain_workqueue(era->wq); in stop_worker()
1409 static void era_destroy(struct era *era) in era_destroy() argument
1411 if (era->md) in era_destroy()
1412 metadata_close(era->md); in era_destroy()
1414 if (era->wq) in era_destroy()
1415 destroy_workqueue(era->wq); in era_destroy()
1417 if (era->origin_dev) in era_destroy()
1418 dm_put_device(era->ti, era->origin_dev); in era_destroy()
1420 if (era->metadata_dev) in era_destroy()
1421 dm_put_device(era->ti, era->metadata_dev); in era_destroy()
1423 kfree(era); in era_destroy()
1426 static dm_block_t calc_nr_blocks(struct era *era) in calc_nr_blocks() argument
1428 return dm_sector_div_up(era->ti->len, era->sectors_per_block); in calc_nr_blocks()
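A concrete example with illustrative numbers: for ti->len = 2097153 sectors and sectors_per_block = 8, dm_sector_div_up() rounds the quotient up, giving 262145 blocks rather than 262144, because the trailing partial block still needs a bit in every writeset.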
1446 struct era *era; in era_ctr() local
1454 era = kzalloc(sizeof(*era), GFP_KERNEL); in era_ctr()
1455 if (!era) { in era_ctr()
1460 era->ti = ti; in era_ctr()
1462 r = dm_get_device(ti, argv[0], FMODE_READ | FMODE_WRITE, &era->metadata_dev); in era_ctr()
1465 era_destroy(era); in era_ctr()
1469 r = dm_get_device(ti, argv[1], FMODE_READ | FMODE_WRITE, &era->origin_dev); in era_ctr()
1472 era_destroy(era); in era_ctr()
1476 r = sscanf(argv[2], "%u%c", &era->sectors_per_block, &dummy); in era_ctr()
1479 era_destroy(era); in era_ctr()
1483 r = dm_set_target_max_io_len(ti, era->sectors_per_block); in era_ctr()
1486 era_destroy(era); in era_ctr()
1490 if (!valid_block_size(era->sectors_per_block)) { in era_ctr()
1492 era_destroy(era); in era_ctr()
1495 if (era->sectors_per_block & (era->sectors_per_block - 1)) in era_ctr()
1496 era->sectors_per_block_shift = -1; in era_ctr()
1498 era->sectors_per_block_shift = __ffs(era->sectors_per_block); in era_ctr()
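The test two lines up relies on n & (n - 1) clearing the lowest set bit, which yields zero exactly when sectors_per_block is a power of two. In that case __ffs() returns the index of the single set bit, i.e. log2 of the block size (for example __ffs(8) == 3), so get_block() can shift instead of dividing; a shift of -1 selects the sector_div() path instead.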
1500 md = metadata_open(era->metadata_dev->bdev, era->sectors_per_block, true); in era_ctr()
1503 era_destroy(era); in era_ctr()
1506 era->md = md; in era_ctr()
1508 era->wq = alloc_ordered_workqueue("dm-" DM_MSG_PREFIX, WQ_MEM_RECLAIM); in era_ctr()
1509 if (!era->wq) { in era_ctr()
1511 era_destroy(era); in era_ctr()
1514 INIT_WORK(&era->worker, do_work); in era_ctr()
1516 spin_lock_init(&era->deferred_lock); in era_ctr()
1517 bio_list_init(&era->deferred_bios); in era_ctr()
1519 spin_lock_init(&era->rpc_lock); in era_ctr()
1520 INIT_LIST_HEAD(&era->rpc_calls); in era_ctr()
1522 ti->private = era; in era_ctr()
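Putting the constructor arguments together (metadata device, origin device, block size in sectors, per argv[0..2] above), a table line has the following shape. Device names and the block size below are placeholders:

    0 <origin length in sectors> era <metadata dev> <origin dev> <block size>
    dmsetup create my_era --table "0 $(blockdev --getsz /dev/vdb) era /dev/vdc /dev/vdb 8"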
1538 struct era *era = ti->private; in era_map() local
1539 dm_block_t block = get_block(era, bio); in era_map()
1546 remap_to_origin(era, bio); in era_map()
1553 !metadata_current_marked(era->md, block)) { in era_map()
1554 defer_bio(era, bio); in era_map()
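era_map() is mostly elided by the refs:era filter; reconstructed from the fragments, it likely reads as below. The REQ_PREFLUSH and bio_data_dir() checks are assumptions consistent with the writeset only mattering for data-carrying writes:

    static int era_map(struct dm_target *ti, struct bio *bio)
    {
            struct era *era = ti->private;
            dm_block_t block = get_block(era, bio);

            /* Every bio is remapped to the origin device up front ... */
            remap_to_origin(era, bio);

            /*
             * ... but a write to a block not yet marked in the current era
             * must wait for the worker to mark it and commit the writeset.
             * Flush bios carry no data, so there is no block to mark.
             */
            if (!(bio->bi_opf & REQ_PREFLUSH) &&
                (bio_data_dir(bio) == WRITE) &&
                !metadata_current_marked(era->md, block)) {
                    defer_bio(era, bio);
                    return DM_MAPIO_SUBMITTED;
            }

            return DM_MAPIO_REMAPPED;
    }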
1564 struct era *era = ti->private; in era_postsuspend() local
1566 r = in_worker0(era, metadata_era_archive); in era_postsuspend()
1572 stop_worker(era); in era_postsuspend()
1574 r = metadata_commit(era->md); in era_postsuspend()
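On suspend the current era is archived through the worker before the worker is stopped, and a final commit is then issued directly, which is safe only because the workqueue has been drained. A reconstruction with assumed error reporting:

    static void era_postsuspend(struct dm_target *ti)
    {
            int r;
            struct era *era = ti->private;

            r = in_worker0(era, metadata_era_archive);
            if (r)
                    DMERR("%s: couldn't archive current era", __func__);

            stop_worker(era);

            r = metadata_commit(era->md);
            if (r)
                    DMERR("%s: metadata_commit failed", __func__);
    }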
1584 struct era *era = ti->private; in era_preresume() local
1585 dm_block_t new_size = calc_nr_blocks(era); in era_preresume()
1587 if (era->nr_blocks != new_size) { in era_preresume()
1588 r = metadata_resize(era->md, &new_size); in era_preresume()
1594 r = metadata_commit(era->md); in era_preresume()
1600 era->nr_blocks = new_size; in era_preresume()
1603 start_worker(era); in era_preresume()
1605 r = in_worker0(era, metadata_era_rollover); in era_preresume()
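Resume recomputes the block count, resizes and commits the metadata if the target length changed, restarts the worker, and finally rolls the era over via the RPC path. A reconstruction; the early returns are assumed:

    static int era_preresume(struct dm_target *ti)
    {
            int r;
            struct era *era = ti->private;
            dm_block_t new_size = calc_nr_blocks(era);

            if (era->nr_blocks != new_size) {
                    r = metadata_resize(era->md, &new_size);
                    if (r)
                            return r;

                    /* Persist the resize before trusting the new bound. */
                    r = metadata_commit(era->md);
                    if (r)
                            return r;

                    era->nr_blocks = new_size;
            }

            start_worker(era);

            /* Each resume starts a fresh era. */
            return in_worker0(era, metadata_era_rollover);
    }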
1624 struct era *era = ti->private; in era_status() local
1631 r = in_worker1(era, metadata_get_stats, &stats); in era_status()
1639 (unsigned) stats.era); in era_status()
1648 format_dev_t(buf, era->metadata_dev->bdev->bd_dev); in era_status()
1650 format_dev_t(buf, era->origin_dev->bdev->bd_dev); in era_status()
1651 DMEMIT("%s %u", buf, era->sectors_per_block); in era_status()
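Read together, the status fragments imply the usual two report types. The INFO fields other than stats.era are assumptions based on the stats lookup above:

    STATUSTYPE_INFO:  <metadata block size> <#used>/<#total metadata blocks> <current era>
    STATUSTYPE_TABLE: <metadata dev> <origin dev> <sectors per block>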
1668 struct era *era = ti->private; in era_message() local
1676 return in_worker0(era, metadata_checkpoint); in era_message()
1679 return in_worker0(era, metadata_take_snap); in era_message()
1682 return in_worker0(era, metadata_drop_snap); in era_message()
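The three handlers correspond to userspace messages. Assuming the documented dm-era message names (the listing shows only the handlers, not the parsed strings), usage is:

    dmsetup message <era device> 0 checkpoint
    dmsetup message <era device> 0 take_metadata_snap
    dmsetup message <era device> 0 drop_metadata_snap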
1696 struct era *era = ti->private; in era_iterate_devices() local
1697 return fn(ti, era->origin_dev, 0, get_dev_size(era->origin_dev), data); in era_iterate_devices()
1702 struct era *era = ti->private; in era_io_hints() local
1709 if (io_opt_sectors < era->sectors_per_block || in era_io_hints()
1710 do_div(io_opt_sectors, era->sectors_per_block)) { in era_io_hints()
1712 blk_limits_io_opt(limits, era->sectors_per_block << SECTOR_SHIFT); in era_io_hints()
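The io_hints fragments suggest the common device-mapper pattern: leave stacked limits alone when io_opt is already a multiple of the era block size, otherwise override it. A reconstruction; the blk_limits_io_min() reset is an assumption borrowed from similar targets:

    static void era_io_hints(struct dm_target *ti, struct queue_limits *limits)
    {
            struct era *era = ti->private;
            uint64_t io_opt_sectors = limits->io_opt >> SECTOR_SHIFT;

            /*
             * If the stacked limits are compatible with the era block size
             * (io_opt is a factor), do not override them.
             */
            if (io_opt_sectors < era->sectors_per_block ||
                do_div(io_opt_sectors, era->sectors_per_block)) {
                    blk_limits_io_min(limits, 0);
                    blk_limits_io_opt(limits, era->sectors_per_block << SECTOR_SHIFT);
            }
    }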