Lines matching +full:data +full:-mirror (excerpts from the device-mapper RAID1 "mirror" target, drivers/md/dm-raid1.c)

// SPDX-License-Identifier: GPL-2.0-only
/*
 * ...
 * Copyright (C) 2005-2008 Red Hat, Inc. All rights reserved.
 * ...
 */

#include "dm-bio-record.h"
/* ... */
#include <linux/device-mapper.h>
#include <linux/dm-io.h>
#include <linux/dm-dirty-log.h>
#include <linux/dm-kcopyd.h>
#include <linux/dm-region-hash.h>
#define errors_handled(p)       ((p)->features & DM_RAID1_HANDLE_ERRORS)
#define keep_log(p)             ((p)->features & DM_RAID1_KEEP_LOG)
/*
 *---------------------------------------------------------------
 * Mirror set structures.
 *---------------------------------------------------------------
 */
struct mirror {
        /* ... */
};

/* in struct mirror_set: */
        atomic_t default_mirror;        /* Default mirror */
        /* ... */
        struct mirror mirror[];
/* in wakeup_mirrord(): */
        queue_work(ms->kmirrord_wq, &ms->kmirrord_work);

/* in delayed_wake_fn(): */
        clear_bit(0, &ms->timer_pending);
        wakeup_mirrord(ms);

/* in delayed_wake(): */
        if (test_and_set_bit(0, &ms->timer_pending))
                return;

        ms->timer.expires = jiffies + HZ / 5;
        add_timer(&ms->timer);
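delayed_wake() coalesces wakeups: test_and_set_bit() lets only the first caller arm the timer, and delayed_wake_fn() clears the bit when the timer fires, so a burst of callers yields a single deferred wakeup (here within HZ/5, i.e. 200 ms). Below is a minimal userspace sketch of the same debounce using a C11 atomic flag; every name in it is an illustrative stand-in, not kernel code.

/* Hypothetical sketch of the delayed_wake() debounce: atomic_flag plays the
 * role of ms->timer_pending. Only the caller that wins the flag "arms" the
 * timer; later callers return immediately, so repeated events produce one
 * deferred wakeup until the timer fires and re-enables arming. */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_flag timer_pending = ATOMIC_FLAG_INIT;

static bool delayed_wake_sketch(void)
{
        /* test_and_set_bit() analog: true means someone already armed it */
        if (atomic_flag_test_and_set(&timer_pending))
                return false;           /* coalesced; nothing to do */
        printf("arming timer\n");       /* stands in for add_timer() */
        return true;
}

static void timer_fired_sketch(void)
{
        atomic_flag_clear(&timer_pending);  /* clear_bit() in delayed_wake_fn() */
        printf("timer fired, waking worker\n");
}

int main(void)
{
        delayed_wake_sketch();  /* arms */
        delayed_wake_sketch();  /* coalesced into the pending wakeup */
        timer_fired_sketch();   /* re-enables arming */
        delayed_wake_sketch();  /* arms again */
        return 0;
}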
/* in queue_bio(): */
        bl = (rw == WRITE) ? &ms->writes : &ms->reads;
        spin_lock_irqsave(&ms->lock, flags);
        should_wake = !(bl->head);
        bio_list_add(bl, bio);
        spin_unlock_irqrestore(&ms->lock, flags);

        if (should_wake)
                wakeup_mirrord(ms);
/* in struct dm_raid1_bio_record: */
        struct mirror *m;
        /* if details->bi_bdev == NULL, details were not saved */

/* ... Every mirror should look like this one. */

/* This is yucky. We squirrel the mirror struct away inside ... */
static struct mirror *bio_get_m(struct bio *bio)
{
        return (struct mirror *) bio->bi_next;
}

static void bio_set_m(struct bio *bio, struct mirror *m)
{
        bio->bi_next = (struct bio *) m;
}
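These two helpers stash a struct mirror pointer in the bio's bi_next link while dm-raid1 exclusively owns the bio, so no other block-layer code will follow that link. A minimal userspace sketch of the same trick, using hypothetical stand-in types (fake_bio, leg) rather than kernel structures:

/* Sketch of the bio_get_m()/bio_set_m() pointer-smuggling trick: while a
 * request object is privately owned, an otherwise-unused link field can
 * carry a back-pointer to its context. All names here are illustrative. */
#include <assert.h>
#include <stddef.h>

struct fake_bio { struct fake_bio *bi_next; };
struct leg { int id; };

static struct leg *bio_get_leg(struct fake_bio *b)
{
        return (struct leg *)b->bi_next;        /* cast the link back */
}

static void bio_set_leg(struct fake_bio *b, struct leg *l)
{
        b->bi_next = (struct fake_bio *)l;      /* smuggle the pointer */
}

int main(void)
{
        struct leg l = { .id = 0 };
        struct fake_bio b = { .bi_next = NULL };

        bio_set_leg(&b, &l);
        assert(bio_get_leg(&b) == &l);
        bio_set_leg(&b, NULL);  /* undo before handing the object back */
        return 0;
}

The cast round-trips losslessly because both fields are plain object pointers; the kernel code likewise resets the link (bio_set_m(bio, NULL) in write_callback()) before the bio is completed.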
static struct mirror *get_default_mirror(struct mirror_set *ms)
{
        return &ms->mirror[atomic_read(&ms->default_mirror)];
}

static void set_default_mirror(struct mirror *m)
{
        struct mirror_set *ms = m->ms;
        struct mirror *m0 = &(ms->mirror[0]);

        atomic_set(&ms->default_mirror, m - m0);
}
static struct mirror *get_valid_mirror(struct mirror_set *ms)
{
        struct mirror *m;

        for (m = ms->mirror; m < ms->mirror + ms->nr_mirrors; m++)
                if (!atomic_read(&m->error_count))
                        return m;

        return NULL;
}
/*
 * @m: mirror device to fail
 * ...
 * only if the mirror is in-sync.
 */
static void fail_mirror(struct mirror *m, enum dm_raid1_error error_type)
{
        struct mirror_set *ms = m->ms;
        struct mirror *new;

        ms->leg_failure = 1;
        /* ... */
        atomic_inc(&m->error_count);

        if (test_and_set_bit(error_type, &m->error_type))
                return;
        /* ... */
        if (!ms->in_sync && !keep_log(ms)) {
                /* ... than to risk returning corrupt data. */
                DMERR("Primary mirror (%s) failed while out-of-sync: Reads may fail.",
                      m->dev->name);
                goto out;
        }

        new = get_valid_mirror(ms);
        if (new)
                set_default_mirror(new);
        else
                DMWARN("All sides of mirror have failed.");

out:
        queue_work(dm_raid1_wq, &ms->trigger_event);
}
/* in mirror_flush(): */
        struct mirror_set *ms = ti->private;
        struct mirror *m;
        /* ... */
        struct dm_io_request io_req = {
                /* ... */
                .client = ms->io_client,
        };

        for (i = 0, m = ms->mirror; i < ms->nr_mirrors; i++, m++) {
                io[i].bdev = m->dev->bdev;
                /* ... */
        }

        error_bits = -1;
        dm_io(&io_req, ms->nr_mirrors, io, &error_bits, IOPRIO_DEFAULT);
        if (unlikely(error_bits != 0)) {
                for (i = 0; i < ms->nr_mirrors; i++)
                        if (test_bit(i, &error_bits))
                                fail_mirror(ms->mirror + i,
                                            DM_RAID1_FLUSH_ERROR);
                return -EIO;
        }
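mirror_flush() seeds error_bits with all ones and lets the I/O layer overwrite it; afterwards, bit i set means the flush to leg i failed and that leg is marked bad. A small self-contained sketch of that decoding, with hypothetical names (NR_LEGS, fail_leg) and a hard-coded mask standing in for the dm_io() result:

/* Sketch of decoding a per-leg error bitmask as mirror_flush() does:
 * initializing the mask to all ones mirrors the "error_bits = -1"
 * fail-safe, so if the I/O call never reported back, every leg would be
 * treated as failed. Names and values here are illustrative only. */
#include <stdio.h>

#define NR_LEGS 3

static void fail_leg(int i) { printf("leg %d: flush failed\n", i); }

int main(void)
{
        unsigned long error_bits = -1UL;        /* pessimistic default */

        error_bits = 0x5;       /* pretend legs 0 and 2 reported errors */
        if (error_bits != 0) {
                for (int i = 0; i < NR_LEGS; i++)
                        if (error_bits & (1UL << i))    /* test_bit() analog */
                                fail_leg(i);
        }
        return 0;
}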
/*
 *---------------------------------------------------------------
 * ...
 * When a mirror is first activated we may find that some regions
 * are in the no-sync state. We have to recover these by
 * recopying from the default mirror to all the others.
 *---------------------------------------------------------------
 */
/* in recovery_complete(): */
        if (read_err) {
                /* Read error means the failure of default mirror. */
                DMERR_LIMIT("Unable to read primary mirror during recovery");
                /* ... */
        }
        /* ... */
        /*
         * Bits correspond to devices (excluding default mirror).
         * The default mirror cannot change during recovery.
         */
        for (m = 0; m < ms->nr_mirrors; m++) {
                if (&ms->mirror[m] == get_default_mirror(ms))
                        continue;
                if (test_bit(bit, &write_err))
                        fail_mirror(ms->mirror + m,
                                    DM_RAID1_SYNC_ERROR);
                bit++;
        }
/* in recover(): */
        struct mirror *m;
        /* ... */
        sector_t region_size = dm_rh_get_region_size(ms->rh);
        /* ... */
        from.bdev = m->dev->bdev;
        from.sector = m->offset + dm_rh_region_to_sector(ms->rh, key);
        if (key == (ms->nr_regions - 1)) {
                /* the final region may be smaller than region_size */
                from.count = ms->ti->len & (region_size - 1);
                if (!from.count)
                        from.count = region_size;
        } else
                from.count = region_size;

        /* fill in the destinations */
        for (i = 0, dest = to; i < ms->nr_mirrors; i++) {
                if (&ms->mirror[i] == get_default_mirror(ms))
                        continue;

                m = ms->mirror + i;
                dest->bdev = m->dev->bdev;
                dest->sector = m->offset + dm_rh_region_to_sector(ms->rh, key);
                dest->count = from.count;
                dest++;
        }
        /* ... */
        dm_kcopyd_copy(ms->kcopyd_client, &from, ms->nr_mirrors - 1, to,
                       /* ... */);
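The from.count computation relies on region_size being a power of two, so len & (region_size - 1) is the remainder of len / region_size, and a zero remainder means the tail region is full-sized. A worked standalone example of that arithmetic (last_region_count is an illustrative name, not a kernel function):

/* Worked example of the tail-region arithmetic in recover(): with a
 * power-of-two region_size, len & (region_size - 1) equals
 * len % region_size, and a result of 0 means the last region is exactly
 * region_size long, matching the "if (!from.count)" fixup above. */
#include <assert.h>

typedef unsigned long long sector_t;

static sector_t last_region_count(sector_t len, sector_t region_size)
{
        sector_t count = len & (region_size - 1);  /* needs a power of two */

        return count ? count : region_size;
}

int main(void)
{
        assert(last_region_count(1000, 64) == 1000 % 64);  /* 40-sector tail */
        assert(last_region_count(1024, 64) == 64);         /* exact multiple */
        return 0;
}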
/* in reset_ms_flags(): */
        ms->leg_failure = 0;
        for (m = 0; m < ms->nr_mirrors; m++) {
                atomic_set(&(ms->mirror[m].error_count), 0);
                ms->mirror[m].error_type = 0;
        }
/* in do_recovery(): */
        struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);
        /* ... */
        dm_rh_recovery_prepare(ms->rh);
        /* ... */
        while ((reg = dm_rh_recovery_start(ms->rh)))
                recover(ms, reg);
        /* ... */
        if (!ms->in_sync &&
            (log->type->get_sync_count(log) == ms->nr_regions)) {
                /* ... */
                dm_table_event(ms->ti->table);
                ms->in_sync = 1;
        }
/*
 *---------------------------------------------------------------
 * ...
 *---------------------------------------------------------------
 */
static struct mirror *choose_mirror(struct mirror_set *ms, sector_t sector)
{
        struct mirror *m = get_default_mirror(ms);

        do {
                if (likely(!atomic_read(&m->error_count)))
                        return m;

                if (m-- == ms->mirror)
                        m += ms->nr_mirrors;
        } while (m != get_default_mirror(ms));

        return NULL;
}
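choose_mirror() starts at the default leg and walks backward with wraparound until it finds a leg whose error_count is zero, returning NULL if every leg has errors. The same loop shape in a self-contained sketch, with plain ints standing in for the atomic counters and illustrative names throughout:

/* Sketch of the choose_mirror() walk: step backward from the default leg,
 * wrapping at index 0, until a healthy leg is found or all legs have been
 * visited. The decrement-with-wrap line mirrors "if (m-- == ms->mirror)
 * m += ms->nr_mirrors;" from the kernel code. */
#include <stdio.h>

#define NR_LEGS 3

static int error_count[NR_LEGS] = { 1, 0, 1 };  /* only leg 1 is healthy */

static int choose_leg(int def)
{
        int i = def;

        do {
                if (!error_count[i])
                        return i;
                if (i-- == 0)           /* wrap below index 0 */
                        i += NR_LEGS;
        } while (i != def);

        return -1;                      /* no valid leg, like returning NULL */
}

int main(void)
{
        printf("chose leg %d\n", choose_leg(0));        /* prints 1 */
        return 0;
}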
static int default_ok(struct mirror *m)
{
        struct mirror *default_mirror = get_default_mirror(m->ms);

        return !atomic_read(&default_mirror->error_count);
}
/* in mirror_available(): */
        struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);
        region_t region = dm_rh_bio_to_region(ms->rh, bio);

        if (log->type->in_sync(log, region, 0))
                return choose_mirror(ms, bio->bi_iter.bi_sector) ? 1 : 0;

        return 0;
/*
 * remap a buffer to a particular mirror.
 */
static sector_t map_sector(struct mirror *m, struct bio *bio)
{
        if (unlikely(!bio->bi_iter.bi_size))
                return 0;
        return m->offset + dm_target_offset(m->ms->ti, bio->bi_iter.bi_sector);
}

static void map_bio(struct mirror *m, struct bio *bio)
{
        bio_set_dev(bio, m->dev->bdev);
        bio->bi_iter.bi_sector = map_sector(m, bio);
}

static void map_region(struct dm_io_region *io, struct mirror *m,
                       struct bio *bio)
{
        io->bdev = m->dev->bdev;
        io->sector = map_sector(m, bio);
        io->count = bio_sectors(bio);
}
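map_sector() composes two offsets: the bio's position within the target (dm_target_offset(), effectively bi_sector minus the target's start sector) plus the leg's own offset on its underlying device. A worked example with made-up numbers and stand-in types:

/* Worked example of the map_sector() arithmetic. The tgt struct and the
 * numeric values are illustrative stand-ins, not kernel definitions. */
#include <assert.h>

typedef unsigned long long sector_t;

struct tgt { sector_t begin; };         /* stand-in for ti->begin */

static sector_t target_offset(const struct tgt *ti, sector_t bi_sector)
{
        return bi_sector - ti->begin;   /* what dm_target_offset() computes */
}

static sector_t map_sector_sketch(sector_t leg_offset, const struct tgt *ti,
                                  sector_t bi_sector)
{
        return leg_offset + target_offset(ti, bi_sector);
}

int main(void)
{
        struct tgt ti = { .begin = 2048 };

        /* a bio at sector 2100 lands 52 sectors into a leg starting at 4096 */
        assert(map_sector_sketch(4096, &ti, 2100) == 4096 + 52);
        return 0;
}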
/* in hold_bio(): */
        spin_lock_irq(&ms->lock);

        if (atomic_read(&ms->suspend)) {
                spin_unlock_irq(&ms->lock);
                /* ... */
                if (dm_noflush_suspending(ms->ti))
                        bio->bi_status = BLK_STS_DM_REQUEUE;
                else
                        bio->bi_status = BLK_STS_IOERR;

                bio_endio(bio);
                return;
        }
        /* ... */
        bio_list_add(&ms->holds, bio);
        spin_unlock_irq(&ms->lock);
/*
 *---------------------------------------------------------------
 * ...
 *---------------------------------------------------------------
 */
/* in read_callback(): */
        struct mirror *m;
        /* ... */
        if (likely(default_ok(m)) || mirror_available(m->ms, bio)) {
                DMWARN_LIMIT("Read failure on mirror device %s. Trying alternative device.",
                             m->dev->name);
                queue_bio(m->ms, bio, bio_data_dir(bio));
                return;
        }

        DMERR_LIMIT("Read failure on mirror device %s. Failing I/O.",
                    m->dev->name);
        bio_io_error(bio);
static void read_async_bio(struct mirror *m, struct bio *bio)
{
        struct dm_io_request io_req = {
                /* ... */
                .client = m->ms->io_client,
        };
        /* ... */
}
/* in region_in_sync(): */
        int state = dm_rh_get_state(ms->rh, region, may_block);
/* in do_reads(): */
        struct mirror *m;
        /* ... */
        region = dm_rh_bio_to_region(ms->rh, bio);
        /* ... */
        if (likely(region_in_sync(ms, region, 1)))
                m = choose_mirror(ms, bio->bi_iter.bi_sector);
        else if (m && atomic_read(&m->error_count))
                m = NULL;
/*
 *---------------------------------------------------------------------
 * ...
 * NOSYNC: increment pending, just write to the default mirror
 *---------------------------------------------------------------------
 */
/* in write_callback(): */
        ms = bio_get_m(bio)->ms;
        bio_set_m(bio, NULL);
        /* ... */
        bio->bi_status = BLK_STS_NOTSUPP;
        /* ... */
        for (i = 0; i < ms->nr_mirrors; i++)
                if (test_bit(i, &error))
                        fail_mirror(ms->mirror + i, DM_RAID1_WRITE_ERROR);
        /* ... */
        spin_lock_irqsave(&ms->lock, flags);
        if (!ms->failures.head)
                should_wake = 1;
        bio_list_add(&ms->failures, bio);
        spin_unlock_irqrestore(&ms->lock, flags);
        if (should_wake)
                wakeup_mirrord(ms);
/* in do_write(): */
        struct mirror *m;
        blk_opf_t op_flags = bio->bi_opf & (REQ_FUA | REQ_PREFLUSH);
        struct dm_io_request io_req = {
                /* ... */
                .client = ms->io_client,
        };
        /* ... */
        for (i = 0, m = ms->mirror; i < ms->nr_mirrors; i++, m++)
                map_region(dest++, m, bio);

        /*
         * Use default mirror because we only need it to retrieve the reference
         * to the mirror set in write_callback().
         */
        bio_set_m(bio, get_default_mirror(ms));

        BUG_ON(dm_io(&io_req, ms->nr_mirrors, io, NULL, IOPRIO_DEFAULT));
/* in do_writes(): */
        struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);
        /* ... */
        if (!writes->head)
                return;
        /* ... */
        if ((bio->bi_opf & REQ_PREFLUSH) ||
            (bio_op(bio) == REQ_OP_DISCARD)) {
                /* ... */
        }

        region = dm_rh_bio_to_region(ms->rh, bio);

        if (log->type->is_remote_recovering &&
            log->type->is_remote_recovering(log, region)) {
                /* ... */
        }
        /* ... */
        state = dm_rh_get_state(ms->rh, region, 1);
        /* ... */
        spin_lock_irq(&ms->lock);
        bio_list_merge(&ms->writes, &requeue);
        spin_unlock_irq(&ms->lock);
        delayed_wake(ms);
        /* ... */
        dm_rh_inc_pending(ms->rh, &sync);
        dm_rh_inc_pending(ms->rh, &nosync);
        /* ... */
        ms->log_failure = dm_rh_flush(ms->rh) ? 1 : ms->log_failure;
        /* ... */
        if (unlikely(ms->log_failure) && errors_handled(ms)) {
                spin_lock_irq(&ms->lock);
                bio_list_merge(&ms->failures, &sync);
                spin_unlock_irq(&ms->lock);
                wakeup_mirrord(ms);
        }
        /* ... */
        dm_rh_delay(ms->rh, bio);
        /* ... */
        if (unlikely(ms->leg_failure) && errors_handled(ms) && !keep_log(ms)) {
                spin_lock_irq(&ms->lock);
                bio_list_add(&ms->failures, bio);
                spin_unlock_irq(&ms->lock);
                wakeup_mirrord(ms);
        }
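do_writes() triages each write by its region's state: in-sync regions go on a sync list and are written to every leg, recovering regions are delayed until recovery completes, and no-sync regions are written only to the default leg (per the Writes banner above). A compact sketch of that three-way dispatch, with illustrative enum and function names:

/* Sketch of the do_writes() triage. The states and the per-bin policies
 * follow the banner comment above; everything else is a stand-in. */
#include <stdio.h>

enum state { STATE_SYNC, STATE_RECOVERING, STATE_NOSYNC };

int main(void)
{
        enum state regions[] = { STATE_SYNC, STATE_RECOVERING, STATE_NOSYNC };

        for (unsigned i = 0; i < sizeof(regions) / sizeof(regions[0]); i++) {
                switch (regions[i]) {
                case STATE_SYNC:        /* write to *all* legs (do_write()) */
                        printf("bio %u -> sync list\n", i);
                        break;
                case STATE_RECOVERING:  /* park until recovery finishes */
                        printf("bio %u -> delayed (dm_rh_delay)\n", i);
                        break;
                case STATE_NOSYNC:      /* write only the default leg */
                        printf("bio %u -> nosync list\n", i);
                        break;
                }
        }
        return 0;
}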
/* in do_failures(): */
        if (likely(!failures->head))
                return;
        /*
         * ...
         * to reconfigure the mirror, at which point the core
         * ...
         */
        while ((bio = bio_list_pop(failures))) {
                if (!ms->log_failure) {
                        ms->in_sync = 0;
                        dm_rh_mark_nosync(ms->rh, bio);
                }
                /* ... */
                if (unlikely(!get_valid_mirror(ms) || (keep_log(ms) && ms->log_failure)))
                        bio_io_error(bio);
                /* ... */
        }
/* in trigger_event(): */
        dm_table_event(ms->ti->table);
/*
 *---------------------------------------------------------------
 * ...
 *---------------------------------------------------------------
 */
/* in do_mirror(): */
        spin_lock_irqsave(&ms->lock, flags);
        reads = ms->reads;
        writes = ms->writes;
        failures = ms->failures;
        bio_list_init(&ms->reads);
        bio_list_init(&ms->writes);
        bio_list_init(&ms->failures);
        spin_unlock_irqrestore(&ms->lock, flags);

        dm_rh_update_states(ms->rh, errors_handled(ms));
        do_recovery(ms);
        do_reads(ms, &reads);
        do_writes(ms, &writes);
        do_failures(ms, &failures);
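do_mirror() empties all three shared bio lists in one short critical section by copying the list heads and re-initializing them, then works on the private copies with the lock dropped. A userspace sketch of this snapshot-under-lock pattern, using a pthread mutex and a toy singly linked list in place of struct bio_list:

/* Sketch of the snapshot-under-lock pattern in do_mirror(): swap the
 * shared list head out under the lock, reinitialize it, then process the
 * private copy lock-free so producers are never blocked for long. */
#include <pthread.h>
#include <stdio.h>

struct node { struct node *next; int v; };

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static struct node *pending;            /* shared, like ms->writes */

static void producer(struct node *n)
{
        pthread_mutex_lock(&lock);
        n->next = pending;              /* cheap push under the lock */
        pending = n;
        pthread_mutex_unlock(&lock);
}

static void worker(void)
{
        struct node *batch;

        pthread_mutex_lock(&lock);
        batch = pending;                /* take the whole list... */
        pending = NULL;                 /* ...and reset it, like bio_list_init() */
        pthread_mutex_unlock(&lock);

        for (; batch; batch = batch->next)      /* process without the lock */
                printf("handling %d\n", batch->v);
}

int main(void)
{
        struct node a = { .v = 1 }, b = { .v = 2 };

        producer(&a);
        producer(&b);
        worker();
        return 0;
}

Keeping the critical section to a pointer swap is what lets queue_bio() callers stay fast even while the worker drains a large backlog.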
/*
 *---------------------------------------------------------------
 * ...
 *---------------------------------------------------------------
 */
/* in alloc_context(): */
        struct mirror_set *ms =
                        kzalloc(struct_size(ms, mirror, nr_mirrors), GFP_KERNEL);

        if (!ms) {
                ti->error = "Cannot allocate mirror context";
                return NULL;
        }

        spin_lock_init(&ms->lock);
        bio_list_init(&ms->reads);
        bio_list_init(&ms->writes);
        bio_list_init(&ms->failures);
        bio_list_init(&ms->holds);

        ms->ti = ti;
        ms->nr_mirrors = nr_mirrors;
        ms->nr_regions = dm_sector_div_up(ti->len, region_size);
        ms->in_sync = 0;
        ms->log_failure = 0;
        ms->leg_failure = 0;
        atomic_set(&ms->suspend, 0);
        atomic_set(&ms->default_mirror, DEFAULT_MIRROR);

        ms->io_client = dm_io_client_create();
        if (IS_ERR(ms->io_client)) {
                ti->error = "Error creating dm_io client";
                kfree(ms);
                return NULL;
        }

        ms->rh = dm_region_hash_create(ms, dispatch_bios, wakeup_mirrord,
                                       /* ... */
                                       ms->ti->begin, MAX_RECOVERY,
                                       dl, region_size, ms->nr_regions);
        if (IS_ERR(ms->rh)) {
                ti->error = "Error creating dirty region hash";
                dm_io_client_destroy(ms->io_client);
                kfree(ms);
                return NULL;
        }

        return ms;
/* in free_context(): */
        while (m--)
                dm_put_device(ti, ms->mirror[m].dev);

        dm_io_client_destroy(ms->io_client);
        dm_region_hash_destroy(ms->rh);
        kfree(ms);
static int get_mirror(struct mirror_set *ms, struct dm_target *ti,
                      unsigned int mirror, char **argv)
{
        /* ... */
                ti->error = "Invalid offset";
                return -EINVAL;
        /* ... */
        ret = dm_get_device(ti, argv[0], dm_table_get_mode(ti->table),
                            &ms->mirror[mirror].dev);
        if (ret) {
                ti->error = "Device lookup failure";
                return ret;
        }

        ms->mirror[mirror].ms = ms;
        atomic_set(&(ms->mirror[mirror].error_count), 0);
        ms->mirror[mirror].error_type = 0;
        ms->mirror[mirror].offset = offset;

        return 0;
}
/* in create_dirty_log(): */
        /* ... */
        ti->error = "Insufficient mirror log arguments";
        /* ... */
        ti->error = "Invalid mirror log argument count";
        /* ... */
        ti->error = "Insufficient mirror log arguments";
        /* ... */
        ti->error = "Error creating mirror dirty log";
/* in parse_features(): */
        struct dm_target *ti = ms->ti;
        /* ... */
        if (sscanf(argv[0], "%u%c", &num_features, &dummy) != 1) {
                ti->error = "Invalid number of features";
                return -EINVAL;
        }

        argc--;
        argv++;
        (*args_used)++;

        if (num_features > argc) {
                ti->error = "Not enough arguments to support feature count";
                return -EINVAL;
        }
        /* ... */
        if (!strcmp("handle_errors", argv[0]))
                ms->features |= DM_RAID1_HANDLE_ERRORS;
        else if (!strcmp("keep_log", argv[0]))
                ms->features |= DM_RAID1_KEEP_LOG;
        else {
                ti->error = "Unrecognised feature requested";
                return -EINVAL;
        }

        argc--;
        /* ... */
        if (!errors_handled(ms) && keep_log(ms)) {
                ti->error = "keep_log feature requires the handle_errors feature";
                return -EINVAL;
        }
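The sscanf(..., "%u%c", &val, &dummy) != 1 idiom above is a strict number parse: the extra %c only matches if junk follows the number, in which case sscanf returns 2 and validation fails. A standalone demonstration (parse_count is an illustrative name, not a dm helper):

/* Demo of the strict-number parse idiom used in dm table parsing: a clean
 * parse converts exactly one item, so any trailing junk is rejected. */
#include <stdio.h>

static int parse_count(const char *arg, unsigned int *out)
{
        char dummy;

        if (sscanf(arg, "%u%c", out, &dummy) != 1)
                return -1;      /* empty, non-numeric, or trailing junk */
        return 0;
}

int main(void)
{
        unsigned int v;

        printf("\"2\"   -> %d\n", parse_count("2", &v));    /* 0: ok */
        printf("\"2x\"  -> %d\n", parse_count("2x", &v));   /* -1: junk */
        printf("\"two\" -> %d\n", parse_count("two", &v));  /* -1 */
        return 0;
}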
/*
 * Construct a mirror mapping:
 * ...
 */
/* in mirror_ctr(): */
        /* ... */
                return -EINVAL;
        /* ... */
        argc -= args_used;
        /* ... */
                ti->error = "Invalid number of mirrors";
                dm_dirty_log_destroy(dl);
                return -EINVAL;
        /* ... */
        argv++, argc--;

        if (argc < nr_mirrors * 2) {
                ti->error = "Too few mirror arguments";
                dm_dirty_log_destroy(dl);
                return -EINVAL;
        }

        ms = alloc_context(nr_mirrors, dl->type->get_region_size(dl), ti, dl);
        if (!ms) {
                dm_dirty_log_destroy(dl);
                return -ENOMEM;
        }

        /* Get the mirror parameter sets */
        /* ... */
                argc -= 2;
        /* ... */
        ti->private = ms;

        r = dm_set_target_max_io_len(ti, dm_rh_get_region_size(ms->rh));
        if (r)
                goto err_free_context;

        ti->num_flush_bios = 1;
        ti->num_discard_bios = 1;
        ti->per_io_data_size = sizeof(struct dm_raid1_bio_record);

        ms->kmirrord_wq = alloc_workqueue("kmirrord", WQ_MEM_RECLAIM, 0);
        if (!ms->kmirrord_wq) {
                /* ... */
                r = -ENOMEM;
                goto err_free_context;
        }
        INIT_WORK(&ms->kmirrord_work, do_mirror);
        timer_setup(&ms->timer, delayed_wake_fn, 0);
        ms->timer_pending = 0;
        INIT_WORK(&ms->trigger_event, trigger_event);
        /* ... */
        argc -= args_used;

        /*
         * Any read-balancing addition depends on the
         * ...
         */
        if (argc) {
                ti->error = "Too many mirror arguments";
                r = -EINVAL;
                goto err_destroy_wq;
        }

        ms->kcopyd_client = dm_kcopyd_client_create(&dm_kcopyd_throttle);
        if (IS_ERR(ms->kcopyd_client)) {
                r = PTR_ERR(ms->kcopyd_client);
                goto err_destroy_wq;
        }
        /* ... */
err_destroy_wq:
        destroy_workqueue(ms->kmirrord_wq);
err_free_context:
        free_context(ms, ti, ms->nr_mirrors);
        return r;
static void mirror_dtr(struct dm_target *ti)
{
        struct mirror_set *ms = ti->private;

        del_timer_sync(&ms->timer);
        flush_workqueue(ms->kmirrord_wq);
        flush_work(&ms->trigger_event);
        dm_kcopyd_client_destroy(ms->kcopyd_client);
        destroy_workqueue(ms->kmirrord_wq);
        free_context(ms, ti, ms->nr_mirrors);
}
/*
 * Mirror mapping function
 */
/* in mirror_map(): */
        struct mirror *m;
        struct mirror_set *ms = ti->private;
        struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);
        /* ... */
        bio_record->details.bi_bdev = NULL;

        /* write path: */
        bio_record->write_region = dm_rh_bio_to_region(ms->rh, bio);
        /* ... */
        r = log->type->in_sync(log, dm_rh_bio_to_region(ms->rh, bio), 0);
        if (r < 0 && r != -EWOULDBLOCK)
                return DM_MAPIO_KILL;

        /*
         * If region is not in-sync queue the bio.
         */
        if (!r || (r == -EWOULDBLOCK)) {
                if (bio->bi_opf & REQ_RAHEAD)
                        return DM_MAPIO_KILL;
                /* ... */
        }

        /*
         * The region is in-sync and we can perform reads directly.
         * ...
         */
        m = choose_mirror(ms, bio->bi_iter.bi_sector);
        /* ... */
        dm_bio_record(&bio_record->details, bio);
        bio_record->m = m;
/* in mirror_end_io(): */
        struct mirror_set *ms = ti->private;
        struct mirror *m = NULL;
        /* ... */
        if (!(bio->bi_opf & REQ_PREFLUSH) &&
            bio_op(bio) != REQ_OP_DISCARD)
                dm_rh_dec(ms->rh, bio_record->write_region);
        /* ... */
        if (bio->bi_opf & REQ_RAHEAD)
                goto out;
        /* ... */
        if (!bio_record->details.bi_bdev) {
                /* ... no other mirror in-sync. */
                DMERR_LIMIT("Mirror read failed.");
                return DM_ENDIO_DONE;
        }

        m = bio_record->m;

        DMERR("Mirror read failed from %s. Trying alternative device.",
              m->dev->name);
        /* ... */
        /*
         * A failed read is requeued for another attempt using an intact
         * mirror.
         */
        bd = &bio_record->details;
        dm_bio_restore(bd, bio);
        bio_record->details.bi_bdev = NULL;
        bio->bi_status = 0;
        /* ... */
out:
        bio_record->details.bi_bdev = NULL;
        return DM_ENDIO_DONE;
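The retry path hinges on dm_bio_record()/dm_bio_restore(): mirror_map() snapshots the fields that mapping will mutate, and on a read failure mirror_end_io() rewinds the bio so it can be requeued against another leg. A toy version of that save/restore pattern, with stand-in types (req, req_details) rather than the real dm_bio_details:

/* Sketch of the record/restore retry pattern: capture the mutable fields
 * before a risky remapped request, and on failure restore them so the
 * request can be reissued as if it were new. All names are illustrative. */
#include <assert.h>

typedef unsigned long long sector_t;

struct req { int dev; sector_t sector; };
struct req_details { int dev; sector_t sector; };   /* like dm_bio_details */

static void req_record(struct req_details *bd, const struct req *r)
{
        bd->dev = r->dev;
        bd->sector = r->sector;
}

static void req_restore(const struct req_details *bd, struct req *r)
{
        r->dev = bd->dev;
        r->sector = bd->sector;
}

int main(void)
{
        struct req r = { .dev = -1, .sector = 2100 };
        struct req_details saved;

        req_record(&saved, &r);         /* before remapping */
        r.dev = 0;                      /* mapping picked leg 0 ... */
        r.sector = 4148;
        /* ... the read failed; rewind and let another leg try */
        req_restore(&saved, &r);
        assert(r.dev == -1 && r.sector == 2100);
        return 0;
}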
/* in mirror_presuspend(): */
        struct mirror_set *ms = ti->private;
        struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);
        /* ... */
        atomic_set(&ms->suspend, 1);

        /*
         * ... no bio has
         * a chance to be added in the hold list because ms->suspend
         * is set.
         */
        spin_lock_irq(&ms->lock);
        holds = ms->holds;
        bio_list_init(&ms->holds);
        spin_unlock_irq(&ms->lock);
        /* ... */
        dm_rh_stop_recovery(ms->rh);

        wait_event(/* ... */,
                   !dm_rh_recovery_in_flight(ms->rh));

        if (log->type->presuspend && log->type->presuspend(log))
                /* ... */;
        /* ... */
        flush_workqueue(ms->kmirrord_wq);
static void mirror_postsuspend(struct dm_target *ti)
{
        struct mirror_set *ms = ti->private;
        struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);

        if (log->type->postsuspend && log->type->postsuspend(log))
                /* ... */;
}
static void mirror_resume(struct dm_target *ti)
{
        struct mirror_set *ms = ti->private;
        struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);

        atomic_set(&ms->suspend, 0);
        if (log->type->resume && log->type->resume(log))
                /* ... */;
        dm_rh_start_recovery(ms->rh);
}
/*
 * @m: mirror device/leg we want the status of
 * ...
 *    A => Alive - No failures
 *    D => Dead - A write failure occurred leaving mirror out-of-sync
 *    S => Sync - A synchronization failure occurred, mirror out-of-sync
 *    R => Read - A read failure occurred, mirror data unaffected
 * ...
 */
static char device_status_char(struct mirror *m)
{
        if (!atomic_read(&(m->error_count)))
                return 'A';

        return (test_bit(DM_RAID1_FLUSH_ERROR, &(m->error_type))) ? 'F' :
                (test_bit(DM_RAID1_WRITE_ERROR, &(m->error_type))) ? 'D' :
                (test_bit(DM_RAID1_SYNC_ERROR, &(m->error_type))) ? 'S' :
                (test_bit(DM_RAID1_READ_ERROR, &(m->error_type))) ? 'R' : 'U';
}
/* in mirror_status(): */
        struct mirror_set *ms = ti->private;
        struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);
        /* ... */
        case STATUSTYPE_INFO:
                DMEMIT("%d ", ms->nr_mirrors);
                for (m = 0; m < ms->nr_mirrors; m++) {
                        DMEMIT("%s ", ms->mirror[m].dev->name);
                        buffer[m] = device_status_char(&(ms->mirror[m]));
                }
                /* ... */
                DMEMIT(/* ... */,
                       (unsigned long long)log->type->get_sync_count(log),
                       (unsigned long long)ms->nr_regions, buffer);

                sz += log->type->status(log, type, result+sz, maxlen-sz);
                break;

        case STATUSTYPE_TABLE:
                sz = log->type->status(log, type, result, maxlen);

                DMEMIT("%d", ms->nr_mirrors);
                for (m = 0; m < ms->nr_mirrors; m++)
                        DMEMIT(" %s %llu", ms->mirror[m].dev->name,
                               (unsigned long long)ms->mirror[m].offset);
                /* ... */
                break;

        case STATUSTYPE_IMA:
                DMEMIT_TARGET_NAME_VERSION(ti->type);
                DMEMIT(",nr_mirrors=%d", ms->nr_mirrors);
                for (m = 0; m < ms->nr_mirrors; m++) {
                        DMEMIT(",mirror_device_%d=%s", m, ms->mirror[m].dev->name);
                        DMEMIT(/* ... */,
                               m, device_status_char(&(ms->mirror[m])));
                }
                /* ... */
                sz += log->type->status(log, type, result+sz, maxlen-sz);
                break;
static int mirror_iterate_devices(struct dm_target *ti,
                                  iterate_devices_callout_fn fn, void *data)
{
        struct mirror_set *ms = ti->private;
        int ret = 0;
        unsigned int i;

        for (i = 0; !ret && i < ms->nr_mirrors; i++)
                ret = fn(ti, ms->mirror[i].dev,
                         ms->mirror[i].offset, ti->len, data);

        return ret;
}
static struct target_type mirror_target = {
        .name    = "mirror",
        /* ... */
};

/* in dm_mirror_init(): */
        /* ... */
                return -ENOMEM;
MODULE_DESCRIPTION(DM_NAME " mirror target");