/*
 * Copyright (C) 2003 Sistina Software Limited.
 * Copyright (C) 2005-2008 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include "dm-bio-record.h"

#include <linux/init.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/device-mapper.h>
#include <linux/dm-io.h>
#include <linux/dm-dirty-log.h>
#include <linux/dm-kcopyd.h>
#include <linux/dm-region-hash.h>

#define DM_MSG_PREFIX "raid1"

#define MAX_RECOVERY 1	/* Maximum number of regions recovered in parallel. */

#define MAX_NR_MIRRORS	(DM_KCOPYD_MAX_REGIONS + 1)

#define DM_RAID1_HANDLE_ERRORS	0x01
#define DM_RAID1_KEEP_LOG	0x02
#define errors_handled(p)	((p)->features & DM_RAID1_HANDLE_ERRORS)
#define keep_log(p)		((p)->features & DM_RAID1_KEEP_LOG)

static DECLARE_WAIT_QUEUE_HEAD(_kmirrord_recovery_stopped);

/*-----------------------------------------------------------------
 * Mirror set structures.
 *---------------------------------------------------------------*/
enum dm_raid1_error {
	DM_RAID1_WRITE_ERROR,
	DM_RAID1_FLUSH_ERROR,
	DM_RAID1_SYNC_ERROR,
	DM_RAID1_READ_ERROR
};

struct mirror {
	struct mirror_set *ms;
	atomic_t error_count;
	unsigned long error_type;
	struct dm_dev *dev;
	sector_t offset;
};

struct mirror_set {
	struct dm_target *ti;
	struct list_head list;

	uint64_t features;

	spinlock_t lock;	/* protects the lists */
	struct bio_list reads;
	struct bio_list writes;
	struct bio_list failures;
	struct bio_list holds;	/* bios are waiting until suspend */

	struct dm_region_hash *rh;
	struct dm_kcopyd_client *kcopyd_client;
	struct dm_io_client *io_client;

	/* recovery */
	region_t nr_regions;
	int in_sync;
	int log_failure;
	int leg_failure;
	atomic_t suspend;

	atomic_t default_mirror;	/* Default mirror */

	struct workqueue_struct *kmirrord_wq;
	struct work_struct kmirrord_work;
	struct timer_list timer;
	unsigned long timer_pending;

	struct work_struct trigger_event;

	unsigned int nr_mirrors;
	struct mirror mirror[];
};

DECLARE_DM_KCOPYD_THROTTLE_WITH_MODULE_PARM(raid1_resync_throttle,
		"A percentage of time allocated for raid resynchronization");

static void wakeup_mirrord(void *context)
{
	struct mirror_set *ms = context;

	queue_work(ms->kmirrord_wq, &ms->kmirrord_work);
}

static void delayed_wake_fn(struct timer_list *t)
{
	struct mirror_set *ms = from_timer(ms, t, timer);

	clear_bit(0, &ms->timer_pending);
	wakeup_mirrord(ms);
}

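/*
 * Coalesce wakeups: if a delayed wake is already pending do nothing,
 * otherwise arm the timer to kick the daemon in HZ / 5 jiffies.
 */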
static void delayed_wake(struct mirror_set *ms)
{
	if (test_and_set_bit(0, &ms->timer_pending))
		return;

	ms->timer.expires = jiffies + HZ / 5;
	add_timer(&ms->timer);
}

static void wakeup_all_recovery_waiters(void *context)
{
	wake_up_all(&_kmirrord_recovery_stopped);
}

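/*
 * Add a bio to the appropriate read/write list and wake up kmirrord,
 * but only if the list was empty - a non-empty list means a wakeup
 * has already been issued and not yet serviced.
 */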
static void queue_bio(struct mirror_set *ms, struct bio *bio, int rw)
{
	unsigned long flags;
	int should_wake = 0;
	struct bio_list *bl;

	bl = (rw == WRITE) ? &ms->writes : &ms->reads;
	spin_lock_irqsave(&ms->lock, flags);
	should_wake = !(bl->head);
	bio_list_add(bl, bio);
	spin_unlock_irqrestore(&ms->lock, flags);

	if (should_wake)
		wakeup_mirrord(ms);
}

static void dispatch_bios(void *context, struct bio_list *bio_list)
{
	struct mirror_set *ms = context;
	struct bio *bio;

	while ((bio = bio_list_pop(bio_list)))
		queue_bio(ms, bio, WRITE);
}

struct dm_raid1_bio_record {
	struct mirror *m;
	/* if details->bi_bdev == NULL, details were not saved */
	struct dm_bio_details details;
	region_t write_region;
};

/*
 * Every mirror should look like this one.
 */
#define DEFAULT_MIRROR 0

/*
 * This is yucky.  We squirrel the mirror struct away inside
 * bi_next for read/write buffers.  This is safe since the bh
 * doesn't get submitted to the lower levels of block layer.
 */
static struct mirror *bio_get_m(struct bio *bio)
{
	return (struct mirror *) bio->bi_next;
}

static void bio_set_m(struct bio *bio, struct mirror *m)
{
	bio->bi_next = (struct bio *) m;
}

static struct mirror *get_default_mirror(struct mirror_set *ms)
{
	return &ms->mirror[atomic_read(&ms->default_mirror)];
}

static void set_default_mirror(struct mirror *m)
{
	struct mirror_set *ms = m->ms;
	struct mirror *m0 = &(ms->mirror[0]);

	atomic_set(&ms->default_mirror, m - m0);
}

static struct mirror *get_valid_mirror(struct mirror_set *ms)
{
	struct mirror *m;

	for (m = ms->mirror; m < ms->mirror + ms->nr_mirrors; m++)
		if (!atomic_read(&m->error_count))
			return m;

	return NULL;
}

/* fail_mirror
 * @m: mirror device to fail
 * @error_type: one of the enum's, DM_RAID1_*_ERROR
 *
 * If errors are being handled, record the type of
 * error encountered for this device.  If this type
 * of error has already been recorded, we can return;
 * otherwise, we must signal userspace by triggering
 * an event.  Additionally, if the device is the
 * primary device, we must choose a new primary, but
 * only if the mirror is in-sync.
 *
 * This function must not block.
 */
static void fail_mirror(struct mirror *m, enum dm_raid1_error error_type)
{
	struct mirror_set *ms = m->ms;
	struct mirror *new;

	ms->leg_failure = 1;

	/*
	 * error_count is used for nothing more than a
	 * simple way to tell if a device has encountered
	 * errors.
	 */
	atomic_inc(&m->error_count);

	if (test_and_set_bit(error_type, &m->error_type))
		return;

	if (!errors_handled(ms))
		return;

	if (m != get_default_mirror(ms))
		goto out;

	if (!ms->in_sync && !keep_log(ms)) {
		/*
		 * Better to issue requests to same failing device
		 * than to risk returning corrupt data.
		 */
		DMERR("Primary mirror (%s) failed while out-of-sync: Reads may fail.",
		      m->dev->name);
		goto out;
	}

	new = get_valid_mirror(ms);
	if (new)
		set_default_mirror(new);
	else
		DMWARN("All sides of mirror have failed.");

out:
	schedule_work(&ms->trigger_event);
}

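/*
 * Issue an empty flush (REQ_PREFLUSH) to every mirror leg synchronously.
 * Any leg whose flush fails is marked with DM_RAID1_FLUSH_ERROR and the
 * whole flush returns -EIO.
 */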
static int mirror_flush(struct dm_target *ti)
{
	struct mirror_set *ms = ti->private;
	unsigned long error_bits;

	unsigned int i;
	struct dm_io_region io[MAX_NR_MIRRORS];
	struct mirror *m;
	struct dm_io_request io_req = {
		.bi_opf = REQ_OP_WRITE | REQ_PREFLUSH | REQ_SYNC,
		.mem.type = DM_IO_KMEM,
		.mem.ptr.addr = NULL,
		.client = ms->io_client,
	};

	for (i = 0, m = ms->mirror; i < ms->nr_mirrors; i++, m++) {
		io[i].bdev = m->dev->bdev;
		io[i].sector = 0;
		io[i].count = 0;
	}

	error_bits = -1;
	dm_io(&io_req, ms->nr_mirrors, io, &error_bits);
	if (unlikely(error_bits != 0)) {
		for (i = 0; i < ms->nr_mirrors; i++)
			if (test_bit(i, &error_bits))
				fail_mirror(ms->mirror + i,
					    DM_RAID1_FLUSH_ERROR);
		return -EIO;
	}

	return 0;
}

/*-----------------------------------------------------------------
 * Recovery.
 *
 * When a mirror is first activated we may find that some regions
 * are in the no-sync state.  We have to recover these by
 * recopying from the default mirror to all the others.
 *---------------------------------------------------------------*/
static void recovery_complete(int read_err, unsigned long write_err,
			      void *context)
{
	struct dm_region *reg = context;
	struct mirror_set *ms = dm_rh_region_context(reg);
	int m, bit = 0;

	if (read_err) {
		/* Read error means the failure of default mirror. */
		DMERR_LIMIT("Unable to read primary mirror during recovery");
		fail_mirror(get_default_mirror(ms), DM_RAID1_SYNC_ERROR);
	}

	if (write_err) {
		DMERR_LIMIT("Write error during recovery (error = 0x%lx)",
			    write_err);
		/*
		 * Bits correspond to devices (excluding default mirror).
		 * The default mirror cannot change during recovery.
		 */
		for (m = 0; m < ms->nr_mirrors; m++) {
			if (&ms->mirror[m] == get_default_mirror(ms))
				continue;
			if (test_bit(bit, &write_err))
				fail_mirror(ms->mirror + m,
					    DM_RAID1_SYNC_ERROR);
			bit++;
		}
	}

	dm_rh_recovery_end(reg, !(read_err || write_err));
}

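/*
 * Kick off a kcopyd copy of one region from the default mirror to all
 * the other legs, with recovery_complete() above as the completion
 * callback.  The final region may be truncated to the target length.
 */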
static void recover(struct mirror_set *ms, struct dm_region *reg)
{
	unsigned int i;
	struct dm_io_region from, to[DM_KCOPYD_MAX_REGIONS], *dest;
	struct mirror *m;
	unsigned long flags = 0;
	region_t key = dm_rh_get_region_key(reg);
	sector_t region_size = dm_rh_get_region_size(ms->rh);

	/* fill in the source */
	m = get_default_mirror(ms);
	from.bdev = m->dev->bdev;
	from.sector = m->offset + dm_rh_region_to_sector(ms->rh, key);
	if (key == (ms->nr_regions - 1)) {
		/*
		 * The final region may be smaller than
		 * region_size.
		 */
		from.count = ms->ti->len & (region_size - 1);
		if (!from.count)
			from.count = region_size;
	} else
		from.count = region_size;

	/* fill in the destinations */
	for (i = 0, dest = to; i < ms->nr_mirrors; i++) {
		if (&ms->mirror[i] == get_default_mirror(ms))
			continue;

		m = ms->mirror + i;
		dest->bdev = m->dev->bdev;
		dest->sector = m->offset + dm_rh_region_to_sector(ms->rh, key);
		dest->count = from.count;
		dest++;
	}

	/* hand to kcopyd */
	if (!errors_handled(ms))
		flags |= BIT(DM_KCOPYD_IGNORE_ERROR);

	dm_kcopyd_copy(ms->kcopyd_client, &from, ms->nr_mirrors - 1, to,
		       flags, recovery_complete, reg);
}

static void reset_ms_flags(struct mirror_set *ms)
{
	unsigned int m;

	ms->leg_failure = 0;
	for (m = 0; m < ms->nr_mirrors; m++) {
		atomic_set(&(ms->mirror[m].error_count), 0);
		ms->mirror[m].error_type = 0;
	}
}

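/*
 * Drive recovery: quiesce more regions, start copies for regions that
 * have already quiesced, and raise a table event once the dirty log
 * reports every region in sync.
 */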
static void do_recovery(struct mirror_set *ms)
{
	struct dm_region *reg;
	struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);

	/*
	 * Start quiescing some regions.
	 */
	dm_rh_recovery_prepare(ms->rh);

	/*
	 * Copy any already quiesced regions.
	 */
	while ((reg = dm_rh_recovery_start(ms->rh)))
		recover(ms, reg);

	/*
	 * Update the in sync flag.
	 */
	if (!ms->in_sync &&
	    (log->type->get_sync_count(log) == ms->nr_regions)) {
		/* the sync is complete */
		dm_table_event(ms->ti->table);
		ms->in_sync = 1;
		reset_ms_flags(ms);
	}
}

/*-----------------------------------------------------------------
 * Reads
 *---------------------------------------------------------------*/
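/*
 * Walk backwards from the default mirror looking for a leg with no
 * recorded errors; returns NULL if every leg has failed.
 */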
static struct mirror *choose_mirror(struct mirror_set *ms, sector_t sector)
{
	struct mirror *m = get_default_mirror(ms);

	do {
		if (likely(!atomic_read(&m->error_count)))
			return m;

		if (m-- == ms->mirror)
			m += ms->nr_mirrors;
	} while (m != get_default_mirror(ms));

	return NULL;
}

static int default_ok(struct mirror *m)
{
	struct mirror *default_mirror = get_default_mirror(m->ms);

	return !atomic_read(&default_mirror->error_count);
}

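/*
 * A failed read may be retried on another leg only if the region is
 * in sync in the dirty log and at least one error-free leg remains.
 */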
static int mirror_available(struct mirror_set *ms, struct bio *bio)
{
	struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);
	region_t region = dm_rh_bio_to_region(ms->rh, bio);

	if (log->type->in_sync(log, region, 0))
		return choose_mirror(ms, bio->bi_iter.bi_sector) ? 1 : 0;

	return 0;
}

/*
 * remap a buffer to a particular mirror.
 */
static sector_t map_sector(struct mirror *m, struct bio *bio)
{
	if (unlikely(!bio->bi_iter.bi_size))
		return 0;
	return m->offset + dm_target_offset(m->ms->ti, bio->bi_iter.bi_sector);
}

static void map_bio(struct mirror *m, struct bio *bio)
{
	bio_set_dev(bio, m->dev->bdev);
	bio->bi_iter.bi_sector = map_sector(m, bio);
}

static void map_region(struct dm_io_region *io, struct mirror *m,
		       struct bio *bio)
{
	io->bdev = m->dev->bdev;
	io->sector = map_sector(m, bio);
	io->count = bio_sectors(bio);
}

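/*
 * Park a bio on the holds list until suspend completes.  If the set is
 * already suspended the bio is completed immediately: requeued to the
 * core for a noflush suspend, failed otherwise.
 */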
static void hold_bio(struct mirror_set *ms, struct bio *bio)
{
	/*
	 * Lock is required to avoid race condition during suspend
	 * process.
	 */
	spin_lock_irq(&ms->lock);

	if (atomic_read(&ms->suspend)) {
		spin_unlock_irq(&ms->lock);

		/*
		 * If device is suspended, complete the bio.
		 */
		if (dm_noflush_suspending(ms->ti))
			bio->bi_status = BLK_STS_DM_REQUEUE;
		else
			bio->bi_status = BLK_STS_IOERR;

		bio_endio(bio);
		return;
	}

	/*
	 * Hold bio until the suspend is complete.
	 */
	bio_list_add(&ms->holds, bio);
	spin_unlock_irq(&ms->lock);
}

/*-----------------------------------------------------------------
 * Reads
 *---------------------------------------------------------------*/
static void read_callback(unsigned long error, void *context)
{
	struct bio *bio = context;
	struct mirror *m;

	m = bio_get_m(bio);
	bio_set_m(bio, NULL);

	if (likely(!error)) {
		bio_endio(bio);
		return;
	}

	fail_mirror(m, DM_RAID1_READ_ERROR);

	if (likely(default_ok(m)) || mirror_available(m->ms, bio)) {
		DMWARN_LIMIT("Read failure on mirror device %s. Trying alternative device.",
			     m->dev->name);
		queue_bio(m->ms, bio, bio_data_dir(bio));
		return;
	}

	DMERR_LIMIT("Read failure on mirror device %s.  Failing I/O.",
		    m->dev->name);
	bio_io_error(bio);
}

/* Asynchronous read. */
static void read_async_bio(struct mirror *m, struct bio *bio)
{
	struct dm_io_region io;
	struct dm_io_request io_req = {
		.bi_opf = REQ_OP_READ,
		.mem.type = DM_IO_BIO,
		.mem.ptr.bio = bio,
		.notify.fn = read_callback,
		.notify.context = bio,
		.client = m->ms->io_client,
	};

	map_region(&io, m, bio);
	bio_set_m(bio, m);
	BUG_ON(dm_io(&io_req, 1, &io, NULL));
}

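/*
 * CLEAN and DIRTY both mean the region holds identical data on every
 * leg, so reads to it may be balanced across mirrors.
 */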
static inline int region_in_sync(struct mirror_set *ms, region_t region,
				 int may_block)
{
	int state = dm_rh_get_state(ms->rh, region, may_block);
	return state == DM_RH_CLEAN || state == DM_RH_DIRTY;
}

static void do_reads(struct mirror_set *ms, struct bio_list *reads)
{
	region_t region;
	struct bio *bio;
	struct mirror *m;

	while ((bio = bio_list_pop(reads))) {
		region = dm_rh_bio_to_region(ms->rh, bio);
		m = get_default_mirror(ms);

		/*
		 * We can only read balance if the region is in sync.
		 */
		if (likely(region_in_sync(ms, region, 1)))
			m = choose_mirror(ms, bio->bi_iter.bi_sector);
		else if (m && atomic_read(&m->error_count))
			m = NULL;

		if (likely(m))
			read_async_bio(m, bio);
		else
			bio_io_error(bio);
	}
}

/*-----------------------------------------------------------------
 * Writes.
 *
 * We do different things with the write io depending on the
 * state of the region that it's in:
 *
 * SYNC: 	increment pending, use kcopyd to write to *all* mirrors
 * RECOVERING:	delay the io until recovery completes
 * NOSYNC:	increment pending, just write to the default mirror
 *---------------------------------------------------------------*/


static void write_callback(unsigned long error, void *context)
{
	unsigned int i;
	struct bio *bio = (struct bio *) context;
	struct mirror_set *ms;
	int should_wake = 0;
	unsigned long flags;

	ms = bio_get_m(bio)->ms;
	bio_set_m(bio, NULL);

	/*
	 * NOTE: We don't decrement the pending count here,
	 * instead it is done by the targets endio function.
	 * This way we handle both writes to SYNC and NOSYNC
	 * regions with the same code.
	 */
	if (likely(!error)) {
		bio_endio(bio);
		return;
	}

	/*
	 * If the bio is discard, return an error, but do not
	 * degrade the array.
	 */
	if (bio_op(bio) == REQ_OP_DISCARD) {
		bio->bi_status = BLK_STS_NOTSUPP;
		bio_endio(bio);
		return;
	}

	for (i = 0; i < ms->nr_mirrors; i++)
		if (test_bit(i, &error))
			fail_mirror(ms->mirror + i, DM_RAID1_WRITE_ERROR);

	/*
	 * Need to raise event.  Since raising
	 * events can block, we need to do it in
	 * the main thread.
	 */
	spin_lock_irqsave(&ms->lock, flags);
	if (!ms->failures.head)
		should_wake = 1;
	bio_list_add(&ms->failures, bio);
	spin_unlock_irqrestore(&ms->lock, flags);
	if (should_wake)
		wakeup_mirrord(ms);
}

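/*
 * Replicate a single write to every leg with one asynchronous dm_io
 * request; write_callback() sorts out any per-device errors.
 */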
static void do_write(struct mirror_set *ms, struct bio *bio)
{
	unsigned int i;
	struct dm_io_region io[MAX_NR_MIRRORS], *dest = io;
	struct mirror *m;
	blk_opf_t op_flags = bio->bi_opf & (REQ_FUA | REQ_PREFLUSH);
	struct dm_io_request io_req = {
		.bi_opf = REQ_OP_WRITE | op_flags,
		.mem.type = DM_IO_BIO,
		.mem.ptr.bio = bio,
		.notify.fn = write_callback,
		.notify.context = bio,
		.client = ms->io_client,
	};

	if (bio_op(bio) == REQ_OP_DISCARD) {
		io_req.bi_opf = REQ_OP_DISCARD | op_flags;
		io_req.mem.type = DM_IO_KMEM;
		io_req.mem.ptr.addr = NULL;
	}

	for (i = 0, m = ms->mirror; i < ms->nr_mirrors; i++, m++)
		map_region(dest++, m, bio);

	/*
	 * Use default mirror because we only need it to retrieve the reference
	 * to the mirror set in write_callback().
	 */
	bio_set_m(bio, get_default_mirror(ms));

	BUG_ON(dm_io(&io_req, ms->nr_mirrors, io, NULL));
}

static void do_writes(struct mirror_set *ms, struct bio_list *writes)
{
	int state;
	struct bio *bio;
	struct bio_list sync, nosync, recover, *this_list = NULL;
	struct bio_list requeue;
	struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);
	region_t region;

	if (!writes->head)
		return;

	/*
	 * Classify each write.
	 */
	bio_list_init(&sync);
	bio_list_init(&nosync);
	bio_list_init(&recover);
	bio_list_init(&requeue);

	while ((bio = bio_list_pop(writes))) {
		if ((bio->bi_opf & REQ_PREFLUSH) ||
		    (bio_op(bio) == REQ_OP_DISCARD)) {
			bio_list_add(&sync, bio);
			continue;
		}

		region = dm_rh_bio_to_region(ms->rh, bio);

		if (log->type->is_remote_recovering &&
		    log->type->is_remote_recovering(log, region)) {
			bio_list_add(&requeue, bio);
			continue;
		}

		state = dm_rh_get_state(ms->rh, region, 1);
		switch (state) {
		case DM_RH_CLEAN:
		case DM_RH_DIRTY:
			this_list = &sync;
			break;

		case DM_RH_NOSYNC:
			this_list = &nosync;
			break;

		case DM_RH_RECOVERING:
			this_list = &recover;
			break;
		}

		bio_list_add(this_list, bio);
	}

	/*
	 * Add bios that are delayed due to remote recovery
	 * back on to the write queue
	 */
	if (unlikely(requeue.head)) {
		spin_lock_irq(&ms->lock);
		bio_list_merge(&ms->writes, &requeue);
		spin_unlock_irq(&ms->lock);
		delayed_wake(ms);
	}

	/*
	 * Increment the pending counts for any regions that will
	 * be written to (writes to recover regions are going to
	 * be delayed).
	 */
	dm_rh_inc_pending(ms->rh, &sync);
	dm_rh_inc_pending(ms->rh, &nosync);

	/*
	 * If the flush fails on a previous call and succeeds here,
	 * we must not reset the log_failure variable.  We need
	 * userspace interaction to do that.
	 */
	ms->log_failure = dm_rh_flush(ms->rh) ? 1 : ms->log_failure;

	/*
	 * Dispatch io.
	 */
	if (unlikely(ms->log_failure) && errors_handled(ms)) {
		spin_lock_irq(&ms->lock);
		bio_list_merge(&ms->failures, &sync);
		spin_unlock_irq(&ms->lock);
		wakeup_mirrord(ms);
	} else
		while ((bio = bio_list_pop(&sync)))
			do_write(ms, bio);

	while ((bio = bio_list_pop(&recover)))
		dm_rh_delay(ms->rh, bio);

	while ((bio = bio_list_pop(&nosync))) {
		if (unlikely(ms->leg_failure) && errors_handled(ms) && !keep_log(ms)) {
			spin_lock_irq(&ms->lock);
			bio_list_add(&ms->failures, bio);
			spin_unlock_irq(&ms->lock);
			wakeup_mirrord(ms);
		} else {
			map_bio(get_default_mirror(ms), bio);
			submit_bio_noacct(bio);
		}
	}
}

static void do_failures(struct mirror_set *ms, struct bio_list *failures)
{
	struct bio *bio;

	if (likely(!failures->head))
		return;

	/*
	 * If the log has failed, unattempted writes are being
	 * put on the holds list.  We can't issue those writes
	 * until a log has been marked, so we must store them.
	 *
	 * If a 'noflush' suspend is in progress, we can requeue
	 * the I/Os to the core.  This gives userspace a chance
	 * to reconfigure the mirror, at which point the core
	 * will reissue the writes.  If the 'noflush' flag is
	 * not set, we have no choice but to return errors.
	 *
	 * Some writes on the failures list may have been
	 * submitted before the log failure and represent a
	 * failure to write to one of the devices.  It is ok
	 * for us to treat them the same and requeue them
	 * as well.
	 */
	while ((bio = bio_list_pop(failures))) {
		if (!ms->log_failure) {
			ms->in_sync = 0;
			dm_rh_mark_nosync(ms->rh, bio);
		}

		/*
		 * If all the legs are dead, fail the I/O.
		 * If the device has failed and keep_log is enabled,
		 * fail the I/O.
		 *
		 * If we have been told to handle errors, and keep_log
		 * isn't enabled, hold the bio and wait for userspace to
		 * deal with the problem.
		 *
		 * Otherwise pretend that the I/O succeeded. (This would
		 * be wrong if the failed leg returned after reboot and
		 * got replicated back to the good legs.)
		 */
		if (unlikely(!get_valid_mirror(ms) || (keep_log(ms) && ms->log_failure)))
			bio_io_error(bio);
		else if (errors_handled(ms) && !keep_log(ms))
			hold_bio(ms, bio);
		else
			bio_endio(bio);
	}
}

static void trigger_event(struct work_struct *work)
{
	struct mirror_set *ms =
		container_of(work, struct mirror_set, trigger_event);

	dm_table_event(ms->ti->table);
}

/*-----------------------------------------------------------------
 * kmirrord
 *---------------------------------------------------------------*/
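/*
 * The main worker: snapshot the pending lists under the lock, then
 * process recovery, reads, writes and failures in order.
 */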
static void do_mirror(struct work_struct *work)
{
	struct mirror_set *ms = container_of(work, struct mirror_set,
					     kmirrord_work);
	struct bio_list reads, writes, failures;
	unsigned long flags;

	spin_lock_irqsave(&ms->lock, flags);
	reads = ms->reads;
	writes = ms->writes;
	failures = ms->failures;
	bio_list_init(&ms->reads);
	bio_list_init(&ms->writes);
	bio_list_init(&ms->failures);
	spin_unlock_irqrestore(&ms->lock, flags);

	dm_rh_update_states(ms->rh, errors_handled(ms));
	do_recovery(ms);
	do_reads(ms, &reads);
	do_writes(ms, &writes);
	do_failures(ms, &failures);
}

/*-----------------------------------------------------------------
 * Target functions
 *---------------------------------------------------------------*/
static struct mirror_set *alloc_context(unsigned int nr_mirrors,
					uint32_t region_size,
					struct dm_target *ti,
					struct dm_dirty_log *dl)
{
	struct mirror_set *ms =
		kzalloc(struct_size(ms, mirror, nr_mirrors), GFP_KERNEL);

	if (!ms) {
		ti->error = "Cannot allocate mirror context";
		return NULL;
	}

	spin_lock_init(&ms->lock);
	bio_list_init(&ms->reads);
	bio_list_init(&ms->writes);
	bio_list_init(&ms->failures);
	bio_list_init(&ms->holds);

	ms->ti = ti;
	ms->nr_mirrors = nr_mirrors;
	ms->nr_regions = dm_sector_div_up(ti->len, region_size);
	ms->in_sync = 0;
	ms->log_failure = 0;
	ms->leg_failure = 0;
	atomic_set(&ms->suspend, 0);
	atomic_set(&ms->default_mirror, DEFAULT_MIRROR);

	ms->io_client = dm_io_client_create();
	if (IS_ERR(ms->io_client)) {
		ti->error = "Error creating dm_io client";
		kfree(ms);
		return NULL;
	}

	ms->rh = dm_region_hash_create(ms, dispatch_bios, wakeup_mirrord,
				       wakeup_all_recovery_waiters,
				       ms->ti->begin, MAX_RECOVERY,
				       dl, region_size, ms->nr_regions);
	if (IS_ERR(ms->rh)) {
		ti->error = "Error creating dirty region hash";
		dm_io_client_destroy(ms->io_client);
		kfree(ms);
		return NULL;
	}

	return ms;
}

static void free_context(struct mirror_set *ms, struct dm_target *ti,
			 unsigned int m)
{
	while (m--)
		dm_put_device(ti, ms->mirror[m].dev);

	dm_io_client_destroy(ms->io_client);
	dm_region_hash_destroy(ms->rh);
	kfree(ms);
}

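/*
 * Parse one "mirror_path offset" argument pair and acquire the
 * underlying device.
 */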
static int get_mirror(struct mirror_set *ms, struct dm_target *ti,
		      unsigned int mirror, char **argv)
{
	unsigned long long offset;
	char dummy;
	int ret;

	if (sscanf(argv[1], "%llu%c", &offset, &dummy) != 1 ||
	    offset != (sector_t)offset) {
		ti->error = "Invalid offset";
		return -EINVAL;
	}

	ret = dm_get_device(ti, argv[0], dm_table_get_mode(ti->table),
			    &ms->mirror[mirror].dev);
	if (ret) {
		ti->error = "Device lookup failure";
		return ret;
	}

	ms->mirror[mirror].ms = ms;
	atomic_set(&(ms->mirror[mirror].error_count), 0);
	ms->mirror[mirror].error_type = 0;
	ms->mirror[mirror].offset = offset;

	return 0;
}

/*
 * Create dirty log: log_type #log_params <log_params>
 */
static struct dm_dirty_log *create_dirty_log(struct dm_target *ti,
					     unsigned int argc, char **argv,
					     unsigned int *args_used)
{
	unsigned int param_count;
	struct dm_dirty_log *dl;
	char dummy;

	if (argc < 2) {
		ti->error = "Insufficient mirror log arguments";
		return NULL;
	}

	if (sscanf(argv[1], "%u%c", &param_count, &dummy) != 1) {
		ti->error = "Invalid mirror log argument count";
		return NULL;
	}

	*args_used = 2 + param_count;

	if (argc < *args_used) {
		ti->error = "Insufficient mirror log arguments";
		return NULL;
	}

	dl = dm_dirty_log_create(argv[0], ti, mirror_flush, param_count,
				 argv + 2);
	if (!dl) {
		ti->error = "Error creating mirror dirty log";
		return NULL;
	}

	return dl;
}

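/*
 * Parse the optional feature arguments: [#features <features>] where
 * each feature is "handle_errors" or "keep_log".  keep_log is only
 * valid together with handle_errors.
 */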
static int parse_features(struct mirror_set *ms, unsigned int argc, char **argv,
			  unsigned int *args_used)
{
	unsigned int num_features;
	struct dm_target *ti = ms->ti;
	char dummy;
	int i;

	*args_used = 0;

	if (!argc)
		return 0;

	if (sscanf(argv[0], "%u%c", &num_features, &dummy) != 1) {
		ti->error = "Invalid number of features";
		return -EINVAL;
	}

	argc--;
	argv++;
	(*args_used)++;

	if (num_features > argc) {
		ti->error = "Not enough arguments to support feature count";
		return -EINVAL;
	}

	for (i = 0; i < num_features; i++) {
		if (!strcmp("handle_errors", argv[0]))
			ms->features |= DM_RAID1_HANDLE_ERRORS;
		else if (!strcmp("keep_log", argv[0]))
			ms->features |= DM_RAID1_KEEP_LOG;
		else {
			ti->error = "Unrecognised feature requested";
			return -EINVAL;
		}

		argc--;
		argv++;
		(*args_used)++;
	}
	if (!errors_handled(ms) && keep_log(ms)) {
		ti->error = "keep_log feature requires the handle_errors feature";
		return -EINVAL;
	}

	return 0;
}

/*
 * Construct a mirror mapping:
 *
 * log_type #log_params <log_params>
 * #mirrors [mirror_path offset]{2,}
 * [#features <features>]
 *
 * log_type is "core" or "disk"
 * #log_params is between 1 and 3
 *
 * If present, supported features are "handle_errors" and "keep_log".
 */
static int mirror_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	int r;
	unsigned int nr_mirrors, m, args_used;
	struct mirror_set *ms;
	struct dm_dirty_log *dl;
	char dummy;

	dl = create_dirty_log(ti, argc, argv, &args_used);
	if (!dl)
		return -EINVAL;

	argv += args_used;
	argc -= args_used;

	if (!argc || sscanf(argv[0], "%u%c", &nr_mirrors, &dummy) != 1 ||
	    nr_mirrors < 2 || nr_mirrors > MAX_NR_MIRRORS) {
		ti->error = "Invalid number of mirrors";
		dm_dirty_log_destroy(dl);
		return -EINVAL;
	}

	argv++, argc--;

	if (argc < nr_mirrors * 2) {
		ti->error = "Too few mirror arguments";
		dm_dirty_log_destroy(dl);
		return -EINVAL;
	}

	ms = alloc_context(nr_mirrors, dl->type->get_region_size(dl), ti, dl);
	if (!ms) {
		dm_dirty_log_destroy(dl);
		return -ENOMEM;
	}

	/* Get the mirror parameter sets */
	for (m = 0; m < nr_mirrors; m++) {
		r = get_mirror(ms, ti, m, argv);
		if (r) {
			free_context(ms, ti, m);
			return r;
		}
		argv += 2;
		argc -= 2;
	}

	ti->private = ms;

	r = dm_set_target_max_io_len(ti, dm_rh_get_region_size(ms->rh));
	if (r)
		goto err_free_context;

	ti->num_flush_bios = 1;
	ti->num_discard_bios = 1;
	ti->per_io_data_size = sizeof(struct dm_raid1_bio_record);

	ms->kmirrord_wq = alloc_workqueue("kmirrord", WQ_MEM_RECLAIM, 0);
	if (!ms->kmirrord_wq) {
		DMERR("couldn't start kmirrord");
		r = -ENOMEM;
		goto err_free_context;
	}
	INIT_WORK(&ms->kmirrord_work, do_mirror);
	timer_setup(&ms->timer, delayed_wake_fn, 0);
	ms->timer_pending = 0;
	INIT_WORK(&ms->trigger_event, trigger_event);

	r = parse_features(ms, argc, argv, &args_used);
	if (r)
		goto err_destroy_wq;

	argv += args_used;
	argc -= args_used;

	/*
	 * Any read-balancing addition depends on the
	 * DM_RAID1_HANDLE_ERRORS flag being present.
	 * This is because the decision to balance depends
	 * on the sync state of a region.  If the above
	 * flag is not present, we ignore errors; and
	 * the sync state may be inaccurate.
	 */

	if (argc) {
		ti->error = "Too many mirror arguments";
		r = -EINVAL;
		goto err_destroy_wq;
	}

	ms->kcopyd_client = dm_kcopyd_client_create(&dm_kcopyd_throttle);
	if (IS_ERR(ms->kcopyd_client)) {
		r = PTR_ERR(ms->kcopyd_client);
		goto err_destroy_wq;
	}

	wakeup_mirrord(ms);
	return 0;

err_destroy_wq:
	destroy_workqueue(ms->kmirrord_wq);
err_free_context:
	free_context(ms, ti, ms->nr_mirrors);
	return r;
}

static void mirror_dtr(struct dm_target *ti)
{
	struct mirror_set *ms = (struct mirror_set *) ti->private;

	del_timer_sync(&ms->timer);
	flush_workqueue(ms->kmirrord_wq);
	flush_work(&ms->trigger_event);
	dm_kcopyd_client_destroy(ms->kcopyd_client);
	destroy_workqueue(ms->kmirrord_wq);
	free_context(ms, ti, ms->nr_mirrors);
}

/*
 * Mirror mapping function
 */
static int mirror_map(struct dm_target *ti, struct bio *bio)
{
	int r, rw = bio_data_dir(bio);
	struct mirror *m;
	struct mirror_set *ms = ti->private;
	struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);
	struct dm_raid1_bio_record *bio_record =
	  dm_per_bio_data(bio, sizeof(struct dm_raid1_bio_record));

	bio_record->details.bi_bdev = NULL;

	if (rw == WRITE) {
		/* Save region for mirror_end_io() handler */
		bio_record->write_region = dm_rh_bio_to_region(ms->rh, bio);
		queue_bio(ms, bio, rw);
		return DM_MAPIO_SUBMITTED;
	}

	r = log->type->in_sync(log, dm_rh_bio_to_region(ms->rh, bio), 0);
	if (r < 0 && r != -EWOULDBLOCK)
		return DM_MAPIO_KILL;

	/*
	 * If region is not in-sync queue the bio.
	 */
	if (!r || (r == -EWOULDBLOCK)) {
		if (bio->bi_opf & REQ_RAHEAD)
			return DM_MAPIO_KILL;

		queue_bio(ms, bio, rw);
		return DM_MAPIO_SUBMITTED;
	}

	/*
	 * The region is in-sync and we can perform reads directly.
	 * Store enough information so we can retry if it fails.
	 */
	m = choose_mirror(ms, bio->bi_iter.bi_sector);
	if (unlikely(!m))
		return DM_MAPIO_KILL;

	dm_bio_record(&bio_record->details, bio);
	bio_record->m = m;

	map_bio(m, bio);

	return DM_MAPIO_REMAPPED;
}

static int mirror_end_io(struct dm_target *ti, struct bio *bio,
		blk_status_t *error)
{
	int rw = bio_data_dir(bio);
	struct mirror_set *ms = (struct mirror_set *) ti->private;
	struct mirror *m = NULL;
	struct dm_bio_details *bd = NULL;
	struct dm_raid1_bio_record *bio_record =
	  dm_per_bio_data(bio, sizeof(struct dm_raid1_bio_record));

	/*
	 * We need to dec pending if this was a write.
	 */
	if (rw == WRITE) {
		if (!(bio->bi_opf & REQ_PREFLUSH) &&
		    bio_op(bio) != REQ_OP_DISCARD)
			dm_rh_dec(ms->rh, bio_record->write_region);
		return DM_ENDIO_DONE;
	}

	if (*error == BLK_STS_NOTSUPP)
		goto out;

	if (bio->bi_opf & REQ_RAHEAD)
		goto out;

	if (unlikely(*error)) {
		if (!bio_record->details.bi_bdev) {
			/*
			 * There wasn't enough memory to record necessary
			 * information for a retry or there was no other
			 * mirror in-sync.
			 */
			DMERR_LIMIT("Mirror read failed.");
			return DM_ENDIO_DONE;
		}

		m = bio_record->m;

		DMERR("Mirror read failed from %s. Trying alternative device.",
		      m->dev->name);

		fail_mirror(m, DM_RAID1_READ_ERROR);

		/*
		 * A failed read is requeued for another attempt using an intact
		 * mirror.
		 */
		if (default_ok(m) || mirror_available(ms, bio)) {
			bd = &bio_record->details;

			dm_bio_restore(bd, bio);
			bio_record->details.bi_bdev = NULL;
			bio->bi_status = 0;

			queue_bio(ms, bio, rw);
			return DM_ENDIO_INCOMPLETE;
		}
		DMERR("All replicated volumes dead, failing I/O");
	}

out:
	bio_record->details.bi_bdev = NULL;

	return DM_ENDIO_DONE;
}

static void mirror_presuspend(struct dm_target *ti)
{
	struct mirror_set *ms = (struct mirror_set *) ti->private;
	struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);

	struct bio_list holds;
	struct bio *bio;

	atomic_set(&ms->suspend, 1);

	/*
	 * Process bios in the hold list to start recovery waiting
	 * for bios in the hold list. After the process, no bio has
	 * a chance to be added in the hold list because ms->suspend
	 * is set.
	 */
	spin_lock_irq(&ms->lock);
	holds = ms->holds;
	bio_list_init(&ms->holds);
	spin_unlock_irq(&ms->lock);

	while ((bio = bio_list_pop(&holds)))
		hold_bio(ms, bio);

	/*
	 * We must finish up all the work that we've
	 * generated (i.e. recovery work).
	 */
	dm_rh_stop_recovery(ms->rh);

	wait_event(_kmirrord_recovery_stopped,
		   !dm_rh_recovery_in_flight(ms->rh));

	if (log->type->presuspend && log->type->presuspend(log))
		/* FIXME: need better error handling */
		DMWARN("log presuspend failed");

	/*
	 * Now that recovery is complete/stopped and the
	 * delayed bios are queued, we need to wait for
	 * the worker thread to complete.  This way,
	 * we know that all of our I/O has been pushed.
	 */
	flush_workqueue(ms->kmirrord_wq);
}

static void mirror_postsuspend(struct dm_target *ti)
{
	struct mirror_set *ms = ti->private;
	struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);

	if (log->type->postsuspend && log->type->postsuspend(log))
		/* FIXME: need better error handling */
		DMWARN("log postsuspend failed");
}

static void mirror_resume(struct dm_target *ti)
{
	struct mirror_set *ms = ti->private;
	struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);

	atomic_set(&ms->suspend, 0);
	if (log->type->resume && log->type->resume(log))
		/* FIXME: need better error handling */
		DMWARN("log resume failed");
	dm_rh_start_recovery(ms->rh);
}

/*
 * device_status_char
 * @m: mirror device/leg we want the status of
 *
 * We return one character representing the most severe error
 * we have encountered.
 *    A => Alive - No failures
 *    D => Dead - A write failure occurred leaving mirror out-of-sync
 *    S => Sync - A synchronization failure occurred, mirror out-of-sync
 *    R => Read - A read failure occurred, mirror data unaffected
 *    F => Flush - A flush failure occurred
 *    U => Unclassified - An error was recorded but its type is unknown
 *
 * Returns: <char>
 */
static char device_status_char(struct mirror *m)
{
	if (!atomic_read(&(m->error_count)))
		return 'A';

	return (test_bit(DM_RAID1_FLUSH_ERROR, &(m->error_type))) ? 'F' :
		(test_bit(DM_RAID1_WRITE_ERROR, &(m->error_type))) ? 'D' :
		(test_bit(DM_RAID1_SYNC_ERROR, &(m->error_type))) ? 'S' :
		(test_bit(DM_RAID1_READ_ERROR, &(m->error_type))) ? 'R' : 'U';
}

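/*
 * STATUSTYPE_INFO reports per-leg health characters and the sync count;
 * STATUSTYPE_TABLE reproduces the constructor arguments.
 */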
static void mirror_status(struct dm_target *ti, status_type_t type,
			  unsigned int status_flags, char *result, unsigned int maxlen)
{
	unsigned int m, sz = 0;
	int num_feature_args = 0;
	struct mirror_set *ms = (struct mirror_set *) ti->private;
	struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);
	char buffer[MAX_NR_MIRRORS + 1];

	switch (type) {
	case STATUSTYPE_INFO:
		DMEMIT("%d ", ms->nr_mirrors);
		for (m = 0; m < ms->nr_mirrors; m++) {
			DMEMIT("%s ", ms->mirror[m].dev->name);
			buffer[m] = device_status_char(&(ms->mirror[m]));
		}
		buffer[m] = '\0';

		DMEMIT("%llu/%llu 1 %s ",
		      (unsigned long long)log->type->get_sync_count(log),
		      (unsigned long long)ms->nr_regions, buffer);

		sz += log->type->status(log, type, result+sz, maxlen-sz);

		break;

	case STATUSTYPE_TABLE:
		sz = log->type->status(log, type, result, maxlen);

		DMEMIT("%d", ms->nr_mirrors);
		for (m = 0; m < ms->nr_mirrors; m++)
			DMEMIT(" %s %llu", ms->mirror[m].dev->name,
			       (unsigned long long)ms->mirror[m].offset);

		num_feature_args += !!errors_handled(ms);
		num_feature_args += !!keep_log(ms);
		if (num_feature_args) {
			DMEMIT(" %d", num_feature_args);
			if (errors_handled(ms))
				DMEMIT(" handle_errors");
			if (keep_log(ms))
				DMEMIT(" keep_log");
		}

		break;

	case STATUSTYPE_IMA:
		DMEMIT_TARGET_NAME_VERSION(ti->type);
		DMEMIT(",nr_mirrors=%d", ms->nr_mirrors);
		for (m = 0; m < ms->nr_mirrors; m++) {
			DMEMIT(",mirror_device_%d=%s", m, ms->mirror[m].dev->name);
			DMEMIT(",mirror_device_%d_status=%c",
			       m, device_status_char(&(ms->mirror[m])));
		}

		DMEMIT(",handle_errors=%c", errors_handled(ms) ? 'y' : 'n');
		DMEMIT(",keep_log=%c", keep_log(ms) ? 'y' : 'n');

		DMEMIT(",log_type_status=");
		sz += log->type->status(log, type, result+sz, maxlen-sz);
		DMEMIT(";");
		break;
	}
}

static int mirror_iterate_devices(struct dm_target *ti,
				  iterate_devices_callout_fn fn, void *data)
{
	struct mirror_set *ms = ti->private;
	int ret = 0;
	unsigned int i;

	for (i = 0; !ret && i < ms->nr_mirrors; i++)
		ret = fn(ti, ms->mirror[i].dev,
			 ms->mirror[i].offset, ti->len, data);

	return ret;
}

static struct target_type mirror_target = {
	.name	 = "mirror",
	.version = {1, 14, 0},
	.module	 = THIS_MODULE,
	.ctr	 = mirror_ctr,
	.dtr	 = mirror_dtr,
	.map	 = mirror_map,
	.end_io	 = mirror_end_io,
	.presuspend = mirror_presuspend,
	.postsuspend = mirror_postsuspend,
	.resume	 = mirror_resume,
	.status	 = mirror_status,
	.iterate_devices = mirror_iterate_devices,
};

static int __init dm_mirror_init(void)
{
	int r;

	r = dm_register_target(&mirror_target);
	if (r < 0) {
		DMERR("Failed to register mirror target");
		goto bad_target;
	}

	return 0;

bad_target:
	return r;
}

static void __exit dm_mirror_exit(void)
{
	dm_unregister_target(&mirror_target);
}

/* Module hooks */
module_init(dm_mirror_init);
module_exit(dm_mirror_exit);

MODULE_DESCRIPTION(DM_NAME " mirror target");
MODULE_AUTHOR("Joe Thornber");
MODULE_LICENSE("GPL");