// SPDX-License-Identifier: GPL-2.0-or-later
/*
   raid0.c : Multiple Devices driver for Linux
	     Copyright (C) 1994-96 Marc ZYNGIER
	     <zyngier@ufr-info-p7.ibp.fr> or
	     <maz@gloups.fdn.fr>
	     Copyright (C) 1999, 2000 Ingo Molnar, Red Hat

   RAID-0 management functions.

*/

#include <linux/blkdev.h>
#include <linux/seq_file.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <trace/events/block.h>
#include "md.h"
#include "raid0.h"
#include "raid5.h"

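/*
 * Module parameter consulted by create_strip_zones() when a multi-zone
 * array's superblock does not record a layout; 1 and 2 correspond to
 * RAID0_ORIG_LAYOUT and RAID0_ALT_MULTIZONE_LAYOUT as defined in raid0.h.
 */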
static int default_layout = 0;
module_param(default_layout, int, 0644);

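/*
 * Feature flags that raid0 cannot honour; they are cleared from the mddev
 * when another personality is taken over (see the raid0_takeover_*() paths).
 */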
#define UNSUPPORTED_MDDEV_FLAGS		\
	((1L << MD_HAS_JOURNAL) |	\
	 (1L << MD_JOURNAL_CLEAN) |	\
	 (1L << MD_FAILFAST_SUPPORTED) |\
	 (1L << MD_HAS_PPL) |		\
	 (1L << MD_HAS_MULTIPLE_PPLS))

/*
 * inform the user of the raid configuration
 */
static void dump_zones(struct mddev *mddev)
{
	int j, k;
	sector_t zone_size = 0;
	sector_t zone_start = 0;
	char b[BDEVNAME_SIZE];
	struct r0conf *conf = mddev->private;
	int raid_disks = conf->strip_zone[0].nb_dev;

	pr_debug("md: RAID0 configuration for %s - %d zone%s\n",
		 mdname(mddev),
		 conf->nr_strip_zones, conf->nr_strip_zones == 1 ? "" : "s");
	for (j = 0; j < conf->nr_strip_zones; j++) {
		char line[200];
		int len = 0;

		for (k = 0; k < conf->strip_zone[j].nb_dev; k++)
			len += scnprintf(line+len, 200-len, "%s%s", k?"/":"",
					bdevname(conf->devlist[j*raid_disks
							       + k]->bdev, b));
		pr_debug("md: zone%d=[%s]\n", j, line);

		zone_size = conf->strip_zone[j].zone_end - zone_start;
		pr_debug("      zone-offset=%10lluKB, device-offset=%10lluKB, size=%10lluKB\n",
			(unsigned long long)zone_start>>1,
			(unsigned long long)conf->strip_zone[j].dev_start>>1,
			(unsigned long long)zone_size>>1);
		zone_start = conf->strip_zone[j].zone_end;
	}
}

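/*
 * Zone construction (illustrative numbers): members that still have capacity
 * left form one "strip zone" per distinct device size.  With three members of
 * 100G, 100G and 200G, zone 0 stripes across all three disks for the first
 * 100G of each (300G of array space), and zone 1 stripes across the single
 * remaining 100G tail of the 200G disk.  devlist[] records, per zone, the
 * members that participate in it.
 */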
static int create_strip_zones(struct mddev *mddev, struct r0conf **private_conf)
{
	int i, c, err;
	sector_t curr_zone_end, sectors;
	struct md_rdev *smallest, *rdev1, *rdev2, *rdev, **dev;
	struct strip_zone *zone;
	int cnt;
	char b[BDEVNAME_SIZE];
	char b2[BDEVNAME_SIZE];
	struct r0conf *conf = kzalloc(sizeof(*conf), GFP_KERNEL);
	unsigned blksize = 512;

	*private_conf = ERR_PTR(-ENOMEM);
	if (!conf)
		return -ENOMEM;
	rdev_for_each(rdev1, mddev) {
		pr_debug("md/raid0:%s: looking at %s\n",
			 mdname(mddev),
			 bdevname(rdev1->bdev, b));
		c = 0;

		/* round size to chunk_size */
		sectors = rdev1->sectors;
		sector_div(sectors, mddev->chunk_sectors);
		rdev1->sectors = sectors * mddev->chunk_sectors;

		blksize = max(blksize, queue_logical_block_size(
				      rdev1->bdev->bd_disk->queue));

		rdev_for_each(rdev2, mddev) {
			pr_debug("md/raid0:%s:   comparing %s(%llu)"
				 " with %s(%llu)\n",
				 mdname(mddev),
				 bdevname(rdev1->bdev, b),
				 (unsigned long long)rdev1->sectors,
				 bdevname(rdev2->bdev, b2),
				 (unsigned long long)rdev2->sectors);
			if (rdev2 == rdev1) {
				pr_debug("md/raid0:%s:   END\n",
					 mdname(mddev));
				break;
			}
			if (rdev2->sectors == rdev1->sectors) {
				/*
				 * Not unique, don't count it as a new
				 * group
				 */
				pr_debug("md/raid0:%s:   EQUAL\n",
					 mdname(mddev));
				c = 1;
				break;
			}
			pr_debug("md/raid0:%s:   NOT EQUAL\n",
				 mdname(mddev));
		}
		if (!c) {
			pr_debug("md/raid0:%s:   ==> UNIQUE\n",
				 mdname(mddev));
			conf->nr_strip_zones++;
			pr_debug("md/raid0:%s: %d zones\n",
				 mdname(mddev), conf->nr_strip_zones);
		}
	}
	pr_debug("md/raid0:%s: FINAL %d zones\n",
		 mdname(mddev), conf->nr_strip_zones);

	/*
	 * now since we have the hard sector sizes, we can make sure
	 * chunk size is a multiple of that sector size
	 */
	if ((mddev->chunk_sectors << 9) % blksize) {
		pr_warn("md/raid0:%s: chunk_size of %d not multiple of block size %d\n",
			mdname(mddev),
			mddev->chunk_sectors << 9, blksize);
		err = -EINVAL;
		goto abort;
	}

	err = -ENOMEM;
	conf->strip_zone = kcalloc(conf->nr_strip_zones,
				   sizeof(struct strip_zone),
				   GFP_KERNEL);
	if (!conf->strip_zone)
		goto abort;
	conf->devlist = kzalloc(array3_size(sizeof(struct md_rdev *),
					    conf->nr_strip_zones,
					    mddev->raid_disks),
				GFP_KERNEL);
	if (!conf->devlist)
		goto abort;

	/* The first zone must contain all devices, so here we check that
	 * there is a proper alignment of slots to devices and find them all
	 */
	zone = &conf->strip_zone[0];
	cnt = 0;
	smallest = NULL;
	dev = conf->devlist;
	err = -EINVAL;
	rdev_for_each(rdev1, mddev) {
		int j = rdev1->raid_disk;

		if (mddev->level == 10) {
			/* taking over a raid10-n2 array */
			j /= 2;
			rdev1->new_raid_disk = j;
		}

		if (mddev->level == 1) {
			/* taking over a raid1 array -
			 * we have only one active disk
			 */
			j = 0;
			rdev1->new_raid_disk = j;
		}

		if (j < 0) {
			pr_warn("md/raid0:%s: remove inactive devices before converting to RAID0\n",
				mdname(mddev));
			goto abort;
		}
		if (j >= mddev->raid_disks) {
			pr_warn("md/raid0:%s: bad disk number %d - aborting!\n",
				mdname(mddev), j);
			goto abort;
		}
		if (dev[j]) {
			pr_warn("md/raid0:%s: multiple devices for %d - aborting!\n",
				mdname(mddev), j);
			goto abort;
		}
		dev[j] = rdev1;

		if (!smallest || (rdev1->sectors < smallest->sectors))
			smallest = rdev1;
		cnt++;
	}
	if (cnt != mddev->raid_disks) {
		pr_warn("md/raid0:%s: too few disks (%d of %d) - aborting!\n",
			mdname(mddev), cnt, mddev->raid_disks);
		goto abort;
	}
	zone->nb_dev = cnt;
	zone->zone_end = smallest->sectors * cnt;

	curr_zone_end = zone->zone_end;

	/* now do the other zones */
	for (i = 1; i < conf->nr_strip_zones; i++)
	{
		int j;

		zone = conf->strip_zone + i;
		dev = conf->devlist + i * mddev->raid_disks;

		pr_debug("md/raid0:%s: zone %d\n", mdname(mddev), i);
		zone->dev_start = smallest->sectors;
		smallest = NULL;
		c = 0;

		for (j = 0; j < cnt; j++) {
			rdev = conf->devlist[j];
			if (rdev->sectors <= zone->dev_start) {
				pr_debug("md/raid0:%s: checking %s ... nope\n",
					 mdname(mddev),
					 bdevname(rdev->bdev, b));
				continue;
			}
			pr_debug("md/raid0:%s: checking %s ..."
				 " contained as device %d\n",
				 mdname(mddev),
				 bdevname(rdev->bdev, b), c);
			dev[c] = rdev;
			c++;
			if (!smallest || rdev->sectors < smallest->sectors) {
				smallest = rdev;
				pr_debug("md/raid0:%s:  (%llu) is smallest!.\n",
					 mdname(mddev),
					 (unsigned long long)rdev->sectors);
			}
		}

		zone->nb_dev = c;
		sectors = (smallest->sectors - zone->dev_start) * c;
		pr_debug("md/raid0:%s: zone->nb_dev: %d, sectors: %llu\n",
			 mdname(mddev),
			 zone->nb_dev, (unsigned long long)sectors);

		curr_zone_end += sectors;
		zone->zone_end = curr_zone_end;

		pr_debug("md/raid0:%s: current zone start: %llu\n",
			 mdname(mddev),
			 (unsigned long long)smallest->sectors);
	}

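	/*
	 * Pick the multi-zone layout: a single-zone array (or one whose second
	 * zone has only one member) behaves identically under both layouts, so
	 * it can safely use the original one.  Otherwise honour the layout
	 * recorded in the superblock, then the default_layout module
	 * parameter, and finally refuse to assemble an ambiguous array.
	 */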
	if (conf->nr_strip_zones == 1 || conf->strip_zone[1].nb_dev == 1) {
		conf->layout = RAID0_ORIG_LAYOUT;
	} else if (mddev->layout == RAID0_ORIG_LAYOUT ||
		   mddev->layout == RAID0_ALT_MULTIZONE_LAYOUT) {
		conf->layout = mddev->layout;
	} else if (default_layout == RAID0_ORIG_LAYOUT ||
		   default_layout == RAID0_ALT_MULTIZONE_LAYOUT) {
		conf->layout = default_layout;
	} else {
		pr_err("md/raid0:%s: cannot assemble multi-zone RAID0 with default_layout setting\n",
		       mdname(mddev));
		pr_err("md/raid0: please set raid0.default_layout to 1 or 2\n");
		err = -EOPNOTSUPP;
		goto abort;
	}

	pr_debug("md/raid0:%s: done.\n", mdname(mddev));
	*private_conf = conf;

	return 0;
abort:
	kfree(conf->strip_zone);
	kfree(conf->devlist);
	kfree(conf);
	*private_conf = ERR_PTR(err);
	return err;
}

/* Find the zone which holds a particular offset
 * Update *sectorp to be an offset in that zone
 */
static struct strip_zone *find_zone(struct r0conf *conf,
				    sector_t *sectorp)
{
	int i;
	struct strip_zone *z = conf->strip_zone;
	sector_t sector = *sectorp;

	for (i = 0; i < conf->nr_strip_zones; i++)
		if (sector < z[i].zone_end) {
			if (i)
				*sectorp = sector - z[i-1].zone_end;
			return z + i;
		}
	BUG();
}

/*
 * remap the bio to the target device: we separate two flows, a
 * power-of-2 flow and a general flow, for the sake of performance
 */
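/*
 * Illustrative example (assumed numbers): with chunk_sects = 128 and a
 * 3-device zone, sector 1000 of the zone lies in chunk 7 (1000 >> 7), so it
 * maps to device 7 % 3 = 1; the zone-relative stripe is 1000 / (3 * 128) = 2,
 * giving a device offset of 2 * 128 + (1000 & 127) = 360 sectors (plus the
 * zone's dev_start and the member's data_offset, added by the caller).
 */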
static struct md_rdev *map_sector(struct mddev *mddev, struct strip_zone *zone,
				sector_t sector, sector_t *sector_offset)
{
	unsigned int sect_in_chunk;
	sector_t chunk;
	struct r0conf *conf = mddev->private;
	int raid_disks = conf->strip_zone[0].nb_dev;
	unsigned int chunk_sects = mddev->chunk_sectors;

	if (is_power_of_2(chunk_sects)) {
		int chunksect_bits = ffz(~chunk_sects);
		/* find the sector offset inside the chunk */
		sect_in_chunk = sector & (chunk_sects - 1);
		sector >>= chunksect_bits;
		/* chunk in zone */
		chunk = *sector_offset;
		/* quotient is the chunk in real device */
		sector_div(chunk, zone->nb_dev << chunksect_bits);
	} else {
		sect_in_chunk = sector_div(sector, chunk_sects);
		chunk = *sector_offset;
		sector_div(chunk, chunk_sects * zone->nb_dev);
	}
	/*
	 * position the bio over the real device
	 * real sector = chunk in device + starting of zone
	 *	+ the position in the chunk
	 */
	*sector_offset = (chunk * chunk_sects) + sect_in_chunk;
	return conf->devlist[(zone - conf->strip_zone)*raid_disks
			     + sector_div(sector, zone->nb_dev)];
}

static sector_t raid0_size(struct mddev *mddev, sector_t sectors, int raid_disks)
{
	sector_t array_sectors = 0;
	struct md_rdev *rdev;

	WARN_ONCE(sectors || raid_disks,
		  "%s does not support generic reshape\n", __func__);

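	/* The array's capacity is the sum of each member's chunk-aligned size. */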
	rdev_for_each(rdev, mddev)
		array_sectors += (rdev->sectors &
				  ~(sector_t)(mddev->chunk_sectors-1));

	return array_sectors;
}

static void raid0_free(struct mddev *mddev, void *priv);

static int raid0_run(struct mddev *mddev)
{
	struct r0conf *conf;
	int ret;

	if (mddev->chunk_sectors == 0) {
		pr_warn("md/raid0:%s: chunk size must be set.\n", mdname(mddev));
		return -EINVAL;
	}
	if (md_check_no_bitmap(mddev))
		return -EINVAL;

	/* if private is not null, we are here after takeover */
	if (mddev->private == NULL) {
		ret = create_strip_zones(mddev, &conf);
		if (ret < 0)
			return ret;
		mddev->private = conf;
	}
	conf = mddev->private;
	if (mddev->queue) {
		struct md_rdev *rdev;
		bool discard_supported = false;

		blk_queue_max_hw_sectors(mddev->queue, mddev->chunk_sectors);
		blk_queue_max_write_same_sectors(mddev->queue, mddev->chunk_sectors);
		blk_queue_max_write_zeroes_sectors(mddev->queue, mddev->chunk_sectors);
		blk_queue_max_discard_sectors(mddev->queue, UINT_MAX);

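		/*
		 * Advertise the chunk size as the minimum I/O size and one
		 * full stripe (chunk * number of disks) as the optimal I/O
		 * size, so that upper layers can align and batch accordingly.
		 */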
		blk_queue_io_min(mddev->queue, mddev->chunk_sectors << 9);
		blk_queue_io_opt(mddev->queue,
				 (mddev->chunk_sectors << 9) * mddev->raid_disks);

		rdev_for_each(rdev, mddev) {
			disk_stack_limits(mddev->gendisk, rdev->bdev,
					  rdev->data_offset << 9);
			if (blk_queue_discard(bdev_get_queue(rdev->bdev)))
				discard_supported = true;
		}
		if (!discard_supported)
			blk_queue_flag_clear(QUEUE_FLAG_DISCARD, mddev->queue);
		else
			blk_queue_flag_set(QUEUE_FLAG_DISCARD, mddev->queue);
	}

	/* calculate array device size */
	md_set_array_sectors(mddev, raid0_size(mddev, 0, 0));

	pr_debug("md/raid0:%s: md_size is %llu sectors.\n",
		 mdname(mddev),
		 (unsigned long long)mddev->array_sectors);

	dump_zones(mddev);

	ret = md_integrity_register(mddev);

	return ret;
}

static void raid0_free(struct mddev *mddev, void *priv)
{
	struct r0conf *conf = priv;

	kfree(conf->strip_zone);
	kfree(conf->devlist);
	kfree(conf);
}

static void raid0_handle_discard(struct mddev *mddev, struct bio *bio)
{
	struct r0conf *conf = mddev->private;
	struct strip_zone *zone;
	sector_t start = bio->bi_iter.bi_sector;
	sector_t end;
	unsigned int stripe_size;
	sector_t first_stripe_index, last_stripe_index;
	sector_t start_disk_offset;
	unsigned int start_disk_index;
	sector_t end_disk_offset;
	unsigned int end_disk_index;
	unsigned int disk;

	zone = find_zone(conf, &start);

	if (bio_end_sector(bio) > zone->zone_end) {
		struct bio *split = bio_split(bio,
			zone->zone_end - bio->bi_iter.bi_sector, GFP_NOIO,
			&mddev->bio_set);
		bio_chain(split, bio);
		submit_bio_noacct(bio);
		bio = split;
		end = zone->zone_end;
	} else
		end = bio_end_sector(bio);

	if (zone != conf->strip_zone)
		end = end - zone[-1].zone_end;

	/* Now start and end are offsets within the zone */
	stripe_size = zone->nb_dev * mddev->chunk_sectors;

	first_stripe_index = start;
	sector_div(first_stripe_index, stripe_size);
	last_stripe_index = end;
	sector_div(last_stripe_index, stripe_size);

	start_disk_index = (int)(start - first_stripe_index * stripe_size) /
		mddev->chunk_sectors;
	start_disk_offset = ((int)(start - first_stripe_index * stripe_size) %
		mddev->chunk_sectors) +
		first_stripe_index * mddev->chunk_sectors;
	end_disk_index = (int)(end - last_stripe_index * stripe_size) /
		mddev->chunk_sectors;
	end_disk_offset = ((int)(end - last_stripe_index * stripe_size) %
		mddev->chunk_sectors) +
		last_stripe_index * mddev->chunk_sectors;

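	/*
	 * For each member of the zone, compute the [dev_start, dev_end) range
	 * (in member-relative sectors) that the discard covers and issue a
	 * per-device discard chained to the original bio.
	 */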
	for (disk = 0; disk < zone->nb_dev; disk++) {
		sector_t dev_start, dev_end;
		struct bio *discard_bio = NULL;
		struct md_rdev *rdev;

		if (disk < start_disk_index)
			dev_start = (first_stripe_index + 1) *
				mddev->chunk_sectors;
		else if (disk > start_disk_index)
			dev_start = first_stripe_index * mddev->chunk_sectors;
		else
			dev_start = start_disk_offset;

		if (disk < end_disk_index)
			dev_end = (last_stripe_index + 1) * mddev->chunk_sectors;
		else if (disk > end_disk_index)
			dev_end = last_stripe_index * mddev->chunk_sectors;
		else
			dev_end = end_disk_offset;

		if (dev_end <= dev_start)
			continue;

		rdev = conf->devlist[(zone - conf->strip_zone) *
			conf->strip_zone[0].nb_dev + disk];
		if (__blkdev_issue_discard(rdev->bdev,
			dev_start + zone->dev_start + rdev->data_offset,
			dev_end - dev_start, GFP_NOIO, 0, &discard_bio) ||
		    !discard_bio)
			continue;
		bio_chain(discard_bio, bio);
		bio_clone_blkg_association(discard_bio, bio);
		if (mddev->gendisk)
			trace_block_bio_remap(bdev_get_queue(rdev->bdev),
				discard_bio, disk_devt(mddev->gendisk),
				bio->bi_iter.bi_sector);
		submit_bio_noacct(discard_bio);
	}
	bio_endio(bio);
}

static bool raid0_make_request(struct mddev *mddev, struct bio *bio)
{
	struct r0conf *conf = mddev->private;
	struct strip_zone *zone;
	struct md_rdev *tmp_dev;
	sector_t bio_sector;
	sector_t sector;
	sector_t orig_sector;
	unsigned chunk_sects;
	unsigned sectors;

	if (unlikely(bio->bi_opf & REQ_PREFLUSH)
	    && md_flush_request(mddev, bio))
		return true;

	if (unlikely((bio_op(bio) == REQ_OP_DISCARD))) {
		raid0_handle_discard(mddev, bio);
		return true;
	}

	bio_sector = bio->bi_iter.bi_sector;
	sector = bio_sector;
	chunk_sects = mddev->chunk_sectors;

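	/*
	 * Sectors remaining in the chunk that holds bi_sector; if the bio
	 * crosses a chunk boundary it is split here and the tail resubmitted.
	 */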
	sectors = chunk_sects -
		(likely(is_power_of_2(chunk_sects))
		 ? (sector & (chunk_sects-1))
		 : sector_div(sector, chunk_sects));

	/* Restore due to sector_div */
	sector = bio_sector;

	if (sectors < bio_sectors(bio)) {
		struct bio *split = bio_split(bio, sectors, GFP_NOIO,
					      &mddev->bio_set);
		bio_chain(split, bio);
		submit_bio_noacct(bio);
		bio = split;
	}

	orig_sector = sector;
	zone = find_zone(mddev->private, &sector);
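	/*
	 * The two multi-zone layouts differ only in which sector value is fed
	 * to map_sector(): the original layout uses the array-absolute sector,
	 * the alternate layout the zone-relative one, so chunks in zones after
	 * the first land on different members.  Single-zone arrays behave the
	 * same either way.
	 */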
	switch (conf->layout) {
	case RAID0_ORIG_LAYOUT:
		tmp_dev = map_sector(mddev, zone, orig_sector, &sector);
		break;
	case RAID0_ALT_MULTIZONE_LAYOUT:
		tmp_dev = map_sector(mddev, zone, sector, &sector);
		break;
	default:
		WARN(1, "md/raid0:%s: Invalid layout\n", mdname(mddev));
		bio_io_error(bio);
		return true;
	}

	if (unlikely(is_mddev_broken(tmp_dev, "raid0"))) {
		bio_io_error(bio);
		return true;
	}

	bio_set_dev(bio, tmp_dev->bdev);
	bio->bi_iter.bi_sector = sector + zone->dev_start +
		tmp_dev->data_offset;

	if (mddev->gendisk)
		trace_block_bio_remap(bio->bi_disk->queue, bio,
				disk_devt(mddev->gendisk), bio_sector);
	mddev_check_writesame(mddev, bio);
	mddev_check_write_zeroes(mddev, bio);
	submit_bio_noacct(bio);
	return true;
}

static void raid0_status(struct seq_file *seq, struct mddev *mddev)
{
	seq_printf(seq, " %dk chunks", mddev->chunk_sectors / 2);
	return;
}

static void *raid0_takeover_raid45(struct mddev *mddev)
{
	struct md_rdev *rdev;
	struct r0conf *priv_conf;

	if (mddev->degraded != 1) {
		pr_warn("md/raid0:%s: raid5 must be degraded! Degraded disks: %d\n",
			mdname(mddev),
			mddev->degraded);
		return ERR_PTR(-EINVAL);
	}

	rdev_for_each(rdev, mddev) {
		/* check slot number for a disk */
		if (rdev->raid_disk == mddev->raid_disks-1) {
			pr_warn("md/raid0:%s: raid5 must have missing parity disk!\n",
				mdname(mddev));
			return ERR_PTR(-EINVAL);
		}
		rdev->sectors = mddev->dev_sectors;
	}

	/* Set new parameters */
	mddev->new_level = 0;
	mddev->new_layout = 0;
	mddev->new_chunk_sectors = mddev->chunk_sectors;
	mddev->raid_disks--;
	mddev->delta_disks = -1;
	/* make sure it will not be marked as dirty */
	mddev->recovery_cp = MaxSector;
	mddev_clear_unsupported_flags(mddev, UNSUPPORTED_MDDEV_FLAGS);

	create_strip_zones(mddev, &priv_conf);

	return priv_conf;
}

static void *raid0_takeover_raid10(struct mddev *mddev)
{
	struct r0conf *priv_conf;

	/* Check layout:
	 *  - far_copies must be 1
	 *  - near_copies must be 2
	 *  - disks number must be even
	 *  - all mirrors must be already degraded
	 */
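	/* layout 0x102 encodes far_copies = 1 and near_copies = 2, i.e. a raid10 "n2" array */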
	if (mddev->layout != ((1 << 8) + 2)) {
		pr_warn("md/raid0:%s: Raid0 cannot takeover layout: 0x%x\n",
			mdname(mddev),
			mddev->layout);
		return ERR_PTR(-EINVAL);
	}
	if (mddev->raid_disks & 1) {
		pr_warn("md/raid0:%s: Raid0 cannot takeover Raid10 with odd disk number.\n",
			mdname(mddev));
		return ERR_PTR(-EINVAL);
	}
	if (mddev->degraded != (mddev->raid_disks>>1)) {
		pr_warn("md/raid0:%s: All mirrors must be already degraded!\n",
			mdname(mddev));
		return ERR_PTR(-EINVAL);
	}

	/* Set new parameters */
	mddev->new_level = 0;
	mddev->new_layout = 0;
	mddev->new_chunk_sectors = mddev->chunk_sectors;
	mddev->delta_disks = -mddev->raid_disks / 2;
	mddev->raid_disks += mddev->delta_disks;
	mddev->degraded = 0;
	/* make sure it will not be marked as dirty */
	mddev->recovery_cp = MaxSector;
	mddev_clear_unsupported_flags(mddev, UNSUPPORTED_MDDEV_FLAGS);

	create_strip_zones(mddev, &priv_conf);
	return priv_conf;
}

static void *raid0_takeover_raid1(struct mddev *mddev)
{
	struct r0conf *priv_conf;
	int chunksect;

	/* Check layout:
	 *  - (N - 1) mirror drives must be already faulty
	 */
	if ((mddev->raid_disks - 1) != mddev->degraded) {
		pr_err("md/raid0:%s: (N - 1) mirror drives must be already faulty!\n",
		       mdname(mddev));
		return ERR_PTR(-EINVAL);
	}

	/*
	 * a raid1 doesn't have the notion of chunk size, so
	 * figure out the largest suitable size we can use.
	 */
	chunksect = 64 * 2; /* 64K by default */

	/* The array must be an exact multiple of chunksize */
	while (chunksect && (mddev->array_sectors & (chunksect - 1)))
		chunksect >>= 1;
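	/*
	 * e.g. (illustrative) an array of 1000200 sectors is not a multiple of
	 * 128, 64, 32 or 16 sectors but is a multiple of 8, so chunksect ends
	 * up as 8 (4KiB), which passes the PAGE_SIZE check below on
	 * 4KiB-page systems.
	 */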

	if ((chunksect << 9) < PAGE_SIZE)
		/* array size does not allow a suitable chunk size */
		return ERR_PTR(-EINVAL);

	/* Set new parameters */
	mddev->new_level = 0;
	mddev->new_layout = 0;
	mddev->new_chunk_sectors = chunksect;
	mddev->chunk_sectors = chunksect;
	mddev->delta_disks = 1 - mddev->raid_disks;
	mddev->raid_disks = 1;
	/* make sure it will not be marked as dirty */
	mddev->recovery_cp = MaxSector;
	mddev_clear_unsupported_flags(mddev, UNSUPPORTED_MDDEV_FLAGS);

	create_strip_zones(mddev, &priv_conf);
	return priv_conf;
}

static void *raid0_takeover(struct mddev *mddev)
{
	/* raid0 can take over:
	 *  raid4 - if all data disks are active.
	 *  raid5 - provided it uses the raid4 (parity-last) layout and one disk is faulty
	 *  raid10 - assuming we have all necessary active disks
	 *  raid1 - with (N - 1) mirror drives faulty
	 */

	if (mddev->bitmap) {
		pr_warn("md/raid0: %s: cannot takeover array with bitmap\n",
			mdname(mddev));
		return ERR_PTR(-EBUSY);
	}
	if (mddev->level == 4)
		return raid0_takeover_raid45(mddev);

	if (mddev->level == 5) {
		if (mddev->layout == ALGORITHM_PARITY_N)
			return raid0_takeover_raid45(mddev);

		pr_warn("md/raid0:%s: Raid can only takeover Raid5 with layout: %d\n",
			mdname(mddev), ALGORITHM_PARITY_N);
	}

	if (mddev->level == 10)
		return raid0_takeover_raid10(mddev);

	if (mddev->level == 1)
		return raid0_takeover_raid1(mddev);

	pr_warn("Takeover from raid%i to raid0 not supported\n",
		mddev->level);

	return ERR_PTR(-EINVAL);
}

static void raid0_quiesce(struct mddev *mddev, int quiesce)
{
}

static struct md_personality raid0_personality =
{
	.name		= "raid0",
	.level		= 0,
	.owner		= THIS_MODULE,
	.make_request	= raid0_make_request,
	.run		= raid0_run,
	.free		= raid0_free,
	.status		= raid0_status,
	.size		= raid0_size,
	.takeover	= raid0_takeover,
	.quiesce	= raid0_quiesce,
};

static int __init raid0_init(void)
{
	return register_md_personality(&raid0_personality);
}

static void raid0_exit(void)
{
	unregister_md_personality(&raid0_personality);
}

module_init(raid0_init);
module_exit(raid0_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("RAID0 (striping) personality for MD");
MODULE_ALIAS("md-personality-2"); /* RAID0 */
MODULE_ALIAS("md-raid0");
MODULE_ALIAS("md-level-0");