#ifndef _RAID5_H
#define _RAID5_H

#include <linux/raid/xor.h>
#include <linux/dmaengine.h>

/*
 *
 * Each stripe contains one buffer per device. Each buffer can be in
 * one of a number of states stored in "flags". Changes between
 * these states happen *almost* exclusively under the protection of the
 * STRIPE_ACTIVE flag. Some very specific changes can happen in bi_end_io, and
 * these are not protected by STRIPE_ACTIVE.
 *
 * The flag bits that are used to represent these states are:
 *   R5_UPTODATE and R5_LOCKED
 *
 * State Empty == !UPTODATE, !LOCK
 *        We have no data, and there is no active request
 * State Want == !UPTODATE, LOCK
 *        A read request is being submitted for this block
 * State Dirty == UPTODATE, LOCK
 *        Some new data is in this buffer, and it is being written out
 * State Clean == UPTODATE, !LOCK
 *        We have valid data which is the same as on disc
 *
 * The possible state transitions are:
 *
 *  Empty -> Want  - on read or write to get old data for parity calc
 *  Empty -> Dirty - on compute_parity to satisfy write/sync request.
 *  Empty -> Clean - on compute_block when computing a block for a failed drive
 *  Want  -> Empty - on failed read
 *  Want  -> Clean - on successful completion of read request
 *  Dirty -> Clean - on successful completion of write request
 *  Dirty -> Clean - on failed write
 *  Clean -> Dirty - on compute_parity to satisfy write/sync (RECONSTRUCT or RMW)
 *
 * The Want->Empty, Want->Clean and Dirty->Clean transitions
 * all happen in b_end_io at interrupt time.
 * Each sets the Uptodate bit before releasing the Lock bit.
 * This leaves one multi-stage transition:
 *    Want->Dirty->Clean
 * This is safe because thinking that a Clean buffer is actually dirty
 * will at worst delay some action, and the stripe will be scheduled
 * for attention after the transition is complete.
 *
 * There is one possibility that is not covered by these states. That
 * is if one drive has failed and there is a spare being rebuilt. We
 * can't distinguish between a clean block that has been generated
 * from parity calculations, and a clean block that has been
 * successfully written to the spare (or to parity when resyncing).
 * To distinguish these states we have a stripe bit STRIPE_INSYNC that
 * is set whenever a write is scheduled to the spare, or to the parity
 * disc if there is no spare. A sync request clears this bit, and
 * when we find it set with no buffers locked, we know the sync is
 * complete.
 *
 * Buffers for the md device that arrive via make_request are attached
 * to the appropriate stripe in one of two lists linked on b_reqnext.
 * One list (bh_read) is for read requests, one (bh_write) for writes.
 * There should never be more than one buffer on the two lists
 * together, but that is not guaranteed, so we allow for more.
 *
 * If a buffer is on the read list when the associated cache buffer is
 * Uptodate, the data is copied into the read buffer and its b_end_io
 * routine is called. This may happen in the end_request routine only
 * if the buffer has just successfully been read. end_request should
 * remove the buffers from the list and then set the Uptodate bit on
 * the buffer. Other threads may do this only if they first check
 * that the Uptodate bit is set. Once they have checked that, they may
 * take buffers off the read queue.
 *
 * When a buffer on the write list is committed for write it is copied
 * into the cache buffer, which is then marked dirty, and moved onto a
 * third list, the written list (bh_written). Once both the parity
 * block and the cached buffer are successfully written, any buffer on
 * a written list can be returned with b_end_io.
 *
 * The write list and read list both act as fifos. The read list,
 * write list and written list are protected by the device_lock.
 * The device_lock is only for list manipulations and will only be
 * held for a very short time. It can be claimed from interrupts.
 *
 *
 * Stripes in the stripe cache can be on one of two lists (or on
 * neither). The "inactive_list" contains stripes which are not
 * currently being used for any request. They can freely be reused
 * for another stripe. The "handle_list" contains stripes that need
 * to be handled in some way. Both of these are fifo queues. Each
 * stripe is also (potentially) linked to a hash bucket in the hash
 * table so that it can be found by sector number. Stripes that are
 * not hashed must be on the inactive_list, and will normally be at
 * the front. All stripes start life this way.
 *
 * The inactive_list, handle_list and hash bucket lists are all protected by the
 * device_lock.
 *  - stripes have a reference counter. If count==0, they are on a list.
 *  - If a stripe might need handling, STRIPE_HANDLE is set.
 *  - When the refcount reaches zero, then if STRIPE_HANDLE it is put on
 *    handle_list, else inactive_list.
 *
 * This, combined with the fact that STRIPE_HANDLE is only ever
 * cleared while a stripe has a non-zero count, means that if the
 * refcount is 0 and STRIPE_HANDLE is set, then the stripe is on the
 * handle_list, and if the refcount is 0 and STRIPE_HANDLE is not set,
 * then the stripe is on the inactive_list. (An illustrative sketch of
 * these release rules follows this comment block.)
 *
 * The possible transitions are:
 *  activate an unhashed/inactive stripe (get_active_stripe())
 *     lockdev check-hash unlink-stripe cnt++ clean-stripe hash-stripe unlockdev
 *  activate a hashed, possibly active stripe (get_active_stripe())
 *     lockdev check-hash if(!cnt++)unlink-stripe unlockdev
 *  attach a request to an active stripe (add_stripe_bh())
 *     lockdev attach-buffer unlockdev
 *  handle a stripe (handle_stripe())
 *     setSTRIPE_ACTIVE, clrSTRIPE_HANDLE ...
 *     (lockdev check-buffers unlockdev) ..
 *     change-state ..
 *     record io/ops needed clearSTRIPE_ACTIVE schedule io/ops
 *  release an active stripe (release_stripe())
 *     lockdev if (!--cnt) { if STRIPE_HANDLE, add to handle_list else add to inactive-list } unlockdev
 *
 * The refcount counts each thread that has activated the stripe,
 * plus raid5d if it is handling it, plus one for each active request
 * on a cached buffer, and plus one if the stripe is undergoing stripe
 * operations.
 *
 * The stripe operations are:
 *  - copying data between the stripe cache and user application buffers
 *  - computing blocks to save a disk access, or to recover a missing block
 *  - updating the parity on a write operation (reconstruct write and
 *    read-modify-write)
 *  - checking parity correctness
 *  - running i/o to disk
 * These operations are carried out by raid5_run_ops which uses the async_tx
 * API to (optionally) offload operations to dedicated hardware engines.
 * When requesting an operation, handle_stripe sets the pending bit for the
 * operation and increments the count. raid5_run_ops is then run whenever
 * the count is non-zero.
 * There are some critical dependencies between the operations that prevent some
 * from being requested while another is in flight.
 * 1/ Parity check operations destroy the in-cache version of the parity block,
 *    so we prevent parity-dependent operations like writes and compute_blocks
 *    from starting while a check is in progress. Some dma engines can perform
 *    the check without damaging the parity block; in these cases the parity
 *    block is re-marked up to date (assuming the check was successful) and is
 *    not re-read from disk.
 * 2/ When a write operation is requested we immediately lock the affected
 *    blocks, and mark them as not up to date. This causes new read requests
 *    to be held off, as well as parity checks and compute block operations.
 * 3/ Once a compute block operation has been requested, handle_stripe treats
 *    that block as if it is up to date. raid5_run_ops guarantees that any
 *    operation that is dependent on the compute block result is initiated after
 *    the compute block completes.
 */
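
/*
 * Illustrative sketch only (an assumption added for clarity, not part of the
 * driver): where a stripe is placed when it is released, following the
 * refcount/STRIPE_HANDLE rules described above. 'handle_wanted' stands in for
 * the STRIPE_HANDLE bit; release_stripe() itself works on struct stripe_head.
 */
static inline const char *example_release_destination(int refcount,
						      int handle_wanted)
{
	if (refcount != 0)
		return "still referenced, stays off both lists";
	/* refcount has dropped to zero: pick a list */
	return handle_wanted ? "handle_list" : "inactive_list";
}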

/*
 * Operations state - intermediate states that are visible outside of
 * STRIPE_ACTIVE.
 * In general _idle indicates nothing is running, _run indicates a data
 * processing operation is active, and _result means the data processing result
 * is stable and can be acted upon. For simple operations like biofill and
 * compute that only have an _idle and a _run state, they are indicated with
 * sh->state flags (STRIPE_BIOFILL_RUN and STRIPE_COMPUTE_RUN).
 * A sketch of a typical write's progression through reconstruct_states
 * follows the enum definitions below.
 */
/**
 * enum check_states - handles syncing / repairing a stripe
 * @check_state_idle - check operations are quiesced
 * @check_state_run - xor parity check operation is running
 * @check_state_run_q - q-parity check operation is running
 * @check_state_run_pq - pq dual parity check operation is running
 * @check_state_check_result - set outside lock when check result is valid
 * @check_state_compute_run - check failed and we are repairing
 * @check_state_compute_result - set outside lock when compute result is valid
 */
enum check_states {
	check_state_idle = 0,
	check_state_run,		/* xor parity check */
	check_state_run_q,		/* q-parity check */
	check_state_run_pq,		/* pq dual parity check */
	check_state_check_result,
	check_state_compute_run,	/* parity repair */
	check_state_compute_result,
};

/**
 * enum reconstruct_states - handles writing or expanding a stripe
 */
enum reconstruct_states {
	reconstruct_state_idle = 0,
	reconstruct_state_prexor_drain_run,	/* prexor-write */
	reconstruct_state_drain_run,		/* write */
	reconstruct_state_run,			/* expand */
	reconstruct_state_prexor_drain_result,
	reconstruct_state_drain_result,
	reconstruct_state_result,
};
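
/*
 * Illustrative sketch only (an assumption, not the driver's actual state
 * machine): the straight-line progression a plain (non-prexor) write takes
 * through reconstruct_states, per the _idle/_run/_result convention above.
 */
static inline enum reconstruct_states
example_plain_write_next_state(enum reconstruct_states cur)
{
	switch (cur) {
	case reconstruct_state_idle:		/* handle_stripe schedules the write */
		return reconstruct_state_drain_run;
	case reconstruct_state_drain_run:	/* raid5_run_ops finished drain + parity */
		return reconstruct_state_drain_result;
	default:				/* result consumed; back to idle */
		return reconstruct_state_idle;
	}
}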

struct stripe_head {
	struct hlist_node	hash;
	struct list_head	lru;		/* inactive_list or handle_list */
	struct llist_node	release_list;
	struct r5conf		*raid_conf;
	short			generation;	/* increments with every
						 * reshape */
	sector_t		sector;		/* sector of this row */
	short			pd_idx;		/* parity disk index */
	short			qd_idx;		/* 'Q' disk index for raid6 */
	short			ddf_layout;	/* use DDF ordering to calculate Q */
	short			hash_lock_index;
	unsigned long		state;		/* state flags */
	atomic_t		count;		/* nr of active thread/requests */
	int			bm_seq;		/* sequence number for bitmap flushes */
	int			disks;		/* disks in stripe */
	int			overwrite_disks; /* total overwrite disks in stripe,
						  * this is only checked when stripe
						  * has STRIPE_BATCH_READY
						  */
	enum check_states	check_state;
	enum reconstruct_states	reconstruct_state;
	spinlock_t		stripe_lock;
	int			cpu;
	struct r5worker_group	*group;

	struct stripe_head	*batch_head;	/* protected by stripe lock */
	spinlock_t		batch_lock;	/* only header's lock is useful */
	struct list_head	batch_list;	/* protected by head's batch lock */

	struct r5l_io_unit	*log_io;
	struct list_head	log_list;
	/**
	 * struct stripe_operations
	 * @target - STRIPE_OP_COMPUTE_BLK target
	 * @target2 - 2nd compute target in the raid6 case
	 * @zero_sum_result - P and Q verification flags
	 */
	struct stripe_operations {
		int		     target, target2;
		enum sum_check_flags zero_sum_result;
	} ops;
	struct r5dev {
		/* rreq and rvec are used for the replacement device when
		 * writing data to both devices.
		 */
		struct bio	req, rreq;
		struct bio_vec	vec, rvec;
		struct page	*page, *orig_page;
		struct bio	*toread, *read, *towrite, *written;
		sector_t	sector;		/* sector of this page */
		unsigned long	flags;
		u32		log_checksum;
	} dev[1]; /* allocated with extra space depending on RAID geometry
		   * (see the size sketch following this struct) */
};
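
/*
 * Illustrative sketch only (a hypothetical helper, not the driver's
 * allocator): how much memory a stripe_head needs for 'disks' devices,
 * given that the trailing dev[1] array above is intentionally
 * over-allocated to match the RAID geometry.
 */
static inline size_t example_stripe_head_alloc_size(int disks)
{
	/* one r5dev is already included in sizeof(struct stripe_head) */
	return sizeof(struct stripe_head) +
	       (disks - 1) * sizeof(struct r5dev);
}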

/* stripe_head_state - collects and tracks the dynamic state of a stripe_head
 * for handle_stripe.
 */
struct stripe_head_state {
	/* 'syncing' means that we need to read all devices, either
	 * to check/correct parity, or to reconstruct a missing device.
	 * 'replacing' means we are replacing one or more drives and
	 * the source is valid at this point so we don't need to
	 * read all devices, just the replacement targets.
	 */
	int syncing, expanding, expanded, replacing;
	int locked, uptodate, to_read, to_write, failed, written;
	int to_fill, compute, req_compute, non_overwrite;
	int failed_num[2];
	int p_failed, q_failed;
	int dec_preread_active;
	unsigned long ops_request;

	struct bio_list return_bi;
	struct md_rdev *blocked_rdev;
	int handle_bad_blocks;
	int log_failed;
};

/* Flags for struct r5dev.flags */
enum r5dev_flags {
	R5_UPTODATE,	/* page contains current data */
	R5_LOCKED,	/* IO has been submitted on "req" */
	R5_DOUBLE_LOCKED,/* Cannot clear R5_LOCKED until 2 writes complete */
	R5_OVERWRITE,	/* towrite covers whole page */
/* and some that are internal to handle_stripe */
	R5_Insync,	/* rdev && rdev->in_sync at start */
	R5_Wantread,	/* want to schedule a read */
	R5_Wantwrite,
	R5_Overlap,	/* There is a pending overlapping request
			 * on this block */
	R5_ReadNoMerge,	/* prevent bio from merging in block-layer */
	R5_ReadError,	/* seen a read error here recently */
	R5_ReWrite,	/* have tried to over-write the readerror */

	R5_Expanded,	/* This block now has post-expand data */
	R5_Wantcompute,	/* compute_block in progress treat as
			 * uptodate
			 */
	R5_Wantfill,	/* dev->toread contains a bio that needs
			 * filling
			 */
	R5_Wantdrain,	/* dev->towrite needs to be drained */
	R5_WantFUA,	/* Write should be FUA */
	R5_SyncIO,	/* The IO is sync */
	R5_WriteError,	/* got a write error - need to record it */
	R5_MadeGood,	/* A bad block has been fixed by writing to it */
	R5_ReadRepl,	/* Will/did read from replacement rather than orig */
	R5_MadeGoodRepl,/* A bad block on the replacement device has been
			 * fixed by writing to it */
	R5_NeedReplace,	/* This device has a replacement which is not
			 * up-to-date at this stripe. */
	R5_WantReplace,	/* We need to update the replacement, we have read
			 * data in, and now is a good time to write it out.
			 */
	R5_Discard,	/* Discard the stripe */
	R5_SkipCopy,	/* Don't copy data from bio to stripe cache */
};
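
/*
 * Illustrative sketch only (a hypothetical helper, not part of the driver):
 * how the R5_UPTODATE/R5_LOCKED bits map onto the Empty/Want/Dirty/Clean
 * buffer states described at the top of this file.
 */
static inline const char *example_r5dev_state_name(unsigned long flags)
{
	int uptodate = !!(flags & (1UL << R5_UPTODATE));
	int locked   = !!(flags & (1UL << R5_LOCKED));

	if (!uptodate)
		return locked ? "Want" : "Empty";
	return locked ? "Dirty" : "Clean";
}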

/*
 * Stripe state
 */
enum {
	STRIPE_ACTIVE,
	STRIPE_HANDLE,
	STRIPE_SYNC_REQUESTED,
	STRIPE_SYNCING,
	STRIPE_INSYNC,
	STRIPE_REPLACED,
	STRIPE_PREREAD_ACTIVE,
	STRIPE_DELAYED,
	STRIPE_DEGRADED,
	STRIPE_BIT_DELAY,
	STRIPE_EXPANDING,
	STRIPE_EXPAND_SOURCE,
	STRIPE_EXPAND_READY,
	STRIPE_IO_STARTED,	/* do not count towards 'bypass_count' */
	STRIPE_FULL_WRITE,	/* all blocks are set to be overwritten */
	STRIPE_BIOFILL_RUN,
	STRIPE_COMPUTE_RUN,
	STRIPE_OPS_REQ_PENDING,
	STRIPE_ON_UNPLUG_LIST,
	STRIPE_DISCARD,
	STRIPE_ON_RELEASE_LIST,
	STRIPE_BATCH_READY,
	STRIPE_BATCH_ERR,
	STRIPE_BITMAP_PENDING,	/* Being added to bitmap, don't add
				 * to batch yet.
				 */
	STRIPE_LOG_TRAPPED,	/* trapped into log */
};

#define STRIPE_EXPAND_SYNC_FLAGS \
	((1 << STRIPE_EXPAND_SOURCE) |\
	(1 << STRIPE_EXPAND_READY) |\
	(1 << STRIPE_EXPANDING) |\
	(1 << STRIPE_SYNC_REQUESTED))
/*
 * Operation request flags
 */
enum {
	STRIPE_OP_BIOFILL,
	STRIPE_OP_COMPUTE_BLK,
	STRIPE_OP_PREXOR,
	STRIPE_OP_BIODRAIN,
	STRIPE_OP_RECONSTRUCT,
	STRIPE_OP_CHECK,
};

/*
 * RAID parity calculation preferences
 */
enum {
	PARITY_DISABLE_RMW = 0,
	PARITY_ENABLE_RMW,
	PARITY_PREFER_RMW,
};

/*
 * Pages requested from set_syndrome_sources()
 */
enum {
	SYNDROME_SRC_ALL,
	SYNDROME_SRC_WANT_DRAIN,
	SYNDROME_SRC_WRITTEN,
};
/*
 * Plugging:
 *
 * To improve write throughput, we need to delay the handling of some
 * stripes until there has been a chance that several write requests
 * for the one stripe have all been collected.
 * In particular, any write request that would require pre-reading
 * is put on a "delayed" queue until there are no stripes currently
 * in a pre-read phase. Further, if the "delayed" queue is empty when
 * a stripe is put on it then we "plug" the queue and do not process it
 * until an unplug call is made (i.e. unplug_io_fn() is called).
 *
 * When preread is initiated on a stripe, we set PREREAD_ACTIVE and add
 * it to the count of prereading stripes.
 * When write is initiated, or the stripe refcnt == 0 (just in case), we
 * clear the PREREAD_ACTIVE flag and decrement the count.
 * Whenever the 'handle' queue is empty and the device is not plugged, we
 * move any stripes from delayed to handle, clear the DELAYED flag and set
 * PREREAD_ACTIVE.
 * In stripe_handle, if we find pre-reading is necessary, we do it if
 * PREREAD_ACTIVE is set, else we set DELAYED, which sends the stripe to
 * the delayed queue.
 * HANDLE gets cleared if stripe_handle leaves nothing locked.
 * (An illustrative sketch of this pre-read decision follows this comment.)
 */
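
/*
 * Illustrative sketch only (an assumption, not handle_stripe() itself): the
 * pre-read decision described above, acting directly on the stripe state
 * bits defined earlier in this file.
 */
static inline void example_preread_decision(unsigned long *state)
{
	if (*state & (1UL << STRIPE_PREREAD_ACTIVE)) {
		/* pre-reads for this stripe may be issued right away */
		return;
	}
	/* otherwise park the stripe on the delayed queue */
	*state |= (1UL << STRIPE_DELAYED);
}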

struct disk_info {
	struct md_rdev	*rdev, *replacement;
};

/* NOTE NR_STRIPE_HASH_LOCKS must remain below 64.
 * This is because we sometimes take all the spinlocks
 * and creating that much locking depth can cause
 * problems.
 */
#define NR_STRIPE_HASH_LOCKS 8
#define STRIPE_HASH_LOCKS_MASK (NR_STRIPE_HASH_LOCKS - 1)
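
/*
 * Illustrative sketch only (an assumption; the driver's real hashing of a
 * stripe's sector may differ): picking one of the NR_STRIPE_HASH_LOCKS
 * hash locks by masking the low bits of a value derived from the sector.
 */
static inline int example_stripe_hash_lock_index(sector_t sect)
{
	return (int)(sect & STRIPE_HASH_LOCKS_MASK);
}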

struct r5worker {
	struct work_struct	work;
	struct r5worker_group	*group;
	struct list_head	temp_inactive_list[NR_STRIPE_HASH_LOCKS];
	bool			working;
};

struct r5worker_group {
	struct list_head	handle_list;
	struct r5conf		*conf;
	struct r5worker		*workers;
	int			stripes_cnt;
};

struct r5conf {
	struct hlist_head	*stripe_hashtbl;
	/* only protects the corresponding hash list and inactive_list */
	spinlock_t		hash_locks[NR_STRIPE_HASH_LOCKS];
	struct mddev		*mddev;
	int			chunk_sectors;
	int			level, algorithm, rmw_level;
	int			max_degraded;
	int			raid_disks;
	int			max_nr_stripes;
	int			min_nr_stripes;

	/* reshape_progress is the leading edge of a 'reshape'.
	 * It has value MaxSector when no reshape is happening.
	 * If delta_disks < 0, it is the last sector we started work on,
	 * else it is the next sector to work on.
	 */
	sector_t		reshape_progress;
	/* reshape_safe is the trailing edge of a reshape. We know that
	 * before (or after) this address, all reshape has completed.
	 */
	sector_t		reshape_safe;
	int			previous_raid_disks;
	int			prev_chunk_sectors;
	int			prev_algo;
	short			generation;	/* increments with every reshape */
	seqcount_t		gen_lock;	/* lock against generation changes */
	unsigned long		reshape_checkpoint; /* Time we last updated
						     * metadata */
	long long		min_offset_diff; /* minimum difference between
						  * data_offset and
						  * new_data_offset across all
						  * devices. May be negative,
						  * but is closest to zero.
						  */

	struct list_head	handle_list;	/* stripes needing handling */
	struct list_head	hold_list;	/* preread ready stripes */
	struct list_head	delayed_list;	/* stripes that have plugged requests */
	struct list_head	bitmap_list;	/* stripes delayed awaiting a bitmap update */
	struct bio		*retry_read_aligned; /* currently retrying aligned bios */
	struct bio		*retry_read_aligned_list; /* aligned bios retry list */
	atomic_t		preread_active_stripes; /* stripes with scheduled io */
	atomic_t		active_aligned_reads;
	atomic_t		pending_full_writes; /* full write backlog */
	int			bypass_count;	/* bypassed prereads */
	int			bypass_threshold; /* preread nice */
	int			skip_copy;	/* Don't copy data from bio to stripe cache */
	struct list_head	*last_hold;	/* detect hold_list promotions */

	/* bios to have bi_end_io called after metadata is synced */
	struct bio_list		return_bi;

	atomic_t		reshape_stripes; /* stripes with pending writes for reshape */
	/* unfortunately we need two cache names as we temporarily have
	 * two caches.
	 */
	int			active_name;
	char			cache_name[2][32];
	struct kmem_cache	*slab_cache; /* for allocating stripes */
	struct mutex		cache_size_mutex; /* Protect changes to cache size */

	int			seq_flush, seq_write;
	int			quiesce;

	int			fullsync;	/* set to 1 if a full sync is needed,
						 * (fresh device added).
						 * Cleared when a sync completes.
						 */
	int			recovery_disabled;
	/* per cpu variables */
	struct raid5_percpu {
		struct page	*spare_page;	/* Used when checking P/Q in raid6 */
		struct flex_array *scribble;	/* space for constructing buffer
						 * lists and performing address
						 * conversions
						 */
	} __percpu *percpu;
	int			scribble_disks;
	int			scribble_sectors;
	struct hlist_node	node;

	/*
	 * Free stripes pool
	 */
	atomic_t		active_stripes;
	struct list_head	inactive_list[NR_STRIPE_HASH_LOCKS];
	atomic_t		empty_inactive_list_nr;
	struct llist_head	released_stripes;
	wait_queue_head_t	wait_for_quiescent;
	wait_queue_head_t	wait_for_stripe;
	wait_queue_head_t	wait_for_overlap;
	unsigned long		cache_state;
#define R5_INACTIVE_BLOCKED	1	/* release of inactive stripes blocked,
					 * waiting for 25% to be free
					 */
#define R5_ALLOC_MORE		2	/* It might help to allocate another
					 * stripe.
					 */
#define R5_DID_ALLOC		4	/* A stripe was allocated, don't allocate
					 * more until at least one has been
					 * released. This avoids flooding
					 * the cache.
					 */
	struct shrinker		shrinker;
	int			pool_size; /* number of disks in stripeheads in pool */
	spinlock_t		device_lock;
	struct disk_info	*disks;

	/* When taking over an array from a different personality, we store
	 * the new thread here until we fully activate the array.
	 */
	struct md_thread	*thread;
	struct list_head	temp_inactive_list[NR_STRIPE_HASH_LOCKS];
	struct r5worker_group	*worker_groups;
	int			group_cnt;
	int			worker_cnt_per_group;
	struct r5l_log		*log;
};

/*
 * Our supported algorithms
 */
#define ALGORITHM_LEFT_ASYMMETRIC	0 /* Rotating Parity N with Data Restart */
#define ALGORITHM_RIGHT_ASYMMETRIC	1 /* Rotating Parity 0 with Data Restart */
#define ALGORITHM_LEFT_SYMMETRIC	2 /* Rotating Parity N with Data Continuation */
#define ALGORITHM_RIGHT_SYMMETRIC	3 /* Rotating Parity 0 with Data Continuation */

/* Define non-rotating (raid4) algorithms. These allow
 * conversion of raid4 to raid5.
 */
#define ALGORITHM_PARITY_0		4 /* P or P,Q are initial devices */
#define ALGORITHM_PARITY_N		5 /* P or P,Q are final devices. */

/* DDF RAID6 layouts differ from md/raid6 layouts in two ways.
 * Firstly, the exact positioning of the parity block is slightly
 * different between the 'LEFT_*' modes of md and the "_N_*" modes
 * of DDF.
 * Secondly, the order of data blocks over which the Q syndrome is computed
 * is different.
 * Consequently we have different layouts for DDF/raid6 than md/raid6.
 * These layouts are from the DDFv1.2 spec.
 * Interestingly, DDFv1.2-Errata-A does not specify N_CONTINUE but
 * leaves RLQ=3 as 'Vendor Specific'.
 */

#define ALGORITHM_ROTATING_ZERO_RESTART	8  /* DDF PRL=6 RLQ=1 */
#define ALGORITHM_ROTATING_N_RESTART	9  /* DDF PRL=6 RLQ=2 */
#define ALGORITHM_ROTATING_N_CONTINUE	10 /* DDF PRL=6 RLQ=3 */

/* For every RAID5 algorithm we define a RAID6 algorithm
 * with exactly the same layout for data and parity, and
 * with the Q block always on the last device (N-1).
 * This allows trivial conversion from RAID5 to RAID6
 * (see the illustrative mapping below).
 */
#define ALGORITHM_LEFT_ASYMMETRIC_6	16
#define ALGORITHM_RIGHT_ASYMMETRIC_6	17
#define ALGORITHM_LEFT_SYMMETRIC_6	18
#define ALGORITHM_RIGHT_SYMMETRIC_6	19
#define ALGORITHM_PARITY_0_6		20
#define ALGORITHM_PARITY_N_6		ALGORITHM_PARITY_N
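
/*
 * Illustrative sketch only (a hypothetical helper, not part of the driver):
 * the RAID6 layout that mirrors a given RAID5 layout under the scheme above,
 * i.e. the same data/parity placement with Q forced onto the last device.
 */
static inline int example_raid5_layout_to_raid6(int layout5)
{
	if (layout5 == ALGORITHM_PARITY_N)
		return ALGORITHM_PARITY_N_6;	/* same numeric value */
	return layout5 + 16;	/* e.g. ALGORITHM_LEFT_SYMMETRIC -> ALGORITHM_LEFT_SYMMETRIC_6 */
}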

static inline int algorithm_valid_raid5(int layout)
{
	return (layout >= 0) &&
		(layout <= 5);
}
static inline int algorithm_valid_raid6(int layout)
{
	return (layout >= 0 && layout <= 5)
		||
		(layout >= 8 && layout <= 10)
		||
		(layout >= 16 && layout <= 20);
}

static inline int algorithm_is_DDF(int layout)
{
	return layout >= 8 && layout <= 10;
}

extern void md_raid5_kick_device(struct r5conf *conf);
extern int raid5_set_cache_size(struct mddev *mddev, int size);
extern sector_t raid5_compute_blocknr(struct stripe_head *sh, int i, int previous);
extern void raid5_release_stripe(struct stripe_head *sh);
extern sector_t raid5_compute_sector(struct r5conf *conf, sector_t r_sector,
				     int previous, int *dd_idx,
				     struct stripe_head *sh);
extern struct stripe_head *
raid5_get_active_stripe(struct r5conf *conf, sector_t sector,
			int previous, int noblock, int noquiesce);
extern int r5l_init_log(struct r5conf *conf, struct md_rdev *rdev);
extern void r5l_exit_log(struct r5l_log *log);
extern int r5l_write_stripe(struct r5l_log *log, struct stripe_head *head_sh);
extern void r5l_write_stripe_run(struct r5l_log *log);
extern void r5l_flush_stripe_to_raid(struct r5l_log *log);
extern void r5l_stripe_write_finished(struct stripe_head *sh);
extern int r5l_handle_flush_request(struct r5l_log *log, struct bio *bio);
extern void r5l_quiesce(struct r5l_log *log, int state);
extern bool r5l_log_disk_error(struct r5conf *conf);
#endif