| /kernel/linux/linux-5.10/crypto/async_tx/ |
| D | async_pq.c | 22 * blocks[disks-2] and the 'Q' destination address at blocks[disks-1] 36 const unsigned char *scfs, int disks, in do_async_gen_syndrome() argument 46 int src_cnt = disks - 2; in do_async_gen_syndrome() 76 dma_dest[0] = unmap->addr[disks - 2]; in do_async_gen_syndrome() 77 dma_dest[1] = unmap->addr[disks - 1]; in do_async_gen_syndrome() 107 do_sync_gen_syndrome(struct page **blocks, unsigned int *offsets, int disks, in do_sync_gen_syndrome() argument 112 int start = -1, stop = disks - 3; in do_sync_gen_syndrome() 119 for (i = 0; i < disks; i++) { in do_sync_gen_syndrome() 121 BUG_ON(i > disks - 3); /* P or Q can't be zero */ in do_sync_gen_syndrome() 126 if (i < disks - 2) { in do_sync_gen_syndrome() [all …]
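The async_pq.c hits all revolve around one layout convention: data blocks occupy blocks[0..disks-3], the 'P' (XOR parity) destination sits at blocks[disks-2], the 'Q' (Reed-Solomon syndrome) destination at blocks[disks-1], hence src_cnt = disks - 2. Below is a minimal, portable sketch of that convention only — not the kernel's table-driven or offloaded code; gen_syndrome_ref and gf_mul2 are illustrative names, not kernel symbols.

```c
#include <stddef.h>
#include <stdint.h>

/* GF(2^8) multiply-by-2 with the RAID6 polynomial 0x11d. */
static uint8_t gf_mul2(uint8_t v)
{
        return (uint8_t)((v << 1) ^ ((v & 0x80) ? 0x1d : 0x00));
}

/*
 * Reference P/Q generation over the layout used above:
 * data in blocks[0..disks-3], P at blocks[disks-2], Q at blocks[disks-1].
 */
static void gen_syndrome_ref(int disks, size_t bytes, uint8_t **blocks)
{
        uint8_t *p = blocks[disks - 2];
        uint8_t *q = blocks[disks - 1];

        for (size_t i = 0; i < bytes; i++) {
                uint8_t wp = 0, wq = 0;

                /* Horner's rule: highest data disk first, so disk d ends
                 * up weighted by g^d in Q. */
                for (int d = disks - 3; d >= 0; d--) {
                        wp ^= blocks[d][i];
                        wq = gf_mul2(wq) ^ blocks[d][i];
                }
                p[i] = wp;
                q[i] = wq;
        }
}
```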
|
| D | raid6test.c | 35 static void makedata(int disks) in makedata() argument 39 for (i = 0; i < disks; i++) { in makedata() 46 static char disk_type(int d, int disks) in disk_type() argument 48 if (d == disks - 2) in disk_type() 50 else if (d == disks - 1) in disk_type() 57 static void raid6_dual_recov(int disks, size_t bytes, int faila, int failb, in raid6_dual_recov() argument 68 if (failb == disks-1) { in raid6_dual_recov() 69 if (faila == disks-2) { in raid6_dual_recov() 73 disks, bytes, &submit); in raid6_dual_recov() 80 BUG_ON(disks > NDISKS); in raid6_dual_recov() [all …]
|
| D | async_raid6_recov.c | 153 __2data_recov_4(int disks, size_t bytes, int faila, int failb, in __2data_recov_4() argument 168 p = blocks[disks-2]; in __2data_recov_4() 169 p_off = offs[disks-2]; in __2data_recov_4() 170 q = blocks[disks-1]; in __2data_recov_4() 171 q_off = offs[disks-1]; in __2data_recov_4() 203 __2data_recov_5(int disks, size_t bytes, int faila, int failb, in __2data_recov_5() argument 221 for (i = 0; i < disks-2; i++) { in __2data_recov_5() 231 p = blocks[disks-2]; in __2data_recov_5() 232 p_off = offs[disks-2]; in __2data_recov_5() 233 q = blocks[disks-1]; in __2data_recov_5() [all …]
|
| /kernel/linux/linux-6.6/crypto/async_tx/ |
| D | async_pq.c | 22 * blocks[disks-2] and the 'Q' destination address at blocks[disks-1] 36 const unsigned char *scfs, int disks, in do_async_gen_syndrome() argument 46 int src_cnt = disks - 2; in do_async_gen_syndrome() 76 dma_dest[0] = unmap->addr[disks - 2]; in do_async_gen_syndrome() 77 dma_dest[1] = unmap->addr[disks - 1]; in do_async_gen_syndrome() 107 do_sync_gen_syndrome(struct page **blocks, unsigned int *offsets, int disks, in do_sync_gen_syndrome() argument 112 int start = -1, stop = disks - 3; in do_sync_gen_syndrome() 119 for (i = 0; i < disks; i++) { in do_sync_gen_syndrome() 121 BUG_ON(i > disks - 3); /* P or Q can't be zero */ in do_sync_gen_syndrome() 126 if (i < disks - 2) { in do_sync_gen_syndrome() [all …]
|
| D | raid6test.c | 35 static void makedata(int disks) in makedata() argument 39 for (i = 0; i < disks; i++) { in makedata() 46 static char disk_type(int d, int disks) in disk_type() argument 48 if (d == disks - 2) in disk_type() 50 else if (d == disks - 1) in disk_type() 57 static void raid6_dual_recov(int disks, size_t bytes, int faila, int failb, in raid6_dual_recov() argument 68 if (failb == disks-1) { in raid6_dual_recov() 69 if (faila == disks-2) { in raid6_dual_recov() 73 disks, bytes, &submit); in raid6_dual_recov() 80 BUG_ON(disks > NDISKS); in raid6_dual_recov() [all …]
|
| D | async_raid6_recov.c | 153 __2data_recov_4(int disks, size_t bytes, int faila, int failb, in __2data_recov_4() argument 168 p = blocks[disks-2]; in __2data_recov_4() 169 p_off = offs[disks-2]; in __2data_recov_4() 170 q = blocks[disks-1]; in __2data_recov_4() 171 q_off = offs[disks-1]; in __2data_recov_4() 203 __2data_recov_5(int disks, size_t bytes, int faila, int failb, in __2data_recov_5() argument 221 for (i = 0; i < disks-2; i++) { in __2data_recov_5() 231 p = blocks[disks-2]; in __2data_recov_5() 232 p_off = offs[disks-2]; in __2data_recov_5() 233 q = blocks[disks-1]; in __2data_recov_5() [all …]
|
| /kernel/linux/linux-5.10/lib/raid6/ |
| D | recov.c | 20 static void raid6_2data_recov_intx1(int disks, size_t bytes, int faila, in raid6_2data_recov_intx1() argument 28 p = (u8 *)ptrs[disks-2]; in raid6_2data_recov_intx1() 29 q = (u8 *)ptrs[disks-1]; in raid6_2data_recov_intx1() 36 ptrs[disks-2] = dp; in raid6_2data_recov_intx1() 39 ptrs[disks-1] = dq; in raid6_2data_recov_intx1() 41 raid6_call.gen_syndrome(disks, bytes, ptrs); in raid6_2data_recov_intx1() 46 ptrs[disks-2] = p; in raid6_2data_recov_intx1() 47 ptrs[disks-1] = q; in raid6_2data_recov_intx1() 64 static void raid6_datap_recov_intx1(int disks, size_t bytes, int faila, in raid6_datap_recov_intx1() argument 70 p = (u8 *)ptrs[disks-2]; in raid6_datap_recov_intx1() [all …]
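The recovery hits here (and in async_raid6_recov.c, recov_neon.c, recov_s390xc.c, recov_loongarch_simd.c) all rely on the same pointer-swap trick: point the two failed data slots at a zero page, point the P/Q slots at scratch buffers, rerun gen_syndrome() so the scratch buffers hold Pxy/Qxy (the syndromes with the failed data treated as zero), restore the pointer table, then solve a 2x2 system in GF(2^8). The sketch below spells out that algebra with naive field helpers instead of the kernel's precomputed raid6_gfmul/raid6_gfexi/raid6_gfinv tables; recov_2data_ref and the helpers are illustrative names, and the brute-force gf_inv() is for clarity only.

```c
#include <stddef.h>
#include <stdint.h>

/* Carry-less "Russian peasant" multiply in GF(2^8), polynomial 0x11d. */
static uint8_t gf_mul(uint8_t a, uint8_t b)
{
        uint8_t r = 0;

        while (b) {
                if (b & 1)
                        r ^= a;
                a = (uint8_t)((a << 1) ^ ((a & 0x80) ? 0x1d : 0x00));
                b >>= 1;
        }
        return r;
}

static uint8_t gf_pow2(unsigned int e)          /* g^e with g = 0x02 */
{
        uint8_t r = 1;

        while (e--)
                r = gf_mul(r, 2);
        return r;
}

static uint8_t gf_inv(uint8_t a)                /* brute force, a != 0 */
{
        for (unsigned int x = 1; x < 256; x++)
                if (gf_mul(a, (uint8_t)x) == 1)
                        return (uint8_t)x;
        return 0;
}

/*
 * Recover data disks faila < failb (both < disks-2) from P, Q and the
 * "zeroed" syndromes pxy/qxy produced by the pointer swap above:
 *   p ^ pxy = Da ^ Db
 *   q ^ qxy = g^a*Da ^ g^b*Db
 */
static void recov_2data_ref(size_t bytes, int faila, int failb,
                            const uint8_t *p, const uint8_t *q,
                            const uint8_t *pxy, const uint8_t *qxy,
                            uint8_t *da, uint8_t *db)
{
        uint8_t denom = gf_inv(gf_pow2(faila) ^ gf_pow2(failb));
        uint8_t coefb = gf_inv(gf_pow2(failb - faila) ^ 1);

        for (size_t i = 0; i < bytes; i++) {
                uint8_t pd = p[i] ^ pxy[i];     /* Da ^ Db            */
                uint8_t qd = q[i] ^ qxy[i];     /* g^a*Da ^ g^b*Db    */

                db[i] = gf_mul(coefb, pd) ^ gf_mul(denom, qd);
                da[i] = db[i] ^ pd;
        }
}
```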
|
| D | recov_neon.c | 29 static void raid6_2data_recov_neon(int disks, size_t bytes, int faila, in raid6_2data_recov_neon() argument 36 p = (u8 *)ptrs[disks - 2]; in raid6_2data_recov_neon() 37 q = (u8 *)ptrs[disks - 1]; in raid6_2data_recov_neon() 46 ptrs[disks - 2] = dp; in raid6_2data_recov_neon() 49 ptrs[disks - 1] = dq; in raid6_2data_recov_neon() 51 raid6_call.gen_syndrome(disks, bytes, ptrs); in raid6_2data_recov_neon() 56 ptrs[disks - 2] = p; in raid6_2data_recov_neon() 57 ptrs[disks - 1] = q; in raid6_2data_recov_neon() 69 static void raid6_datap_recov_neon(int disks, size_t bytes, int faila, in raid6_datap_recov_neon() argument 75 p = (u8 *)ptrs[disks - 2]; in raid6_datap_recov_neon() [all …]
|
| D | recov_s390xc.c | 23 static void raid6_2data_recov_s390xc(int disks, size_t bytes, int faila, in raid6_2data_recov_s390xc() argument 31 p = (u8 *)ptrs[disks-2]; in raid6_2data_recov_s390xc() 32 q = (u8 *)ptrs[disks-1]; in raid6_2data_recov_s390xc() 39 ptrs[disks-2] = dp; in raid6_2data_recov_s390xc() 42 ptrs[disks-1] = dq; in raid6_2data_recov_s390xc() 44 raid6_call.gen_syndrome(disks, bytes, ptrs); in raid6_2data_recov_s390xc() 49 ptrs[disks-2] = p; in raid6_2data_recov_s390xc() 50 ptrs[disks-1] = q; in raid6_2data_recov_s390xc() 72 static void raid6_datap_recov_s390xc(int disks, size_t bytes, int faila, in raid6_datap_recov_s390xc() argument 79 p = (u8 *)ptrs[disks-2]; in raid6_datap_recov_s390xc() [all …]
|
| D | algos.c | 162 void *(*const dptrs)[RAID6_TEST_DISKS], const int disks) in raid6_choose_gen() argument 165 int start = (disks>>1)-1, stop = disks-3; /* work on the second half of the disks */ in raid6_choose_gen() 187 (*algo)->gen_syndrome(disks, PAGE_SIZE, *dptrs); in raid6_choose_gen() 197 (perf * HZ * (disks-2)) >> in raid6_choose_gen() 211 (*algo)->xor_syndrome(disks, start, stop, in raid6_choose_gen() 221 (perf * HZ * (disks-2)) >> in raid6_choose_gen() 230 (bestgenperf * HZ * (disks-2)) >> in raid6_choose_gen() 234 (bestxorperf * HZ * (disks-2)) >> in raid6_choose_gen() 252 const int disks = RAID6_TEST_DISKS; in raid6_select_algo() local 268 for (i = 0; i < disks; i++) in raid6_select_algo() [all …]
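The `(perf * HZ * (disks-2)) >> …` expressions in raid6_choose_gen() turn an iteration count, measured over a (1 << RAID6_TIME_JIFFIES_LG2)-jiffy window, into MB/s: each iteration processes (disks - 2) data pages of PAGE_SIZE bytes. A hedged restatement of that arithmetic with the shift spelled out — syndrome_mbps is an illustrative name and the parameters stand in for the kernel constants:

```c
/*
 * MB/s = bytes / seconds / 2^20, where
 *   bytes   = perf * (disks - 2) * 2^page_shift
 *   seconds = 2^time_jiffies_lg2 / hz
 * which collapses to the single shift used in algos.c.
 */
static unsigned long syndrome_mbps(unsigned long perf, int disks,
                                   unsigned int page_shift,
                                   unsigned int hz,
                                   unsigned int time_jiffies_lg2)
{
        return (perf * hz * (unsigned long)(disks - 2)) >>
               (20 - page_shift + time_jiffies_lg2);
}
```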
|
| D | recov_loongarch_simd.c | 29 static void raid6_2data_recov_lsx(int disks, size_t bytes, int faila, in raid6_2data_recov_lsx() argument 36 p = (u8 *)ptrs[disks - 2]; in raid6_2data_recov_lsx() 37 q = (u8 *)ptrs[disks - 1]; in raid6_2data_recov_lsx() 46 ptrs[disks - 2] = dp; in raid6_2data_recov_lsx() 49 ptrs[disks - 1] = dq; in raid6_2data_recov_lsx() 51 raid6_call.gen_syndrome(disks, bytes, ptrs); in raid6_2data_recov_lsx() 56 ptrs[disks - 2] = p; in raid6_2data_recov_lsx() 57 ptrs[disks - 1] = q; in raid6_2data_recov_lsx() 186 static void raid6_datap_recov_lsx(int disks, size_t bytes, int faila, in raid6_datap_recov_lsx() argument 192 p = (u8 *)ptrs[disks - 2]; in raid6_datap_recov_lsx() [all …]
|
| /kernel/linux/linux-6.6/lib/raid6/ |
| D | recov.c | 19 static void raid6_2data_recov_intx1(int disks, size_t bytes, int faila, in raid6_2data_recov_intx1() argument 27 p = (u8 *)ptrs[disks-2]; in raid6_2data_recov_intx1() 28 q = (u8 *)ptrs[disks-1]; in raid6_2data_recov_intx1() 35 ptrs[disks-2] = dp; in raid6_2data_recov_intx1() 38 ptrs[disks-1] = dq; in raid6_2data_recov_intx1() 40 raid6_call.gen_syndrome(disks, bytes, ptrs); in raid6_2data_recov_intx1() 45 ptrs[disks-2] = p; in raid6_2data_recov_intx1() 46 ptrs[disks-1] = q; in raid6_2data_recov_intx1() 63 static void raid6_datap_recov_intx1(int disks, size_t bytes, int faila, in raid6_datap_recov_intx1() argument 69 p = (u8 *)ptrs[disks-2]; in raid6_datap_recov_intx1() [all …]
|
| D | recov_neon.c | 23 static void raid6_2data_recov_neon(int disks, size_t bytes, int faila, in raid6_2data_recov_neon() argument 30 p = (u8 *)ptrs[disks - 2]; in raid6_2data_recov_neon() 31 q = (u8 *)ptrs[disks - 1]; in raid6_2data_recov_neon() 40 ptrs[disks - 2] = dp; in raid6_2data_recov_neon() 43 ptrs[disks - 1] = dq; in raid6_2data_recov_neon() 45 raid6_call.gen_syndrome(disks, bytes, ptrs); in raid6_2data_recov_neon() 50 ptrs[disks - 2] = p; in raid6_2data_recov_neon() 51 ptrs[disks - 1] = q; in raid6_2data_recov_neon() 63 static void raid6_datap_recov_neon(int disks, size_t bytes, int faila, in raid6_datap_recov_neon() argument 69 p = (u8 *)ptrs[disks - 2]; in raid6_datap_recov_neon() [all …]
|
| D | recov_s390xc.c | 23 static void raid6_2data_recov_s390xc(int disks, size_t bytes, int faila, in raid6_2data_recov_s390xc() argument 31 p = (u8 *)ptrs[disks-2]; in raid6_2data_recov_s390xc() 32 q = (u8 *)ptrs[disks-1]; in raid6_2data_recov_s390xc() 39 ptrs[disks-2] = dp; in raid6_2data_recov_s390xc() 42 ptrs[disks-1] = dq; in raid6_2data_recov_s390xc() 44 raid6_call.gen_syndrome(disks, bytes, ptrs); in raid6_2data_recov_s390xc() 49 ptrs[disks-2] = p; in raid6_2data_recov_s390xc() 50 ptrs[disks-1] = q; in raid6_2data_recov_s390xc() 72 static void raid6_datap_recov_s390xc(int disks, size_t bytes, int faila, in raid6_datap_recov_s390xc() argument 79 p = (u8 *)ptrs[disks-2]; in raid6_datap_recov_s390xc() [all …]
|
| D | neon.h | 3 void raid6_neon1_gen_syndrome_real(int disks, unsigned long bytes, void **ptrs); 4 void raid6_neon1_xor_syndrome_real(int disks, int start, int stop, 6 void raid6_neon2_gen_syndrome_real(int disks, unsigned long bytes, void **ptrs); 7 void raid6_neon2_xor_syndrome_real(int disks, int start, int stop, 9 void raid6_neon4_gen_syndrome_real(int disks, unsigned long bytes, void **ptrs); 10 void raid6_neon4_xor_syndrome_real(int disks, int start, int stop, 12 void raid6_neon8_gen_syndrome_real(int disks, unsigned long bytes, void **ptrs); 13 void raid6_neon8_xor_syndrome_real(int disks, int start, int stop,
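The *_xor_syndrome_real() prototypes take a (start, stop) data-disk range: they fold only that range's contribution into the existing P/Q blocks, which is what lets read-modify-write stripe updates avoid recomputing parity from every data disk. A portable sketch of those semantics under that reading — xor_syndrome_ref and gf_mul2 are illustrative names, not kernel symbols:

```c
#include <stddef.h>
#include <stdint.h>

/* GF(2^8) multiply-by-2 with the RAID6 polynomial 0x11d. */
static uint8_t gf_mul2(uint8_t v)
{
        return (uint8_t)((v << 1) ^ ((v & 0x80) ? 0x1d : 0x00));
}

/*
 * XOR the P/Q contribution of data disks [start, stop] into the existing
 * P (ptrs[disks-2]) and Q (ptrs[disks-1]) blocks.
 */
static void xor_syndrome_ref(int disks, int start, int stop, size_t bytes,
                             uint8_t **ptrs)
{
        uint8_t *p = ptrs[disks - 2];
        uint8_t *q = ptrs[disks - 1];

        for (size_t i = 0; i < bytes; i++) {
                uint8_t wp = 0, wq = 0;

                for (int z = stop; z >= start; z--) {
                        wp ^= ptrs[z][i];
                        wq = gf_mul2(wq) ^ ptrs[z][i];
                }
                /* Disks below 'start' contribute nothing, but their slots
                 * still weight Q: shift by g^start. */
                for (int z = start - 1; z >= 0; z--)
                        wq = gf_mul2(wq);

                p[i] ^= wp;
                q[i] ^= wq;
        }
}
```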
|
| D | algos.c | 160 void *(*const dptrs)[RAID6_TEST_DISKS], const int disks) in raid6_choose_gen() argument 163 int start = (disks>>1)-1, stop = disks-3; /* work on the second half of the disks */ in raid6_choose_gen() 185 (*algo)->gen_syndrome(disks, PAGE_SIZE, *dptrs); in raid6_choose_gen() 195 (perf * HZ * (disks-2)) >> in raid6_choose_gen() 215 (bestgenperf * HZ * (disks - 2)) >> in raid6_choose_gen() 227 best->xor_syndrome(disks, start, stop, in raid6_choose_gen() 234 (perf * HZ * (disks - 2)) >> in raid6_choose_gen() 248 const int disks = RAID6_TEST_DISKS; in raid6_select_algo() local 264 for (i = 0; i < disks; i++) in raid6_select_algo() 267 cycle = ((disks - 2) * PAGE_SIZE) / 65536; in raid6_select_algo() [all …]
|
| D | recov_loongarch_simd.c | 29 static void raid6_2data_recov_lsx(int disks, size_t bytes, int faila, in raid6_2data_recov_lsx() argument 36 p = (u8 *)ptrs[disks - 2]; in raid6_2data_recov_lsx() 37 q = (u8 *)ptrs[disks - 1]; in raid6_2data_recov_lsx() 46 ptrs[disks - 2] = dp; in raid6_2data_recov_lsx() 49 ptrs[disks - 1] = dq; in raid6_2data_recov_lsx() 51 raid6_call.gen_syndrome(disks, bytes, ptrs); in raid6_2data_recov_lsx() 56 ptrs[disks - 2] = p; in raid6_2data_recov_lsx() 57 ptrs[disks - 1] = q; in raid6_2data_recov_lsx() 186 static void raid6_datap_recov_lsx(int disks, size_t bytes, int faila, in raid6_datap_recov_lsx() argument 192 p = (u8 *)ptrs[disks - 2]; in raid6_datap_recov_lsx() [all …]
|
| /kernel/linux/linux-5.10/Documentation/driver-api/md/ |
| D | raid5-cache.rst | 6 disks. The role of RAID disks isn't changed with the cache disk. The cache disk 7 caches data to the RAID disks. The cache can be in write-through (supported 28 disks and it's possible the writes don't hit all RAID disks yet before the 35 is safe on the cache disk, the data will be flushed onto RAID disks. The 40 filesystems) after the data is safe on RAID disks, so cache disk failure 52 write. If a write crosses all RAID disks of a stripe, we call it full-stripe 57 RAID disks only after the data becomes a full stripe write. This will 63 disks later after specific conditions met. So cache disk failure will cause 90 order in which MD writes data to cache disk and RAID disks. Specifically, in 92 parity to the log, writes the data and parity to RAID disks after the data and [all …]
|
| D | raid5-ppl.rst | 7 may become inconsistent with data on other member disks. If the array is also 9 disks is missing. This can lead to silent data corruption when rebuilding the 19 which chunk writes have completed. If one of the not modified data disks of 22 unclean shutdown and all disks are available, eliminating the need to resync 27 parity are dispatched to disks. PPL is a distributed log - it is stored on 44 There is a limitation of maximum 64 disks in the array for PPL. It allows to 45 keep data structures and implementation simple. RAID5 arrays with so many disks 46 are not likely due to high risk of multiple disks failure. Such restriction
|
| /kernel/linux/linux-6.6/Documentation/driver-api/md/ |
| D | raid5-cache.rst | 6 disks. The role of RAID disks isn't changed with the cache disk. The cache disk 7 caches data to the RAID disks. The cache can be in write-through (supported 28 disks and it's possible the writes don't hit all RAID disks yet before the 35 is safe on the cache disk, the data will be flushed onto RAID disks. The 40 filesystems) after the data is safe on RAID disks, so cache disk failure 52 write. If a write crosses all RAID disks of a stripe, we call it full-stripe 57 RAID disks only after the data becomes a full stripe write. This will 63 disks later after specific conditions met. So cache disk failure will cause 90 order in which MD writes data to cache disk and RAID disks. Specifically, in 92 parity to the log, writes the data and parity to RAID disks after the data and [all …]
|
| D | raid5-ppl.rst | 7 may become inconsistent with data on other member disks. If the array is also 9 disks is missing. This can lead to silent data corruption when rebuilding the 19 which chunk writes have completed. If one of the not modified data disks of 22 unclean shutdown and all disks are available, eliminating the need to resync 27 parity are dispatched to disks. PPL is a distributed log - it is stored on 44 There is a limitation of maximum 64 disks in the array for PPL. It allows to 45 keep data structures and implementation simple. RAID5 arrays with so many disks 46 are not likely due to high risk of multiple disks failure. Such restriction
|
| /kernel/linux/linux-6.6/block/partitions/ |
| D | Kconfig | 10 Say Y here if you would like to use hard disks under Linux which 24 Support hard disks partitioned under Acorn operating systems. 31 Say Y here if you would like to use hard disks under Linux which 44 Say Y here if you would like to use hard disks under Linux which 72 to read disks partitioned under RISCiX. 80 "logical volumes" can be spread across one or multiple disks, 89 Say Y here if you would like to use hard disks under Linux which 96 Say Y here if you would like to use hard disks under Linux which 103 Say Y here if you would like to use hard disks under Linux which 111 partition table format used by IBM DASD disks operating under CMS. [all …]
|
| /kernel/linux/linux-5.10/block/partitions/ |
| D | Kconfig | 8 Say Y here if you would like to use hard disks under Linux which 22 Support hard disks partitioned under Acorn operating systems. 29 Say Y here if you would like to use hard disks under Linux which 42 Say Y here if you would like to use hard disks under Linux which 70 to read disks partitioned under RISCiX. 78 "logical volumes" can be spread across one or multiple disks, 87 Say Y here if you would like to use hard disks under Linux which 94 Say Y here if you would like to use hard disks under Linux which 101 Say Y here if you would like to use hard disks under Linux which 109 partition table format used by IBM DASD disks operating under CMS. [all …]
|
| /kernel/linux/linux-6.6/drivers/md/ |
| D | md-linear.c | 40 if (sector < conf->disks[mid].end_sector) in which_dev() 46 return conf->disks + lo; in which_dev() 68 conf = kzalloc(struct_size(conf, disks, raid_disks), GFP_KERNEL); in linear_conf() 77 struct dev_info *disk = conf->disks + j; in linear_conf() 108 conf->disks[0].end_sector = conf->disks[0].rdev->sectors; in linear_conf() 111 conf->disks[i].end_sector = in linear_conf() 112 conf->disks[i-1].end_sector + in linear_conf() 113 conf->disks[i].rdev->sectors; in linear_conf() 119 * conf->disks[] when it is updated in linear_add() and used to in linear_conf() 120 * iterate old conf->disks[] earray in linear_congested(). in linear_conf() [all …]
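The md-linear hits show the two halves of the mapping: linear_conf() fills disks[i].end_sector with a running sum of member sizes, and which_dev() binary-searches that prefix-sum array for the first entry whose end_sector exceeds the requested sector. A minimal sketch of the lookup, assuming the cumulative end_sector array is already populated — which_dev_ref and dev_info_ref are illustrative names, not kernel symbols:

```c
#include <stdint.h>

typedef uint64_t sector_t;

struct dev_info_ref {
        sector_t end_sector;    /* cumulative end of this member, exclusive */
};

/* Return the member device that owns 'sector' (sector must be below the
 * last entry's end_sector). */
static struct dev_info_ref *which_dev_ref(struct dev_info_ref *disks,
                                          int raid_disks, sector_t sector)
{
        int lo = 0, hi = raid_disks - 1;

        while (lo < hi) {
                int mid = lo + (hi - lo) / 2;

                if (sector < disks[mid].end_sector)
                        hi = mid;
                else
                        lo = mid + 1;
        }
        return &disks[lo];
}
```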
|
| /kernel/linux/linux-5.10/drivers/md/ |
| D | md-linear.c | 40 if (sector < conf->disks[mid].end_sector) in which_dev() 46 return conf->disks + lo; in which_dev() 69 conf = kzalloc(struct_size(conf, disks, raid_disks), GFP_KERNEL); in linear_conf() 78 struct dev_info *disk = conf->disks + j; in linear_conf() 117 conf->disks[0].end_sector = conf->disks[0].rdev->sectors; in linear_conf() 120 conf->disks[i].end_sector = in linear_conf() 121 conf->disks[i-1].end_sector + in linear_conf() 122 conf->disks[i].rdev->sectors; in linear_conf() 128 * conf->disks[] when it is updated in linear_add() and used to in linear_conf() 129 * iterate old conf->disks[] earray in linear_congested(). in linear_conf() [all …]
|