/lib/raid6/ |
D | avx2.c |
    40  int d, z, z0;   in raid6_avx21_gen_syndrome() local
    51  for (d = 0; d < bytes; d += 32) {   in raid6_avx21_gen_syndrome()
    52  asm volatile("prefetchnta %0" : : "m" (dptr[z0][d]));   in raid6_avx21_gen_syndrome()
    53  asm volatile("vmovdqa %0,%%ymm2" : : "m" (dptr[z0][d]));/* P[0] */   in raid6_avx21_gen_syndrome()
    54  asm volatile("prefetchnta %0" : : "m" (dptr[z0-1][d]));   in raid6_avx21_gen_syndrome()
    56  asm volatile("vmovdqa %0,%%ymm6" : : "m" (dptr[z0-1][d]));   in raid6_avx21_gen_syndrome()
    58  asm volatile("prefetchnta %0" : : "m" (dptr[z][d]));   in raid6_avx21_gen_syndrome()
    65  asm volatile("vmovdqa %0,%%ymm6" : : "m" (dptr[z][d]));   in raid6_avx21_gen_syndrome()
    74  asm volatile("vmovntdq %%ymm2,%0" : "=m" (p[d]));   in raid6_avx21_gen_syndrome()
    76  asm volatile("vmovntdq %%ymm4,%0" : "=m" (q[d]));   in raid6_avx21_gen_syndrome()
    [all …]
|
D | sse2.c |
    40  int d, z, z0;   in raid6_sse21_gen_syndrome() local
    51  for ( d = 0 ; d < bytes ; d += 16 ) {   in raid6_sse21_gen_syndrome()
    52  asm volatile("prefetchnta %0" : : "m" (dptr[z0][d]));   in raid6_sse21_gen_syndrome()
    53  asm volatile("movdqa %0,%%xmm2" : : "m" (dptr[z0][d])); /* P[0] */   in raid6_sse21_gen_syndrome()
    54  asm volatile("prefetchnta %0" : : "m" (dptr[z0-1][d]));   in raid6_sse21_gen_syndrome()
    56  asm volatile("movdqa %0,%%xmm6" : : "m" (dptr[z0-1][d]));   in raid6_sse21_gen_syndrome()
    58  asm volatile("prefetchnta %0" : : "m" (dptr[z][d]));   in raid6_sse21_gen_syndrome()
    66  asm volatile("movdqa %0,%%xmm6" : : "m" (dptr[z][d]));   in raid6_sse21_gen_syndrome()
    76  asm volatile("movntdq %%xmm2,%0" : "=m" (p[d]));   in raid6_sse21_gen_syndrome()
    78  asm volatile("movntdq %%xmm4,%0" : "=m" (q[d]));   in raid6_sse21_gen_syndrome()
    [all …]
|
D | avx512.c |
    48  int d, z, z0;   in raid6_avx5121_gen_syndrome() local
    61  for (d = 0; d < bytes; d += 64) {   in raid6_avx5121_gen_syndrome()
    68  : "m" (dptr[z0][d]), "m" (dptr[z0-1][d]));   in raid6_avx5121_gen_syndrome()
    80  : "m" (dptr[z][d]));   in raid6_avx5121_gen_syndrome()
    94  : "m" (p[d]), "m" (q[d]));   in raid6_avx5121_gen_syndrome()
   106  int d, z, z0;   in raid6_avx5121_xor_syndrome() local
   117  for (d = 0 ; d < bytes ; d += 64) {   in raid6_avx5121_xor_syndrome()
   122  : "m" (dptr[z0][d]), "m" (p[d]));   in raid6_avx5121_xor_syndrome()
   135  : "m" (dptr[z][d]));   in raid6_avx5121_xor_syndrome()
   153  : "m" (q[d]), "m" (p[d]));   in raid6_avx5121_xor_syndrome()
    [all …]
|
D | sse1.c |
    44  int d, z, z0;   in raid6_sse11_gen_syndrome() local
    55  for ( d = 0 ; d < bytes ; d += 8 ) {   in raid6_sse11_gen_syndrome()
    56  asm volatile("prefetchnta %0" : : "m" (dptr[z0][d]));   in raid6_sse11_gen_syndrome()
    57  asm volatile("movq %0,%%mm2" : : "m" (dptr[z0][d])); /* P[0] */   in raid6_sse11_gen_syndrome()
    58  asm volatile("prefetchnta %0" : : "m" (dptr[z0-1][d]));   in raid6_sse11_gen_syndrome()
    60  asm volatile("movq %0,%%mm6" : : "m" (dptr[z0-1][d]));   in raid6_sse11_gen_syndrome()
    62  asm volatile("prefetchnta %0" : : "m" (dptr[z][d]));   in raid6_sse11_gen_syndrome()
    70  asm volatile("movq %0,%%mm6" : : "m" (dptr[z][d]));   in raid6_sse11_gen_syndrome()
    80  asm volatile("movntq %%mm2,%0" : "=m" (p[d]));   in raid6_sse11_gen_syndrome()
    81  asm volatile("movntq %%mm4,%0" : "=m" (q[d]));   in raid6_sse11_gen_syndrome()
    [all …]
|
D | mmx.c |
    39  int d, z, z0;   in raid6_mmx1_gen_syndrome() local
    50  for ( d = 0 ; d < bytes ; d += 8 ) {   in raid6_mmx1_gen_syndrome()
    51  asm volatile("movq %0,%%mm2" : : "m" (dptr[z0][d])); /* P[0] */   in raid6_mmx1_gen_syndrome()
    54  asm volatile("movq %0,%%mm6" : : "m" (dptr[z][d]));   in raid6_mmx1_gen_syndrome()
    63  asm volatile("movq %%mm2,%0" : "=m" (p[d]));   in raid6_mmx1_gen_syndrome()
    65  asm volatile("movq %%mm4,%0" : "=m" (q[d]));   in raid6_mmx1_gen_syndrome()
    87  int d, z, z0;   in raid6_mmx2_gen_syndrome() local
    99  for ( d = 0 ; d < bytes ; d += 16 ) {   in raid6_mmx2_gen_syndrome()
   100  asm volatile("movq %0,%%mm2" : : "m" (dptr[z0][d])); /* P[0] */   in raid6_mmx2_gen_syndrome()
   101  asm volatile("movq %0,%%mm3" : : "m" (dptr[z0][d+8]));   in raid6_mmx2_gen_syndrome()
    [all …]
|
D | neon.uc |
    60  int d, z, z0;
    69  for ( d = 0 ; d < bytes ; d += NSIZE*$# ) {
    70  wq$$ = wp$$ = vld1q_u8(&dptr[z0][d+$$*NSIZE]);
    72  wd$$ = vld1q_u8(&dptr[z][d+$$*NSIZE]);
    81  vst1q_u8(&p[d+NSIZE*$$], wp$$);
    82  vst1q_u8(&q[d+NSIZE*$$], wq$$);
    91  int d, z, z0;
   100  for ( d = 0 ; d < bytes ; d += NSIZE*$# ) {
   101  wq$$ = vld1q_u8(&dptr[z0][d+$$*NSIZE]);
   102  wp$$ = veorq_u8(vld1q_u8(&p[d+$$*NSIZE]), wq$$);
    [all …]
|
D | s390vx.uc |
    87  int d, z, z0;
    97  for (d = 0; d < bytes; d += $#*NSIZE) {
    98  LOAD_DATA(0,&dptr[z0][d]);
   105  LOAD_DATA(16,&dptr[z][d]);
   109  STORE_DATA(0,&p[d]);
   110  STORE_DATA(8,&q[d]);
   120  int d, z, z0;
   130  for (d = 0; d < bytes; d += $#*NSIZE) {
   132  LOAD_DATA(0,&dptr[z0][d]);
   139  LOAD_DATA(16,&dptr[z][d]);
    [all …]
|
D | int.uc |
    86  int d, z, z0;
    94  for ( d = 0 ; d < bytes ; d += NSIZE*$# ) {
    95  wq$$ = wp$$ = *(unative_t *)&dptr[z0][d+$$*NSIZE];
    97  wd$$ = *(unative_t *)&dptr[z][d+$$*NSIZE];
   105  *(unative_t *)&p[d+NSIZE*$$] = wp$$;
   106  *(unative_t *)&q[d+NSIZE*$$] = wq$$;
   115  int d, z, z0;
   123  for ( d = 0 ; d < bytes ; d += NSIZE*$# ) {
   125  wq$$ = wp$$ = *(unative_t *)&dptr[z0][d+$$*NSIZE];
   127  wd$$ = *(unative_t *)&dptr[z][d+$$*NSIZE];
    [all …]
|
D | vpermxor.uc |
    48  int d, z, z0;
    55  for (d = 0; d < bytes; d += NSIZE*$#) {
    56  wp$$ = wq$$ = *(unative_t *)&dptr[z0][d+$$*NSIZE];
    59  wd$$ = *(unative_t *)&dptr[z][d+$$*NSIZE];
    67  *(unative_t *)&p[d+NSIZE*$$] = wp$$;
    68  *(unative_t *)&q[d+NSIZE*$$] = wq$$;
|
D | altivec.uc |
    75  int d, z, z0;
    84  for ( d = 0 ; d < bytes ; d += NSIZE*$# ) {
    85  wq$$ = wp$$ = *(unative_t *)&dptr[z0][d+$$*NSIZE];
    87  wd$$ = *(unative_t *)&dptr[z][d+$$*NSIZE];
    95  *(unative_t *)&p[d+NSIZE*$$] = wp$$;
    96  *(unative_t *)&q[d+NSIZE*$$] = wq$$;
|
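Note: all of the gen_syndrome() variants above compute the same two RAID-6 parities per stripe chunk; only the vector width differs (32 bytes per step for AVX2, 16 for SSE2, 64 for AVX-512, 8 for MMX/SSE1, NSIZE*$# for the unrolled .uc templates). P is the plain XOR of the data blocks, and Q is a Horner evaluation of the sum of g^z * D_z over GF(2^8) with generator g = {02}. A minimal byte-at-a-time sketch of that contract follows (gen_syndrome_ref and gf2_times2 are illustrative names, not kernel symbols; the in-tree code does the same thing on whole vectors and adds the prefetching and non-temporal stores visible in the excerpts):

#include <stddef.h>
#include <stdint.h>

/* GF(2^8) multiply by the RAID-6 generator {02}, reduction poly 0x11d. */
static uint8_t gf2_times2(uint8_t v)
{
        return (uint8_t)((v << 1) ^ ((v & 0x80) ? 0x1d : 0));
}

/*
 * Reference P/Q generation.  dptr[0..z0] are the data blocks, followed
 * by the P and Q blocks, each 'bytes' long -- the same layout the
 * excerpts index as dptr[z0][d], p[d] and q[d].  wp/wq correspond to
 * the %ymm2/%ymm4 (or %xmm2/%xmm4, %mm2/%mm4) accumulators that the
 * excerpts store to p and q.
 */
static void gen_syndrome_ref(int ndisks, size_t bytes, uint8_t **dptr)
{
        int z0 = ndisks - 3;            /* index of the highest data disk */
        uint8_t *p = dptr[z0 + 1];
        uint8_t *q = dptr[z0 + 2];

        for (size_t d = 0; d < bytes; d++) {
                uint8_t wp = dptr[z0][d];
                uint8_t wq = wp;

                for (int z = z0 - 1; z >= 0; z--) {
                        uint8_t wd = dptr[z][d];

                        wp ^= wd;                       /* P: plain XOR          */
                        wq = gf2_times2(wq) ^ wd;       /* Q: Horner, {02}*wq + D */
                }
                p[d] = wp;
                q[d] = wq;
        }
}

The per-architecture variants listed above are interchangeable, vectorized implementations of exactly this computation.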
/lib/crypto/ |
D | des.c |
   582  #define ROUND(L, R, A, B, K, d) \   argument
   583  B = K[0]; A = K[1]; K += d; \
   609  #define DES_PC2(a, b, c, d) (T4(d) | T3(c) | T2(b) | T1(a))   argument
   626  unsigned long a, b, c, d, w;   in des_ekey() local
   629  d = k[4]; d &= 0x0e; d <<= 4; d |= k[0] & 0x1e; d = pc1[d];   in des_ekey()
   634  pe[15 * 2 + 0] = DES_PC2(a, b, c, d); d = rs[d];   in des_ekey()
   635  pe[14 * 2 + 0] = DES_PC2(d, a, b, c); c = rs[c]; b = rs[b];   in des_ekey()
   636  pe[13 * 2 + 0] = DES_PC2(b, c, d, a); a = rs[a]; d = rs[d];   in des_ekey()
   637  pe[12 * 2 + 0] = DES_PC2(d, a, b, c); c = rs[c]; b = rs[b];   in des_ekey()
   638  pe[11 * 2 + 0] = DES_PC2(b, c, d, a); a = rs[a]; d = rs[d];   in des_ekey()
    [all …]
|
D | sha256.c |
    48  u32 a, b, c, d, e, f, g, h, t1, t2;   in sha256_transform() local
    61  a = state[0]; b = state[1]; c = state[2]; d = state[3];   in sha256_transform()
    66  t2 = e0(a) + Maj(a, b, c); d += t1; h = t1 + t2;   in sha256_transform()
    67  t1 = g + e1(d) + Ch(d, e, f) + 0x71374491 + W[1];   in sha256_transform()
    69  t1 = f + e1(c) + Ch(c, d, e) + 0xb5c0fbcf + W[2];   in sha256_transform()
    71  t1 = e + e1(b) + Ch(b, c, d) + 0xe9b5dba5 + W[3];   in sha256_transform()
    73  t1 = d + e1(a) + Ch(a, b, c) + 0x3956c25b + W[4];   in sha256_transform()
    74  t2 = e0(e) + Maj(e, f, g); h += t1; d = t1 + t2;   in sha256_transform()
    76  t2 = e0(d) + Maj(d, e, f); g += t1; c = t1 + t2;   in sha256_transform()
    78  t2 = e0(c) + Maj(c, d, e); f += t1; b = t1 + t2;   in sha256_transform()
    [all …]
|
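Note: the sha256.c excerpt is the fully unrolled compression loop; each line is one round of the standard SHA-256 round function with the eight working variables renamed instead of shifted (the constants 0x71374491, 0xb5c0fbcf, … added to W[1], W[2], … are the FIPS 180-4 round constants). A rolled-up sketch of a single round, with e0/e1 spelled out as the Sigma0/Sigma1 rotations, is below; sha256_round is an illustrative name and s[0..7] stand for a..h:

#include <stdint.h>

static inline uint32_t ror32(uint32_t x, unsigned int n)
{
        return (x >> n) | (x << (32 - n));
}

#define Ch(x, y, z)     (((x) & (y)) ^ (~(x) & (z)))
#define Maj(x, y, z)    (((x) & (y)) ^ ((x) & (z)) ^ ((y) & (z)))
#define e0(x)           (ror32(x, 2) ^ ror32(x, 13) ^ ror32(x, 22))     /* Sigma0 */
#define e1(x)           (ror32(x, 6) ^ ror32(x, 11) ^ ror32(x, 25))     /* Sigma1 */

/*
 * One rolled round: k is the round constant, w the scheduled message
 * word.  The unrolled kernel code gets the same effect by renaming
 * a..h every round instead of shifting them.
 */
static void sha256_round(uint32_t s[8], uint32_t k, uint32_t w)
{
        uint32_t t1 = s[7] + e1(s[4]) + Ch(s[4], s[5], s[6]) + k + w;
        uint32_t t2 = e0(s[0]) + Maj(s[0], s[1], s[2]);

        s[7] = s[6]; s[6] = s[5]; s[5] = s[4];
        s[4] = s[3] + t1;
        s[3] = s[2]; s[2] = s[1]; s[1] = s[0];
        s[0] = t1 + t2;
}

Running 64 such rounds over the expanded schedule W[] and adding s[] back into the hash state reproduces what the unrolled lines above compute.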
/lib/mpi/ |
D | mpiutil.c |
    39  a->d = mpi_alloc_limb_space(nlimbs);   in mpi_alloc()
    40  if (!a->d) {   in mpi_alloc()
    45  a->d = NULL;   in mpi_alloc()
    77  mpi_free_limb_space(a->d);   in mpi_assign_limb_space()
    78  a->d = ap;   in mpi_assign_limb_space()
    93  if (a->d) {   in mpi_resize()
    97  memcpy(p, a->d, a->alloced * sizeof(mpi_limb_t));   in mpi_resize()
    98  kzfree(a->d);   in mpi_resize()
    99  a->d = p;   in mpi_resize()
   101  a->d = kcalloc(nlimbs, sizeof(mpi_limb_t), GFP_KERNEL);   in mpi_resize()
    [all …]
|
D | mpi-pow.c |
    46  rp = res->d;   in mpi_powm()
    47  ep = exp->d;   in mpi_powm()
    55  res->nlimbs = (msize == 1 && mod->d[0] == 1) ? 0 : 1;   in mpi_powm()
    59  rp = res->d;   in mpi_powm()
    73  mod_shift_cnt = count_leading_zeros(mod->d[msize - 1]);   in mpi_powm()
    75  mpihelp_lshift(mp, mod->d, msize, mod_shift_cnt);   in mpi_powm()
    77  MPN_COPY(mp, mod->d, msize);   in mpi_powm()
    87  MPN_COPY(bp, base->d, bsize);   in mpi_powm()
    96  bp = base->d;   in mpi_powm()
   116  rp = res->d;   in mpi_powm()
    [all …]
|
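Note: mpi_powm() computes res = base^exp mod mod on arbitrary-precision values (it backs lib/mpi users such as the kernel's software RSA). The excerpts show it special-casing mod == 1 (result 0) and left-shifting the modulus so its top limb is normalized before the main loop. Stripped of the limb arithmetic, the control flow reduces to the textbook left-to-right square-and-multiply idea; a single-word sketch for orientation (powm_u64 is an illustrative name; unsigned __int128 is a GCC/Clang extension standing in for the double-limb products, and mod must be non-zero):

#include <stdint.h>

/* Textbook left-to-right square-and-multiply, single-word edition. */
static uint64_t powm_u64(uint64_t base, uint64_t exp, uint64_t mod)
{
        unsigned __int128 r = 1 % mod;          /* mod == 1 yields 0, as in the excerpt */

        for (int i = 63; i >= 0; i--) {
                r = (r * r) % mod;              /* square for every exponent bit */
                if ((exp >> i) & 1)
                        r = (r * base) % mod;   /* extra multiply on set bits */
        }
        return (uint64_t)r;
}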
D | longlong.h |
   144  #define udiv_qrnnd(q, r, n1, n0, d) \   argument
   150  "r" ((USItype)(d)))
   162  #define udiv_qrnnd(q, r, n1, n0, d) \   argument
   164  (q) = __udiv_qrnnd(&__r, (n1), (n0), (d)); \
   287  #define udiv_qrnnd(q, r, nh, nl, d) \   argument
   293  "g" ((USItype)(d)))
   339  #define udiv_qrnnd(q, r, n1, n0, d) \
   341  (q) = __udiv_qrnnd(&__r, (n1), (n0), (d)); \
   379  #define sdiv_qrnnd(q, r, n1, n0, d) \   argument
   387  : "0" (__xx.__ll), "r" (d)); \
    [all …]
|
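Note: every udiv_qrnnd() definition above implements the same contract on a different CPU: divide the two-limb value (n1:n0) by the single limb d, placing the quotient in q and the remainder in r, under the precondition n1 < d so the quotient fits in one limb (sdiv_qrnnd is the signed counterpart). A portable sketch of that contract for 64-bit limbs, using the compiler's 128-bit integer instead of inline assembly (udiv_qrnnd_portable is an illustrative name, not one of the header's actual fallbacks):

#include <stdint.h>

/* (n1:n0) / d with n1 < d, so the quotient fits in a single limb. */
static inline void udiv_qrnnd_portable(uint64_t *q, uint64_t *r,
                                       uint64_t n1, uint64_t n0, uint64_t d)
{
        unsigned __int128 n = ((unsigned __int128)n1 << 64) | n0;

        *q = (uint64_t)(n / d);
        *r = (uint64_t)(n % d);
}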
D | mpi-internal.h |
    56  #define MPN_COPY(d, s, n) \   argument
    60  (d)[_i] = (s)[_i]; \
    63  #define MPN_COPY_DECR(d, s, n) \   argument
    67  (d)[_i] = (s)[_i]; \
    71  #define MPN_ZERO(d, n) \   argument
    75  (d)[_i] = 0; \
    78  #define MPN_NORMALIZE(d, n) \   argument
    81  if ((d)[(n)-1]) \
|
D | mpi-cmp.c |
    35  if (u->d[0] == limb)   in mpi_cmp_ui()
    37  else if (u->d[0] > limb)   in mpi_cmp_ui()
    63  cmp = mpihelp_cmp(u->d, v->d, usize);   in mpi_cmp()
|
D | mpih-div.c |
    64  mpi_limb_t d;   in mpihelp_divrem() local
    66  d = dp[0];   in mpihelp_divrem()
    69  if (n1 >= d) {   in mpihelp_divrem()
    70  n1 -= d;   in mpihelp_divrem()
    76  udiv_qrnnd(qp[i], n1, n1, np[i], d);   in mpihelp_divrem()
    80  udiv_qrnnd(qp[i], n1, n1, 0, d);   in mpihelp_divrem()
|
D | mpicoder.c |
    74  val->d[j - 1] = a;   in mpi_read_raw_data()
   118  alimb = a->d[i];   in count_lzeros()
   177  alimb = cpu_to_be32(a->d[i]);   in mpi_read_buffer()
   179  alimb = cpu_to_be64(a->d[i]);   in mpi_read_buffer()
   294  alimb = a->d[i] ? cpu_to_be32(a->d[i]) : 0;   in mpi_write_to_sgl()
   296  alimb = a->d[i] ? cpu_to_be64(a->d[i]) : 0;   in mpi_write_to_sgl()
   406  val->d[j--] = a;   in mpi_read_raw_from_sgl()
|
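Note: the mpicoder.c helpers convert between the limb array d[] (least significant limb at index 0) and externally visible big-endian byte strings: the readers fill val->d[] from a raw buffer or scatterlist (the decreasing index j in the excerpts walks from the most significant limb down), and the writers emit cpu_to_be32/cpu_to_be64 limbs. A hedged user-space sketch of the import direction (read_raw_data is an illustrative name; the in-tree readers also allocate and size the MPI and handle scatterlist input):

#include <stddef.h>
#include <stdint.h>

/*
 * Pack a big-endian byte string into 64-bit limbs, least significant
 * limb first.  Assumes nlimbs >= (nbytes + 7) / 8.
 */
static void read_raw_data(uint64_t *d, size_t nlimbs,
                          const uint8_t *buf, size_t nbytes)
{
        size_t i = 0;           /* limb being filled, bottom up */
        uint64_t a = 0;
        unsigned int shift = 0;

        while (nbytes--) {      /* walk from the least significant (last) byte */
                a |= (uint64_t)buf[nbytes] << shift;
                shift += 8;
                if (shift == 64) {
                        d[i++] = a;
                        a = 0;
                        shift = 0;
                }
        }
        if (shift)
                d[i++] = a;     /* partial top limb */
        while (i < nlimbs)
                d[i++] = 0;     /* clear any unused high limbs */
}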
/lib/math/ |
D | reciprocal_div.c |
    13  struct reciprocal_value reciprocal_value(u32 d)   in reciprocal_value() argument
    19  l = fls(d - 1);   in reciprocal_value()
    20  m = ((1ULL << 32) * ((1ULL << l) - d));   in reciprocal_value()
    21  do_div(m, d);   in reciprocal_value()
    31  struct reciprocal_value_adv reciprocal_value_adv(u32 d, u8 prec)   in reciprocal_value_adv() argument
    38  l = fls(d - 1);   in reciprocal_value_adv()
    45  d, __func__);   in reciprocal_value_adv()
    48  do_div(mlow, d);   in reciprocal_value_adv()
    50  do_div(mhigh, d);   in reciprocal_value_adv()
|
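Note: reciprocal_value() precomputes a fixed-point reciprocal so that later divisions by the same d can be done with a multiply and two shifts (the Granlund–Montgomery trick). The excerpt shows the core of the precomputation (l = fls(d - 1), m = 2^32 * (2^l - d) / d); a user-space sketch of the full triple and the multiply-and-shift divide it enables is below (recip_value/recip_divide are illustrative names; the divide helper the kernel pairs with this lives in include/linux/reciprocal_div.h):

#include <stdint.h>

struct recip {
        uint32_t m;
        uint8_t sh1, sh2;
};

/* Precompute the reciprocal triple for a fixed divisor d > 0. */
static struct recip recip_value(uint32_t d)
{
        struct recip R;
        int l = (d > 1) ? 32 - __builtin_clz(d - 1) : 0;        /* fls(d - 1) */
        uint64_t m = ((1ULL << 32) * ((1ULL << l) - d)) / d + 1;

        R.m = (uint32_t)m;
        R.sh1 = (l > 1) ? 1 : l;        /* min(l, 1) */
        R.sh2 = (l > 0) ? l - 1 : 0;    /* max(l - 1, 0) */
        return R;
}

/* a / d with one widening multiply and two shifts, no divide instruction. */
static uint32_t recip_divide(uint32_t a, struct recip R)
{
        uint32_t t = (uint32_t)(((uint64_t)a * R.m) >> 32);

        return (t + ((a - t) >> R.sh1)) >> R.sh2;
}

For a divisor fixed at setup time, recip_divide(a, R) then replaces a hardware division per element with one widening multiply.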
D | rational.c |
    36  unsigned long n, d, n0, d0, n1, d1;   in rational_best_approximation() local
    38  d = given_denominator;   in rational_best_approximation()
    48  if (d == 0)   in rational_best_approximation()
    50  t = d;   in rational_best_approximation()
    51  a = n / d;   in rational_best_approximation()
    52  d = n % d;   in rational_best_approximation()
|
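Note: rational_best_approximation() is a continued-fraction walk: each step takes a = n / d, d = n % d (the Euclidean step visible in the excerpt) and folds the partial quotient a into the running convergents n1/d1, stopping when a bound would be exceeded. A simplified sketch that returns the last convergent fitting the bounds (best_approx is an illustrative name; the in-tree helper additionally weighs semiconvergents before giving up, so it can return a closer fraction than this sketch):

/* Approximate num/den by n/d with n <= max_n and d <= max_d. */
static void best_approx(unsigned long num, unsigned long den,
                        unsigned long max_n, unsigned long max_d,
                        unsigned long *out_n, unsigned long *out_d)
{
        unsigned long n = num, d = den;
        unsigned long n0 = 0, d0 = 1;   /* convergent before last */
        unsigned long n1 = 1, d1 = 0;   /* last convergent */

        while (d != 0) {
                unsigned long a = n / d;        /* next partial quotient */
                unsigned long t;

                t = d; d = n % d; n = t;        /* Euclidean step, as in the excerpt */
                t = n0 + a * n1; n0 = n1; n1 = t;
                t = d0 + a * d1; d0 = d1; d1 = t;

                if (n1 > max_n || d1 > max_d) { /* next convergent too big: back off */
                        n1 = n0;
                        d1 = d0;
                        break;
                }
        }
        *out_n = n1;
        *out_d = d1;
}

For example, best_approx(314159265, 100000000, 100, 100, &n, &d) gives 22/7; the semiconvergent step in the kernel helper may pick a still closer fraction such as 311/99.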
D | div64.c |
    33  uint64_t res, d = 1;   in __div64_32() local
    46  d = d+d;   in __div64_32()
    52  res += d;   in __div64_32()
    55  d >>= 1;   in __div64_32()
    56  } while (d);   in __div64_32()
|
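Note: __div64_32() is the generic fallback behind do_div() on 32-bit architectures without a native 64/32 divide: it left-aligns the divisor under the dividend, then peels off one quotient bit per iteration, with d tracking the weight of the bit being tried (hence d = d+d while aligning and d >>= 1 while dividing). A reference sketch of the same shift-and-subtract loop (div64_32_ref is an illustrative name; like do_div(), the quotient goes back through *n and the remainder is returned; base must be non-zero):

#include <stdint.h>

static uint32_t div64_32_ref(uint64_t *n, uint32_t base)
{
        uint64_t rem = *n, b = base, res = 0, d = 1;

        /* Left-align the divisor under the dividend (the excerpt's d = d+d). */
        while (b < rem && !(b & (1ULL << 63))) {
                b += b;
                d += d;
        }

        /* Peel off one quotient bit per step (res += d; d >>= 1). */
        do {
                if (rem >= b) {
                        rem -= b;
                        res += d;
                }
                b >>= 1;
                d >>= 1;
        } while (d);

        *n = res;
        return (uint32_t)rem;
}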
/lib/842/ |
D | 842_decompress.c |
    59  #define beN_to_cpu(d, s) \   argument
    60  ((s) == 2 ? be16_to_cpu(get_unaligned((__be16 *)d)) : \
    61  (s) == 4 ? be32_to_cpu(get_unaligned((__be32 *)d)) : \
    62  (s) == 8 ? be64_to_cpu(get_unaligned((__be64 *)d)) : \
    65  static int next_bits(struct sw842_param *p, u64 *d, u8 n);
    67  static int __split_next_bits(struct sw842_param *p, u64 *d, u8 n, u8 s)   in __split_next_bits() argument
    80  ret = next_bits(p, d, s);   in __split_next_bits()
    83  *d |= tmp << s;   in __split_next_bits()
    87  static int next_bits(struct sw842_param *p, u64 *d, u8 n)   in next_bits() argument
   100  return __split_next_bits(p, d, n, 32);   in next_bits()
    [all …]
|
D | 842_compress.c |
   139  #define replace_hash(p, b, i, d) do { \   argument
   140  struct sw842_hlist_node##b *_n = &(p)->node##b[(i)+(d)]; \
   142  _n->data = (p)->data##b[d]; \
   152  static int add_bits(struct sw842_param *p, u64 d, u8 n);
   154  static int __split_add_bits(struct sw842_param *p, u64 d, u8 n, u8 s)   in __split_add_bits() argument
   161  ret = add_bits(p, d >> s, n - s);   in __split_add_bits()
   164  return add_bits(p, d & GENMASK_ULL(s - 1, 0), s);   in __split_add_bits()
   167  static int add_bits(struct sw842_param *p, u64 d, u8 n)   in add_bits() argument
   173  pr_debug("add %u bits %lx\n", (unsigned char)n, (unsigned long)d);   in add_bits()
   182  return __split_add_bits(p, d, n, 32);   in add_bits()
    [all …]
|
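Note: the 842 code treats compressed data as a big-endian bitstream; next_bits()/add_bits() read or write up to a machine word, and the __split_* helpers handle wider requests by processing the upper n - s bits first, then the lower s bits, and recombining them (*d |= tmp << s in the decompressor; the d >> s / d & GENMASK_ULL(s - 1, 0) pair in the compressor). A self-contained sketch of a reader with that split-and-recombine shape (struct bitstream, read_bits32 and read_bits are illustrative; the real code buffers whole words and returns kernel error codes):

#include <stddef.h>
#include <stdint.h>

struct bitstream {
        const uint8_t *in;
        size_t len;             /* bytes remaining */
        unsigned int bit;       /* bits already consumed from in[0], 0..7 */
};

/* Read n <= 32 bits, most significant bit first; -1 on underrun. */
static int read_bits32(struct bitstream *s, uint64_t *d, uint8_t n)
{
        uint64_t v = 0;

        while (n) {
                uint8_t take;

                if (!s->len)
                        return -1;
                take = 8 - s->bit;
                if (take > n)
                        take = n;
                v = (v << take) |
                    ((s->in[0] >> (8 - s->bit - take)) & ((1u << take) - 1));
                s->bit += take;
                if (s->bit == 8) {
                        s->in++;
                        s->len--;
                        s->bit = 0;
                }
                n -= take;
        }
        *d = v;
        return 0;
}

/* Split-and-recombine for wide reads, in the spirit of __split_next_bits():
 * upper n - 32 bits first, then the lower 32, then *d = (hi << 32) | lo. */
static int read_bits(struct bitstream *s, uint64_t *d, uint8_t n)
{
        uint64_t hi, lo;

        if (n <= 32)
                return read_bits32(s, d, n);
        if (read_bits32(s, &hi, n - 32) || read_bits32(s, &lo, 32))
                return -1;
        *d = (hi << 32) | lo;
        return 0;
}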
/lib/ |
D | glob.c |
    57  unsigned char d = *pat++;   in glob_match() local
    59  switch (d) {   in glob_match()
   104  d = *pat++;   in glob_match()
   108  if (c == d) {   in glob_match()
   109  if (d == '\0')   in glob_match()
|
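Note: inside glob_match(), d walks the pattern: the switch on d dispatches on the metacharacters, and the later d = *pat++ / if (c == d) lines belong to the "[...]" character-class scan, with the d == '\0' test catching an unterminated class. A simplified user-space sketch of just that class-matching step, supporting '!' negation and "a-z" ranges in the usual glob(7) style (match_class is an illustrative name, not the kernel's exact logic, which also has to handle '*' backtracking):

#include <stdbool.h>

/*
 * Match one "[...]" class against byte c, advancing *patp past the
 * closing ']'.  Returns false on an unterminated class.
 */
static bool match_class(const char **patp, unsigned char c)
{
        const char *pat = *patp;        /* points just past the '[' */
        bool invert = false, found = false;

        if (*pat == '!') {
                invert = true;
                pat++;
        }
        do {
                unsigned char d = *pat++;

                if (d == '\0')
                        return false;   /* unterminated class */
                if (pat[0] == '-' && pat[1] != ']' && pat[1] != '\0') {
                        if (d <= c && c <= (unsigned char)pat[1])
                                found = true;   /* range like "a-z" */
                        pat += 2;
                } else if (c == d) {
                        found = true;           /* literal member */
                }
        } while (*pat != ']');

        *patp = pat + 1;                /* consume the ']' */
        return found != invert;
}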