
Searched refs:d (Results 1 – 25 of 34) sorted by relevance

/lib/raid6/
sse2.c
45 int d, z, z0; in raid6_sse21_gen_syndrome() local
56 for ( d = 0 ; d < bytes ; d += 16 ) { in raid6_sse21_gen_syndrome()
57 asm volatile("prefetchnta %0" : : "m" (dptr[z0][d])); in raid6_sse21_gen_syndrome()
58 asm volatile("movdqa %0,%%xmm2" : : "m" (dptr[z0][d])); /* P[0] */ in raid6_sse21_gen_syndrome()
59 asm volatile("prefetchnta %0" : : "m" (dptr[z0-1][d])); in raid6_sse21_gen_syndrome()
61 asm volatile("movdqa %0,%%xmm6" : : "m" (dptr[z0-1][d])); in raid6_sse21_gen_syndrome()
63 asm volatile("prefetchnta %0" : : "m" (dptr[z][d])); in raid6_sse21_gen_syndrome()
71 asm volatile("movdqa %0,%%xmm6" : : "m" (dptr[z][d])); in raid6_sse21_gen_syndrome()
81 asm volatile("movntdq %%xmm2,%0" : "=m" (p[d])); in raid6_sse21_gen_syndrome()
83 asm volatile("movntdq %%xmm4,%0" : "=m" (q[d])); in raid6_sse21_gen_syndrome()
[all …]
avx2.c
46 int d, z, z0; in raid6_avx21_gen_syndrome() local
57 for (d = 0; d < bytes; d += 32) { in raid6_avx21_gen_syndrome()
58 asm volatile("prefetchnta %0" : : "m" (dptr[z0][d])); in raid6_avx21_gen_syndrome()
59 asm volatile("vmovdqa %0,%%ymm2" : : "m" (dptr[z0][d]));/* P[0] */ in raid6_avx21_gen_syndrome()
60 asm volatile("prefetchnta %0" : : "m" (dptr[z0-1][d])); in raid6_avx21_gen_syndrome()
62 asm volatile("vmovdqa %0,%%ymm6" : : "m" (dptr[z0-1][d])); in raid6_avx21_gen_syndrome()
64 asm volatile("prefetchnta %0" : : "m" (dptr[z][d])); in raid6_avx21_gen_syndrome()
71 asm volatile("vmovdqa %0,%%ymm6" : : "m" (dptr[z][d])); in raid6_avx21_gen_syndrome()
80 asm volatile("vmovntdq %%ymm2,%0" : "=m" (p[d])); in raid6_avx21_gen_syndrome()
82 asm volatile("vmovntdq %%ymm4,%0" : "=m" (q[d])); in raid6_avx21_gen_syndrome()
[all …]
sse1.c
49 int d, z, z0; in raid6_sse11_gen_syndrome() local
60 for ( d = 0 ; d < bytes ; d += 8 ) { in raid6_sse11_gen_syndrome()
61 asm volatile("prefetchnta %0" : : "m" (dptr[z0][d])); in raid6_sse11_gen_syndrome()
62 asm volatile("movq %0,%%mm2" : : "m" (dptr[z0][d])); /* P[0] */ in raid6_sse11_gen_syndrome()
63 asm volatile("prefetchnta %0" : : "m" (dptr[z0-1][d])); in raid6_sse11_gen_syndrome()
65 asm volatile("movq %0,%%mm6" : : "m" (dptr[z0-1][d])); in raid6_sse11_gen_syndrome()
67 asm volatile("prefetchnta %0" : : "m" (dptr[z][d])); in raid6_sse11_gen_syndrome()
75 asm volatile("movq %0,%%mm6" : : "m" (dptr[z][d])); in raid6_sse11_gen_syndrome()
85 asm volatile("movntq %%mm2,%0" : "=m" (p[d])); in raid6_sse11_gen_syndrome()
86 asm volatile("movntq %%mm4,%0" : "=m" (q[d])); in raid6_sse11_gen_syndrome()
[all …]
mmx.c
44 int d, z, z0; in raid6_mmx1_gen_syndrome() local
55 for ( d = 0 ; d < bytes ; d += 8 ) { in raid6_mmx1_gen_syndrome()
56 asm volatile("movq %0,%%mm2" : : "m" (dptr[z0][d])); /* P[0] */ in raid6_mmx1_gen_syndrome()
59 asm volatile("movq %0,%%mm6" : : "m" (dptr[z][d])); in raid6_mmx1_gen_syndrome()
68 asm volatile("movq %%mm2,%0" : "=m" (p[d])); in raid6_mmx1_gen_syndrome()
70 asm volatile("movq %%mm4,%0" : "=m" (q[d])); in raid6_mmx1_gen_syndrome()
92 int d, z, z0; in raid6_mmx2_gen_syndrome() local
104 for ( d = 0 ; d < bytes ; d += 16 ) { in raid6_mmx2_gen_syndrome()
105 asm volatile("movq %0,%%mm2" : : "m" (dptr[z0][d])); /* P[0] */ in raid6_mmx2_gen_syndrome()
106 asm volatile("movq %0,%%mm3" : : "m" (dptr[z0][d+8])); in raid6_mmx2_gen_syndrome()
[all …]
neon.uc
57 int d, z, z0;
66 for ( d = 0 ; d < bytes ; d += NSIZE*$# ) {
67 wq$$ = wp$$ = vld1q_u8(&dptr[z0][d+$$*NSIZE]);
69 wd$$ = vld1q_u8(&dptr[z][d+$$*NSIZE]);
78 vst1q_u8(&p[d+NSIZE*$$], wp$$);
79 vst1q_u8(&q[d+NSIZE*$$], wq$$);
88 int d, z, z0;
97 for ( d = 0 ; d < bytes ; d += NSIZE*$# ) {
98 wq$$ = vld1q_u8(&dptr[z0][d+$$*NSIZE]);
99 wp$$ = veorq_u8(vld1q_u8(&p[d+$$*NSIZE]), wq$$);
[all …]
int.uc
86 int d, z, z0;
94 for ( d = 0 ; d < bytes ; d += NSIZE*$# ) {
95 wq$$ = wp$$ = *(unative_t *)&dptr[z0][d+$$*NSIZE];
97 wd$$ = *(unative_t *)&dptr[z][d+$$*NSIZE];
105 *(unative_t *)&p[d+NSIZE*$$] = wp$$;
106 *(unative_t *)&q[d+NSIZE*$$] = wq$$;
115 int d, z, z0;
123 for ( d = 0 ; d < bytes ; d += NSIZE*$# ) {
125 wq$$ = wp$$ = *(unative_t *)&dptr[z0][d+$$*NSIZE];
127 wd$$ = *(unative_t *)&dptr[z][d+$$*NSIZE];
[all …]
altivec.uc
72 int d, z, z0;
81 for ( d = 0 ; d < bytes ; d += NSIZE*$# ) {
82 wq$$ = wp$$ = *(unative_t *)&dptr[z0][d+$$*NSIZE];
84 wd$$ = *(unative_t *)&dptr[z][d+$$*NSIZE];
92 *(unative_t *)&p[d+NSIZE*$$] = wp$$;
93 *(unative_t *)&q[d+NSIZE*$$] = wq$$;
tilegx.uc
54 int d, z, z0;
65 for ( d = 0 ; d < bytes ; d += NSIZE*$# ) {
68 wd$$ = *(u64 *)&dptr[z][d+$$*NSIZE];
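
Every entry above, sse2.c through tilegx.uc, is an unrolled or vectorized form of one reference computation: P is the running XOR of the data blocks, and Q is evaluated by Horner's rule, multiplying the running value by 2 in GF(2^8) before each XOR (in the .uc templates, $$ is the unroll index and $# the unroll count, expanded at build time). A byte-at-a-time sketch with the prefetching, unrolling, and non-temporal stores stripped out; raid6_gen_syndrome_ref and gf2_mul2 are hypothetical names:

#include <stddef.h>
#include <stdint.h>

/* Multiply one GF(2^8) element by 2 (RAID-6's generator, polynomial 0x11d). */
static uint8_t gf2_mul2(uint8_t v)
{
        return (uint8_t)(v << 1) ^ ((v & 0x80) ? 0x1d : 0);
}

/* disks counts the two parity blocks: dptr[disks-2] is P, dptr[disks-1] is Q. */
static void raid6_gen_syndrome_ref(int disks, size_t bytes, uint8_t **dptr)
{
        uint8_t *p = dptr[disks - 2];
        uint8_t *q = dptr[disks - 1];
        int z0 = disks - 3;                     /* highest data disk */

        for (size_t d = 0; d < bytes; d++) {
                uint8_t wp = dptr[z0][d];
                uint8_t wq = wp;

                for (int z = z0 - 1; z >= 0; z--) {
                        wp ^= dptr[z][d];               /* P: plain parity */
                        wq = gf2_mul2(wq) ^ dptr[z][d]; /* Q: Horner in GF(2^8) */
                }
                p[d] = wp;
                q[d] = wq;
        }
}

The SIMD versions keep wp and wq in registers (xmm2/xmm4 and ymm2/ymm4 above) and process 8, 16, or 32 such byte lanes per iteration.
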
/lib/
md5.c
15 u32 a, b, c, d; in md5_transform() local
20 d = hash[3]; in md5_transform()
22 MD5STEP(F1, a, b, c, d, in[0] + 0xd76aa478, 7); in md5_transform()
23 MD5STEP(F1, d, a, b, c, in[1] + 0xe8c7b756, 12); in md5_transform()
24 MD5STEP(F1, c, d, a, b, in[2] + 0x242070db, 17); in md5_transform()
25 MD5STEP(F1, b, c, d, a, in[3] + 0xc1bdceee, 22); in md5_transform()
26 MD5STEP(F1, a, b, c, d, in[4] + 0xf57c0faf, 7); in md5_transform()
27 MD5STEP(F1, d, a, b, c, in[5] + 0x4787c62a, 12); in md5_transform()
28 MD5STEP(F1, c, d, a, b, in[6] + 0xa8304613, 17); in md5_transform()
29 MD5STEP(F1, b, c, d, a, in[7] + 0xfd469501, 22); in md5_transform()
[all …]
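
The MD5STEP lines are the fully unrolled round schedule of md5_transform(). The macro itself is not in the excerpt; a sketch of its standard form (lib/md5.c descends from Colin Plumb's public-domain implementation, so this is stated from that lineage rather than from the listing; u32 operands assumed):

#define F1(x, y, z) ((z) ^ ((x) & ((y) ^ (z))))        /* round-1 boolean function */
#define MD5STEP(f, w, x, y, z, in, s) \
        ((w) += f(x, y, z) + (in), (w) = ((w) << (s) | (w) >> (32 - (s))) + (x))

So MD5STEP(F1, a, b, c, d, in[0] + 0xd76aa478, 7) folds message word 0 plus a sine-derived constant into a, rotates left by 7, and adds b; the rotating argument order (a,b,c,d; then d,a,b,c; and so on) updates each state word in turn.
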
halfmd4.c
17 #define ROUND(f, a, b, c, d, x, s) \ argument
18 (a += f(b, c, d) + x, a = rol32(a, s))
28 __u32 a = buf[0], b = buf[1], c = buf[2], d = buf[3]; in half_md4_transform() local
31 ROUND(F, a, b, c, d, in[0] + K1, 3); in half_md4_transform()
32 ROUND(F, d, a, b, c, in[1] + K1, 7); in half_md4_transform()
33 ROUND(F, c, d, a, b, in[2] + K1, 11); in half_md4_transform()
34 ROUND(F, b, c, d, a, in[3] + K1, 19); in half_md4_transform()
35 ROUND(F, a, b, c, d, in[4] + K1, 3); in half_md4_transform()
36 ROUND(F, d, a, b, c, in[5] + K1, 7); in half_md4_transform()
37 ROUND(F, c, d, a, b, in[6] + K1, 11); in half_md4_transform()
[all …]
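
Given the ROUND macro at lines 17-18, ROUND(F, a, b, c, d, in[0] + K1, 3) expands to a += F(b, c, d) + in[0] + K1; a = rol32(a, 3);. F and K1 are outside the excerpt; assuming the usual MD4 definitions that half_md4_transform() is built on:

#define F(x, y, z) ((z) ^ ((x) & ((y) ^ (z))))  /* MD4 round 1: x selects y or z */
#define K1 0                                    /* MD4 adds no constant in round 1 */
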
rational.c
35 unsigned long n, d, n0, d0, n1, d1; in rational_best_approximation() local
37 d = given_denominator; in rational_best_approximation()
47 if (d == 0) in rational_best_approximation()
49 t = d; in rational_best_approximation()
50 a = n / d; in rational_best_approximation()
51 d = n % d; in rational_best_approximation()
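
The excerpt is one Euclidean step of a continued-fraction walk: a is the next partial quotient, and (n, d) becomes (d, n mod d). The convergents built from those quotients are the best rational approximations. A self-contained sketch of the whole idea, simplified in one respect: the kernel version also weighs a final semi-convergent against the limits, which this sketch skips (best_approx is a hypothetical name):

#include <stdio.h>

static void best_approx(unsigned long n, unsigned long d,
                        unsigned long max_n, unsigned long max_d,
                        unsigned long *best_n, unsigned long *best_d)
{
        unsigned long n0 = 0, d0 = 1;           /* convergent before last */
        unsigned long n1 = 1, d1 = 0;           /* last convergent */

        while (d) {
                unsigned long a = n / d;        /* next partial quotient */
                unsigned long rem = n % d;
                unsigned long n2 = a * n1 + n0; /* standard convergent recurrence */
                unsigned long d2 = a * d1 + d0;

                if (n2 > max_n || d2 > max_d)   /* next convergent overshoots */
                        break;
                n0 = n1; d0 = d1;
                n1 = n2; d1 = d2;
                n = d; d = rem;
        }
        *best_n = n1;
        *best_d = d1;
}

int main(void)
{
        unsigned long bn, bd;

        best_approx(31415926, 10000000, 100, 100, &bn, &bd);
        printf("%lu/%lu\n", bn, bd);            /* 22/7, the classic pi approximation */
        return 0;
}
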
reciprocal_div.c
11 struct reciprocal_value reciprocal_value(u32 d) in reciprocal_value() argument
17 l = fls(d - 1); in reciprocal_value()
18 m = ((1ULL << 32) * ((1ULL << l) - d)); in reciprocal_value()
19 do_div(m, d); in reciprocal_value()
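
Lines 17-19 build a fixed-point multiplier so that later divisions by the same d collapse to a multiply and two shifts. The divide half is not in this file (in the kernel it lives in the reciprocal_div header); this userspace sketch pairs the two in the classic Granlund-Montgomery round-up style, with fls32 as a stand-in for the kernel's fls():

#include <stdint.h>

struct reciprocal_value { uint32_t m; uint8_t sh1, sh2; };

static int fls32(uint32_t x)            /* highest set bit, 1-based; 0 for 0 */
{
        int r = 0;

        while (x) {
                r++;
                x >>= 1;
        }
        return r;
}

static struct reciprocal_value reciprocal_value(uint32_t d)
{
        struct reciprocal_value R;
        int l = fls32(d - 1);           /* ceil(log2(d)) */
        uint64_t m = ((1ULL << 32) * ((1ULL << l) - d)) / d + 1;

        R.m = (uint32_t)m;
        R.sh1 = l > 1 ? 1 : l;
        R.sh2 = l > 0 ? l - 1 : 0;
        return R;
}

static uint32_t reciprocal_divide(uint32_t a, struct reciprocal_value R)
{
        uint32_t t = (uint32_t)(((uint64_t)a * R.m) >> 32);

        return (t + ((a - t) >> R.sh1)) >> R.sh2;
}

For d = 7 this gives m = 0x24924925, sh1 = 1, sh2 = 2, and reciprocal_divide(21, R) returns 3.
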
div64.c
30 uint64_t res, d = 1; in __div64_32() local
43 d = d+d; in __div64_32()
49 res += d; in __div64_32()
52 d >>= 1; in __div64_32()
53 } while (d); in __div64_32()
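
The fragment shows the generic fallback's shape: d tracks the weight of the current quotient bit while the divisor is doubled up under the remainder (d = d+d), then both walk back down, subtracting and accumulating (res += d). The same shift-and-subtract algorithm as a standalone sketch:

#include <stdint.h>

/* Divide *n by base; leave the quotient in *n and return the remainder. */
static uint32_t div64_32(uint64_t *n, uint32_t base)
{
        uint64_t rem = *n, b = base, res = 0, d = 1;
        uint32_t high = rem >> 32;

        if (high >= base) {                     /* knock down the high word first */
                high /= base;
                res = (uint64_t)high << 32;
                rem -= (uint64_t)(high * base) << 32;
        }

        while ((int64_t)b > 0 && b < rem) {     /* align divisor under rem;      */
                b = b + b;                      /* the sign test stops the shift */
                d = d + d;                      /* before it overflows           */
        }

        do {                                    /* subtract on the way back down */
                if (rem >= b) {
                        rem -= b;
                        res += d;
                }
                b >>= 1;
                d >>= 1;
        } while (d);

        *n = res;
        return rem;
}
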
glob.c
57 unsigned char d = *pat++; in glob_match() local
59 switch (d) { in glob_match()
104 d = *pat++; in glob_match()
108 if (c == d) { in glob_match()
109 if (d == '\0') in glob_match()
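
In glob_match(), d is the current pattern byte and c the current string byte; lines 108-109 are the literal tail, where pattern and string must agree byte for byte through the shared NUL. The matcher gets by with a single backtracking point, remembering only the most recent '*'. A trimmed sketch of that shape, supporting just '*' and '?' (character classes and escapes omitted; glob_lite is a hypothetical name):

#include <stdbool.h>
#include <stddef.h>

static bool glob_lite(const char *pat, const char *str)
{
        const char *back_pat = NULL, *back_str = NULL;

        for (;;) {
                unsigned char c = *str++;
                unsigned char d = *pat++;

                switch (d) {
                case '?':                       /* any single character */
                        if (c == '\0')
                                return false;
                        break;
                case '*':                       /* any run of characters */
                        if (*pat == '\0')
                                return true;    /* trailing '*' matches the rest */
                        back_pat = pat;
                        back_str = --str;       /* try matching zero chars first */
                        break;
                default:                        /* literal character */
                        if (c == d) {
                                if (d == '\0')
                                        return true;
                                break;
                        }
                        if (c == '\0' || !back_pat)
                                return false;
                        pat = back_pat;         /* let the last '*' eat one more */
                        str = ++back_str;
                        break;
                }
        }
}
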
nlattr.c
298 int d = nla_len(nla) - size; in nla_memcmp() local
300 if (d == 0) in nla_memcmp()
301 d = memcmp(nla_data(nla), data, size); in nla_memcmp()
303 return d; in nla_memcmp()
317 int d; in nla_strcmp() local
322 d = attrlen - len; in nla_strcmp()
323 if (d == 0) in nla_strcmp()
324 d = memcmp(nla_data(nla), str, len); in nla_strcmp()
326 return d; in nla_strcmp()
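
Both helpers order by length first, content second: d takes the length difference, and only when that is zero does memcmp() decide. The same comparator shape extracted standalone (len_then_bytes_cmp is a hypothetical name):

#include <string.h>

/* <0, 0, >0 like memcmp(): shorter buffers sort first, ties broken bytewise. */
static int len_then_bytes_cmp(const void *a, int alen, const void *b, int blen)
{
        int d = alen - blen;

        if (d == 0)
                d = memcmp(a, b, alen);
        return d;
}
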
bch.c
381 unsigned int i, j, tmp, l, pd = 1, d = syn[0]; in compute_error_locator_polynomial() local
397 if (d) { in compute_error_locator_polynomial()
401 tmp = a_log(bch, d)+n-a_log(bch, pd); in compute_error_locator_polynomial()
413 pd = d; in compute_error_locator_polynomial()
419 d = syn[2*i+2]; in compute_error_locator_polynomial()
421 d ^= gf_mul(bch, elp->c[j], syn[2*i+2-j]); in compute_error_locator_polynomial()
639 unsigned int a, b, c, d, e = 0, f, a2, b2, c2, e4; in find_poly_deg4_roots() local
646 d = gf_div(bch, poly->c[0], e4); in find_poly_deg4_roots()
667 d = a_pow(bch, 2*l)^gf_mul(bch, b, f)^d; in find_poly_deg4_roots()
671 if (d == 0) in find_poly_deg4_roots()
[all …]
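
compute_error_locator_polynomial() is Berlekamp-Massey over GF(2^m): d is the discrepancy between the syndromes and the current polynomial, and whenever it is nonzero the polynomial is corrected using the previous nonzero discrepancy pd. The a_log()/a_pow()/gf_mul() helpers it leans on are log/antilog table lookups; a self-contained miniature of that arithmetic in GF(2^4), assuming the primitive polynomial x^4 + x + 1:

#include <stdint.h>

#define GF_N 15                         /* 2^4 - 1 nonzero elements */

static uint8_t a_pow_tab[GF_N], a_log_tab[16];

static void gf_init(void)
{
        unsigned int v = 1;

        for (unsigned int i = 0; i < GF_N; i++) {
                a_pow_tab[i] = v;       /* alpha^i */
                a_log_tab[v] = i;       /* log base alpha */
                v <<= 1;
                if (v & 0x10)
                        v ^= 0x13;      /* reduce modulo x^4 + x + 1 */
        }
}

static uint8_t gf_mul(uint8_t a, uint8_t b)
{
        return (a && b) ? a_pow_tab[(a_log_tab[a] + a_log_tab[b]) % GF_N] : 0;
}

bch.c does the same with m up to 15; here gf_mul(3, 6) yields 10, i.e. (x+1)(x^2+x) = x^3+x.
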
/lib/lz4/
lz4defs.h
35 #define PUT4(s, d) (A32(d) = A32(s)) argument
36 #define PUT8(s, d) (A64(d) = A64(s)) argument
38 #define LZ4_READ_LITTLEENDIAN_16(d, s, p) \ argument
39 (d = s - A16(p))
52 #define PUT4(s, d) \ argument
53 put_unaligned(get_unaligned((const u32 *) s), (u32 *) d)
54 #define PUT8(s, d) \ argument
55 put_unaligned(get_unaligned((const u64 *) s), (u64 *) d)
57 #define LZ4_READ_LITTLEENDIAN_16(d, s, p) \ argument
58 (d = s - get_unaligned_le16(p))
[all …]
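
The two halves of the #ifdef pick between raw dereference macros (A32/A64, for platforms where unaligned loads are cheap and legal) and the kernel's get_unaligned()/put_unaligned() helpers. In portable C the safe spelling of the second branch is a memcpy() through a local, which compilers collapse to a single load or store on lenient targets; a sketch with hypothetical names:

#include <stdint.h>
#include <string.h>

static inline uint32_t load32(const void *p)
{
        uint32_t v;

        memcpy(&v, p, sizeof(v));       /* defined behavior at any alignment */
        return v;
}

static inline void store32(void *p, uint32_t v)
{
        memcpy(p, &v, sizeof(v));
}

/* Equivalent of the listing's PUT4(s, d): copy 4 possibly-unaligned bytes. */
#define PUT4(s, d) store32((d), load32(s))
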
/lib/mpi/
mpiutil.c
39 a->d = mpi_alloc_limb_space(nlimbs); in mpi_alloc()
40 if (!a->d) { in mpi_alloc()
45 a->d = NULL; in mpi_alloc()
77 mpi_free_limb_space(a->d); in mpi_assign_limb_space()
78 a->d = ap; in mpi_assign_limb_space()
93 if (a->d) { in mpi_resize()
97 memcpy(p, a->d, a->alloced * sizeof(mpi_limb_t)); in mpi_resize()
98 kzfree(a->d); in mpi_resize()
99 a->d = p; in mpi_resize()
101 a->d = kzalloc(nlimbs * sizeof(mpi_limb_t), GFP_KERNEL); in mpi_resize()
[all …]
mpi-internal.h
76 #define MPN_COPY(d, s, n) \ argument
80 (d)[_i] = (s)[_i]; \
83 #define MPN_COPY_INCR(d, s, n) \ argument
87 (d)[_i] = (s)[_i]; \
90 #define MPN_COPY_DECR(d, s, n) \ argument
94 (d)[_i] = (s)[_i]; \
98 #define MPN_ZERO(d, n) \ argument
102 (d)[_i] = 0; \
105 #define MPN_NORMALIZE(d, n) \ argument
108 if ((d)[(n)-1]) \
[all …]
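
The MPN_* macros operate on raw limb arrays (the d member of an MPI, as mpiutil.c above shows). The truncated MPN_NORMALIZE trims zero high limbs so the limb count reflects the true magnitude; the same operation as a standalone sketch (mpn_normalize is a hypothetical name, and mpi_limb_t is assumed to be an unsigned machine word):

typedef unsigned long mpi_limb_t;

/* Shrink *n until d[*n - 1] is a nonzero top limb, or *n reaches 0. */
static void mpn_normalize(const mpi_limb_t *d, unsigned int *n)
{
        while (*n > 0 && d[*n - 1] == 0)
                (*n)--;
}
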
mpi-pow.c
60 rp = res->d; in mpi_powm()
61 ep = exp->d; in mpi_powm()
69 res->nlimbs = (msize == 1 && mod->d[0] == 1) ? 0 : 1; in mpi_powm()
73 rp = res->d; in mpi_powm()
87 mod_shift_cnt = count_leading_zeros(mod->d[msize - 1]); in mpi_powm()
89 mpihelp_lshift(mp, mod->d, msize, mod_shift_cnt); in mpi_powm()
91 MPN_COPY(mp, mod->d, msize); in mpi_powm()
101 MPN_COPY(bp, base->d, bsize); in mpi_powm()
110 bp = base->d; in mpi_powm()
130 rp = res->d; in mpi_powm()
[all …]
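
mpi_powm() computes res = base^exp mod mod by binary square-and-multiply over limb arrays: the modulus is pre-shifted by mod_shift_cnt so its top limb has the high bit set (the division helpers require a normalized divisor), then the exponent limbs at ep are scanned bit by bit, always squaring and multiplying in the base when the bit is set. The same core at machine-word size, using the GCC/Clang __uint128_t extension to keep the 64x64 products exact (this sketch scans from the low bit, the MPI code from the high bit; the result is the same):

#include <stdint.h>

static uint64_t powm64(uint64_t base, uint64_t exp, uint64_t mod)
{
        uint64_t res = 1 % mod;                 /* also handles mod == 1 */

        base %= mod;
        while (exp) {
                if (exp & 1)
                        res = (__uint128_t)res * base % mod;
                base = (__uint128_t)base * base % mod;
                exp >>= 1;
        }
        return res;
}
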
longlong.h
144 #define udiv_qrnnd(q, r, n1, n0, d) \ argument
150 "r" ((USItype)(d)))
162 #define udiv_qrnnd(q, r, n1, n0, d) \ argument
164 (q) = __udiv_qrnnd(&__r, (n1), (n0), (d)); \
287 #define udiv_qrnnd(q, r, nh, nl, d) \ argument
293 "g" ((USItype)(d)))
339 #define udiv_qrnnd(q, r, n1, n0, d) \
341 (q) = __udiv_qrnnd(&__r, (n1), (n0), (d)); \
379 #define sdiv_qrnnd(q, r, n1, n0, d) \ argument
387 : "0" (__xx.__ll), "r" (d)); \
[all …]
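
Each architecture block defines udiv_qrnnd(q, r, n1, n0, d): divide the two-limb value n1:n0 by d, yielding a one-limb quotient and remainder, under the precondition n1 < d so the quotient fits. With 32-bit limbs a portable fallback is plain 64-bit division; a sketch (udiv_qrnnd32 is a hypothetical name), and mpih-div.c below consumes exactly this primitive limb by limb:

#include <stdint.h>

/* Divide (n1:n0) by d; requires n1 < d so the quotient fits in 32 bits. */
static void udiv_qrnnd32(uint32_t *q, uint32_t *r,
                         uint32_t n1, uint32_t n0, uint32_t d)
{
        uint64_t n = ((uint64_t)n1 << 32) | n0;

        *q = (uint32_t)(n / d);
        *r = (uint32_t)(n % d);
}
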
mpi-cmp.c
35 if (u->d[0] == limb) in mpi_cmp_ui()
37 else if (u->d[0] > limb) in mpi_cmp_ui()
63 cmp = mpihelp_cmp(u->d, v->d, usize); in mpi_cmp()
mpih-div.c
77 mpi_limb_t d; in mpihelp_divrem() local
79 d = dp[0]; in mpihelp_divrem()
82 if (n1 >= d) { in mpihelp_divrem()
83 n1 -= d; in mpihelp_divrem()
89 udiv_qrnnd(qp[i], n1, n1, np[i], d); in mpihelp_divrem()
93 udiv_qrnnd(qp[i], n1, n1, 0, d); in mpihelp_divrem()
/lib/842/
842_decompress.c
68 #define beN_to_cpu(d, s) \ argument
69 ((s) == 2 ? be16_to_cpu(get_unaligned((__be16 *)d)) : \
70 (s) == 4 ? be32_to_cpu(get_unaligned((__be32 *)d)) : \
71 (s) == 8 ? be64_to_cpu(get_unaligned((__be64 *)d)) : \
74 static int next_bits(struct sw842_param *p, u64 *d, u8 n);
76 static int __split_next_bits(struct sw842_param *p, u64 *d, u8 n, u8 s) in __split_next_bits() argument
89 ret = next_bits(p, d, s); in __split_next_bits()
92 *d |= tmp << s; in __split_next_bits()
96 static int next_bits(struct sw842_param *p, u64 *d, u8 n) in next_bits() argument
109 return __split_next_bits(p, d, n, 32); in next_bits()
[all …]
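
The 842 bitstream is big-endian, most significant bit first, and next_bits() pulls at most a machine word at a time, so __split_next_bits() fetches a wide field in two pieces, the upper n - s bits into tmp and the lower s bits into *d, then recombines them with *d |= tmp << s. A bit-at-a-time reader with the same MSB-first convention, minus the aligned beN_to_cpu() fast path (hypothetical names):

#include <stddef.h>
#include <stdint.h>

struct bitstream {
        const uint8_t *in;              /* packed MSB-first */
        size_t bit;                     /* absolute bit cursor */
};

/* Read n (<= 64) bits, most significant first. */
static uint64_t get_bits(struct bitstream *b, unsigned int n)
{
        uint64_t v = 0;

        while (n--) {
                size_t i = b->bit++;

                v = (v << 1) | ((b->in[i >> 3] >> (7 - (i & 7))) & 1);
        }
        return v;
}
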
842_compress.c
148 #define replace_hash(p, b, i, d) do { \ argument
149 struct sw842_hlist_node##b *_n = &(p)->node##b[(i)+(d)]; \
151 _n->data = (p)->data##b[d]; \
161 static int add_bits(struct sw842_param *p, u64 d, u8 n);
163 static int __split_add_bits(struct sw842_param *p, u64 d, u8 n, u8 s) in __split_add_bits() argument
170 ret = add_bits(p, d >> s, n - s); in __split_add_bits()
173 return add_bits(p, d & GENMASK_ULL(s - 1, 0), s); in __split_add_bits()
176 static int add_bits(struct sw842_param *p, u64 d, u8 n) in add_bits() argument
182 pr_debug("add %u bits %lx\n", (unsigned char)n, (unsigned long)d); in add_bits()
191 return __split_add_bits(p, d, n, 32); in add_bits()
[all …]
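
__split_add_bits() mirrors the decompressor's split: the upper n - s bits go out first (d >> s), then the lower s bits (d & GENMASK_ULL(s - 1, 0)), preserving MSB-first stream order. The matching bit-at-a-time writer, with out assumed zero-initialized (the real add_bits() packs into a 64-bit buffer and flushes aligned chunks):

#include <stddef.h>
#include <stdint.h>

/* Append the low n (<= 64) bits of d, most significant first. */
static void put_bits(uint8_t *out, size_t *bit, uint64_t d, unsigned int n)
{
        while (n--) {
                size_t i = (*bit)++;

                if ((d >> n) & 1)
                        out[i >> 3] |= 0x80 >> (i & 7);
        }
}
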
