/lib/raid6/ |
D | avx2.c |
     38  int d, z, z0;    in raid6_avx21_gen_syndrome() local
     55  for (z = z0-2; z >= 0; z--) {    in raid6_avx21_gen_syndrome()
     56  asm volatile("prefetchnta %0" : : "m" (dptr[z][d]));    in raid6_avx21_gen_syndrome()
     63  asm volatile("vmovdqa %0,%%ymm6" : : "m" (dptr[z][d]));    in raid6_avx21_gen_syndrome()
     87  int d, z, z0;    in raid6_avx21_xor_syndrome() local
    102  for (z = z0-1 ; z >= start ; z--) {    in raid6_avx21_xor_syndrome()
    108  asm volatile("vmovdqa %0,%%ymm5" :: "m" (dptr[z][d]));    in raid6_avx21_xor_syndrome()
    113  for (z = start-1 ; z >= 0 ; z--) {    in raid6_avx21_xor_syndrome()
    145  int d, z, z0;    in raid6_avx22_gen_syndrome() local
    164  for (z = z0-1; z >= 0; z--) {    in raid6_avx22_gen_syndrome()
    [all …]
|
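All the raid6 files indexed here (avx2.c, sse2.c, avx512.c, sse1.c, mmx.c, and the .uc templates below) share the loop shape visible in these hits: walk the data disks from z0 down to 0, XOR-accumulating the P parity and Horner-evaluating the Q syndrome over GF(2^8) with generator 2 (polynomial 0x11d). A minimal scalar sketch of that computation, assuming the kernel's layout where dptr[disks-2] is P and dptr[disks-1] is Q; gen_syndrome_model() is an illustrative stand-in, not a kernel function:

    #include <stddef.h>
    #include <stdint.h>

    /* multiply one byte by 2 in GF(2^8), polynomial 0x11d */
    static uint8_t gf_mul2(uint8_t b)
    {
    	return (b << 1) ^ ((b & 0x80) ? 0x1d : 0);
    }

    /* scalar model of the SIMD gen_syndrome() loops indexed above */
    void gen_syndrome_model(int disks, size_t bytes, uint8_t **dptr)
    {
    	int z0 = disks - 3;		/* highest data disk */
    	uint8_t *p = dptr[disks - 2];	/* P: XOR parity */
    	uint8_t *q = dptr[disks - 1];	/* Q: Reed-Solomon syndrome */
    	size_t d;
    	int z;

    	for (d = 0; d < bytes; d++) {
    		uint8_t wp = dptr[z0][d];	/* start at disk z0 ... */
    		uint8_t wq = wp;

    		for (z = z0 - 1; z >= 0; z--) {	/* ... walk down, as above */
    			wp ^= dptr[z][d];		/* P accumulates XOR */
    			wq = gf_mul2(wq) ^ dptr[z][d];	/* Q: Horner step */
    		}
    		p[d] = wp;
    		q[d] = wq;
    	}
    }

The SIMD variants compute exactly this, 16/32/64 bytes per iteration, which is why every file repeats the same `for (z = z0-1; z >= 0; z--)` skeleton.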
D | sse2.c |
     40  int d, z, z0;    in raid6_sse21_gen_syndrome() local
     57  for ( z = z0-2 ; z >= 0 ; z-- ) {    in raid6_sse21_gen_syndrome()
     58  asm volatile("prefetchnta %0" : : "m" (dptr[z][d]));    in raid6_sse21_gen_syndrome()
     66  asm volatile("movdqa %0,%%xmm6" : : "m" (dptr[z][d]));    in raid6_sse21_gen_syndrome()
     92  int d, z, z0;    in raid6_sse21_xor_syndrome() local
    107  for ( z = z0-1 ; z >= start ; z-- ) {    in raid6_sse21_xor_syndrome()
    113  asm volatile("movdqa %0,%%xmm5" :: "m" (dptr[z][d]));    in raid6_sse21_xor_syndrome()
    118  for ( z = start-1 ; z >= 0 ; z-- ) {    in raid6_sse21_xor_syndrome()
    150  int d, z, z0;    in raid6_sse22_gen_syndrome() local
    169  for ( z = z0-1 ; z >= 0 ; z-- ) {    in raid6_sse22_gen_syndrome()
    [all …]
|
D | avx512.c |
     48  int d, z, z0;    in raid6_avx5121_gen_syndrome() local
     69  for (z = z0-2; z >= 0; z--) {    in raid6_avx5121_gen_syndrome()
     80  : "m" (dptr[z][d]));    in raid6_avx5121_gen_syndrome()
    106  int d, z, z0;    in raid6_avx5121_xor_syndrome() local
    124  for (z = z0-1 ; z >= start ; z--) {    in raid6_avx5121_xor_syndrome()
    135  : "m" (dptr[z][d]));    in raid6_avx5121_xor_syndrome()
    138  for (z = start-1 ; z >= 0 ; z--) {    in raid6_avx5121_xor_syndrome()
    175  int d, z, z0;    in raid6_avx5122_gen_syndrome() local
    198  for (z = z0-1; z >= 0; z--) {    in raid6_avx5122_gen_syndrome()
    218  : "m" (dptr[z][d]), "m" (dptr[z][d+64]));    in raid6_avx5122_gen_syndrome()
    [all …]
|
D | sse1.c |
     44  int d, z, z0;    in raid6_sse11_gen_syndrome() local
     61  for ( z = z0-2 ; z >= 0 ; z-- ) {    in raid6_sse11_gen_syndrome()
     62  asm volatile("prefetchnta %0" : : "m" (dptr[z][d]));    in raid6_sse11_gen_syndrome()
     70  asm volatile("movq %0,%%mm6" : : "m" (dptr[z][d]));    in raid6_sse11_gen_syndrome()
    103  int d, z, z0;    in raid6_sse12_gen_syndrome() local
    122  for ( z = z0-1 ; z >= 0 ; z-- ) {    in raid6_sse12_gen_syndrome()
    123  asm volatile("prefetchnta %0" : : "m" (dptr[z][d]));    in raid6_sse12_gen_syndrome()
    132  asm volatile("movq %0,%%mm5" : : "m" (dptr[z][d]));    in raid6_sse12_gen_syndrome()
    133  asm volatile("movq %0,%%mm7" : : "m" (dptr[z][d+8]));    in raid6_sse12_gen_syndrome()
|
D | mmx.c |
     39  int d, z, z0;    in raid6_mmx1_gen_syndrome() local
     53  for ( z = z0-1 ; z >= 0 ; z-- ) {    in raid6_mmx1_gen_syndrome()
     54  asm volatile("movq %0,%%mm6" : : "m" (dptr[z][d]));    in raid6_mmx1_gen_syndrome()
     87  int d, z, z0;    in raid6_mmx2_gen_syndrome() local
    104  for ( z = z0-1 ; z >= 0 ; z-- ) {    in raid6_mmx2_gen_syndrome()
    113  asm volatile("movq %0,%%mm5" : : "m" (dptr[z][d]));    in raid6_mmx2_gen_syndrome()
    114  asm volatile("movq %0,%%mm7" : : "m" (dptr[z][d+8]));    in raid6_mmx2_gen_syndrome()
|
D | s390vx.uc |
     48  static inline void AND(int x, int y, int z)
     50  asm volatile ("VN %0,%1,%2" : : "i" (x), "i" (y), "i" (z));
     53  static inline void XOR(int x, int y, int z)
     55  asm volatile ("VX %0,%1,%2" : : "i" (x), "i" (y), "i" (z));
     87  int d, z, z0;
    100  for (z = z0 - 1; z >= 0; z--) {
    105  LOAD_DATA(16,&dptr[z][d]);
    120  int d, z, z0;
    134  for (z = z0 - 1; z >= start; z--) {
    139  LOAD_DATA(16,&dptr[z][d]);
    [all …]
|
D | neon.uc |
     60  int d, z, z0;
     71  for ( z = z0-1 ; z >= 0 ; z-- ) {
     72  wd$$ = vld1q_u8(&dptr[z][d+$$*NSIZE]);
     91  int d, z, z0;
    105  for ( z = z0-1 ; z >= start ; z-- ) {
    106  wd$$ = vld1q_u8(&dptr[z][d+$$*NSIZE]);
    116  for ( z = start-1 ; z >= 3 ; z -= 4 ) {
    124  switch (z) {
|
D | int.uc |
     86  int d, z, z0;
     96  for ( z = z0-1 ; z >= 0 ; z-- ) {
     97  wd$$ = *(unative_t *)&dptr[z][d+$$*NSIZE];
    115  int d, z, z0;
    126  for ( z = z0-1 ; z >= start ; z-- ) {
    127  wd$$ = *(unative_t *)&dptr[z][d+$$*NSIZE];
    136  for ( z = start-1 ; z >= 0 ; z-- ) {
|
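The int.uc hits are the portable word-at-a-time version of the same idea: unative_t packs several GF(2^8) lanes into one machine word, and multiply-by-2 is done lane-wise with masks instead of SIMD instructions. A sketch of that idiom, close to what int.uc expands to (the 64-bit width and the gf_mul2_vec name are illustrative):

    #include <stdint.h>

    typedef uint64_t unative_t;	/* eight GF(2^8) lanes per machine word */

    #define NBYTES(x) ((unative_t)0x0101010101010101ULL * (x))

    /* 0xff in every lane whose top bit was set, 0x00 elsewhere;
     * overflow out of the topmost bit is harmlessly discarded */
    static inline unative_t MASK(unative_t v)
    {
    	v &= NBYTES(0x80);
    	return (v << 1) - (v >> 7);
    }

    /* shift each byte left by one without spilling into the next lane */
    static inline unative_t SHLBYTE(unative_t v)
    {
    	return (v << 1) & NBYTES(0xfe);
    }

    /* lane-wise multiply by 2 in GF(2^8), polynomial 0x11d */
    static inline unative_t gf_mul2_vec(unative_t v)
    {
    	return SHLBYTE(v) ^ (MASK(v) & NBYTES(0x1d));
    }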
D | vpermxor.uc |
     48  int d, z, z0;
     58  for (z = z0-1; z>=0; z--) {
     59  wd$$ = *(unative_t *)&dptr[z][d+$$*NSIZE];
|
D | altivec.uc |
     75  int d, z, z0;
     86  for ( z = z0-1 ; z >= 0 ; z-- ) {
     87  wd$$ = *(unative_t *)&dptr[z][d+$$*NSIZE];
|
/lib/mpi/ |
D | ec.c |
     67  p->z = mpi_new(0);    in mpi_point_init()
     76  mpi_free(p->z); p->z = NULL;    in mpi_point_free_parts()
     85  mpi_set(d->z, s->z);    in point_set()
     94  mpi_resize(p->z, nlimbs);    in point_resize()
     95  p->z->nlimbs = nlimbs;    in point_resize()
    109  mpi_swap_cond(d->z, s->z, swap);    in point_swap_cond()
    683  if (!mpi_cmp_ui(point->z, 0))    in mpi_ec_get_affine()
    693  ec_invm(z1, point->z, ctx); /* z1 = z^(-1) mod p */    in mpi_ec_get_affine()
    726  MPI z;    in mpi_ec_get_affine() local
    728  z = mpi_new(0);    in mpi_ec_get_affine()
    [all …]
|
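The ec.c hits follow the usual projective-coordinate convention: z == 0 marks the point at infinity, and mpi_ec_get_affine() recovers affine coordinates by multiplying with powers of z^(-1) mod p (the ec_invm() call at line 693). A toy sketch of that recovery over a small prime field, using Jacobian coordinates (x, y, z) -> (X/z^2, Y/z^3) as one common choice; the kernel supports several point models, so treat this as illustrative only:

    #include <stdint.h>

    /* b^e mod p; keep p below 2^32 so products fit in 64 bits */
    static uint64_t modpow(uint64_t b, uint64_t e, uint64_t p)
    {
    	uint64_t r = 1;

    	for (b %= p; e; e >>= 1, b = b * b % p)
    		if (e & 1)
    			r = r * b % p;
    	return r;
    }

    /* Jacobian (x, y, z) -> affine; returns 0 at the point at infinity,
     * mirroring the mpi_cmp_ui(point->z, 0) test above */
    static int get_affine(uint64_t *ax, uint64_t *ay,
    		      uint64_t x, uint64_t y, uint64_t z, uint64_t p)
    {
    	uint64_t z1, z2;

    	if (z == 0)
    		return 0;			/* point at infinity */
    	z1 = modpow(z, p - 2, p);		/* z^(-1), like ec_invm() */
    	z2 = z1 * z1 % p;
    	*ax = x % p * z2 % p;			/* X / z^2 */
    	*ay = y % p * z2 % p * z1 % p;		/* Y / z^3 */
    	return 1;
    }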
D | mpicoder.c |
    441  int x, j, z, lzeros, ents;    in mpi_read_raw_from_sgl() local
    502  z = BYTES_PER_MPI_LIMB - nbytes % BYTES_PER_MPI_LIMB;    in mpi_read_raw_from_sgl()
    503  z %= BYTES_PER_MPI_LIMB;    in mpi_read_raw_from_sgl()
    513  if (((z + x + 1) % BYTES_PER_MPI_LIMB) == 0) {    in mpi_read_raw_from_sgl()
    518  z += x;    in mpi_read_raw_from_sgl()
|
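The pair of hits at lines 502-503 is a round-up-to-alignment idiom: z becomes the number of pad bytes needed to fill nbytes out to a whole number of limbs, with the second step folding the already-aligned case back to zero. A self-contained check of the same computation (pad_to_limb is a hypothetical helper, and the limb size is assumed here):

    #include <assert.h>

    #define BYTES_PER_MPI_LIMB 8	/* 64-bit limbs, assumed for the demo */

    static unsigned pad_to_limb(unsigned nbytes)
    {
    	return (BYTES_PER_MPI_LIMB - nbytes % BYTES_PER_MPI_LIMB)
    		% BYTES_PER_MPI_LIMB;
    }

    int main(void)
    {
    	assert(pad_to_limb(0)  == 0);	/* already aligned */
    	assert(pad_to_limb(5)  == 3);
    	assert(pad_to_limb(8)  == 0);	/* the case the z %= ... line folds */
    	assert(pad_to_limb(13) == 3);
    	return 0;
    }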
/lib/zlib_inflate/ |
D | inflate.c |
    785  int zlib_inflateIncomp(z_stream *z)    in zlib_inflateIncomp() argument
    787  struct inflate_state *state = (struct inflate_state *)z->state;    in zlib_inflateIncomp()
    788  Byte *saved_no = z->next_out;    in zlib_inflateIncomp()
    789  uInt saved_ao = z->avail_out;    in zlib_inflateIncomp()
    795  z->avail_out = 0;    in zlib_inflateIncomp()
    796  z->next_out = (unsigned char*)z->next_in + z->avail_in;    in zlib_inflateIncomp()
    798  zlib_updatewindow(z, z->avail_in);    in zlib_inflateIncomp()
    801  z->avail_out = saved_ao;    in zlib_inflateIncomp()
    802  z->next_out = saved_no;    in zlib_inflateIncomp()
    804  z->adler = state->check =    in zlib_inflateIncomp()
    [all …]
|
/lib/ |
D | test_bits.c |
     45  int z, w;    in genmask_input_check_test() local
     52  KUNIT_EXPECT_EQ(test, 0, GENMASK_INPUT_CHECK(z, 0));    in genmask_input_check_test()
     53  KUNIT_EXPECT_EQ(test, 0, GENMASK_INPUT_CHECK(0, z));    in genmask_input_check_test()
     54  KUNIT_EXPECT_EQ(test, 0, GENMASK_INPUT_CHECK(z, w));    in genmask_input_check_test()
|
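These hits pin down the observable contract of GENMASK_INPUT_CHECK(): the bad-range check can only fire for compile-time constants, so with runtime variables like z and w it must evaluate to 0 (otherwise GENMASK() would be unusable with runtime bounds). A toy model of that contract, not the kernel's actual macro from include/linux/bits.h:

    #include <assert.h>

    /* reject h < l only when the comparison is a compile-time constant;
     * otherwise the check must collapse to 0 (toy model, GCC/clang) */
    #define INPUT_CHECK(h, l) \
    	(__builtin_constant_p((h) < (l)) ? ((h) < (l)) : 0)

    int main(void)
    {
    	int z = 5, w = 9;	/* runtime values, like the test's z and w */

    	assert(INPUT_CHECK(z, 0) == 0);
    	assert(INPUT_CHECK(0, z) == 0);	/* true at runtime, still 0 */
    	assert(INPUT_CHECK(z, w) == 0);
    	return 0;
    }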
D | inflate.c |
    351  unsigned z; /* number of entries in current table */    in huft_build() local
    450  z = 0; /* ditto */    in huft_build()
    470  z = (z = g - w) > (unsigned)l ? l : z; /* upper limit on table size */    in huft_build()
    476  if (j < z)    in huft_build()
    477  while (++j < z) /* try smaller tables up to z bits */    in huft_build()
    485  z = 1 << j; /* table entries for j-bit table */    in huft_build()
    488  if ((q = (struct huft *)malloc((z + 1)*sizeof(struct huft))) ==    in huft_build()
    497  hufts += z + 1; /* track memory usage */    in huft_build()
    536  for (j = i >> w; j < z; j += f)    in huft_build()
|
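The hit at line 470 packs a clamped minimum into one expression, K&R-style. Unwound, it is simply this (illustrative rewrite, same semantics):

    /* upper limit on table size: at most g - w bits remain below the
     * current table, and never more than the lookup limit l */
    z = g - w;
    if (z > (unsigned)l)
    	z = l;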
D | bch.c |
    863  const struct gf_poly *f, struct gf_poly *z,    in compute_trace_bk_mod() argument
    870  z->deg = 1;    in compute_trace_bk_mod()
    871  z->c[0] = 0;    in compute_trace_bk_mod()
    872  z->c[1] = bch->a_pow_tab[k];    in compute_trace_bk_mod()
    882  for (j = z->deg; j >= 0; j--) {    in compute_trace_bk_mod()
    883  out->c[j] ^= z->c[j];    in compute_trace_bk_mod()
    884  z->c[2*j] = gf_sqr(bch, z->c[j]);    in compute_trace_bk_mod()
    885  z->c[2*j+1] = 0;    in compute_trace_bk_mod()
    887  if (z->deg > out->deg)    in compute_trace_bk_mod()
    888  out->deg = z->deg;    in compute_trace_bk_mod()
    [all …]
|
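The z->c[2*j] = gf_sqr(...) / z->c[2*j+1] = 0 pair at lines 884-885 relies on the characteristic-2 identity (a + b)^2 = a^2 + b^2: squaring a polynomial over a binary field squares each coefficient and moves it from degree j to degree 2j, with all odd positions vanishing. A minimal demonstration on plain GF(2)[X], bits as coefficients (poly_sqr is an illustrative helper, not from bch.c):

    #include <assert.h>
    #include <stdint.h>

    static uint32_t poly_sqr(uint16_t p)
    {
    	uint32_t r = 0;
    	int j;

    	for (j = 0; j < 16; j++)
    		if (p & (1u << j))
    			r |= 1u << (2 * j);	/* bit j lands at bit 2j */
    	return r;
    }

    int main(void)
    {
    	/* (X^2 + X + 1)^2 = X^4 + X^2 + 1 over GF(2) */
    	assert(poly_sqr(0x7) == 0x15);
    	return 0;
    }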
/lib/crypto/ |
D | curve25519-hacl64.c |
    134  u64 z = output[ctr - 1];    in fmul_shift_reduce() local
    135  output[ctr] = z;    in fmul_shift_reduce()
    139  u64 z = output[ctr - 1];    in fmul_shift_reduce() local
    140  output[ctr] = z;    in fmul_shift_reduce()
    144  u64 z = output[ctr - 1];    in fmul_shift_reduce() local
    145  output[ctr] = z;    in fmul_shift_reduce()
    149  u64 z = output[ctr - 1];    in fmul_shift_reduce() local
    150  output[ctr] = z;    in fmul_shift_reduce()
    298  static __always_inline void crecip_crecip(u64 *out, u64 *z)    in crecip_crecip() argument
    311  fsquare_fsquare_times(a0, z, 1);    in crecip_crecip()
    [all …]
|
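The repeated output[ctr] = output[ctr - 1] hits are an unrolled limb rotation: in the radix-2^51 representation used here, moving every limb up one slot multiplies the field element by 2^51, and the limb that wraps off the top folds back in times 19, since 2^255 = 19 (mod 2^255 - 19). A loop-form sketch under that reading (shift_reduce is a hypothetical name for what the unrolled code does):

    #include <stdint.h>

    #define LIMBS 5	/* five 51-bit limbs */

    static void shift_reduce(uint64_t out[LIMBS])
    {
    	uint64_t top = out[LIMBS - 1];
    	int i;

    	for (i = LIMBS - 1; i > 0; i--)
    		out[i] = out[i - 1];	/* the "output[ctr] = z" hits */
    	out[0] = 19 * top;		/* 2^255 = 19 (mod 2^255 - 19) */
    }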
D | sha256.c |
     22  static inline u32 Ch(u32 x, u32 y, u32 z)    in Ch() argument
     24  return z ^ (x & (y ^ z));    in Ch()
     27  static inline u32 Maj(u32 x, u32 y, u32 z)    in Maj() argument
     29  return (x & y) | (z & (x | y));    in Maj()
|
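Both hits are the SHA-256 choice and majority functions in operation-reduced form: z ^ (x & (y ^ z)) selects y where x has 1-bits and z elsewhere, saving an AND-NOT against the textbook (x & y) | (~x & z), and the Maj form saves one operation against the three-term XOR. A quick self-contained check of both identities:

    #include <assert.h>
    #include <stdint.h>

    static uint32_t Ch(uint32_t x, uint32_t y, uint32_t z)
    {
    	return z ^ (x & (y ^ z));	/* == (x & y) | (~x & z) */
    }

    static uint32_t Maj(uint32_t x, uint32_t y, uint32_t z)
    {
    	return (x & y) | (z & (x | y));	/* == (x&y) ^ (x&z) ^ (y&z) */
    }

    int main(void)
    {
    	uint32_t x = 0xdeadbeef, y = 0x01234567, z = 0x89abcdef;

    	assert(Ch(x, y, z)  == ((x & y) | (~x & z)));
    	assert(Maj(x, y, z) == ((x & y) ^ (x & z) ^ (y & z)));
    	return 0;
    }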
D | aes.c |
    105  u32 z = w & 0x40404040;    in mul_by_x2() local
    108  return (x << 2) ^ (y >> 7) * 0x36 ^ (z >> 6) * 0x1b;    in mul_by_x2()
|
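mul_by_x2() multiplies four packed bytes by x^2 in GF(2^8) with the AES polynomial 0x11b: the low six bits of each byte shift two places in-lane, while bit 7 (x^9 after the shift, which reduces to 0x36) and bit 6 (x^8, which reduces to 0x1b) are folded back by the multiplications at line 108. A byte-by-byte check against repeated xtime(); the masks for x and y are inferred from the two lines shown, so treat them as an assumption:

    #include <assert.h>
    #include <stdint.h>

    /* reference: multiply one byte by x (i.e. by 2), polynomial 0x11b */
    static uint8_t xtime(uint8_t b)
    {
    	return (b << 1) ^ ((b >> 7) * 0x1b);
    }

    /* packed multiply-by-x^2, four bytes per u32 */
    static uint32_t mul_by_x2(uint32_t w)
    {
    	uint32_t x = w & 0x3f3f3f3f;	/* bits that stay in their lane */
    	uint32_t y = w & 0x80808080;	/* bit 7: x^9 -> 0x36 */
    	uint32_t z = w & 0x40404040;	/* bit 6: x^8 -> 0x1b */

    	return (x << 2) ^ (y >> 7) * 0x36 ^ (z >> 6) * 0x1b;
    }

    int main(void)
    {
    	unsigned b;

    	for (b = 0; b < 256; b++)
    		assert(mul_by_x2(b) == (uint32_t)xtime(xtime(b)));
    	return 0;
    }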
D | curve25519-fiat32.c |
    102  static __always_inline u32 cmovznz32(u32 t, u32 z, u32 nz)    in cmovznz32() argument
    105  return (t&nz) | ((~t)&z);    in cmovznz32()
    560  static __always_inline void fe_loose_invert(fe *out, const fe_loose *z)    in fe_loose_invert() argument
    568  fe_sq_tl(&t0, z);    in fe_loose_invert()
    572  fe_mul_tlt(&t1, z, &t1);    in fe_loose_invert()
    610  static __always_inline void fe_invert(fe *out, const fe *z)    in fe_invert() argument
    613  fe_copy_lt(&l, z);    in fe_invert()
|
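cmovznz32() is a constant-time select: t is first smeared into an all-zeros or all-ones mask, and line 105 then picks nz or z with pure bitwise operations, so the choice never becomes a secret-dependent branch. A minimal standalone version; the mask-smearing step before line 105 is inferred from the return expression:

    #include <assert.h>
    #include <stdint.h>

    /* return nz if t is nonzero, z otherwise, without branching on t */
    static uint32_t cmovznz32(uint32_t t, uint32_t z, uint32_t nz)
    {
    	t = -(uint32_t)!!t;	/* 0 -> 0x00000000, nonzero -> 0xffffffff */
    	return (t & nz) | (~t & z);
    }

    int main(void)
    {
    	assert(cmovznz32(0, 1, 2) == 1);
    	assert(cmovznz32(7, 1, 2) == 2);
    	return 0;
    }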