Searched refs: b (Results 1 – 25 of 29), sorted by relevance

/crypto/
md5.c
42 u32 a, b, c, d; in md5_transform() local
45 b = hash[1]; in md5_transform()
49 MD5STEP(F1, a, b, c, d, in[0] + 0xd76aa478, 7); in md5_transform()
50 MD5STEP(F1, d, a, b, c, in[1] + 0xe8c7b756, 12); in md5_transform()
51 MD5STEP(F1, c, d, a, b, in[2] + 0x242070db, 17); in md5_transform()
52 MD5STEP(F1, b, c, d, a, in[3] + 0xc1bdceee, 22); in md5_transform()
53 MD5STEP(F1, a, b, c, d, in[4] + 0xf57c0faf, 7); in md5_transform()
54 MD5STEP(F1, d, a, b, c, in[5] + 0x4787c62a, 12); in md5_transform()
55 MD5STEP(F1, c, d, a, b, in[6] + 0xa8304613, 17); in md5_transform()
56 MD5STEP(F1, b, c, d, a, in[7] + 0xfd469501, 22); in md5_transform()
[all …]
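
The md5.c hits above come from the unrolled md5_transform() rounds; MD5STEP mixes one message word (with its round constant already added) into the state and rotates. A minimal standalone sketch of the round-1 step, with illustrative helper names:

    #include <stdint.h>

    /* Round-1 boolean function: F1(x,y,z) = (x & y) | (~x & z), written
     * in the equivalent mux form used by many implementations. */
    static uint32_t md5_f1(uint32_t x, uint32_t y, uint32_t z)
    {
        return z ^ (x & (y ^ z));
    }

    /* One MD5 step (sketch): mix one message word into the state word *w
     * and rotate.  'in' already has the round constant added, as in the
     * MD5STEP(F1, a, b, c, d, in[0] + 0xd76aa478, 7) calls above. */
    static void md5_step(uint32_t *w, uint32_t x, uint32_t y, uint32_t z,
                         uint32_t in, int s)
    {
        *w += md5_f1(x, y, z) + in;
        *w = (*w << s) | (*w >> (32 - s));
        *w += x;
    }
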
md4.c
63 #define ROUND1(a,b,c,d,k,s) (a = lshift(a + F(b,c,d) + k, s)) argument
64 #define ROUND2(a,b,c,d,k,s) (a = lshift(a + G(b,c,d) + k + (u32)0x5A827999,s)) argument
65 #define ROUND3(a,b,c,d,k,s) (a = lshift(a + H(b,c,d) + k + (u32)0x6ED9EBA1,s)) argument
69 u32 a, b, c, d; in md4_transform() local
72 b = hash[1]; in md4_transform()
76 ROUND1(a, b, c, d, in[0], 3); in md4_transform()
77 ROUND1(d, a, b, c, in[1], 7); in md4_transform()
78 ROUND1(c, d, a, b, in[2], 11); in md4_transform()
79 ROUND1(b, c, d, a, in[3], 19); in md4_transform()
80 ROUND1(a, b, c, d, in[4], 3); in md4_transform()
[all …]
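
ROUND1/ROUND2/ROUND3 in md4.c wrap the MD4 boolean functions in a 32-bit left rotation (lshift). A hedged sketch of the round-1 step, assuming the textbook definition of F; rounds 2 and 3 add the constants 0x5A827999 and 0x6ED9EBA1 shown above before rotating:

    #include <stdint.h>

    /* 32-bit rotate left, the role played by lshift() above. */
    static uint32_t rol32_sketch(uint32_t x, int s)
    {
        return (x << s) | (x >> (32 - s));
    }

    /* MD4 round-1 boolean function F(x,y,z) = (x & y) | (~x & z). */
    static uint32_t md4_f(uint32_t x, uint32_t y, uint32_t z)
    {
        return (x & y) | (~x & z);
    }

    /* One ROUND1 step: a = rol(a + F(b,c,d) + k, s), matching
     * ROUND1(a, b, c, d, in[0], 3) above. */
    static uint32_t md4_round1(uint32_t a, uint32_t b, uint32_t c,
                               uint32_t d, uint32_t k, int s)
    {
        return rol32_sketch(a + md4_f(b, c, d) + k, s);
    }
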
twofish_generic.c
44 #define G2(b) \ argument
45 (ctx->s[1][(b) & 0xFF]) ^ (ctx->s[2][((b) >> 8) & 0xFF]) \
46 ^ (ctx->s[3][((b) >> 16) & 0xFF]) ^ (ctx->s[0][(b) >> 24])
53 #define ENCROUND(n, a, b, c, d) \ argument
54 x = G1 (a); y = G2 (b); \
60 #define DECROUND(n, a, b, c, d) \ argument
61 x = G1 (a); y = G2 (b); \
72 ENCROUND (2 * (n), a, b, c, d); \
73 ENCROUND (2 * (n) + 1, c, d, a, b)
76 DECROUND (2 * (n) + 1, c, d, a, b); \
[all …]
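
G1/G2 above combine four key-dependent S-box lookups, one per byte of the input word, by XOR. A function form of the G2 lookup with a stand-in context type (only the s[][] tables are modelled):

    #include <stdint.h>

    /* Stand-in for the key-dependent S-box tables held in the real
     * twofish context; only the s[][] member is modelled here. */
    struct tf_sketch_ctx {
        uint32_t s[4][256];
    };

    /* Function form of the G2(b) macro above: byte-slice the 32-bit
     * input and XOR the four table lookups together. */
    static uint32_t tf_g2(const struct tf_sketch_ctx *ctx, uint32_t b)
    {
        return ctx->s[1][b & 0xff] ^
               ctx->s[2][(b >> 8) & 0xff] ^
               ctx->s[3][(b >> 16) & 0xff] ^
               ctx->s[0][b >> 24];
    }
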
twofish_common.c
468 #define CALC_S(a, b, c, d, i, w, x, y, z) \ argument
472 (b) ^= exp_to_poly[tmp + (x)]; \
483 #define CALC_SB_2(i, a, b) \ argument
485 ctx->s[1][i] = mds[1][q0[(b) ^ sb] ^ sf]; \
487 ctx->s[3][i] = mds[3][q1[(b) ^ sd] ^ sh]
491 #define CALC_SB192_2(i, a, b) \ argument
492 ctx->s[0][i] = mds[0][q0[q0[(b) ^ sa] ^ se] ^ si]; \
493 ctx->s[1][i] = mds[1][q0[q1[(b) ^ sb] ^ sf] ^ sj]; \
499 #define CALC_SB256_2(i, a, b) \ argument
500 ctx->s[0][i] = mds[0][q0[q0[q1[(b) ^ sa] ^ se] ^ si] ^ sm]; \
[all …]
sha512_generic.c
101 u64 a, b, c, d, e, f, g, h, t1, t2; in sha512_transform() local
107 a=state[0]; b=state[1]; c=state[2]; d=state[3]; in sha512_transform()
127 t2 = e0(a) + Maj(a,b,c); d+=t1; h=t1+t2; in sha512_transform()
129 t2 = e0(h) + Maj(h,a,b); c+=t1; g=t1+t2; in sha512_transform()
131 t2 = e0(g) + Maj(g,h,a); b+=t1; f=t1+t2; in sha512_transform()
132 t1 = e + e1(b) + Ch(b,c,d) + sha512_K[i+3] + W[(i & 15) + 3]; in sha512_transform()
134 t1 = d + e1(a) + Ch(a,b,c) + sha512_K[i+4] + W[(i & 15) + 4]; in sha512_transform()
136 t1 = c + e1(h) + Ch(h,a,b) + sha512_K[i+5] + W[(i & 15) + 5]; in sha512_transform()
138 t1 = b + e1(g) + Ch(g,h,a) + sha512_K[i+6] + W[(i & 15) + 6]; in sha512_transform()
139 t2 = e0(c) + Maj(c,d,e); f+=t1; b=t1+t2; in sha512_transform()
[all …]
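
The e0/e1/Ch/Maj helpers in the unrolled sha512_transform() loop are the FIPS 180-4 functions; a hedged sketch with the standard SHA-512 rotation counts (helper names are illustrative):

    #include <stdint.h>

    static uint64_t ror64_sketch(uint64_t x, int s)
    {
        return (x >> s) | (x << (64 - s));
    }

    /* Ch(x,y,z): choose bits from y or z depending on the bits of x. */
    static uint64_t sha512_ch(uint64_t x, uint64_t y, uint64_t z)
    {
        return z ^ (x & (y ^ z));
    }

    /* Maj(x,y,z): bitwise majority of the three inputs. */
    static uint64_t sha512_maj(uint64_t x, uint64_t y, uint64_t z)
    {
        return (x & y) | (z & (x | y));
    }

    /* Big sigma functions, e0 and e1 above (FIPS 180-4 rotation counts). */
    static uint64_t sha512_e0(uint64_t x)
    {
        return ror64_sketch(x, 28) ^ ror64_sketch(x, 34) ^ ror64_sketch(x, 39);
    }

    static uint64_t sha512_e1(uint64_t x)
    {
        return ror64_sketch(x, 14) ^ ror64_sketch(x, 18) ^ ror64_sketch(x, 41);
    }
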
sm3_generic.c
39 static inline u32 ff(unsigned int n, u32 a, u32 b, u32 c) in ff() argument
41 return (n < 16) ? (a ^ b ^ c) : ((a & b) | (a & c) | (b & c)); in ff()
78 u32 a, b, c, d, e, f, g, h; in sm3_compress() local
82 b = m[1]; in sm3_compress()
96 tt1 = ff(i, a, b, c) + d + ss2 + *wt; in sm3_compress()
103 c = rol32(b, 9); in sm3_compress()
104 b = a; in sm3_compress()
113 m[1] = b ^ m[1]; in sm3_compress()
121 a = b = c = d = e = f = g = h = ss1 = ss2 = tt1 = tt2 = 0; in sm3_compress()
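
ff() above switches from XOR to bitwise majority after round 16. Its companion gg() is sketched here from the published SM3 specification (an assumption, since it is not part of the hits above): it switches from XOR to a choose function.

    #include <stdint.h>

    /* FF_j: XOR for rounds 0-15, majority afterwards (as in ff() above). */
    static uint32_t sm3_ff(unsigned int n, uint32_t a, uint32_t b, uint32_t c)
    {
        return (n < 16) ? (a ^ b ^ c) : ((a & b) | (a & c) | (b & c));
    }

    /* GG_j: XOR for rounds 0-15, "choose" afterwards (per the SM3 spec). */
    static uint32_t sm3_gg(unsigned int n, uint32_t e, uint32_t f, uint32_t g)
    {
        return (n < 16) ? (e ^ f ^ g) : ((e & f) | (~e & g));
    }
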
gf128mul.c
142 u64 b = be64_to_cpu(x->b); in gf128mul_x8_lle() local
143 u64 _tt = gf128mul_table_le[b & 0xff]; in gf128mul_x8_lle()
145 x->b = cpu_to_be64((b >> 8) | (a << 56)); in gf128mul_x8_lle()
152 u64 b = be64_to_cpu(x->b); in gf128mul_x8_bbe() local
155 x->a = cpu_to_be64((a << 8) | (b >> 56)); in gf128mul_x8_bbe()
156 x->b = cpu_to_be64((b << 8) ^ _tt); in gf128mul_x8_bbe()
162 u64 b = le64_to_cpu(x->b); in gf128mul_x8_ble() local
165 r->a = cpu_to_le64((a << 8) | (b >> 56)); in gf128mul_x8_ble()
166 r->b = cpu_to_le64((b << 8) ^ _tt); in gf128mul_x8_ble()
170 void gf128mul_lle(be128 *r, const be128 *b) in gf128mul_lle() argument
[all …]
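
The gf128mul_x8_* helpers shift the 128-bit field element by one byte and fold the bits that fall off back in through a precomputed reduction table. A hedged single-bit version (multiply by x in the big-endian convention, assuming the usual reduction polynomial x^128 + x^7 + x^2 + x + 1) conveys the idea:

    #include <stdint.h>

    /* Plain C stand-in for the kernel's be128 type: a is the high half. */
    struct u128_sketch {
        uint64_t a, b;
    };

    /* Multiply by x in GF(2^128): shift left one bit and, if a bit fell
     * off the top, XOR in the low byte (0x87) of the reduction
     * polynomial.  The _x8_ variants above do the same for a whole byte
     * at a time via the gf128mul_table_* lookup tables. */
    static void gf128_mul_x(struct u128_sketch *r, const struct u128_sketch *v)
    {
        uint64_t carry = v->a >> 63;

        r->a = (v->a << 1) | (v->b >> 63);
        r->b = (v->b << 1) ^ (carry ? 0x87 : 0);
    }
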
xts.c
146 le128 b; in xts_cts_done() local
151 scatterwalk_map_and_copy(&b, rctx->tail, 0, XTS_BLOCK_SIZE, 0); in xts_cts_done()
152 le128_xor(&b, &rctx->t, &b); in xts_cts_done()
153 scatterwalk_map_and_copy(&b, rctx->tail, 0, XTS_BLOCK_SIZE, 1); in xts_cts_done()
168 le128 b[2]; in xts_cts_final() local
174 scatterwalk_map_and_copy(b, rctx->tail, 0, XTS_BLOCK_SIZE, 0); in xts_cts_final()
175 b[1] = b[0]; in xts_cts_final()
176 scatterwalk_map_and_copy(b, req->src, offset, tail, 0); in xts_cts_final()
178 le128_xor(b, &rctx->t, b); in xts_cts_final()
180 scatterwalk_map_and_copy(b, rctx->tail, 0, XTS_BLOCK_SIZE + tail, 1); in xts_cts_final()
[all …]
blake2b_generic.c
46 #define G(r,i,a,b,c,d) \ argument
48 a = a + b + m[blake2b_sigma[r][2*i+0]]; \
51 b = ror64(b ^ c, 24); \
52 a = a + b + m[blake2b_sigma[r][2*i+1]]; \
55 b = ror64(b ^ c, 63); \
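
For reference, the full G mixing step that the blake2b_generic.c macro unrolls, written as a standalone function with the RFC 7693 rotation counts 32, 24, 16, 63:

    #include <stdint.h>

    static uint64_t ror64_sketch(uint64_t x, int s)
    {
        return (x >> s) | (x << (64 - s));
    }

    /* BLAKE2b G: mix two message words (x, y) into four state words. */
    static void blake2b_g(uint64_t *a, uint64_t *b, uint64_t *c, uint64_t *d,
                          uint64_t x, uint64_t y)
    {
        *a = *a + *b + x;
        *d = ror64_sketch(*d ^ *a, 32);
        *c = *c + *d;
        *b = ror64_sketch(*b ^ *c, 24);
        *a = *a + *b + y;
        *d = ror64_sketch(*d ^ *a, 16);
        *c = *c + *d;
        *b = ror64_sketch(*b ^ *c, 63);
    }
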
ecrdsa_defs.h
58 .b = cp256a_b
91 .b = cp256b_b
128 .b = cp256c_b
177 .b = tc512a_b
222 .b = tc512b_b
sm2.c
39 const char *a, *b; /* The coefficients. For Twisted Edwards member
56 .b = "0x28e9fa9e9d9f5e344d5a9e4bcf6509a7f39789f515ab8f92ddbcbd414d940e93",
66 MPI p, a, b; in sm2_ec_ctx_init() local
72 b = mpi_scanval(ecp->b); in sm2_ec_ctx_init()
73 if (!p || !a || !b) in sm2_ec_ctx_init()
108 mpi_ec_init(ec, ecp->model, ecp->dialect, 0, p, a, b); in sm2_ec_ctx_init()
118 mpi_free(b); in sm2_ec_ctx_init()
286 sm2_z_digest_update(desc, ec->b, pbytes) || in sm2_compute_z_digest()
polyval-generic.c
73 u64 b = get_unaligned((const u64 *)&src[8]); in copy_and_reverse() local
76 put_unaligned(swab64(b), (u64 *)&dst[0]); in copy_and_reverse()
89 be128 a, b; in polyval_mul_non4k() local
93 copy_and_reverse((u8 *)&b, op2); in polyval_mul_non4k()
95 gf128mul_lle(&a, &b); in polyval_mul_non4k()
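
copy_and_reverse() byte-reverses a 16-byte block so POLYVAL's little-endian convention can reuse gf128mul_lle(). A hedged sketch using two 64-bit byte swaps (memcpy and __builtin_bswap64 stand in for the kernel's unaligned and swab64 helpers):

    #include <stdint.h>
    #include <string.h>

    /* Reverse the byte order of a 16-byte block: the byte-swapped high
     * half of src becomes the low half of dst and vice versa. */
    static void copy_and_reverse_sketch(uint8_t dst[16], const uint8_t src[16])
    {
        uint64_t a, b;

        memcpy(&a, &src[0], 8);
        memcpy(&b, &src[8], 8);
        a = __builtin_bswap64(a);   /* compiler builtin, role of swab64() */
        b = __builtin_bswap64(b);
        memcpy(&dst[0], &b, 8);
        memcpy(&dst[8], &a, 8);
    }
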
ecc_curve_defs.h
28 .b = nist_p192_b
54 .b = nist_p256_b
86 .b = nist_p384_b
wp512.c
1012 u32 b, carry; in wp512_update() local
1026 b = ((source[sourcePos] << sourceGap) & 0xff) | in wp512_update()
1028 buffer[bufferPos++] |= (u8)(b >> bufferRem); in wp512_update()
1034 buffer[bufferPos] = b << (8 - bufferRem); in wp512_update()
1040 b = (source[sourcePos] << sourceGap) & 0xff; in wp512_update()
1041 buffer[bufferPos] |= b >> bufferRem; in wp512_update()
1043 b = 0; in wp512_update()
1055 buffer[bufferPos] = b << (8 - bufferRem); in wp512_update()
aegis128-neon-inner.c
181 static uint8x16_t vqtbl1q_u8(uint8x16_t a, uint8x16_t b) in vqtbl1q_u8() argument
188 return vcombine_u8(vtbl2_u8(__a.pair, vget_low_u8(b)), in vqtbl1q_u8()
189 vtbl2_u8(__a.pair, vget_high_u8(b))); in vqtbl1q_u8()
192 static uint8x16_t vqtbx1q_u8(uint8x16_t v, uint8x16_t a, uint8x16_t b) in vqtbx1q_u8() argument
199 return vcombine_u8(vtbx2_u8(vget_low_u8(v), __a.pair, vget_low_u8(b)), in vqtbx1q_u8()
200 vtbx2_u8(vget_high_u8(v), __a.pair, vget_high_u8(b))); in vqtbx1q_u8()
blowfish_common.c
304 #define ROUND(a, b, n) ({ b ^= P[n]; a ^= bf_F(b); }) argument
blowfish_generic.c
34 #define ROUND(a, b, n) ({ b ^= P[n]; a ^= bf_F(b); }) argument
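
The ROUND macro shared by blowfish_common.c and blowfish_generic.c is one half of a Feistel step: XOR a P-array subkey into one half, then feed it through the F function into the other half. A hedged sketch with a stand-in context:

    #include <stdint.h>

    /* Stand-in for the relevant parts of the kernel's Blowfish context. */
    struct bf_sketch_ctx {
        uint32_t p[18];       /* P-array subkeys */
        uint32_t s[4][256];   /* S-boxes */
    };

    /* Blowfish F: combine four S-box lookups with add/xor/add. */
    static uint32_t bf_f_sketch(const struct bf_sketch_ctx *ctx, uint32_t x)
    {
        return ((ctx->s[0][x >> 24] + ctx->s[1][(x >> 16) & 0xff]) ^
                ctx->s[2][(x >> 8) & 0xff]) + ctx->s[3][x & 0xff];
    }

    /* One ROUND(a, b, n): b ^= P[n]; a ^= F(b); as in the macro above. */
    static void bf_round_sketch(const struct bf_sketch_ctx *ctx,
                                uint32_t *a, uint32_t *b, int n)
    {
        *b ^= ctx->p[n];
        *a ^= bf_f_sketch(ctx, *b);
    }
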
adiantum.c
188 u64 x = le64_to_cpu(v1->b); in le128_add()
189 u64 y = le64_to_cpu(v2->b); in le128_add()
191 r->b = cpu_to_le64(x + y); in le128_add()
199 u64 x = le64_to_cpu(v1->b); in le128_sub()
200 u64 y = le64_to_cpu(v2->b); in le128_sub()
202 r->b = cpu_to_le64(x - y); in le128_sub()
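
le128_add()/le128_sub() in adiantum.c treat a 128-bit little-endian value as two 64-bit limbs and detect the carry or borrow by comparing against an operand. A plain-C sketch of the addition (the struct here is a stand-in, not the kernel's le128):

    #include <stdint.h>

    /* Stand-in for a 128-bit value: b is the low 64 bits. */
    struct le128_sketch {
        uint64_t b, a;
    };

    /* r = v1 + v2 (mod 2^128): add the low limbs, then propagate the
     * carry (x + y < x detects 64-bit overflow) into the high limbs. */
    static void le128_add_sketch(struct le128_sketch *r,
                                 const struct le128_sketch *v1,
                                 const struct le128_sketch *v2)
    {
        uint64_t x = v1->b, y = v2->b;

        r->b = x + y;
        r->a = v1->a + v2->a + (x + y < x);
    }
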
ecc.c
378 static uint128_t add_128_128(uint128_t a, uint128_t b) in add_128_128() argument
382 result.m_low = a.m_low + b.m_low; in add_128_128()
383 result.m_high = a.m_high + b.m_high + (result.m_low < a.m_low); in add_128_128()
990 u64 a[ECC_MAX_DIGITS], b[ECC_MAX_DIGITS]; in vli_mod_inv() local
1001 vli_set(b, mod, ndigits); in vli_mod_inv()
1006 while ((cmp_result = vli_cmp(a, b, ndigits)) != 0) { in vli_mod_inv()
1018 } else if (EVEN(b)) { in vli_mod_inv()
1019 vli_rshift1(b, ndigits); in vli_mod_inv()
1028 vli_sub(a, a, b, ndigits); in vli_mod_inv()
1042 vli_sub(b, b, a, ndigits); in vli_mod_inv()
[all …]
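
vli_mod_inv() is the binary extended-Euclidean inverse: it halves whichever of a/b is even, adjusting the cofactors, and subtracts the smaller from the larger until one of them reaches 1. A hedged single-word sketch of the same algorithm, assuming an odd modulus below 2^63 and an input coprime to it (the kernel version does the same with u64 arrays):

    #include <stdint.h>

    /* Modular inverse of a (mod m) by the binary extended Euclidean
     * algorithm.  Assumes 0 < a < m, m odd, m < 2^63 (so x + m cannot
     * overflow) and gcd(a, m) == 1. */
    static uint64_t mod_inv_sketch(uint64_t a, uint64_t m)
    {
        uint64_t u = a, v = m;
        uint64_t x1 = 1, x2 = 0;

        while (u != 1 && v != 1) {
            while ((u & 1) == 0) {              /* u even: halve it */
                u >>= 1;
                x1 = (x1 & 1) ? (x1 + m) >> 1 : x1 >> 1;
            }
            while ((v & 1) == 0) {              /* v even: halve it */
                v >>= 1;
                x2 = (x2 & 1) ? (x2 + m) >> 1 : x2 >> 1;
            }
            if (u >= v) {                       /* subtract smaller from larger */
                u -= v;
                x1 = (x1 >= x2) ? x1 - x2 : x1 + m - x2;
            } else {
                v -= u;
                x2 = (x2 >= x1) ? x2 - x1 : x2 + m - x1;
            }
        }
        return (u == 1) ? x1 % m : x2 % m;
    }
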
algapi.c
976 u8 *b = (a + size); in crypto_inc_byte() local
980 c = *--b + 1; in crypto_inc_byte()
981 *b = c; in crypto_inc_byte()
989 __be32 *b = (__be32 *)(a + size); in crypto_inc() local
993 IS_ALIGNED((unsigned long)b, __alignof__(*b))) in crypto_inc()
995 c = be32_to_cpu(*--b) + 1; in crypto_inc()
996 *b = cpu_to_be32(c); in crypto_inc()
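
crypto_inc() increments a big-endian counter of arbitrary size, using a 32-bit fast path when the buffer is aligned; the byte-wise fallback seen in crypto_inc_byte() looks like this sketch:

    #include <stdint.h>

    /* Increment a big-endian counter of 'size' bytes, propagating the
     * carry from the last byte toward the first. */
    static void be_counter_inc(uint8_t *a, unsigned int size)
    {
        uint8_t *b = a + size;
        uint8_t c;

        for (; size; size--) {
            c = *--b + 1;
            *b = c;
            if (c)
                break;      /* no carry out of this byte: done */
        }
    }
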
serpent_generic.c
24 #define keyiter(a, b, c, d, i, j) \ argument
25 ({ b ^= d; b ^= c; b ^= a; b ^= PHI ^ i; b = rol32(b, 11); k[j] = b; })
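
keyiter() implements the Serpent prekey recurrence w[i] = rol32(w[i-8] ^ w[i-5] ^ w[i-3] ^ w[i-1] ^ PHI ^ i, 11) with PHI = 0x9e3779b9; a hedged sketch of one iteration:

    #include <stdint.h>

    #define SERPENT_PHI 0x9e3779b9UL   /* golden-ratio constant used above */

    static uint32_t rol32_sketch(uint32_t x, int s)
    {
        return (x << s) | (x >> (32 - s));
    }

    /* One prekey word (what one keyiter() invocation computes); w[] is
     * assumed to already hold the preceding words and i >= 8. */
    static uint32_t serpent_prekey_word(const uint32_t *w, unsigned int i)
    {
        return rol32_sketch(w[i - 8] ^ w[i - 5] ^ w[i - 3] ^ w[i - 1] ^
                            (uint32_t)SERPENT_PHI ^ i, 11);
    }
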
lrw.c
57 static inline void lrw_setbit128_bbe(void *b, int bit) in lrw_setbit128_bbe() argument
65 ), b); in lrw_setbit128_bbe()
/crypto/asymmetric_keys/
x509_public_key.c
120 bool b = asymmetric_key_id_same(cert->id, cert->sig->auth_ids[0]); in x509_check_for_self_signed() local
122 if (!a && !b) in x509_check_for_self_signed()
126 if (((a && !b) || (b && !a)) && in x509_check_for_self_signed()
/crypto/async_tx/
async_raid6_recov.c
28 u8 *a, *b, *c; in async_sum_product() local
74 b = page_address(srcs[1]) + src_offs[1]; in async_sum_product()
79 bx = bmul[*b++]; in async_sum_product()
158 struct page *p, *q, *a, *b; in __2data_recov_4() local
175 b = blocks[failb]; in __2data_recov_4()
187 tx = async_sum_product(b, b_off, srcs, src_offs, coef, bytes, submit); in __2data_recov_4()
192 srcs[1] = b; in __2data_recov_4()
async_pq.c
26 #define P(b, d) (b[d-2]) argument
27 #define Q(b, d) (b[d-1]) argument
