/crypto/ |
D | memneq.c |
      68  __crypto_memneq_generic(const void *a, const void *b, size_t size)  in __crypto_memneq_generic() argument
      74  neq |= *(unsigned long *)a ^ *(unsigned long *)b;  in __crypto_memneq_generic()
      77  b += sizeof(unsigned long);  in __crypto_memneq_generic()
      82  neq |= *(unsigned char *)a ^ *(unsigned char *)b;  in __crypto_memneq_generic()
      85  b += 1;  in __crypto_memneq_generic()
      92  static inline unsigned long __crypto_memneq_16(const void *a, const void *b)  in __crypto_memneq_16() argument
      98  neq |= *(unsigned long *)(a) ^ *(unsigned long *)(b);  in __crypto_memneq_16()
     100  neq |= *(unsigned long *)(a+8) ^ *(unsigned long *)(b+8);  in __crypto_memneq_16()
     103  neq |= *(unsigned int *)(a) ^ *(unsigned int *)(b);  in __crypto_memneq_16()
     105  neq |= *(unsigned int *)(a+4) ^ *(unsigned int *)(b+4);  in __crypto_memneq_16()
     [all …]
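The matches above are the kernel's constant-time comparison helper. A minimal byte-wise sketch of the idea, with illustrative names: XOR-accumulate every byte pair so the loop always runs to the end and timing does not depend on where (or whether) the buffers differ. The kernel version additionally processes sizeof(unsigned long) chunks at a time, as the matched lines show; this sketch keeps only the portable tail loop.

#include <stddef.h>

static unsigned long memneq_sketch(const void *a, const void *b, size_t size)
{
	const unsigned char *pa = a, *pb = b;
	unsigned long neq = 0;

	while (size > 0) {
		neq |= *pa++ ^ *pb++;	/* no early exit: timing stays data-independent */
		size--;
	}
	return neq;	/* zero iff the buffers are equal */
}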
|
D | md5.c |
      45  u32 a, b, c, d;  in md5_transform() local
      48  b = hash[1];  in md5_transform()
      52  MD5STEP(F1, a, b, c, d, in[0] + 0xd76aa478, 7);  in md5_transform()
      53  MD5STEP(F1, d, a, b, c, in[1] + 0xe8c7b756, 12);  in md5_transform()
      54  MD5STEP(F1, c, d, a, b, in[2] + 0x242070db, 17);  in md5_transform()
      55  MD5STEP(F1, b, c, d, a, in[3] + 0xc1bdceee, 22);  in md5_transform()
      56  MD5STEP(F1, a, b, c, d, in[4] + 0xf57c0faf, 7);  in md5_transform()
      57  MD5STEP(F1, d, a, b, c, in[5] + 0x4787c62a, 12);  in md5_transform()
      58  MD5STEP(F1, c, d, a, b, in[6] + 0xa8304613, 17);  in md5_transform()
      59  MD5STEP(F1, b, c, d, a, in[7] + 0xfd469501, 22);  in md5_transform()
     [all …]
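A sketch of what each MD5STEP() call above computes, per RFC 1321. The per-step constant (0xd76aa478, ...) is folded into the message-word argument, exactly as the calls do with in[0] + 0xd76aa478; F1 is the round-1 boolean function. Names here are illustrative, not the kernel's.

#include <stdint.h>

static uint32_t rol32_sketch(uint32_t v, int s)
{
	return (v << s) | (v >> (32 - s));
}

static uint32_t f1_sketch(uint32_t x, uint32_t y, uint32_t z)
{
	return z ^ (x & (y ^ z));	/* same value as (x & y) | (~x & z) */
}

/* one step: w = x + rol32(w + F1(x,y,z) + in, s) */
static void md5_step_sketch(uint32_t *w, uint32_t x, uint32_t y, uint32_t z,
			    uint32_t in, int s)
{
	*w = x + rol32_sketch(*w + f1_sketch(x, y, z) + in, s);
}

The rotating argument order in the calls (a,b,c,d then d,a,b,c then c,d,a,b ...) is how the unrolled loop moves each register through the target role without copying values.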
|
D | md4.c |
      63  #define ROUND1(a,b,c,d,k,s) (a = lshift(a + F(b,c,d) + k, s))  argument
      64  #define ROUND2(a,b,c,d,k,s) (a = lshift(a + G(b,c,d) + k + (u32)0x5A827999,s))  argument
      65  #define ROUND3(a,b,c,d,k,s) (a = lshift(a + H(b,c,d) + k + (u32)0x6ED9EBA1,s))  argument
      69  u32 a, b, c, d;  in md4_transform() local
      72  b = hash[1];  in md4_transform()
      76  ROUND1(a, b, c, d, in[0], 3);  in md4_transform()
      77  ROUND1(d, a, b, c, in[1], 7);  in md4_transform()
      78  ROUND1(c, d, a, b, in[2], 11);  in md4_transform()
      79  ROUND1(b, c, d, a, in[3], 19);  in md4_transform()
      80  ROUND1(a, b, c, d, in[4], 3);  in md4_transform()
     [all …]
|
D | twofish_generic.c |
      44  #define G2(b) \  argument
      45  (ctx->s[1][(b) & 0xFF]) ^ (ctx->s[2][((b) >> 8) & 0xFF]) \
      46  ^ (ctx->s[3][((b) >> 16) & 0xFF]) ^ (ctx->s[0][(b) >> 24])
      53  #define ENCROUND(n, a, b, c, d) \  argument
      54  x = G1 (a); y = G2 (b); \
      60  #define DECROUND(n, a, b, c, d) \  argument
      61  x = G1 (a); y = G2 (b); \
      72  ENCROUND (2 * (n), a, b, c, d); \
      73  ENCROUND (2 * (n) + 1, c, d, a, b)
      76  DECROUND (2 * (n) + 1, c, d, a, b); \
     [all …]
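The G2() macro above is a table-driven g-function: split a 32-bit word into four bytes and XOR together one precomputed 32-bit table entry per byte. A self-contained sketch of that lookup pattern, where s[][] stands in for the key-dependent S-box tables the kernel keeps in ctx:

#include <stdint.h>

static uint32_t g2_sketch(const uint32_t s[4][256], uint32_t b)
{
	return s[1][b & 0xff] ^
	       s[2][(b >> 8) & 0xff] ^
	       s[3][(b >> 16) & 0xff] ^
	       s[0][b >> 24];
}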
|
D | tgr192.c |
      34  u64 a, b, c;  member
     400  u64 b = *rb;  in tgr192_round() local
     406  b += sbox4[(c >> 8) & 0xff] ^ sbox3[(c >> 24) & 0xff]  in tgr192_round()
     408  b *= mul;  in tgr192_round()
     411  *rb = b;  in tgr192_round()
     419  u64 b = *rb;  in tgr192_pass() local
     422  tgr192_round(&a, &b, &c, x[0], mul);  in tgr192_pass()
     423  tgr192_round(&b, &c, &a, x[1], mul);  in tgr192_pass()
     424  tgr192_round(&c, &a, &b, x[2], mul);  in tgr192_pass()
     425  tgr192_round(&a, &b, &c, x[3], mul);  in tgr192_pass()
     [all …]
|
D | sha512_generic.c |
     101  u64 a, b, c, d, e, f, g, h, t1, t2;  in sha512_transform() local
     107  a=state[0]; b=state[1]; c=state[2]; d=state[3];  in sha512_transform()
     127  t2 = e0(a) + Maj(a,b,c); d+=t1; h=t1+t2;  in sha512_transform()
     129  t2 = e0(h) + Maj(h,a,b); c+=t1; g=t1+t2;  in sha512_transform()
     131  t2 = e0(g) + Maj(g,h,a); b+=t1; f=t1+t2;  in sha512_transform()
     132  t1 = e + e1(b) + Ch(b,c,d) + sha512_K[i+3] + W[(i & 15) + 3];  in sha512_transform()
     134  t1 = d + e1(a) + Ch(a,b,c) + sha512_K[i+4] + W[(i & 15) + 4];  in sha512_transform()
     136  t1 = c + e1(h) + Ch(h,a,b) + sha512_K[i+5] + W[(i & 15) + 5];  in sha512_transform()
     138  t1 = b + e1(g) + Ch(g,h,a) + sha512_K[i+6] + W[(i & 15) + 6];  in sha512_transform()
     139  t2 = e0(c) + Maj(c,d,e); f+=t1; b=t1+t2;  in sha512_transform()
     [all …]
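A sketch of one SHA-512 round in its canonical rotating form (FIPS 180-4). The kernel loop above is the same computation unrolled: the role of each variable shifts from line to line instead of the values being moved. e0/e1 are the "big sigma" rotation mixes; Ch and Maj are the standard choice and majority functions. Names with a _sk suffix are illustrative.

#include <stdint.h>

static uint64_t ror64_sk(uint64_t v, int s) { return (v >> s) | (v << (64 - s)); }
static uint64_t e0_sk(uint64_t x) { return ror64_sk(x, 28) ^ ror64_sk(x, 34) ^ ror64_sk(x, 39); }
static uint64_t e1_sk(uint64_t x) { return ror64_sk(x, 14) ^ ror64_sk(x, 18) ^ ror64_sk(x, 41); }
static uint64_t ch_sk(uint64_t x, uint64_t y, uint64_t z)  { return (x & y) ^ (~x & z); }
static uint64_t maj_sk(uint64_t x, uint64_t y, uint64_t z) { return (x & y) ^ (x & z) ^ (y & z); }

/* s[] = { a, b, c, d, e, f, g, h }; k and w are the round constant and schedule word */
static void sha512_round_sketch(uint64_t s[8], uint64_t k, uint64_t w)
{
	uint64_t t1 = s[7] + e1_sk(s[4]) + ch_sk(s[4], s[5], s[6]) + k + w;
	uint64_t t2 = e0_sk(s[0]) + maj_sk(s[0], s[1], s[2]);
	int i;

	for (i = 7; i > 0; i--)		/* h = g, g = f, ..., b = a */
		s[i] = s[i - 1];
	s[4] += t1;			/* e = d + t1 */
	s[0] = t1 + t2;			/* a = t1 + t2 */
}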
|
D | twofish_common.c |
     468  #define CALC_S(a, b, c, d, i, w, x, y, z) \  argument
     472  (b) ^= exp_to_poly[tmp + (x)]; \
     483  #define CALC_SB_2(i, a, b) \  argument
     485  ctx->s[1][i] = mds[1][q0[(b) ^ sb] ^ sf]; \
     487  ctx->s[3][i] = mds[3][q1[(b) ^ sd] ^ sh]
     491  #define CALC_SB192_2(i, a, b) \  argument
     492  ctx->s[0][i] = mds[0][q0[q0[(b) ^ sa] ^ se] ^ si]; \
     493  ctx->s[1][i] = mds[1][q0[q1[(b) ^ sb] ^ sf] ^ sj]; \
     499  #define CALC_SB256_2(i, a, b) \  argument
     500  ctx->s[0][i] = mds[0][q0[q0[q1[(b) ^ sa] ^ se] ^ si] ^ sm]; \
     [all …]
|
D | sm3_generic.c |
      39  static inline u32 ff(unsigned int n, u32 a, u32 b, u32 c)  in ff() argument
      41  return (n < 16) ? (a ^ b ^ c) : ((a & b) | (a & c) | (b & c));  in ff()
      78  u32 a, b, c, d, e, f, g, h;  in sm3_compress() local
      82  b = m[1];  in sm3_compress()
      96  tt1 = ff(i, a, b, c) + d + ss2 + *wt;  in sm3_compress()
     103  c = rol32(b, 9);  in sm3_compress()
     104  b = a;  in sm3_compress()
     113  m[1] = b ^ m[1];  in sm3_compress()
     121  a = b = c = d = e = f = g = h = ss1 = ss2 = tt1 = tt2 = 0;  in sm3_compress()
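For context, a sketch of the counterpart of the ff() shown above: SM3's second round-dependent boolean function (GB/T 32905-2016), applied to the e/f/g registers. Both functions switch from plain XOR to a non-linear form after round 16, which is exactly the (n < 16) test visible in ff().

#include <stdint.h>

static uint32_t gg_sketch(unsigned int n, uint32_t e, uint32_t f, uint32_t g)
{
	return (n < 16) ? (e ^ f ^ g) : ((e & f) | (~e & g));
}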
|
D | gf128mul.c |
     142  u64 b = be64_to_cpu(x->b);  in gf128mul_x8_lle() local
     143  u64 _tt = gf128mul_table_le[b & 0xff];  in gf128mul_x8_lle()
     145  x->b = cpu_to_be64((b >> 8) | (a << 56));  in gf128mul_x8_lle()
     152  u64 b = be64_to_cpu(x->b);  in gf128mul_x8_bbe() local
     155  x->a = cpu_to_be64((a << 8) | (b >> 56));  in gf128mul_x8_bbe()
     156  x->b = cpu_to_be64((b << 8) ^ _tt);  in gf128mul_x8_bbe()
     162  u64 b = le64_to_cpu(x->b);  in gf128mul_x8_ble() local
     165  r->a = cpu_to_le64((a << 8) | (b >> 56));  in gf128mul_x8_ble()
     166  r->b = cpu_to_le64((b << 8) ^ _tt);  in gf128mul_x8_ble()
     170  void gf128mul_lle(be128 *r, const be128 *b)  in gf128mul_lle() argument
     [all …]
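A sketch of the "multiply by x^8" shift visible in gf128mul_x8_bbe() above: shift the 128-bit value left by one byte across its two 64-bit halves, and fold the byte that falls off the top back in through a 256-entry reduction table. Here a is the high half and b the low half, matching the shifts shown; table_be[] is a placeholder for the kernel's precomputed table, and byte-order conversions are omitted.

#include <stdint.h>

struct u128_sketch { uint64_t a, b; };	/* a = high 64 bits, b = low 64 bits */

static void x8_bbe_sketch(struct u128_sketch *x, const uint16_t table_be[256])
{
	uint64_t tt = table_be[x->a >> 56];	/* reduction of the shifted-out byte */

	x->a = (x->a << 8) | (x->b >> 56);
	x->b = (x->b << 8) ^ tt;
}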
|
D | xts.c |
     150  le128 b;  in cts_done() local
     155  scatterwalk_map_and_copy(&b, rctx->tail, 0, XTS_BLOCK_SIZE, 0);  in cts_done()
     156  le128_xor(&b, &rctx->t, &b);  in cts_done()
     157  scatterwalk_map_and_copy(&b, rctx->tail, 0, XTS_BLOCK_SIZE, 1);  in cts_done()
     171  le128 b[2];  in cts_final() local
     177  scatterwalk_map_and_copy(b, rctx->tail, 0, XTS_BLOCK_SIZE, 0);  in cts_final()
     178  memcpy(b + 1, b, tail);  in cts_final()
     179  scatterwalk_map_and_copy(b, req->src, offset, tail, 0);  in cts_final()
     181  le128_xor(b, &rctx->t, b);  in cts_final()
     183  scatterwalk_map_and_copy(b, rctx->tail, 0, XTS_BLOCK_SIZE + tail, 1);  in cts_final()
     [all …]
|
D | ecrdsa_defs.h |
      58  .b = cp256a_b
      91  .b = cp256b_b
     128  .b = cp256c_b
     177  .b = tc512a_b
     222  .b = tc512b_b
|
D | wp512.c |
    1012  u32 b, carry;  in wp512_update() local
    1026  b = ((source[sourcePos] << sourceGap) & 0xff) |  in wp512_update()
    1028  buffer[bufferPos++] |= (u8)(b >> bufferRem);  in wp512_update()
    1034  buffer[bufferPos] = b << (8 - bufferRem);  in wp512_update()
    1040  b = (source[sourcePos] << sourceGap) & 0xff;  in wp512_update()
    1041  buffer[bufferPos] |= b >> bufferRem;  in wp512_update()
    1043  b = 0;  in wp512_update()
    1055  buffer[bufferPos] = b << (8 - bufferRem);  in wp512_update()
|
D | ecc_curve_defs.h |
      28  .b = nist_p192_b
      54  .b = nist_p256_b
|
D | blowfish_common.c |
     304  #define ROUND(a, b, n) ({ b ^= P[n]; a ^= bf_F(b); })  argument
|
D | blowfish_generic.c |
      34  #define ROUND(a, b, n) ({ b ^= P[n]; a ^= bf_F(b); })  argument
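The ROUND() macro (identical in both Blowfish files above) is one Feistel half-round: whiten one half with a subkey, run it through the F function, and XOR the result into the other half. A self-contained sketch, where S[][] and P[] stand in for the key-dependent tables the kernel keeps in its context; the F function shown is Blowfish's standard one.

#include <stdint.h>

static uint32_t bf_f_sketch(const uint32_t S[4][256], uint32_t x)
{
	return ((S[0][x >> 24] + S[1][(x >> 16) & 0xff])
		^ S[2][(x >> 8) & 0xff]) + S[3][x & 0xff];
}

static void bf_round_sketch(const uint32_t S[4][256], const uint32_t P[18],
			    uint32_t *a, uint32_t *b, int n)
{
	*b ^= P[n];		/* subkey whitening */
	*a ^= bf_f_sketch(S, *b);	/* Feistel mix into the other half */
}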
|
D | sm4_generic.c |
      72  u8 *b = (u8 *)&x;  in sm4_t_non_lin_sub() local
      75  b[i] = sbox[b[i]];  in sm4_t_non_lin_sub()
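The two lines above substitute each byte of a 32-bit word through SM4's fixed S-box by aliasing the word as four bytes. A shift-based sketch of the same byte-wise substitution (the result is identical, since each byte is replaced independently); sbox[] is a placeholder for SM4's 256-byte table.

#include <stdint.h>

static uint32_t t_non_lin_sub_sketch(const uint8_t sbox[256], uint32_t x)
{
	return ((uint32_t)sbox[x >> 24] << 24) |
	       ((uint32_t)sbox[(x >> 16) & 0xff] << 16) |
	       ((uint32_t)sbox[(x >> 8) & 0xff] << 8) |
	       sbox[x & 0xff];
}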
|
D | adiantum.c |
     196  u64 x = le64_to_cpu(v1->b);  in le128_add()
     197  u64 y = le64_to_cpu(v2->b);  in le128_add()
     199  r->b = cpu_to_le64(x + y);  in le128_add()
     207  u64 x = le64_to_cpu(v1->b);  in le128_sub()
     208  u64 y = le64_to_cpu(v2->b);  in le128_sub()
     210  r->b = cpu_to_le64(x - y);  in le128_sub()
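The matches show the low-half (->b) arithmetic of a 128-bit add built from two 64-bit halves. A sketch of the complete addition: add the low words, then propagate the carry, detected by an unsigned-overflow comparison, into the high words. Byte-order conversion is omitted; the a = high, b = low naming follows the snippet.

#include <stdint.h>

struct le128_sketch { uint64_t a, b; };	/* a = high half, b = low half */

static void le128_add_sketch(struct le128_sketch *r,
			     const struct le128_sketch *v1,
			     const struct le128_sketch *v2)
{
	uint64_t lo = v1->b + v2->b;

	r->b = lo;
	/* carry out of the low half iff the sum wrapped below an addend */
	r->a = v1->a + v2->a + (lo < v1->b);
}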
|
D | ecc.c |
     367  static uint128_t add_128_128(uint128_t a, uint128_t b)  in add_128_128() argument
     371  result.m_low = a.m_low + b.m_low;  in add_128_128()
     372  result.m_high = a.m_high + b.m_high + (result.m_low < a.m_low);  in add_128_128()
     861  u64 a[ECC_MAX_DIGITS], b[ECC_MAX_DIGITS];  in vli_mod_inv() local
     872  vli_set(b, mod, ndigits);  in vli_mod_inv()
     877  while ((cmp_result = vli_cmp(a, b, ndigits)) != 0) {  in vli_mod_inv()
     889  } else if (EVEN(b)) {  in vli_mod_inv()
     890  vli_rshift1(b, ndigits);  in vli_mod_inv()
     899  vli_sub(a, a, b, ndigits);  in vli_mod_inv()
     913  vli_sub(b, b, a, ndigits);  in vli_mod_inv()
     [all …]
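The add_128_128() lines above show the classic carry trick: after adding the low words, (result.m_low < a.m_low) is 1 exactly when the low addition wrapped. A self-contained sketch with a small test; the struct mirrors the two-u64 uint128_t the snippet operates on.

#include <stdint.h>
#include <assert.h>

typedef struct { uint64_t m_low, m_high; } uint128_sketch;

static uint128_sketch add_128_128_sketch(uint128_sketch a, uint128_sketch b)
{
	uint128_sketch result;

	result.m_low = a.m_low + b.m_low;
	/* unsigned wrap-around in the low word means carry into the high word */
	result.m_high = a.m_high + b.m_high + (result.m_low < a.m_low);
	return result;
}

int main(void)
{
	/* 0xFFFF...FFFF + 1 = 2^64: low word 0, high word 1 */
	uint128_sketch a = { ~0ULL, 0 }, b = { 1, 0 };
	uint128_sketch r = add_128_128_sketch(a, b);

	assert(r.m_low == 0 && r.m_high == 1);
	return 0;
}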
|
D | algapi.c |
     938  u8 *b = (a + size);  in crypto_inc_byte() local
     942  c = *--b + 1;  in crypto_inc_byte()
     943  *b = c;  in crypto_inc_byte()
     951  __be32 *b = (__be32 *)(a + size);  in crypto_inc() local
     955  IS_ALIGNED((unsigned long)b, __alignof__(*b)))  in crypto_inc()
     957  c = be32_to_cpu(*--b) + 1;  in crypto_inc()
     958  *b = cpu_to_be32(c);  in crypto_inc()
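A byte-wise sketch of the big-endian counter increment shown in crypto_inc_byte(): walk from the last (least significant) byte toward the front, and keep carrying while an incremented byte wraps to zero. The crypto_inc() matches show the kernel's fast path, which does the same thing a 32-bit word at a time when the buffer is aligned.

#include <stdint.h>

static void ctr_inc_sketch(uint8_t *a, unsigned int size)
{
	uint8_t *b = a + size;

	for (; size; size--) {
		uint8_t c = *--b + 1;

		*b = c;
		if (c)		/* no wrap: the carry stops here */
			break;
	}
}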
|
D | ecc.h |
      70  u64 *b;  member
|
D | lrw.c |
      57  static inline void setbit128_bbe(void *b, int bit)  in setbit128_bbe() argument
      65  ), b);  in setbit128_bbe()
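A portable sketch of what setbit128_bbe() achieves, under the assumption that bit 0 is the least significant bit of a 128-bit value stored as 16 big-endian bytes: the kernel version shown gets the same effect by XOR-adjusting the bit index so that a native __set_bit() lands on the right byte regardless of host endianness.

#include <stdint.h>

static void setbit128_bbe_sketch(uint8_t *b, int bit)
{
	/* byte 15 holds bits 0..7, byte 14 holds bits 8..15, and so on */
	b[15 - bit / 8] |= 1u << (bit % 8);
}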
|
/crypto/asymmetric_keys/ |
D | x509_public_key.c |
     117  bool b = asymmetric_key_id_same(cert->id, cert->sig->auth_ids[0]);  in x509_check_for_self_signed() local
     119  if (!a && !b)  in x509_check_for_self_signed()
     123  if (((a && !b) || (b && !a)) &&  in x509_check_for_self_signed()
|
D | verify_pefile.c |
     204  static int pefile_compare_shdrs(const void *a, const void *b)  in pefile_compare_shdrs() argument
     207  const struct section_header *shdrb = b;  in pefile_compare_shdrs()
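The function above is a comparison callback for the kernel's sort(), with the same contract as C's qsort(): it receives two opaque element pointers and returns a negative, zero, or positive result. A sketch of the pattern; the struct and the field compared here are illustrative assumptions, not the kernel's section_header layout.

#include <stdlib.h>

struct shdr_sketch { unsigned int virtual_address; };

static int compare_shdrs_sketch(const void *a, const void *b)
{
	const struct shdr_sketch *sa = a, *sb = b;

	if (sa->virtual_address < sb->virtual_address)
		return -1;
	if (sa->virtual_address > sb->virtual_address)
		return 1;
	return 0;
}

/* usage: qsort(shdrs, nr_shdrs, sizeof(*shdrs), compare_shdrs_sketch); */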
|
/crypto/async_tx/ |
D | async_raid6_recov.c |
      27  u8 *a, *b, *c;  in async_sum_product() local
      70  b = page_address(srcs[1]);  in async_sum_product()
      75  bx = bmul[*b++];  in async_sum_product()
     150  struct page *p, *q, *a, *b;  in __2data_recov_4() local
     162  b = blocks[failb];  in __2data_recov_4()
     171  tx = async_sum_product(b, srcs, coef, bytes, submit);  in __2data_recov_4()
     175  srcs[1] = b;  in __2data_recov_4()
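The bmul[] lookup above is a precomputed row of GF(2^8) products: one 256-entry table per coefficient, so each data byte costs a single lookup. A sketch of the multiplication those tables encode, using RAID-6's field polynomial x^8 + x^4 + x^3 + x^2 + 1 (0x11d) and the shift-and-add (Russian peasant) method:

#include <stdint.h>

static uint8_t gf256_mul_sketch(uint8_t a, uint8_t b)
{
	uint8_t p = 0;

	while (b) {
		if (b & 1)
			p ^= a;		/* addition in GF(2^8) is XOR */
		b >>= 1;
		/* multiply a by x, reducing modulo 0x11d when bit 7 overflows */
		a = (a << 1) ^ ((a & 0x80) ? 0x1d : 0);
	}
	return p;
}

Building bmul[i] = gf256_mul_sketch(coef, i) for all 256 values of i up front is what lets the recovery loop run table-lookup-only.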
|
D | async_pq.c |
      26  #define P(b, d) (b[d-2])  argument
      27  #define Q(b, d) (b[d-1])  argument
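A minimal sketch of the layout the P()/Q() macros above encode: in a RAID-6 stripe with d member blocks, the data blocks come first and the last two slots hold the P (XOR) and Q (Reed-Solomon) parity blocks. Function names here are illustrative.

#include <stddef.h>

static void *p_block_sketch(void **blocks, int disks) { return blocks[disks - 2]; }
static void *q_block_sketch(void **blocks, int disks) { return blocks[disks - 1]; }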
|