/crypto/ |
D | sm3_generic.c |
      54  static void sm3_expand(u32 *t, u32 *w, u32 *wt)  in sm3_expand() argument
      61  w[i] = get_unaligned_be32((__u32 *)t + i);  in sm3_expand()
      64  tmp = w[i - 16] ^ w[i - 9] ^ rol32(w[i - 3], 15);  in sm3_expand()
      65  w[i] = p1(tmp) ^ (rol32(w[i - 13], 7)) ^ w[i - 6];  in sm3_expand()
      69  wt[i] = w[i] ^ w[i + 4];  in sm3_expand()
      72  static void sm3_compress(u32 *w, u32 *wt, u32 *m)  in sm3_compress() argument
      99  tt2 = gg(i, e, f, g) + h + ss1 + *w;  in sm3_compress()
     100  w++;  in sm3_compress()
     126  unsigned int w[68];  in sm3_transform() local
     129  sm3_expand((u32 *)src, w, wt);  in sm3_transform()
     [all …]
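The sm3_generic.c matches are SM3's message expansion: the 512-bit block is loaded big-endian into w[0..15], words 16..67 are derived through the P1 permutation, and wt[i] caches w[i] ^ w[i+4] for the compression function. A minimal standalone sketch of that expansion, with local rol32/p1/load_be32 helpers standing in for the kernel's rol32, p1 and get_unaligned_be32:

#include <stdint.h>

static inline uint32_t rol32(uint32_t v, unsigned int s) { return (v << s) | (v >> (32 - s)); }
static inline uint32_t p1(uint32_t x) { return x ^ rol32(x, 15) ^ rol32(x, 23); }

static inline uint32_t load_be32(const uint8_t *p)
{
	return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
	       ((uint32_t)p[2] << 8) | p[3];
}

/* Expand one 64-byte block into w[0..67] and wt[0..63] (sketch of sm3_expand). */
static void sm3_expand_sketch(const uint8_t *block, uint32_t w[68], uint32_t wt[64])
{
	int i;

	for (i = 0; i < 16; i++)
		w[i] = load_be32(block + i * 4);

	for (i = 16; i < 68; i++) {
		uint32_t tmp = w[i - 16] ^ w[i - 9] ^ rol32(w[i - 3], 15);

		w[i] = p1(tmp) ^ rol32(w[i - 13], 7) ^ w[i - 6];
	}

	for (i = 0; i < 64; i++)
		wt[i] = w[i] ^ w[i + 4];
}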
|
D | aegis128-neon-inner.c |
      51  uint8x16_t aegis_aes_round(uint8x16_t w)  in aegis_aes_round() argument
      68  w = vqtbl1q_u8(w, vld1q_u8(shift_rows));  in aegis_aes_round()
      72  v = vqtbl4q_u8(vld1q_u8_x4(crypto_aes_sbox), w);  in aegis_aes_round()
      73  v = vqtbx4q_u8(v, vld1q_u8_x4(crypto_aes_sbox + 0x40), w - 0x40);  in aegis_aes_round()
      74  v = vqtbx4q_u8(v, vld1q_u8_x4(crypto_aes_sbox + 0x80), w - 0x80);  in aegis_aes_round()
      75  v = vqtbx4q_u8(v, vld1q_u8_x4(crypto_aes_sbox + 0xc0), w - 0xc0);  in aegis_aes_round()
      77  asm("tbl %0.16b, {v16.16b-v19.16b}, %1.16b" : "=w"(v) : "w"(w));  in aegis_aes_round()
      78  w -= 0x40;  in aegis_aes_round()
      79  asm("tbx %0.16b, {v20.16b-v23.16b}, %1.16b" : "+w"(v) : "w"(w));  in aegis_aes_round()
      80  w -= 0x40;  in aegis_aes_round()
     [all …]
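Lines 72-75 implement a full 256-entry AES S-box lookup as four 64-byte NEON table lookups, offsetting the index by 0x40 each step so that out-of-range lanes fall through to the next tbx. A rough AArch64 intrinsics sketch of that pattern; sbox[] is a zero-filled placeholder for the real 256-byte crypto_aes_sbox table:

#include <arm_neon.h>
#include <stdint.h>

/* Placeholder: the real code indexes the 256-byte AES S-box (crypto_aes_sbox). */
static const uint8_t sbox[256];

/* Substitute all 16 lanes of 'w' through a 256-entry table using tbl/tbx. */
static uint8x16_t sbox_lookup_sketch(uint8x16_t w)
{
	uint8x16_t v;

	/* Indices 0x00..0x3f hit the first 64 entries; other lanes become 0. */
	v = vqtbl4q_u8(vld1q_u8_x4(sbox), w);
	/* Each tbx step covers the next 64 entries; out-of-range lanes keep v. */
	w = vsubq_u8(w, vdupq_n_u8(0x40));
	v = vqtbx4q_u8(v, vld1q_u8_x4(sbox + 0x40), w);
	w = vsubq_u8(w, vdupq_n_u8(0x40));
	v = vqtbx4q_u8(v, vld1q_u8_x4(sbox + 0x80), w);
	w = vsubq_u8(w, vdupq_n_u8(0x40));
	v = vqtbx4q_u8(v, vld1q_u8_x4(sbox + 0xc0), w);

	return v;
}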
|
D | lrw.c |
     147  struct skcipher_walk w;  in lrw_xor_tweak() local
     158  err = skcipher_walk_virt(&w, req, false);  in lrw_xor_tweak()
     162  iv = (__be32 *)w.iv;  in lrw_xor_tweak()
     168  while (w.nbytes) {  in lrw_xor_tweak()
     169  unsigned int avail = w.nbytes;  in lrw_xor_tweak()
     173  wsrc = w.src.virt.addr;  in lrw_xor_tweak()
     174  wdst = w.dst.virt.addr;  in lrw_xor_tweak()
     185  if (second_pass && w.nbytes == w.total) {  in lrw_xor_tweak()
     192  err = skcipher_walk_done(&w, avail);  in lrw_xor_tweak()
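lrw_xor_tweak() iterates over the request with the skcipher_walk API: skcipher_walk_virt() maps the next chunk, w.nbytes/w.src/w.dst describe it, and skcipher_walk_done() is passed the number of bytes left unprocessed. A stripped-down sketch of that walk idiom, XORing every block with one fixed tweak instead of the per-block GF(2^128) tweaks that lrw.c actually computes (kernel context assumed; crypto_xor_cpy() comes from <crypto/algapi.h>, the walk from <crypto/internal/skcipher.h>):

/* Hypothetical helper: XOR every 16-byte block of req with one fixed tweak. */
static int xor_fixed_tweak_sketch(struct skcipher_request *req, const u8 tweak[16])
{
	struct skcipher_walk w;
	int err;

	err = skcipher_walk_virt(&w, req, false);

	while (w.nbytes) {
		unsigned int avail = w.nbytes;
		const u8 *wsrc = w.src.virt.addr;
		u8 *wdst = w.dst.virt.addr;

		while (avail >= 16) {
			crypto_xor_cpy(wdst, wsrc, tweak, 16);
			wsrc += 16;
			wdst += 16;
			avail -= 16;
		}

		/* Report the bytes of this chunk that were not consumed. */
		err = skcipher_walk_done(&w, avail);
	}

	return err;
}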
|
D | xts.c |
      89  struct skcipher_walk w;  in xts_xor_tweak() local
      98  err = skcipher_walk_virt(&w, req, false);  in xts_xor_tweak()
     100  while (w.nbytes) {  in xts_xor_tweak()
     101  unsigned int avail = w.nbytes;  in xts_xor_tweak()
     105  wsrc = w.src.virt.addr;  in xts_xor_tweak()
     106  wdst = w.dst.virt.addr;  in xts_xor_tweak()
     110  w.total - w.nbytes + avail < 2 * XTS_BLOCK_SIZE) {  in xts_xor_tweak()
     119  skcipher_walk_done(&w, avail - bs);  in xts_xor_tweak()
     127  err = skcipher_walk_done(&w, avail);  in xts_xor_tweak()
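Between blocks, xts.c advances the tweak by multiplying it by α in GF(2^128) (the gf128mul_x_ble helper). A self-contained byte-wise version of that doubling, treating the 16-byte tweak as a little-endian polynomial the way XTS (IEEE 1619) defines it:

#include <stdint.h>

/* t <- t * alpha in GF(2^128), little-endian convention as used by XTS. */
static void xts_mul_alpha(uint8_t t[16])
{
	uint8_t carry = t[15] >> 7;	/* bit that falls off the top */
	int i;

	for (i = 15; i > 0; i--)
		t[i] = (uint8_t)((t[i] << 1) | (t[i - 1] >> 7));
	t[0] = (uint8_t)(t[0] << 1);

	if (carry)
		t[0] ^= 0x87;		/* reduce by x^128 + x^7 + x^2 + x + 1 */
}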
|
D | twofish_common.c |
     468  #define CALC_S(a, b, c, d, i, w, x, y, z) \  argument
     471  (a) ^= exp_to_poly[tmp + (w)]; \
     649  CALC_K256 (w, i, q0[i], q1[i], q0[i+1], q1[i+1]);  in __twofish_setkey()
     662  CALC_K192 (w, i, q0[i], q1[i], q0[i+1], q1[i+1]);  in __twofish_setkey()
     675  CALC_K (w, i, q0[i], q1[i], q0[i+1], q1[i+1]);  in __twofish_setkey()
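CALC_S folds key bytes into the key-dependent S-box words by multiplying them with RS-matrix coefficients in GF(2^8); the kernel does each multiply through the precomputed poly_to_exp/exp_to_poly discrete-log tables. An equivalent shift-and-xor multiply, written against the Twofish RS field polynomial x^8 + x^6 + x^3 + x^2 + 1 (0x14D, stated here from the Twofish paper rather than read back out of the kernel tables):

#include <stdint.h>

#define RS_POLY 0x14D	/* x^8 + x^6 + x^3 + x^2 + 1 */

/* Multiply a and b in GF(2^8) modulo RS_POLY (plain shift-and-xor version). */
static uint8_t gf256_mul(uint8_t a, uint8_t b)
{
	uint16_t acc = 0;
	int i;

	/* Carry-less product: degree at most 14, fits in 16 bits. */
	for (i = 0; i < 8; i++) {
		if (b & (1 << i))
			acc ^= (uint16_t)a << i;
	}

	/* Reduce the product modulo the field polynomial. */
	for (i = 14; i >= 8; i--) {
		if (acc & (1 << i))
			acc ^= (uint16_t)RS_POLY << (i - 8);
	}

	return (uint8_t)acc;
}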
|
D | md5.c |
      37  #define MD5STEP(f, w, x, y, z, in, s) \  argument
      38  (w += f(x, y, z) + in, w = (w<<s | w>>(32-s)) + x)
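MD5STEP packs one MD5 operation into an expression: add the round function and the message word plus its additive constant, rotate left, then add the previous chaining word. A small usage sketch showing the first two of the 64 steps with the standard round-1 function and T constants:

#include <stdint.h>

/* Round 1 auxiliary function, the usual AND-saving form of (x&y)|(~x&z). */
#define F1(x, y, z)	((z) ^ ((x) & ((y) ^ (z))))

#define MD5STEP(f, w, x, y, z, in, s) \
	((w) += f(x, y, z) + (in), (w) = ((w) << (s) | (w) >> (32 - (s))) + (x))

static void md5_first_two_steps(uint32_t state[4], const uint32_t in[16])
{
	uint32_t a = state[0], b = state[1], c = state[2], d = state[3];

	/* Steps 1 and 2 of round 1; the constants are the standard MD5 T values. */
	MD5STEP(F1, a, b, c, d, in[0] + 0xd76aa478, 7);
	MD5STEP(F1, d, a, b, c, in[1] + 0xe8c7b756, 12);

	/* ...the real transform runs 64 such steps and then adds a..d back in. */
	state[0] = a; state[1] = b; state[2] = c; state[3] = d;
}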
|
D | twofish_generic.c |
      86  x = get_unaligned_le32(in + (n) * 4) ^ ctx->w[m]
      89  x ^= ctx->w[m]; \
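These two matches are Twofish's whitening: input words are XORed with ctx->w[0..3] before the rounds (the INPACK macro) and with ctx->w[4..7] on the way out (OUTUNPACK). A reduced sketch of just the whitening, using a hypothetical context that holds only the eight whitening subkeys and ignoring the word reordering the full cipher applies around the rounds:

#include <stdint.h>

struct tf_whiten_ctx {
	uint32_t w[8];	/* hypothetical: only the whitening subkeys K0..K7 */
};

static inline uint32_t load_le32(const uint8_t *p)
{
	return (uint32_t)p[0] | ((uint32_t)p[1] << 8) |
	       ((uint32_t)p[2] << 16) | ((uint32_t)p[3] << 24);
}

static inline void store_le32(uint8_t *p, uint32_t v)
{
	p[0] = (uint8_t)v; p[1] = (uint8_t)(v >> 8);
	p[2] = (uint8_t)(v >> 16); p[3] = (uint8_t)(v >> 24);
}

/* Input whitening: x[n] = LE32(in + 4n) ^ w[n], as in the INPACK macro. */
static void input_whiten(const struct tf_whiten_ctx *ctx, const uint8_t in[16], uint32_t x[4])
{
	for (int n = 0; n < 4; n++)
		x[n] = load_le32(in + n * 4) ^ ctx->w[n];
}

/* Output whitening: word n is XORed with w[4 + n], as in the OUTUNPACK macro. */
static void output_whiten(const struct tf_whiten_ctx *ctx, const uint32_t x[4], uint8_t out[16])
{
	for (int n = 0; n < 4; n++)
		store_le32(out + n * 4, x[n] ^ ctx->w[4 + n]);
}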
|
D | ecc.c |
    1547  u64 yy[ECC_MAX_DIGITS], xxx[ECC_MAX_DIGITS], w[ECC_MAX_DIGITS];  in ecc_is_pubkey_valid_partial() local
    1566  vli_mod_mult_fast(w, curve->a, pk->x, curve);  /* a·x */  in ecc_is_pubkey_valid_partial()
    1567  vli_mod_add(w, w, curve->b, curve->p, pk->ndigits);  /* a·x + b */  in ecc_is_pubkey_valid_partial()
    1568  vli_mod_add(w, w, xxx, curve->p, pk->ndigits);  /* x^3 + a·x + b */  in ecc_is_pubkey_valid_partial()
    1569  if (vli_cmp(yy, w, pk->ndigits) != 0)  /* Equation */  in ecc_is_pubkey_valid_partial()
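ecc_is_pubkey_valid_partial() checks that the point satisfies the short Weierstrass equation y² ≡ x³ + a·x + b (mod p), accumulating the right-hand side in w[] with the multi-digit vli_* helpers. A toy single-word version over a small prime (p below 2^32, so the 64-bit intermediate products stay exact) performs the same comparison:

#include <stdbool.h>
#include <stdint.h>

/* Toy short Weierstrass curve y^2 = x^3 + a*x + b over GF(p), small p only. */
struct toy_curve {
	uint64_t p, a, b;
};

static uint64_t mod_mul(uint64_t x, uint64_t y, uint64_t p)
{
	return (x % p) * (y % p) % p;
}

static uint64_t mod_add(uint64_t x, uint64_t y, uint64_t p)
{
	return (x + y) % p;
}

/* Same equation check as the kernel, one word instead of ECC_MAX_DIGITS. */
static bool point_on_curve(const struct toy_curve *c, uint64_t x, uint64_t y)
{
	uint64_t yy = mod_mul(y, y, c->p);			/* y^2 */
	uint64_t xxx = mod_mul(mod_mul(x, x, c->p), x, c->p);	/* x^3 */
	uint64_t w = mod_mul(c->a, x, c->p);			/* a*x */

	w = mod_add(w, c->b, c->p);				/* a*x + b */
	w = mod_add(w, xxx, c->p);				/* x^3 + a*x + b */

	return yy == w;
}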
|
D | Kconfig |
     616  combined with ESSIV the only feasible mode for h/w accelerated
|