/crypto/ |
D | aegis128-core.c |
      67  const u8 *src, unsigned int size);
      69  const u8 *src, unsigned int size);
     135  const u8 *src, unsigned int size)  in crypto_aegis128_ad() argument
     141  while (size >= AEGIS_BLOCK_SIZE) {  in crypto_aegis128_ad()
     144  size -= AEGIS_BLOCK_SIZE;  in crypto_aegis128_ad()
     148  while (size >= AEGIS_BLOCK_SIZE) {  in crypto_aegis128_ad()
     151  size -= AEGIS_BLOCK_SIZE;  in crypto_aegis128_ad()
     158  const u8 *src, unsigned int size)  in crypto_aegis128_encrypt_chunk() argument
     163  while (size >= AEGIS_BLOCK_SIZE) {  in crypto_aegis128_encrypt_chunk()
     179  size -= AEGIS_BLOCK_SIZE;  in crypto_aegis128_encrypt_chunk()
     [all …]
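The aegis128-core.c hits all come from one pattern: associated data and message bytes are consumed one 16-byte block at a time while `size` counts down. A minimal standalone sketch of that loop, with `process_block()` as a hypothetical stand-in for the real AEGIS state update:

    #include <string.h>

    typedef unsigned char u8;

    #define AEGIS_BLOCK_SIZE 16

    /* Hypothetical stand-in for the per-block AEGIS state update. */
    static void process_block(u8 *dst, const u8 *src)
    {
            memcpy(dst, src, AEGIS_BLOCK_SIZE);
    }

    /* The whole-block loop visible in crypto_aegis128_ad() and
     * crypto_aegis128_encrypt_chunk(): one 16-byte block per iteration
     * until fewer than AEGIS_BLOCK_SIZE bytes remain. */
    static void chunk_loop(u8 *dst, const u8 *src, unsigned int size)
    {
            while (size >= AEGIS_BLOCK_SIZE) {
                    process_block(dst, src);
                    dst += AEGIS_BLOCK_SIZE;
                    src += AEGIS_BLOCK_SIZE;
                    size -= AEGIS_BLOCK_SIZE;
            }
            /* a tail of size % AEGIS_BLOCK_SIZE bytes is handled separately */
    }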
|
D | aegis128-neon.c |
      14  unsigned int size);
      16  unsigned int size);
      48  const u8 *src, unsigned int size)  in crypto_aegis128_encrypt_chunk_simd() argument
      51  crypto_aegis128_encrypt_chunk_neon(state, dst, src, size);  in crypto_aegis128_encrypt_chunk_simd()
      56  const u8 *src, unsigned int size)  in crypto_aegis128_decrypt_chunk_simd() argument
      59  crypto_aegis128_decrypt_chunk_neon(state, dst, src, size);  in crypto_aegis128_decrypt_chunk_simd()
|
D | aegis128-neon-inner.c |
     177  unsigned int size)  in crypto_aegis128_encrypt_chunk_neon() argument
     184  while (size >= AEGIS_BLOCK_SIZE) {  in crypto_aegis128_encrypt_chunk_neon()
     191  size -= AEGIS_BLOCK_SIZE;  in crypto_aegis128_encrypt_chunk_neon()
     196  if (size > 0) {  in crypto_aegis128_encrypt_chunk_neon()
     200  memcpy(buf, src, size);  in crypto_aegis128_encrypt_chunk_neon()
     204  memcpy(dst, buf, size);  in crypto_aegis128_encrypt_chunk_neon()
     211  unsigned int size)  in crypto_aegis128_decrypt_chunk_neon() argument
     218  while (size >= AEGIS_BLOCK_SIZE) {  in crypto_aegis128_decrypt_chunk_neon()
     223  size -= AEGIS_BLOCK_SIZE;  in crypto_aegis128_decrypt_chunk_neon()
     228  if (size > 0) {  in crypto_aegis128_decrypt_chunk_neon()
     [all …]
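The `if (size > 0)` branches in the NEON inner functions are the partial-block tail path: the remainder is staged through a zero-padded 16-byte stack buffer so the SIMD code only ever loads and stores whole blocks. A sketch of the same idea, reusing the hypothetical `process_block()` and `AEGIS_BLOCK_SIZE` from the sketch above:

    /* Tail handling as in crypto_aegis128_{en,de}crypt_chunk_neon():
     * pad the sub-block remainder to a full block, process it, and
     * copy back only the `size` valid bytes. */
    static void chunk_tail(u8 *dst, const u8 *src, unsigned int size)
    {
            u8 buf[AEGIS_BLOCK_SIZE] = { 0 };

            if (size == 0 || size >= AEGIS_BLOCK_SIZE)
                    return;                 /* only sub-block tails land here */
            memcpy(buf, src, size);         /* zero padding fills the rest */
            process_block(buf, buf);
            memcpy(dst, buf, size);         /* never write past the real tail */
    }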
|
D | dh_helper.c |
      15  static inline u8 *dh_pack_data(u8 *dst, u8 *end, const void *src, size_t size)  in dh_pack_data() argument
      17  if (!dst || size > end - dst)  in dh_pack_data()
      19  memcpy(dst, src, size);  in dh_pack_data()
      20  return dst + size;  in dh_pack_data()
      23  static inline const u8 *dh_unpack_data(void *dst, const void *src, size_t size)  in dh_unpack_data() argument
      25  memcpy(dst, src, size);  in dh_unpack_data()
      26  return src + size;  in dh_unpack_data()
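The dh_helper.c hits show a small bounds-checked serializer pair. `dh_pack_data()` returns an advanced cursor on success and NULL on overflow; because it also rejects a NULL `dst`, a failure propagates silently through chained calls, so the caller only has to test once at the end. Reconstructed from the visible fragment as a standalone sketch:

    #include <stddef.h>
    #include <string.h>

    typedef unsigned char u8;

    /* Bounds-checked pack: copy `size` bytes and advance the cursor,
     * or return NULL if the write would run past `end` (a NULL cursor
     * from an earlier failure also short-circuits here). */
    static inline u8 *dh_pack_data(u8 *dst, u8 *end, const void *src,
                                   size_t size)
    {
            if (!dst || size > (size_t)(end - dst))
                    return NULL;
            memcpy(dst, src, size);
            return dst + size;
    }

    /* Unpack counterpart: copy out and advance the read cursor. */
    static inline const u8 *dh_unpack_data(void *dst, const void *src,
                                           size_t size)
    {
            memcpy(dst, src, size);
            return (const u8 *)src + size;
    }

Note that the in-tree `return src + size;` on a `const void *` relies on GCC's void-pointer arithmetic extension; the cast above makes the sketch portable C.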
|
D | algif_hash.c |
     132  int offset, size_t size, int flags)  in hash_sendpage() argument
     144  sg_set_page(ctx->sgl.sg, page, size, offset);  in hash_sendpage()
     153  ahash_request_set_crypt(&ctx->req, ctx->sgl.sg, ctx->result, size);  in hash_sendpage()
     180  return err ?: size;  in hash_sendpage()
     328  size_t size)  in hash_sendmsg_nokey() argument
     336  return hash_sendmsg(sock, msg, size);  in hash_sendmsg_nokey()
     340  int offset, size_t size, int flags)  in hash_sendpage_nokey() argument
     348  return hash_sendpage(sock, page, offset, size, flags);  in hash_sendpage_nokey()
|
D | algif_skcipher.c |
      38  size_t size)  in skcipher_sendmsg() argument
      47  return af_alg_sendmsg(sock, msg, size, ivsize);  in skcipher_sendmsg()
     238  size_t size)  in skcipher_sendmsg_nokey() argument
     246  return skcipher_sendmsg(sock, msg, size);  in skcipher_sendmsg_nokey()
     250  int offset, size_t size, int flags)  in skcipher_sendpage_nokey() argument
     258  return af_alg_sendpage(sock, page, offset, size, flags);  in skcipher_sendpage_nokey()
|
D | algif_aead.c |
      61  static int aead_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)  in aead_sendmsg() argument
      71  return af_alg_sendmsg(sock, msg, size, ivsize);  in aead_sendmsg()
     412  size_t size)  in aead_sendmsg_nokey() argument
     420  return aead_sendmsg(sock, msg, size);  in aead_sendmsg_nokey()
     424  int offset, size_t size, int flags)  in aead_sendpage_nokey() argument
     432  return af_alg_sendpage(sock, page, offset, size, flags);  in aead_sendpage_nokey()
|
D | algapi.c |
     966  static inline void crypto_inc_byte(u8 *a, unsigned int size)  in crypto_inc_byte() argument
     968  u8 *b = (a + size);  in crypto_inc_byte()
     971  for (; size; size--) {  in crypto_inc_byte()
     979  void crypto_inc(u8 *a, unsigned int size)  in crypto_inc() argument
     981  __be32 *b = (__be32 *)(a + size);  in crypto_inc()
     986  for (; size >= 4; size -= 4) {  in crypto_inc()
     993  crypto_inc_byte(a, size);  in crypto_inc()
    1002  int size = sizeof(unsigned long);  in __crypto_xor() local
    1005  (size - 1);  in __crypto_xor()
    1007  relalign = d ? 1 << __ffs(d) : size;  in __crypto_xor()
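The algapi.c hits are the counter-increment and XOR primitives. `crypto_inc()` treats the buffer as a big-endian integer and, when alignment allows, increments 32 bits at a time through `__be32` loads, falling back to `crypto_inc_byte()` for the remainder; the same byte-wise ripple carry appears on its own in fips140-module.c below. A portable sketch of the byte-wise core:

    typedef unsigned char u8;

    /* Big-endian counter increment, as in crypto_inc_byte(): start at
     * the last byte and ripple the carry toward the front, stopping as
     * soon as a byte does not wrap around to zero. */
    static void counter_inc(u8 *a, unsigned int size)
    {
            u8 *b = a + size;

            for (; size; size--) {
                    if (++*--b)     /* no wrap: carry absorbed, done */
                            break;
            }
    }

For example, a 4-byte counter {0x00, 0x00, 0xff, 0xff} becomes {0x00, 0x01, 0x00, 0x00} after one call.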
|
D | af_alg.c |
     830  int af_alg_sendmsg(struct socket *sock, struct msghdr *msg, size_t size,  in af_alg_sendmsg() argument
     885  while (size) {  in af_alg_sendmsg()
     887  size_t len = size;  in af_alg_sendmsg()
     910  size -= len;  in af_alg_sendmsg()
     956  size -= plen;  in af_alg_sendmsg()
     960  if (!size)  in af_alg_sendmsg()
     984  int offset, size_t size, int flags)  in af_alg_sendpage() argument
     999  if (!size)  in af_alg_sendpage()
    1021  sg_set_page(sgl->sg + sgl->cur, page, size, offset);  in af_alg_sendpage()
    1023  ctx->used += size;  in af_alg_sendpage()
     [all …]
|
D | rsa-pkcs1pad.c |
      67  size_t size;  member
     391  digest_size = digest_info->size;  in pkcs1pad_sign()
     413  digest_info->size);  in pkcs1pad_sign()
     476  if (digest_info->size > dst_len - pos)  in pkcs1pad_verify_complete()
     479  digest_info->size))  in pkcs1pad_verify_complete()
     482  pos += digest_info->size;  in pkcs1pad_verify_complete()
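rsa-pkcs1pad.c uses the `size` member of a digest_info table entry both to emit the fixed DigestInfo prefix when signing and to verify it when checking a signature. A standalone sketch of the verify-side check, assuming `pos` already points just past the PKCS#1 v1.5 padding (the names below are illustrative, not the in-tree ones):

    #include <stddef.h>
    #include <string.h>

    typedef unsigned char u8;

    /* Sketch of the prefix check in pkcs1pad_verify_complete(): the
     * decoded message must carry the algorithm's fixed DigestInfo
     * encoding at `pos`, immediately before the raw digest. Returns
     * the digest offset, or -1 on mismatch. Assumes pos <= em_len. */
    static long check_digest_info(const u8 *em, size_t em_len, size_t pos,
                                  const u8 *di, size_t di_size)
    {
            if (di_size > em_len - pos)
                    return -1;              /* no room for the prefix */
            if (memcmp(em + pos, di, di_size))
                    return -1;              /* wrong or missing DigestInfo */
            return (long)(pos + di_size);   /* digest starts here */
    }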
|
D | fips140-module.c |
     596  void crypto_inc(u8 *a, unsigned int size)  in crypto_inc() argument
     598  a += size;  in crypto_inc()
     600  while (size--)  in crypto_inc()
|
D | skcipher.c |
     404  unsigned size;  in skcipher_copy_iv() local
     410  size = alignmask & ~a;  in skcipher_copy_iv()
     413  size += ivsize;  in skcipher_copy_iv()
     415  size += aligned_bs + ivsize;  in skcipher_copy_iv()
     418  size += (bs - 1) & ~(alignmask | a);  in skcipher_copy_iv()
     421  walk->buffer = kmalloc(size, skcipher_walk_gfp(walk));  in skcipher_copy_iv()
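skcipher_copy_iv() computes a worst-case buffer size up front so a single kmalloc() suffices: extra bytes to reach the walk's alignmask beyond what kmalloc() already guarantees, room for the IV (plus an aligned block-size scratch area in the non-PHYS case), after which the returned pointer is rounded up. A simplified userspace analogue of the over-allocate-then-align idea; the kernel version additionally subtracts kmalloc()'s inherent alignment, which this sketch does not:

    #include <stdint.h>
    #include <stdlib.h>

    /* Over-allocate by `alignmask` bytes and round the pointer up;
     * alignmask must be a power of two minus one. *base keeps the
     * original pointer for free(). */
    static void *alloc_aligned(size_t payload, uintptr_t alignmask,
                               void **base)
    {
            unsigned char *buf = malloc(payload + alignmask);

            if (!buf)
                    return NULL;
            *base = buf;
            return (void *)(((uintptr_t)buf + alignmask) & ~alignmask);
    }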
|
D | ansi_cprng.c |
      69  unsigned char *out, unsigned int size)  in xor_vectors() argument
      73  for (i = 0; i < size; i++)  in xor_vectors()
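xor_vectors() is the textbook byte-wise XOR used to combine DT/V/key material in the X9.31 generator. Reconstructed from the visible fragment (the in-tree signature takes non-const input pointers):

    typedef unsigned char u8;

    /* out[i] = in1[i] ^ in2[i] for each of `size` bytes, as in
     * xor_vectors(). */
    static void xor_vectors(const u8 *in1, const u8 *in2, u8 *out,
                            unsigned int size)
    {
            unsigned int i;

            for (i = 0; i < size; i++)
                    out[i] = in1[i] ^ in2[i];
    }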
|
D | Kconfig |
     488  key size 256, 384 or 512 bits. This implementation currently
     700  optimized for 64-bit platforms and can produce digests of any size
     718  optimized for 8- to 32-bit platforms and can produce digests of any size
    1229  size of the AES tables from 16KB to 8KB + 256 bytes and mitigates
    1483  on 32-bit processors. Khazad uses a 128-bit key size.
    1548  It is a 16-round block cipher with a key size of 128 bits.
    1961  - encrypt/decrypt size and numbers of symmetric operations
    1962  - compress/decompress size and numbers of compress operations
    1963  - size and numbers of hash operations
|
D | testmgr.c |
     836  static void flip_random_bit(u8 *buf, size_t size)  in flip_random_bit() argument
     840  bitpos = prandom_u32() % (size * 8);  in flip_random_bit()
     845  static void flip_random_byte(u8 *buf, size_t size)  in flip_random_byte() argument
     847  buf[prandom_u32() % size] ^= 0xff;  in flip_random_byte()
     851  static void mutate_buffer(u8 *buf, size_t size)  in mutate_buffer() argument
     858  num_flips = min_t(size_t, 1 << (prandom_u32() % 8), size * 8);  in mutate_buffer()
     860  flip_random_bit(buf, size);  in mutate_buffer()
     865  num_flips = min_t(size_t, 1 << (prandom_u32() % 8), size);  in mutate_buffer()
     867  flip_random_byte(buf, size);  in mutate_buffer()
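These testmgr.c hits are the self-test fuzzer's mutation helpers: flip one random bit, invert one random byte, and mutate_buffer(), which applies a random number of each, capped at the buffer's bit and byte counts. A userspace sketch of the two primitives, with rand() standing in for the kernel's prandom_u32():

    #include <stddef.h>
    #include <stdlib.h>

    typedef unsigned char u8;

    /* Flip one randomly chosen bit, as in flip_random_bit(). */
    static void flip_random_bit(u8 *buf, size_t size)
    {
            size_t bitpos = (size_t)rand() % (size * 8);

            buf[bitpos / 8] ^= 1 << (bitpos % 8);
    }

    /* Invert one randomly chosen byte, as in flip_random_byte(). */
    static void flip_random_byte(u8 *buf, size_t size)
    {
            buf[(size_t)rand() % size] ^= 0xff;
    }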
|
/crypto/asymmetric_keys/ |
D | verify_pefile.c |
      95  ctx->certs_size = ddir->certs.size;  in pefile_parse_binary()
      97  if (!ddir->certs.virtual_address || !ddir->certs.size) {  in pefile_parse_binary()
     103  ddir->certs.size);  in pefile_parse_binary()
     105  ctx->sig_len = ddir->certs.size;  in pefile_parse_binary()
|
D | asym_tpm.c |
     600  size_t size;  member
     652  asn1_wrapped = kzalloc(in_len + asn1->size, GFP_KERNEL);  in tpm_key_sign()
     657  memcpy(asn1_wrapped, asn1->data, asn1->size);  in tpm_key_sign()
     658  memcpy(asn1_wrapped + asn1->size, in, in_len);  in tpm_key_sign()
     661  in_len += asn1->size;  in tpm_key_sign()
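tpm_key_sign() prepends a fixed ASN.1 DigestInfo blob to the digest before signing: allocate prefix-plus-payload, copy the blob first, copy the digest after it, and grow `in_len` accordingly. A standalone sketch of that pattern, with calloc standing in for kzalloc(..., GFP_KERNEL) and a hypothetical helper name:

    #include <stddef.h>
    #include <stdlib.h>
    #include <string.h>

    typedef unsigned char u8;

    /* Build prefix || payload in one fresh allocation, as tpm_key_sign()
     * does with the DigestInfo blob and the digest. Caller frees. */
    static u8 *prepend_blob(const u8 *prefix, size_t prefix_len,
                            const u8 *payload, size_t payload_len,
                            size_t *out_len)
    {
            u8 *buf = calloc(1, prefix_len + payload_len);

            if (!buf)
                    return NULL;
            memcpy(buf, prefix, prefix_len);
            memcpy(buf + prefix_len, payload, payload_len);
            *out_len = prefix_len + payload_len;
            return buf;
    }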
|