
Searched refs:blocks (Results 1 – 9 of 9) sorted by relevance

/crypto/
aegis128-core.c
31 union aegis_block blocks[AEGIS128_STATE_BLOCKS]; member
80 tmp = state->blocks[AEGIS128_STATE_BLOCKS - 1]; in crypto_aegis128_update()
82 crypto_aegis_aesenc(&state->blocks[i], &state->blocks[i - 1], in crypto_aegis128_update()
83 &state->blocks[i]); in crypto_aegis128_update()
84 crypto_aegis_aesenc(&state->blocks[0], &tmp, &state->blocks[0]); in crypto_aegis128_update()
96 crypto_aegis_block_xor(&state->blocks[0], msg); in crypto_aegis128_update_a()
107 crypto_xor(state->blocks[0].bytes, msg, AEGIS_BLOCK_SIZE); in crypto_aegis128_update_u()
120 state->blocks[0] = key_iv; in crypto_aegis128_init()
121 state->blocks[1] = crypto_aegis_const[1]; in crypto_aegis128_init()
122 state->blocks[2] = crypto_aegis_const[0]; in crypto_aegis128_init()
[all …]
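
The hits above trace the AEGIS-128 state update: the state is an array of AEGIS128_STATE_BLOCKS 16-byte blocks, each round feeds block i-1 through one AES round into block i, the saved last block wraps around into block 0, and the message block is then absorbed into block 0. Below is a minimal userspace sketch of that rotation, assuming a hypothetical XOR-based aesenc_round() as a stand-in for the kernel's crypto_aegis_aesenc(); only the block-rotation structure mirrors the code referenced here.

/* Toy model of the AEGIS-128 state rotation seen in aegis128-core.c. */
#include <stdint.h>
#include <string.h>
#include <stdio.h>

#define BLOCK_SIZE   16
#define STATE_BLOCKS 5

struct block { uint8_t bytes[BLOCK_SIZE]; };

/* Hypothetical stand-in for one AES round: dst = src ^ key. */
static void aesenc_round(struct block *dst, const struct block *src,
			 const struct block *key)
{
	for (int i = 0; i < BLOCK_SIZE; i++)
		dst->bytes[i] = src->bytes[i] ^ key->bytes[i];
}

/*
 * Each update feeds block i-1 into block i, and the saved copy of the
 * last block back into block 0, matching the loop shape in
 * crypto_aegis128_update(); the message is then XORed into block 0 as
 * in crypto_aegis128_update_a()/_u().
 */
static void aegis_update(struct block s[STATE_BLOCKS], const struct block *msg)
{
	struct block tmp = s[STATE_BLOCKS - 1];

	for (int i = STATE_BLOCKS - 1; i > 0; i--)
		aesenc_round(&s[i], &s[i - 1], &s[i]);
	aesenc_round(&s[0], &tmp, &s[0]);

	for (int i = 0; i < BLOCK_SIZE; i++)
		s[0].bytes[i] ^= msg->bytes[i];
}

int main(void)
{
	struct block state[STATE_BLOCKS];
	struct block msg = { { 1, 2, 3 } };

	memset(state, 0, sizeof(state));
	aegis_update(state, &msg);
	printf("state[0][0] = %u\n", (unsigned)state[0].bytes[0]);
	return 0;
}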
sha1_generic.c
32 int blocks) in sha1_generic_block_fn() argument
36 while (blocks--) { in sha1_generic_block_fn()
sm3_generic.c
137 int blocks) in sm3_generic_block_fn() argument
139 while (blocks--) { in sm3_generic_block_fn()
sha512_generic.c
152 int blocks) in sha512_generic_block_fn() argument
154 while (blocks--) { in sha512_generic_block_fn()
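
sha1_generic_block_fn(), sm3_generic_block_fn() and sha512_generic_block_fn(), like vmac.c's vhash_blocks() below, share the same shape: consume `blocks` fixed-size chunks in a while (blocks--) loop, advancing the source pointer by the block size each iteration. A sketch of that pattern, with a hypothetical compress_block() standing in for the real transforms:

#include <stdint.h>
#include <stdio.h>

#define HASH_BLOCK_SIZE 64

struct toy_state { uint32_t h[8]; };

/* Hypothetical compression step: mixes one block into the state. */
static void compress_block(struct toy_state *st, const uint8_t *block)
{
	for (int i = 0; i < HASH_BLOCK_SIZE; i++)
		st->h[i % 8] = (st->h[i % 8] * 31) ^ block[i];
}

/* The while (blocks--) pattern shared by the generic hash drivers. */
static void toy_block_fn(struct toy_state *st, const uint8_t *src, int blocks)
{
	while (blocks--) {
		compress_block(st, src);
		src += HASH_BLOCK_SIZE;
	}
}

int main(void)
{
	struct toy_state st = { { 0 } };
	uint8_t data[2 * HASH_BLOCK_SIZE] = { 'a', 'b', 'c' };

	toy_block_fn(&st, data, 2);
	printf("h[0] = %08x\n", (unsigned)st.h[0]);
	return 0;
}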
vmac.c
400 const __le64 *mptr, unsigned int blocks) in vhash_blocks() argument
415 blocks--; in vhash_blocks()
418 while (blocks--) { in vhash_blocks()
Kconfig
414 stream cipher. It generates keystream blocks, which are then XORed
415 with the plaintext blocks to get the ciphertext. Flipping a bit in the
1316 sixteen blocks parallel using the AVX instruction set.
1340 eight blocks parallel using the AVX instruction set.
1372 one that processes three blocks parallel.
1477 blocks parallel using SSE2 instruction set.
1496 blocks parallel using SSE2 instruction set.
1516 eight blocks parallel using the AVX instruction set.
1532 blocks parallel using AVX2 instruction set.
1648 blocks parallel, utilizing resources of out-of-order CPUs better.
[all …]
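
The Kconfig help text describes the stream-cipher model: keystream blocks are generated and XORed with the plaintext, so applying the same operation again decrypts. A self-contained sketch of that relationship, assuming a hypothetical next_keystream_block() in place of a real cipher such as ChaCha20:

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

#define KS_BLOCK_SIZE 64

/* Hypothetical keystream generator: fills one block from a counter. */
static void next_keystream_block(uint8_t ks[KS_BLOCK_SIZE], uint64_t counter)
{
	for (int i = 0; i < KS_BLOCK_SIZE; i++)
		ks[i] = (uint8_t)(counter * 2654435761u + i);
}

/* Encrypt (or decrypt: XOR is its own inverse) len bytes in place. */
static void stream_xcrypt(uint8_t *buf, size_t len)
{
	uint8_t ks[KS_BLOCK_SIZE];
	uint64_t counter = 0;

	for (size_t off = 0; off < len; off += KS_BLOCK_SIZE) {
		size_t n = len - off < KS_BLOCK_SIZE ? len - off : KS_BLOCK_SIZE;

		next_keystream_block(ks, counter++);
		for (size_t i = 0; i < n; i++)
			buf[off + i] ^= ks[i];
	}
}

int main(void)
{
	uint8_t msg[] = "hello, keystream";

	stream_xcrypt(msg, sizeof(msg) - 1);   /* encrypt */
	stream_xcrypt(msg, sizeof(msg) - 1);   /* decrypt */
	printf("%s\n", (char *)msg);
	return 0;
}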
/crypto/async_tx/
async_pq.c
107 do_sync_gen_syndrome(struct page **blocks, unsigned int offset, int disks, in do_sync_gen_syndrome() argument
117 srcs = (void **) blocks; in do_sync_gen_syndrome()
120 if (blocks[i] == NULL) { in do_sync_gen_syndrome()
124 srcs[i] = page_address(blocks[i]) + offset; in do_sync_gen_syndrome()
163 async_gen_syndrome(struct page **blocks, unsigned int offset, int disks, in async_gen_syndrome() argument
168 &P(blocks, disks), 2, in async_gen_syndrome()
169 blocks, src_cnt, len); in async_gen_syndrome()
173 BUG_ON(disks > MAX_DISKS || !(P(blocks, disks) || Q(blocks, disks))); in async_gen_syndrome()
197 if (blocks[i] == NULL) in async_gen_syndrome()
199 unmap->addr[j] = dma_map_page(device->dev, blocks[i], offset, in async_gen_syndrome()
[all …]
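
do_sync_gen_syndrome() gathers one pointer per block (page_address() + offset, with NULL entries substituted by a zero page) and hands the array to raid6_call.gen_syndrome(). A readable scalar sketch of what that inner routine computes, P as plain XOR and Q over GF(2^8) with generator 2; the function and buffers here are illustrative, not the kernel's implementation:

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

/* Multiply a GF(2^8) element by 2, reducing by the RAID6 polynomial. */
static uint8_t gf_mul2(uint8_t b)
{
	return (uint8_t)((b << 1) ^ ((b & 0x80) ? 0x1d : 0));
}

/* srcs[0..ndata-1] are data blocks; p and q receive the syndromes. */
static void gen_syndrome(int ndata, size_t len, uint8_t **srcs,
			 uint8_t *p, uint8_t *q)
{
	for (size_t off = 0; off < len; off++) {
		uint8_t pv = 0, qv = 0;

		/* Horner's rule over the data disks, highest index first. */
		for (int d = ndata - 1; d >= 0; d--) {
			qv = gf_mul2(qv) ^ srcs[d][off];
			pv ^= srcs[d][off];
		}
		p[off] = pv;
		q[off] = qv;
	}
}

int main(void)
{
	uint8_t d0[8] = { 1, 2, 3, 4 }, d1[8] = { 5, 6, 7, 8 }, d2[8] = { 0 };
	uint8_t *srcs[3] = { d0, d1, d2 };  /* a missing block maps to a zero block */
	uint8_t p[8], q[8];

	gen_syndrome(3, sizeof(p), srcs, p, q);
	printf("P[0]=%u Q[0]=%u\n", (unsigned)p[0], (unsigned)q[0]);
	return 0;
}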
async_raid6_recov.c
147 struct page **blocks, struct async_submit_ctl *submit) in __2data_recov_4() argument
158 p = blocks[disks-2]; in __2data_recov_4()
159 q = blocks[disks-1]; in __2data_recov_4()
161 a = blocks[faila]; in __2data_recov_4()
162 b = blocks[failb]; in __2data_recov_4()
186 struct page **blocks, struct async_submit_ctl *submit) in __2data_recov_5() argument
201 if (blocks[i] == NULL) in __2data_recov_5()
210 p = blocks[disks-2]; in __2data_recov_5()
211 q = blocks[disks-1]; in __2data_recov_5()
212 g = blocks[good]; in __2data_recov_5()
[all …]
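
The recovery helpers rely on a fixed layout: blocks[0..disks-3] hold data, blocks[disks-2] is P and blocks[disks-1] is Q. The dual-failure math in __2data_recov_*() additionally needs Q and GF(2^8) coefficient tables; as a minimal illustration of the layout alone, the sketch below rebuilds a single failed data block from P by XORing the survivors (a hypothetical helper, not the kernel's recovery path):

#include <stdint.h>
#include <stddef.h>
#include <string.h>
#include <stdio.h>

#define NBLOCKS 5           /* 3 data blocks + P + Q */
#define BLKLEN  8

static void rebuild_from_p(int disks, size_t len, int faila, uint8_t **blocks)
{
	uint8_t *p = blocks[disks - 2];     /* P parity, as indexed in the hits above */
	uint8_t *a = blocks[faila];         /* block being rebuilt */

	memcpy(a, p, len);
	for (int i = 0; i < disks - 2; i++) {
		if (i == faila)
			continue;
		for (size_t off = 0; off < len; off++)
			a[off] ^= blocks[i][off];
	}
}

int main(void)
{
	uint8_t d[NBLOCKS][BLKLEN] = {
		{ 1, 2, 3 }, { 4, 5, 6 }, { 7, 8, 9 }, { 0 }, { 0 }
	};
	uint8_t *blocks[NBLOCKS];

	for (int i = 0; i < NBLOCKS; i++)
		blocks[i] = d[i];

	/* P = XOR of the data blocks (Q left out of this sketch). */
	for (int i = 0; i < NBLOCKS - 2; i++)
		for (size_t off = 0; off < BLKLEN; off++)
			d[NBLOCKS - 2][off] ^= d[i][off];

	memset(d[1], 0, BLKLEN);                  /* "lose" data block 1 */
	rebuild_from_p(NBLOCKS, BLKLEN, 1, blocks);
	printf("recovered d1[0] = %u\n", (unsigned)d[1][0]);  /* prints 4 */
	return 0;
}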
raid6test.c
71 struct page *blocks[NDISKS]; in raid6_dual_recov() local
84 blocks[count++] = ptrs[i]; in raid6_dual_recov()
89 tx = async_xor(dest, blocks, 0, count, bytes, &submit); in raid6_dual_recov()
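
raid6_dual_recov() packs the surviving pages into a local blocks[] array and lets async_xor() reduce them into dest. Written as a plain synchronous loop (the kernel version adds DMA offload, struct page sources, and submit-flag control over whether the old destination contents are included), the computation is just:

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

/* XOR every source block into dest, byte by byte. */
static void sync_xor(uint8_t *dest, uint8_t **srcs, int count, size_t bytes)
{
	for (int i = 0; i < count; i++)
		for (size_t off = 0; off < bytes; off++)
			dest[off] ^= srcs[i][off];
}

int main(void)
{
	uint8_t dest[4] = { 0 };
	uint8_t s0[4] = { 1, 2, 3, 4 }, s1[4] = { 4, 3, 2, 1 };
	uint8_t *blocks[2] = { s0, s1 };

	sync_xor(dest, blocks, 2, sizeof(dest));
	printf("dest[0]=%u dest[3]=%u\n", (unsigned)dest[0], (unsigned)dest[3]);
	return 0;
}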