/crypto/ |
D | cts.c |
       61   unsigned offset;                                              member
      106   unsigned int offset;                                          in cts_cbc_encrypt() local
      109   offset = rctx->offset;                                        in cts_cbc_encrypt()
      110   lastn = req->cryptlen - offset;                               in cts_cbc_encrypt()
      112   sg = scatterwalk_ffwd(rctx->sg, req->dst, offset - bsize);    in cts_cbc_encrypt()
      116   scatterwalk_map_and_copy(d, req->src, offset, lastn, 0);      in cts_cbc_encrypt()
      151   unsigned int offset;                                          in crypto_cts_encrypt() local
      167   offset = rounddown(nbytes - 1, bsize);                        in crypto_cts_encrypt()
      168   rctx->offset = offset;                                        in crypto_cts_encrypt()
      173   offset, req->iv);                                             in crypto_cts_encrypt()
            [all …]
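The crypto_cts_encrypt() hit at line 167 is where CBC ciphertext stealing decides where the special tail begins: everything before rounddown(nbytes - 1, bsize) minus one block is ordinary CBC, and the last two (possibly partial) blocks are handled by the CTS path. A minimal user-space sketch of just that arithmetic, assuming a 16-byte block size (the helper name is illustrative, not the kernel's):

    #include <stdio.h>

    /* rounddown(x, y) in the kernel is simply x - (x % y) for these uses. */
    static unsigned int cts_tail_offset(unsigned int nbytes, unsigned int bsize)
    {
            return (nbytes - 1) - ((nbytes - 1) % bsize);
    }

    int main(void)
    {
            /* 17 bytes -> tail starts at 16 (one full block plus 1 stolen byte);
             * 32 bytes -> tail starts at 16 (the final two full blocks). */
            printf("%u %u\n", cts_tail_offset(17, 16), cts_tail_offset(32, 16));
            return 0;
    }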
|
D | ahash.c |
       44   unsigned int offset = walk->offset;                           in hash_walk_next() local
       46   ((unsigned int)(PAGE_SIZE)) - offset);                        in hash_walk_next()
       49   walk->data += offset;                                         in hash_walk_next()
       51   if (offset & alignmask) {                                     in hash_walk_next()
       52   unsigned int unaligned = alignmask + 1 - (offset & alignmask);  in hash_walk_next()
       67   walk->offset = sg->offset;                                    in hash_walk_new_entry()
       68   walk->pg = sg_page(walk->sg) + (walk->offset >> PAGE_SHIFT);  in hash_walk_new_entry()
       69   walk->offset = offset_in_page(walk->offset);                  in hash_walk_new_entry()
       83   walk->data -= walk->offset;                                   in crypto_hash_walk_done()
       85   if (walk->entrylen && (walk->offset & alignmask) && !err) {   in crypto_hash_walk_done()
            [all …]
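The hash_walk_next() hits show the walk clamping each step so it never crosses a page boundary: the usable length is min(entrylen, PAGE_SIZE - offset). A rough stand-alone illustration of that clamp (PAGE_SIZE hard-coded and the function name made up for the example):

    #include <stdio.h>

    #define PAGE_SIZE 4096u

    static unsigned int walk_step(unsigned int entrylen, unsigned int offset)
    {
            unsigned int in_page = PAGE_SIZE - offset;   /* bytes left in this page */

            return entrylen < in_page ? entrylen : in_page;
    }

    int main(void)
    {
            /* 8192 bytes starting 100 bytes into a page: the first step is 3996. */
            printf("%u\n", walk_step(8192, 100));
            return 0;
    }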
|
D | af_alg.c |
      538   unsigned int af_alg_count_tsgl(struct sock *sk, size_t bytes, size_t offset)  in af_alg_count_tsgl() argument
      556   if (offset >= sg[i].length) {                                 in af_alg_count_tsgl()
      557   offset -= sg[i].length;                                       in af_alg_count_tsgl()
      562   bytes_count = sg[i].length - offset;                          in af_alg_count_tsgl()
      564   offset = 0;                                                   in af_alg_count_tsgl()
      628   sg[i].offset + dst_offset);                                   in af_alg_pull_tsgl()
      635   sg[i].offset += plen;                                         in af_alg_pull_tsgl()
      896   PAGE_SIZE - sg->offset - sg->length);                         in af_alg_sendmsg()
      899   sg->offset + sg->length,                                      in af_alg_sendmsg()
      905   ctx->merge = (sg->offset + sg->length) &                      in af_alg_sendmsg()
            [all …]
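The af_alg_count_tsgl() hits (lines 556-564) are the usual scatterlist skip-and-count pattern: consume 'offset' bytes by skipping whole entries, then count how many entries are needed to cover 'bytes'. A simplified sketch with a plain length array standing in for struct scatterlist (names and layout are illustrative only):

    #include <stddef.h>
    #include <stdio.h>

    static unsigned int count_entries(const size_t *len, size_t n,
                                      size_t bytes, size_t offset)
    {
            unsigned int count = 0;

            for (size_t i = 0; i < n && bytes; i++) {
                    if (offset >= len[i]) {         /* entry lies entirely before the start */
                            offset -= len[i];
                            continue;
                    }
                    size_t avail = len[i] - offset; /* usable bytes in this entry */

                    offset = 0;
                    count++;
                    bytes -= avail < bytes ? avail : bytes;
            }
            return count;
    }

    int main(void)
    {
            size_t len[] = { 4096, 4096, 4096 };

            /* Skip 3000 bytes; 5000 bytes then span the rest of entry 0 and part of entry 1. */
            printf("%u\n", count_entries(len, 3, 5000, 3000));
            return 0;
    }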
|
D | skcipher.c |
       56   offset_in_page(walk->offset);                                 in skcipher_map()
      323   walk->src.phys.offset = offset_in_page(walk->in.offset);      in skcipher_next_fast()
      325   walk->dst.phys.offset = offset_in_page(walk->out.offset);     in skcipher_next_fast()
      330   diff = walk->src.phys.offset - walk->dst.phys.offset;         in skcipher_next_fast()
      367   if (unlikely((walk->in.offset | walk->out.offset) & walk->alignmask)) {  in skcipher_walk_next()
      391   walk->src.phys.offset &= PAGE_SIZE - 1;                       in skcipher_walk_next()
      392   walk->dst.phys.offset &= PAGE_SIZE - 1;                       in skcipher_walk_next()
|
D | xcbc.c |
      143   unsigned int offset = 0;                                      in crypto_xcbc_digest_final() local
      156   offset += bs;                                                 in crypto_xcbc_digest_final()
      160   crypto_xor(prev, consts + offset, bs);                        in crypto_xcbc_digest_final()
|
D | cmac.c |
      177   unsigned int offset = 0;                                      in crypto_cmac_digest_final() local
      190   offset += bs;                                                 in crypto_cmac_digest_final()
      194   crypto_xor(prev, consts + offset, bs);                        in crypto_cmac_digest_final()
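The xcbc.c and cmac.c final-block hits follow one pattern: 'consts' holds two block-sized constants back to back, offset starts at 0 and is bumped by the block size (presumably when the final block was partial and had to be padded), and crypto_xor() folds the selected constant into the previous chaining value. A toy user-space sketch of that selection, with an assumed buffer layout rather than the kernel's structures:

    #include <stdio.h>
    #include <string.h>

    #define BS 16

    /* XOR the constant selected by 'offset' into 'prev', mirroring
     * crypto_xor(prev, consts + offset, bs) from the listing. */
    static void xor_selected_const(unsigned char *prev,
                                   const unsigned char consts[2 * BS],
                                   int last_block_partial)
    {
            unsigned int offset = 0;

            if (last_block_partial)
                    offset += BS;   /* pick the second constant for a padded block */

            for (unsigned int i = 0; i < BS; i++)
                    prev[i] ^= consts[offset + i];
    }

    int main(void)
    {
            unsigned char prev[BS] = { 0 }, consts[2 * BS];

            memset(consts, 0xAA, BS);        /* constant for a full final block */
            memset(consts + BS, 0x55, BS);   /* constant for a padded final block */

            xor_selected_const(prev, consts, 1);
            printf("%02x\n", (unsigned int)prev[0]);   /* 55: second constant used */
            return 0;
    }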
|
D | fips140-module.c |
      351   u32 offset;                                                   member
      384   offset_to_ptr(&fips140_rela_text.offset),                     in check_fips140_module_hmac()
      388   offset_to_ptr(&fips140_rela_rodata.offset),                   in check_fips140_module_hmac()
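The check_fips140_module_hmac() hits use offset_to_ptr(), the kernel helper for self-relative pointers: the stored field is not an address but the distance from the field's own location to the target, so the record keeps working wherever the module lands in memory. A stand-alone sketch of that idea with simplified user-space types (offset_to_ptr_demo and struct blob are made up for the example; the real fips140_rela_* layout is not shown in the listing):

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Resolve a self-relative offset: target = address of the field + its value. */
    static void *offset_to_ptr_demo(const int32_t *off)
    {
            return (void *)((uintptr_t)off + *off);
    }

    struct blob {
            int32_t offset;          /* distance from &offset to msg */
            char msg[24];
    };

    int main(void)
    {
            struct blob b = { .msg = "relocatable data" };

            b.offset = (int32_t)(offsetof(struct blob, msg) -
                                 offsetof(struct blob, offset));
            printf("%s\n", (char *)offset_to_ptr_demo(&b.offset));
            return 0;
    }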
|
D | algif_hash.c |
      132   int offset, size_t size, int flags)                           in hash_sendpage() argument
      144   sg_set_page(ctx->sgl.sg, page, size, offset);                 in hash_sendpage()
      340   int offset, size_t size, int flags)                           in hash_sendpage_nokey() argument
      348   return hash_sendpage(sock, page, offset, size, flags);        in hash_sendpage_nokey()
|
D | shash.c |
      325   unsigned int offset;                                          in shash_ahash_digest() local
      329   (sg = req->src, offset = sg->offset,                          in shash_ahash_digest()
      330   nbytes <= min(sg->length, ((unsigned int)(PAGE_SIZE)) - offset))) {  in shash_ahash_digest()
      334   err = crypto_shash_digest(desc, data + offset, nbytes,        in shash_ahash_digest()
|
D | testmgr.c |
      258   unsigned int offset;                                          member
      321   .src_divs = { { .proportion_of_total = 10000, .offset = 1 } },
      329   .offset = 1,
      347   { .proportion_of_total = 1900, .offset = 33 },
      348   { .proportion_of_total = 3300, .offset = 7 },
      349   { .proportion_of_total = 4800, .offset = 18 },
      359   .offset = PAGE_SIZE - 32
      362   .offset = PAGE_SIZE - 7
      383   .src_divs = { { .proportion_of_total = 10000, .offset = 1 } },
      391   .offset = 1,
            [all …]
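These testmgr.c hits describe how self-test data gets scattered: each division claims a share of the total message in parts per 10000 (the 1900/3300/4800 split above sums to the whole), and .offset places it at an awkward position such as one byte past an alignment boundary or just before a page end to flush out buffer-handling bugs. A rough sketch of turning such a share into a byte count (the kernel's exact rounding and remainder handling may differ; div_bytes is a name invented here):

    #include <stdio.h>

    /* Convert a parts-per-10000 share of 'total_len' into bytes (truncating;
     * leftover bytes would fall to the last division in a real harness). */
    static unsigned int div_bytes(unsigned int proportion_of_total,
                                  unsigned int total_len)
    {
            return (unsigned int)((unsigned long long)proportion_of_total *
                                  total_len / 10000);
    }

    int main(void)
    {
            /* The 1900/3300/4800 split applied to a 512-byte message. */
            printf("%u %u %u\n", div_bytes(1900, 512), div_bytes(3300, 512),
                   div_bytes(4800, 512));
            return 0;
    }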
|
D | md4.c |
      187   const unsigned int offset = mctx->byte_count & 0x3f;          in md4_final() local
      188   char *p = (char *)mctx->block + offset;                       in md4_final()
      189   int padding = 56 - (offset + 1);                              in md4_final()
|
D | md5.c |
      177   const unsigned int offset = mctx->byte_count & 0x3f;          in md5_final() local
      178   char *p = (char *)mctx->block + offset;                       in md5_final()
      179   int padding = 56 - (offset + 1);                              in md5_final()
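md4_final() and md5_final() share the classic Merkle-Damgård padding arithmetic shown in these hits: offset = byte_count & 0x3f is the fill level of the current 64-byte block, a single 0x80 marker byte is appended, and 56 - (offset + 1) zero bytes follow (effectively plus a full extra block when that value goes negative) so the 8-byte length lands exactly at the end of a block. A small sketch of just the length calculation (pad_zero_bytes is an illustrative name):

    #include <stdio.h>

    /* Number of zero bytes between the 0x80 marker and the 64-bit length field. */
    static int pad_zero_bytes(unsigned long long byte_count)
    {
            unsigned int offset = byte_count & 0x3f;
            int padding = 56 - (int)(offset + 1);

            if (padding < 0)        /* length will not fit; spill into the next block */
                    padding += 64;
            return padding;
    }

    int main(void)
    {
            /* 0 bytes hashed -> 55 zeros; 55 -> 0; 56 -> 63 (an extra block). */
            printf("%d %d %d\n", pad_zero_bytes(0), pad_zero_bytes(55),
                   pad_zero_bytes(56));
            return 0;
    }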
|
D | xts.c |
      164   int offset = req->cryptlen & ~(XTS_BLOCK_SIZE - 1);           in xts_cts_final() local
      172   offset - XTS_BLOCK_SIZE);                                     in xts_cts_final()
      176   scatterwalk_map_and_copy(b, req->src, offset, tail, 0);       in xts_cts_final()
|
D | scatterwalk.c |
       88   sg_set_page(dst, sg_page(src), src->length - len, src->offset + len);  in scatterwalk_ffwd()
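The single scatterwalk_ffwd() hit is the fast-forward step: the new entry reuses the same page but starts 'len' bytes further in and is 'len' bytes shorter. A trivial sketch of that (offset, length) adjustment on a plain struct (not the real scatterlist API):

    #include <stdio.h>

    struct span {
            unsigned int offset;    /* start within the backing page */
            unsigned int length;    /* bytes covered */
    };

    /* Skip the first 'len' bytes of 'src', as scatterwalk_ffwd() does per entry. */
    static struct span ffwd(struct span src, unsigned int len)
    {
            return (struct span){ src.offset + len, src.length - len };
    }

    int main(void)
    {
            struct span s = ffwd((struct span){ .offset = 128, .length = 4096 }, 100);

            printf("%u %u\n", s.offset, s.length);   /* 228 3996 */
            return 0;
    }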
|
D | algif_skcipher.c |
      250   int offset, size_t size, int flags)                           in skcipher_sendpage_nokey() argument
      258   return af_alg_sendpage(sock, page, offset, size, flags);      in skcipher_sendpage_nokey()
|
D | algif_aead.c |
      424   int offset, size_t size, int flags)                           in aead_sendpage_nokey() argument
      432   return af_alg_sendpage(sock, page, offset, size, flags);      in aead_sendpage_nokey()
|
D | essiv.c |
      235   req->src->offset);                                            in essiv_aead_crypt()
|
/crypto/async_tx/ |
D | async_xor.c |
      100   do_sync_xor_offs(struct page *dest, unsigned int offset,      in do_sync_xor_offs() argument
      119   (src_offs ? src_offs[i] : offset);                            in do_sync_xor_offs()
      122   dest_buf = page_address(dest) + offset;                       in do_sync_xor_offs()
      141   dma_xor_aligned_offsets(struct dma_device *device, unsigned int offset,  in dma_xor_aligned_offsets() argument
      146   if (!is_dma_xor_aligned(device, offset, 0, len))              in dma_xor_aligned_offsets()
      182   async_xor_offs(struct page *dest, unsigned int offset,        in async_xor_offs() argument
      197   if (unmap && dma_xor_aligned_offsets(device, offset,          in async_xor_offs()
      211   src_offs ? src_offs[i] : offset,                              in async_xor_offs()
      216   unmap->addr[j] = dma_map_page(device->dev, dest, offset, len, in async_xor_offs()
      243   do_sync_xor_offs(dest, offset, src_list, src_offs,            in async_xor_offs()
            [all …]
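The async_xor_offs()/do_sync_xor_offs() hits revolve around one convention: an optional src_offs[] array supplies a per-source byte offset, and a NULL array means every source (and the destination) uses the single common 'offset'. A compact user-space sketch of a synchronous XOR following that convention, with plain buffers instead of struct page and an invented function name:

    #include <stddef.h>
    #include <stdio.h>

    /* XOR 'len' bytes of each source into 'dest'; src_offs may be NULL,
     * in which case the common 'offset' applies to every source. */
    static void xor_offs(unsigned char *dest, unsigned int offset,
                         unsigned char *srcs[], const unsigned int *src_offs,
                         int src_cnt, unsigned int len)
    {
            for (int i = 0; i < src_cnt; i++) {
                    const unsigned char *s = srcs[i] +
                            (src_offs ? src_offs[i] : offset);

                    for (unsigned int j = 0; j < len; j++)
                            dest[offset + j] ^= s[j];
            }
    }

    int main(void)
    {
            unsigned char a[8] = { 1, 2, 3, 4 }, b[8] = { 4, 3, 2, 1 }, d[8] = { 0 };
            unsigned char *srcs[] = { a, b };

            xor_offs(d, 0, srcs, NULL, 2, 4);
            printf("%d %d %d %d\n", d[0], d[1], d[2], d[3]);   /* 5 1 1 5 */
            return 0;
    }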
|