/crypto/
D | cts.c |
     61  unsigned offset;                                              member
    110  unsigned int offset;                                          in cts_cbc_encrypt() local
    113  offset = rctx->offset;                                        in cts_cbc_encrypt()
    114  lastn = req->cryptlen - offset;                               in cts_cbc_encrypt()
    116  sg = scatterwalk_ffwd(rctx->sg, req->dst, offset - bsize);    in cts_cbc_encrypt()
    120  scatterwalk_map_and_copy(d, req->src, offset, lastn, 0);      in cts_cbc_encrypt()
    155  unsigned int offset;                                          in crypto_cts_encrypt() local
    171  offset = rounddown(nbytes - 1, bsize);                        in crypto_cts_encrypt()
    172  rctx->offset = offset;                                        in crypto_cts_encrypt()
    177  offset, req->iv);                                             in crypto_cts_encrypt()
    [all …]
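The interesting arithmetic is at line 171: rounddown(nbytes - 1, bsize) puts offset at the start of the final, possibly short, block, so lastn = req->cryptlen - offset (line 114) is the 1..bsize byte tail that ciphertext stealing handles, and offset - bsize (line 116) is the block it steals from. A minimal userspace sketch of that split, assuming a 16-byte block size; rounddown_to() is a stand-in for the kernel's rounddown() macro:

    #include <stdio.h>

    static unsigned int rounddown_to(unsigned int x, unsigned int m)
    {
        return x - (x % m);                /* stand-in for rounddown() */
    }

    int main(void)
    {
        const unsigned int bsize = 16;     /* assumed AES block size */
        unsigned int lens[] = { 17, 31, 32, 33, 47 };   /* example cryptlen values */

        for (int i = 0; i < 5; i++) {
            unsigned int offset = rounddown_to(lens[i] - 1, bsize);
            unsigned int lastn  = lens[i] - offset;
            printf("cryptlen=%2u -> offset=%2u lastn=%2u\n",
                   lens[i], offset, lastn);
        }
        return 0;
    }

The "- 1" matters: for cryptlen = 32 it yields offset = 16 and lastn = 16, keeping a full final block in the stealing path instead of an empty one.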
D | ahash.c |
     43  unsigned int offset = walk->offset;                              in hash_walk_next() local
     45  ((unsigned int)(PAGE_SIZE)) - offset);                           in hash_walk_next()
     51  walk->data += offset;                                            in hash_walk_next()
     53  if (offset & alignmask) {                                        in hash_walk_next()
     54  unsigned int unaligned = alignmask + 1 - (offset & alignmask);   in hash_walk_next()
     69  walk->offset = sg->offset;                                       in hash_walk_new_entry()
     70  walk->pg = sg_page(walk->sg) + (walk->offset >> PAGE_SHIFT);     in hash_walk_new_entry()
     71  walk->offset = offset_in_page(walk->offset);                     in hash_walk_new_entry()
     85  walk->data -= walk->offset;                                      in crypto_hash_walk_done()
     87  if (walk->entrylen && (walk->offset & alignmask) && !err) {      in crypto_hash_walk_done()
    [all …]
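Lines 69-71 show how the walk normalises an sg offset that may exceed a page: the high bits pick the page, offset_in_page() keeps the remainder, and line 45's min() then clamps each mapping step to the end of that page. A standalone sketch of the same arithmetic, assuming 4 KiB pages and made-up lengths:

    #include <stdio.h>

    #define PAGE_SHIFT 12                      /* assumed 4 KiB pages */
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)

    int main(void)
    {
        unsigned long sg_offset = 5000;        /* made-up sg->offset: 4096 + 904 */
        unsigned long entrylen  = 9000;        /* made-up entry length */

        unsigned long page_idx = sg_offset >> PAGE_SHIFT;       /* pages to skip */
        unsigned long in_page  = sg_offset & (PAGE_SIZE - 1);   /* offset_in_page() */

        /* hash_walk_next() clamps each step to the end of the current page: */
        unsigned long step = entrylen < PAGE_SIZE - in_page
                           ? entrylen : PAGE_SIZE - in_page;

        printf("page %lu, in-page offset %lu, first step %lu bytes\n",
               page_idx, in_page, step);
        return 0;
    }

With sg->offset = 5000 this skips one page and maps at offset 904, taking 3192 bytes before the walk crosses into the next page.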
D | ablkcipher.c |
    206  walk->src.offset = offset_in_page(walk->in.offset);          in ablkcipher_next_fast()
    208  walk->dst.offset = offset_in_page(walk->out.offset);         in ablkcipher_next_fast()
    251  walk->src.offset = ((unsigned long)src & (PAGE_SIZE - 1));   in ablkcipher_walk_next()
    252  walk->dst.offset = ((unsigned long)dst & (PAGE_SIZE - 1));   in ablkcipher_walk_next()
D | blkcipher.c |
    195  walk->src.phys.offset = offset_in_page(walk->in.offset);     in blkcipher_next_fast()
    197  walk->dst.phys.offset = offset_in_page(walk->out.offset);    in blkcipher_next_fast()
    202  diff = walk->src.phys.offset - walk->dst.phys.offset;        in blkcipher_next_fast()
    263  walk->src.phys.offset &= PAGE_SIZE - 1;                      in blkcipher_walk_next()
    264  walk->dst.phys.offset &= PAGE_SIZE - 1;                      in blkcipher_walk_next()
D | af_alg.c |
    527  unsigned int af_alg_count_tsgl(struct sock *sk, size_t bytes, size_t offset)   in af_alg_count_tsgl() argument
    545  if (offset >= sg[i].length) {                                in af_alg_count_tsgl()
    546  offset -= sg[i].length;                                      in af_alg_count_tsgl()
    551  bytes_count = sg[i].length - offset;                         in af_alg_count_tsgl()
    553  offset = 0;                                                  in af_alg_count_tsgl()
    617  sg[i].offset + dst_offset);                                  in af_alg_pull_tsgl()
    624  sg[i].offset += plen;                                        in af_alg_pull_tsgl()
    874  PAGE_SIZE - sg->offset - sg->length);                        in af_alg_sendmsg()
    877  sg->offset + sg->length,                                     in af_alg_sendmsg()
    883  ctx->merge = (sg->offset + sg->length) &                     in af_alg_sendmsg()
    [all …]
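af_alg_count_tsgl() (lines 545-553) is the usual scatterlist-walk shape: consume the starting offset against whole entries, then count from part-way into the first entry that overlaps. A userspace sketch with plain arrays standing in for struct scatterlist; all values are made up:

    #include <stdio.h>

    int main(void)
    {
        unsigned int sg_len[] = { 100, 200, 50 };   /* made-up entry lengths */
        unsigned int offset = 250, bytes = 80;      /* made-up request */
        unsigned int count = 0;

        for (int i = 0; i < 3 && bytes; i++) {
            if (offset >= sg_len[i]) {              /* entry lies before offset */
                offset -= sg_len[i];
                continue;
            }
            unsigned int avail = sg_len[i] - offset;
            unsigned int take  = avail < bytes ? avail : bytes;

            bytes -= take;
            offset = 0;          /* later entries are taken from the start */
            count++;
        }
        printf("request spans %u sg entries\n", count);
        return 0;
    }

Note the offset = 0 after the first overlapping entry: only the first counted entry starts mid-way; every later one is consumed from its beginning.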
D | skcipher.c |
     55  offset_in_page(walk->offset);                                in skcipher_map()
    322  walk->src.phys.offset = offset_in_page(walk->in.offset);     in skcipher_next_fast()
    324  walk->dst.phys.offset = offset_in_page(walk->out.offset);    in skcipher_next_fast()
    329  diff = walk->src.phys.offset - walk->dst.phys.offset;        in skcipher_next_fast()
    366  if (unlikely((walk->in.offset | walk->out.offset) & walk->alignmask)) {   in skcipher_walk_next()
    390  walk->src.phys.offset &= PAGE_SIZE - 1;                      in skcipher_walk_next()
    391  walk->dst.phys.offset &= PAGE_SIZE - 1;                      in skcipher_walk_next()
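ablkcipher.c, blkcipher.c and skcipher.c above all lean on the same idiom: because PAGE_SIZE is a power of two, addr & (PAGE_SIZE - 1) is the offset within the page, which is what offset_in_page() expands to. A self-contained illustration, assuming 4 KiB pages; the diff mirrors what skcipher_next_fast() (line 329) computes to tell whether source and destination alias within their pages:

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SIZE 4096UL                       /* assumed page size */

    static unsigned long offset_in_page(const void *p)
    {
        return (uintptr_t)p & (PAGE_SIZE - 1);     /* works since PAGE_SIZE is 2^n */
    }

    int main(void)
    {
        static char buf[8192];
        char *src = buf + 100, *dst = buf + 100;   /* in-place request */

        /* the *_next_fast() helpers compare in-page offsets to detect aliasing: */
        long diff = (long)offset_in_page(src) - (long)offset_in_page(dst);

        printf("src %lu, dst %lu, diff %ld\n",
               offset_in_page(src), offset_in_page(dst), diff);
        return 0;
    }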
D | xcbc.c |
    142  unsigned int offset = 0;                                     in crypto_xcbc_digest_final() local
    155  offset += bs;                                                in crypto_xcbc_digest_final()
    159  crypto_xor(prev, consts + offset, bs);                       in crypto_xcbc_digest_final()
D | testmgr.c |
    240  unsigned int offset;                                         member
    298  .src_divs = { { .proportion_of_total = 10000, .offset = 1 } },
    305  .offset = 1,
    321  { .proportion_of_total = 1900, .offset = 33 },
    322  { .proportion_of_total = 3300, .offset = 7 },
    323  { .proportion_of_total = 4800, .offset = 18 },
    332  .offset = PAGE_SIZE - 32
    335  .offset = PAGE_SIZE - 7
    356  .src_divs = { { .proportion_of_total = 10000, .offset = 1 } },
    363  .offset = 1,
    [all …]
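proportion_of_total reads as parts per 10000 of the test vector, so the divisions at lines 321-323 split the data roughly 19/33/48 percent, each starting at a deliberately awkward offset. A sketch of that split arithmetic; the remainder-to-last-division behavior here is an assumption for illustration, not a quote of testmgr's exact rounding:

    #include <stdio.h>

    #define TEST_SG_TOTAL 10000              /* parts-per-ten-thousand scale */

    int main(void)
    {
        unsigned int total = 256;            /* made-up test vector length */
        unsigned int props[] = { 1900, 3300, 4800 };   /* lines 321-323 above */
        unsigned int used = 0;

        for (int i = 0; i < 3; i++) {
            /* assumption: the last division absorbs the rounding
             * remainder so the lengths sum to total */
            unsigned int n = (i == 2) ? total - used
                                      : total * props[i] / TEST_SG_TOTAL;
            printf("division %d: %u bytes\n", i, n);
            used += n;
        }
        return 0;
    }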
D | shash.c |
    298  unsigned int offset;                                         in shash_ahash_digest() local
    302  (sg = req->src, offset = sg->offset,                         in shash_ahash_digest()
    303  nbytes <= min(sg->length, ((unsigned int)(PAGE_SIZE)) - offset))) {   in shash_ahash_digest()
    307  err = crypto_shash_digest(desc, data + offset, nbytes,       in shash_ahash_digest()
D | cmac.c |
    176  unsigned int offset = 0;                                     in crypto_cmac_digest_final() local
    189  offset += bs;                                                in crypto_cmac_digest_final()
    193  crypto_xor(prev, consts + offset, bs);                       in crypto_cmac_digest_final()
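xcbc.c and cmac.c above share this final-block shape: consts holds two block-sized subkeys back to back, and offset selects the first for a complete final block or the second after 10* padding. A userspace sketch with 16-byte blocks and dummy subkeys:

    #include <stdio.h>
    #include <string.h>

    #define BS 16                               /* assumed block size */

    int main(void)
    {
        unsigned char consts[2 * BS];           /* K1 | K2, derived elsewhere */
        memset(consts,      0x11, BS);          /* dummy subkey K1 */
        memset(consts + BS, 0x22, BS);          /* dummy subkey K2 */

        unsigned int last_len = 10;             /* bytes in the final block */
        unsigned int offset = 0;

        unsigned char block[BS] = { 0 };
        if (last_len != BS) {
            block[last_len] = 0x80;             /* 10* padding, as in the kernel */
            offset += BS;                       /* select the second subkey */
        }

        printf("padded=%s, subkey byte=0x%02x\n",
               last_len != BS ? "yes" : "no", consts[offset]);
        return 0;
    }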
D | algif_hash.c |
    132  int offset, size_t size, int flags)                          in hash_sendpage() argument
    144  sg_set_page(ctx->sgl.sg, page, size, offset);                in hash_sendpage()
    345  int offset, size_t size, int flags)                          in hash_sendpage_nokey() argument
    353  return hash_sendpage(sock, page, offset, size, flags);       in hash_sendpage_nokey()
D | md4.c |
    187  const unsigned int offset = mctx->byte_count & 0x3f;         in md4_final() local
    188  char *p = (char *)mctx->block + offset;                      in md4_final()
    189  int padding = 56 - (offset + 1);                             in md4_final()
D | md5.c |
    180  const unsigned int offset = mctx->byte_count & 0x3f;         in md5_final() local
    181  char *p = (char *)mctx->block + offset;                      in md5_final()
    182  int padding = 56 - (offset + 1);                             in md5_final()
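md4_final() and md5_final() compute identical padding: offset is the fill level of the 64-byte block, one 0x80 byte follows the message, and padding zero-fills up to byte 56, where the 8-byte length field starts; when padding goes negative, the data spills into an extra block. A standalone check of that arithmetic:

    #include <stdio.h>

    int main(void)
    {
        unsigned long long byte_count = 119;       /* example message length */
        unsigned int offset = byte_count & 0x3f;   /* position within block */
        int padding = 56 - (offset + 1);           /* zeros after the 0x80 byte */

        if (padding < 0)                           /* length field won't fit: */
            padding += 64;                         /* spill into a second block */

        printf("offset=%u padding=%d total=%llu\n",
               offset, padding, byte_count + 1 + padding + 8);
        return 0;
    }

Either way the total (message + 0x80 + zeros + length) comes out a multiple of 64, which is what the compression function requires.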
D | xts.c |
    167  int offset = req->cryptlen & ~(XTS_BLOCK_SIZE - 1);          in cts_final() local
    175  offset - XTS_BLOCK_SIZE);                                    in cts_final()
    179  scatterwalk_map_and_copy(b, req->src, offset, tail, 0);      in cts_final()
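Line 167 is the complementary mask to offset_in_page(): for a power-of-two block size, cryptlen & ~(XTS_BLOCK_SIZE - 1) rounds down to a block boundary, leaving cryptlen & (XTS_BLOCK_SIZE - 1) as the tail that cts_final() processes. A two-line illustration:

    #include <stdio.h>

    #define XTS_BLOCK_SIZE 16

    int main(void)
    {
        unsigned int cryptlen = 37;                              /* example */
        unsigned int offset = cryptlen & ~(XTS_BLOCK_SIZE - 1);  /* 32 */
        unsigned int rem    = cryptlen &  (XTS_BLOCK_SIZE - 1);  /* 5  */

        printf("offset=%u remainder=%u\n", offset, rem);
        return 0;
    }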
D | scatterwalk.c |
     88  sg_set_page(dst, sg_page(src), src->length - len, src->offset + len);   in scatterwalk_ffwd()
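The single hit here is the whole trick of scatterwalk_ffwd(): skipping len bytes of an sg entry means advancing its offset and shrinking its length by the same amount. A sketch with a plain struct standing in for struct scatterlist:

    #include <stdio.h>

    struct ent { unsigned int offset, length; };   /* hypothetical stand-in */

    static void ffwd(struct ent *dst, const struct ent *src, unsigned int len)
    {
        dst->offset = src->offset + len;   /* start len bytes further in */
        dst->length = src->length - len;   /* correspondingly shorter */
    }

    int main(void)
    {
        struct ent src = { .offset = 64, .length = 512 }, dst;

        ffwd(&dst, &src, 100);
        printf("dst: offset=%u length=%u\n", dst.offset, dst.length);
        return 0;
    }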
D | algif_skcipher.c |
    259  int offset, size_t size, int flags)                          in skcipher_sendpage_nokey() argument
    267  return af_alg_sendpage(sock, page, offset, size, flags);     in skcipher_sendpage_nokey()
D | algif_aead.c |
    428  int offset, size_t size, int flags)                          in aead_sendpage_nokey() argument
    436  return af_alg_sendpage(sock, page, offset, size, flags);     in aead_sendpage_nokey()
D | essiv.c |
    248  req->src->offset);                                           in essiv_aead_crypt()
/crypto/async_tx/
D | async_pq.c |
    107  do_sync_gen_syndrome(struct page **blocks, unsigned int offset, int disks,   in do_sync_gen_syndrome() argument
    124  srcs[i] = page_address(blocks[i]) + offset;                  in do_sync_gen_syndrome()
    163  async_gen_syndrome(struct page **blocks, unsigned int offset, int disks,     in async_gen_syndrome() argument
    182  is_dma_pq_aligned(device, offset, 0, len)) {                 in async_gen_syndrome()
    199  unmap->addr[j] = dma_map_page(device->dev, blocks[i], offset,   in async_gen_syndrome()
    213  offset, len, DMA_BIDIRECTIONAL);                             in async_gen_syndrome()
    222  offset, len, DMA_BIDIRECTIONAL);                             in async_gen_syndrome()
    243  BUG_ON(len + offset > PAGE_SIZE);                            in async_gen_syndrome()
    247  BUG_ON(len + offset > PAGE_SIZE);                            in async_gen_syndrome()
    249  do_sync_gen_syndrome(blocks, offset, disks, len, submit);    in async_gen_syndrome()
    [all …]
D | async_xor.c |
    100  do_sync_xor(struct page *dest, struct page **src_list, unsigned int offset,   in do_sync_xor() argument
    117  srcs[xor_src_cnt++] = page_address(src_list[i]) + offset;    in do_sync_xor()
    120  dest_buf = page_address(dest) + offset;                      in do_sync_xor()
    160  async_xor(struct page *dest, struct page **src_list, unsigned int offset,     in async_xor() argument
    174  if (unmap && is_dma_xor_aligned(device, offset, 0, len)) {   in async_xor()
    187  offset, len, DMA_TO_DEVICE);                                 in async_xor()
    191  unmap->addr[j] = dma_map_page(device->dev, dest, offset, len,   in async_xor()
    216  do_sync_xor(dest, src_list, offset, src_cnt, len, submit);   in async_xor()
    223  static int page_is_zero(struct page *p, unsigned int offset, size_t len)      in page_is_zero() argument
    225  return !memchr_inv(page_address(p) + offset, 0, len);        in page_is_zero()
    [all …]
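In both async_pq.c and async_xor.c the synchronous fallback applies one shared offset to every page, which is why the BUG_ON(len + offset > PAGE_SIZE) checks precede it: page_address(page) + offset must stay within the page. A userspace sketch of that addressing, with static buffers standing in for pages and 4 KiB assumed:

    #include <assert.h>
    #include <stdio.h>

    #define PAGE_SIZE 4096                      /* assumed page size */
    #define NSRCS 2

    static unsigned char dest[PAGE_SIZE];       /* stand-ins for struct page */
    static unsigned char srcs[NSRCS][PAGE_SIZE];

    int main(void)
    {
        unsigned int offset = 512, len = 1024;  /* made-up values */
        assert(len + offset <= PAGE_SIZE);      /* the BUG_ON() above */

        for (int i = 0; i < NSRCS; i++)
            for (unsigned int j = 0; j < len; j++)
                srcs[i][offset + j] = (unsigned char)(i + j);

        /* every buffer is addressed at the same in-page offset, as in
         * page_address(page) + offset: */
        for (int i = 0; i < NSRCS; i++)
            for (unsigned int j = 0; j < len; j++)
                dest[offset + j] ^= srcs[i][offset + j];

        printf("dest[%u] = 0x%02x\n", offset, dest[offset]);
        return 0;
    }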