/crypto/
scatterwalk.c
    39  walk->offset = sg->offset;  in scatterwalk_start()
    46  offset_in_page(walk->offset);  in scatterwalk_map()
    56  page = sg_page(walk->sg) + ((walk->offset - 1) >> PAGE_SHIFT);  in scatterwalk_pagedone()
    62  walk->offset += PAGE_SIZE - 1;  in scatterwalk_pagedone()
    63  walk->offset &= PAGE_MASK;  in scatterwalk_pagedone()
    64  if (walk->offset >= walk->sg->offset + walk->sg->length)  in scatterwalk_pagedone()
   107  unsigned int offset = 0;  in scatterwalk_map_and_copy() local
   115  if (start < offset + sg->length)  in scatterwalk_map_and_copy()
   118  offset += sg->length;  in scatterwalk_map_and_copy()
   122  scatterwalk_advance(&walk, start - offset);  in scatterwalk_map_and_copy()
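The pair of statements at scatterwalk.c lines 62-63 is the kernel's idiom for rounding a byte offset up to the next page boundary. A minimal userspace sketch of the arithmetic, assuming 4 KiB pages (PAGE_SHIFT, PAGE_SIZE and PAGE_MASK are redefined locally here; in the kernel they come from the architecture headers):

    #include <assert.h>
    #include <stdio.h>

    /* Local stand-ins for the kernel's page constants (assumed 4 KiB pages). */
    #define PAGE_SHIFT 12
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)        /* 4096 */
    #define PAGE_MASK  (~(PAGE_SIZE - 1))

    int main(void)
    {
        unsigned long offset = 5000;              /* somewhere inside the 2nd page */

        /* The two-step pattern from scatterwalk_pagedone(): round the
         * walk offset up to the start of the next page. */
        offset += PAGE_SIZE - 1;
        offset &= PAGE_MASK;

        printf("%lu\n", offset);                  /* prints 8192 */
        assert(offset == 8192);
        return 0;
    }

An offset that is already page-aligned comes out unchanged, so the rounding can be applied unconditionally.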
ahash.c
    45  unsigned int offset = walk->offset;  in hash_walk_next() local
    47  ((unsigned int)(PAGE_SIZE)) - offset);  in hash_walk_next()
    50  walk->data += offset;  in hash_walk_next()
    52  if (offset & alignmask) {  in hash_walk_next()
    53  unsigned int unaligned = alignmask + 1 - (offset & alignmask);  in hash_walk_next()
    68  walk->offset = sg->offset;  in hash_walk_new_entry()
    83  walk->data -= walk->offset;  in crypto_hash_walk_done()
    85  if (nbytes && walk->offset & alignmask && !err) {  in crypto_hash_walk_done()
    86  walk->offset = ALIGN(walk->offset, alignmask + 1);  in crypto_hash_walk_done()
    87  walk->data += walk->offset;  in crypto_hash_walk_done()
   [all …]
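Lines 52-53 and 86 of ahash.c show the two directions of the same alignment arithmetic: hash_walk_next() counts how many bytes remain until the next aligned boundary, while crypto_hash_walk_done() rounds the offset itself up with ALIGN(). A small userspace sketch, assuming an 8-byte alignment requirement (alignmask = 7) and redefining ALIGN() locally:

    #include <assert.h>

    /* Userspace stand-in for the kernel's ALIGN() macro (assumption:
     * 'a' is a power of two, as alignmask + 1 always is). */
    #define ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))

    int main(void)
    {
        unsigned int alignmask = 7;     /* algorithm wants 8-byte alignment */
        unsigned int offset = 13;

        if (offset & alignmask) {
            /* Bytes left until the next aligned boundary, as in hash_walk_next(). */
            unsigned int unaligned = alignmask + 1 - (offset & alignmask);
            assert(unaligned == 3);     /* 13 -> 16 */
        }

        /* crypto_hash_walk_done() instead rounds the offset itself up. */
        assert(ALIGN(offset, alignmask + 1) == 16);
        return 0;
    }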
cts.c
    77  unsigned int offset,  in cts_cbc_encrypt() argument
    96  scatterwalk_map_and_copy(s, src, offset, nbytes, 0);  in cts_cbc_encrypt()
   118  scatterwalk_map_and_copy(d, dst, offset, nbytes, 1);  in cts_cbc_encrypt()
   163  unsigned int offset,  in cts_cbc_decrypt() argument
   181  scatterwalk_map_and_copy(s, src, offset, nbytes, 0);  in cts_cbc_decrypt()
   213  scatterwalk_map_and_copy(d, dst, offset, nbytes, 1);  in cts_cbc_decrypt()
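Both cts_cbc_encrypt() and cts_cbc_decrypt() lean on scatterwalk_map_and_copy() to move a few bytes between a linear buffer and a scatterlist at a given byte offset (the final argument selects the direction: 0 reads from the scatterlist, 1 writes to it). A toy userspace analogue of the read direction, mirroring the offset walk at scatterwalk.c lines 107-122 above (struct seg is a made-up stand-in for struct scatterlist; the real code also handles page mapping, which this sketch omits):

    #include <string.h>
    #include <stdio.h>

    /* Toy model of a scatterlist segment: just a pointer plus a length. */
    struct seg { const char *buf; unsigned int len; };

    /* Minimal analogue of scatterwalk_map_and_copy(..., out=0): copy nbytes
     * starting at byte 'start' of the segment chain into 'dst'. */
    static void seg_copy_out(char *dst, const struct seg *sg, int nsegs,
                             unsigned int start, unsigned int nbytes)
    {
        unsigned int offset = 0;            /* running byte position of sg[i] */

        for (int i = 0; i < nsegs && nbytes; i++) {
            if (start < offset + sg[i].len) {
                unsigned int skip = start - offset;
                unsigned int n = sg[i].len - skip;
                if (n > nbytes)
                    n = nbytes;
                memcpy(dst, sg[i].buf + skip, n);
                dst += n;
                nbytes -= n;
                start += n;
            }
            offset += sg[i].len;
        }
    }

    int main(void)
    {
        struct seg sg[] = { { "hello ", 6 }, { "world", 5 } };
        char out[6] = { 0 };

        seg_copy_out(out, sg, 2, 4, 5);     /* bytes 4..8 span both segments */
        printf("%s\n", out);                /* prints "o wor" */
        return 0;
    }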
md5.c
    98  const unsigned int offset = mctx->byte_count & 0x3f;  in md5_final() local
    99  char *p = (char *)mctx->block + offset;  in md5_final()
   100  int padding = 56 - (offset + 1);  in md5_final()
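These three lines implement the Merkle-Damgård length padding: offset is the write position within the current 64-byte block, and padding counts the zero bytes needed after the mandatory 0x80 byte so that the 8-byte length field ends exactly on a block boundary. A worked sketch of the arithmetic (md4_final() at md4.c lines 204-206 below is identical):

    #include <stdio.h>

    int main(void)
    {
        unsigned long long byte_count = 119;          /* arbitrary message length */
        unsigned int offset = byte_count & 0x3f;      /* position in current block */
        int padding = 56 - (int)(offset + 1);

        if (padding < 0)                              /* 0x80 landed past byte 55: */
            padding += 64;                            /* spill into one more block */

        /* 119 % 64 = 55, so one 0x80 byte + 0 zero bytes + 8 length bytes
         * completes the block: (55 + 1 + 0 + 8) % 64 == 0. */
        printf("offset=%u padding=%d\n", offset, padding);
        return 0;
    }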
shash.c
   277  unsigned int offset = sg->offset;  in shash_ahash_digest() local
   281  if (nbytes < min(sg->length, ((unsigned int)(PAGE_SIZE)) - offset)) {  in shash_ahash_digest()
   285  err = crypto_shash_digest(desc, data + offset, nbytes,  in shash_ahash_digest()
   413  unsigned int offset = sg->offset;  in shash_compat_digest() local
   416  if (nbytes < min(sg->length, ((unsigned int)(PAGE_SIZE)) - offset)) {  in shash_compat_digest()
   424  err = crypto_shash_digest(desc, data + offset, nbytes, out);  in shash_compat_digest()
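shash_ahash_digest() and shash_compat_digest() share the fast-path test at lines 281/416: the request can be hashed with one flat crypto_shash_digest() call only if the data sits entirely in the first scatterlist entry and does not run past the end of the page it starts in. A userspace sketch of the predicate, with PAGE_SIZE fixed at 4096 and plain integers standing in for the sg fields:

    #include <stdbool.h>
    #include <stdio.h>

    #define PAGE_SIZE 4096u

    /* Mirror of the fast-path condition: nbytes must fit both inside the
     * first segment and inside the page the data starts in. */
    static bool fits_in_one_mapping(unsigned int nbytes,
                                    unsigned int sg_length,
                                    unsigned int offset)
    {
        unsigned int room = PAGE_SIZE - offset;
        return nbytes < (sg_length < room ? sg_length : room);
    }

    int main(void)
    {
        printf("%d\n", fits_in_one_mapping(100, 4096, 4000));  /* 0: crosses page */
        printf("%d\n", fits_in_one_mapping(90, 4096, 4000));   /* 1: fast path   */
        return 0;
    }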
blkcipher.c
   202  walk->src.phys.offset = offset_in_page(walk->in.offset);  in blkcipher_next_fast()
   204  walk->dst.phys.offset = offset_in_page(walk->out.offset);  in blkcipher_next_fast()
   209  diff = walk->src.phys.offset - walk->dst.phys.offset;  in blkcipher_next_fast()
   269  walk->src.phys.offset &= PAGE_SIZE - 1;  in blkcipher_walk_next()
   270  walk->dst.phys.offset &= PAGE_SIZE - 1;  in blkcipher_walk_next()
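offset_in_page() at lines 202/204 extracts the byte position of an address within its page; the masks at lines 269-270 apply the same reduction to a stored offset. A minimal sketch, assuming 4 KiB pages and redefining the macro locally:

    #include <assert.h>
    #include <stdint.h>

    #define PAGE_SIZE 4096UL

    /* Local stand-in for the kernel's offset_in_page(): the low bits of
     * an address select the byte within its page. */
    #define offset_in_page(p) ((uintptr_t)(p) & (PAGE_SIZE - 1))

    int main(void)
    {
        char *addr = (char *)(uintptr_t)0x12345678;
        assert(offset_in_page(addr) == 0x678);

        unsigned long off = PAGE_SIZE + 0x30;   /* offset past one full page */
        off &= PAGE_SIZE - 1;                   /* reduce to the in-page part */
        assert(off == 0x30);
        return 0;
    }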
algif_skcipher.c
   119  sg[i].offset += plen;  in skcipher_pull_sgl()
   299  PAGE_SIZE - sg->offset - sg->length);  in skcipher_sendmsg()
   302  sg->offset + sg->length,  in skcipher_sendmsg()
   308  ctx->merge = (sg->offset + sg->length) &  in skcipher_sendmsg()
   373  int offset, size_t size, int flags)  in skcipher_sendpage() argument
   402  sg_set_page(sgl->sg + sgl->cur, page, size, offset);  in skcipher_sendpage()
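The assignment at line 308 is a merge heuristic: after copying user data into the current page, skcipher_sendmsg() records whether the write ended mid-page. A nonzero result means the next write may append to the same scatterlist entry instead of allocating a fresh page. A sketch of the test with illustrative numbers:

    #include <stdio.h>

    #define PAGE_SIZE 4096u

    int main(void)
    {
        unsigned int sg_offset = 100, sg_length = 500;

        unsigned int merge = (sg_offset + sg_length) & (PAGE_SIZE - 1);
        printf("merge=%u\n", merge);        /* 600: page still has room */

        sg_length = PAGE_SIZE - sg_offset;  /* write exactly to page end */
        merge = (sg_offset + sg_length) & (PAGE_SIZE - 1);
        printf("merge=%u\n", merge);        /* 0: next write needs a new page */
        return 0;
    }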
xcbc.c
   153  unsigned int offset = 0;  in crypto_xcbc_digest_final() local
   166  offset += bs;  in crypto_xcbc_digest_final()
   170  crypto_xor(prev, consts + offset, bs);  in crypto_xcbc_digest_final()
cmac.c
   179  unsigned int offset = 0;  in crypto_cmac_digest_final() local
   192  offset += bs;  in crypto_cmac_digest_final()
   196  crypto_xor(prev, consts + offset, bs);  in crypto_cmac_digest_final()
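crypto_cmac_digest_final() and crypto_xcbc_digest_final() (above) share the same final-block logic: consts holds two block-sized subkeys back to back, and offset selects which one to XOR into the last block, 0 when the block is complete and bs when it had to be padded. A userspace sketch with made-up subkey values and a local stand-in for crypto_xor():

    #include <stdio.h>
    #include <string.h>

    #define BS 16   /* AES block size */

    /* Local stand-in for the kernel's crypto_xor(). */
    static void xor_bytes(unsigned char *dst, const unsigned char *src, int n)
    {
        for (int i = 0; i < n; i++)
            dst[i] ^= src[i];
    }

    int main(void)
    {
        unsigned char consts[2 * BS];       /* K1 followed by K2 */
        unsigned char prev[BS] = { 0 };
        unsigned int offset = 0;
        int final_block_full = 0;

        memset(consts, 0x11, BS);           /* pretend K1 */
        memset(consts + BS, 0x22, BS);      /* pretend K2 */

        if (!final_block_full)
            offset += BS;                   /* padded block: pick K2 */

        xor_bytes(prev, consts + offset, BS);
        printf("used %s\n", offset ? "K2" : "K1");
        return 0;
    }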
authencesn.c
   423  vdst = PageHighMem(dstp) ? NULL : page_address(dstp) + dst->offset;  in crypto_authenc_esn_genicv()
   445  sg_set_page(hsg, sg_page(assoc), assoc->length, assoc->offset);  in crypto_authenc_esn_genicv()
   446  sg_set_page(hsg + 1, sg_page(assoc2), assoc2->length, assoc2->offset);  in crypto_authenc_esn_genicv()
   449  sg_set_page(tsg, sg_page(assoc1), assoc1->length, assoc1->offset);  in crypto_authenc_esn_genicv()
   591  vsrc = PageHighMem(srcp) ? NULL : page_address(srcp) + src->offset;  in crypto_authenc_esn_iverify()
   613  sg_set_page(hsg, sg_page(assoc), assoc->length, assoc->offset);  in crypto_authenc_esn_iverify()
   614  sg_set_page(hsg + 1, sg_page(assoc2), assoc2->length, assoc2->offset);  in crypto_authenc_esn_iverify()
   617  sg_set_page(tsg, sg_page(assoc1), assoc1->length, assoc1->offset);  in crypto_authenc_esn_iverify()
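Lines 423 and 591 use a pattern that recurs below in authenc.c, eseqiv.c and gcm.c: a lowmem page has a permanent kernel mapping, so page_address() plus the scatterlist offset yields a usable pointer directly, while a highmem page would need a temporary kmap, so the pointer is left NULL and a slower path takes over. A kernel-context fragment (not standalone-runnable) isolating the idiom; direct_vaddr_or_null() is a made-up helper name:

    #include <linux/highmem.h>
    #include <linux/scatterlist.h>

    /* Return a direct kernel virtual address for the data an sg entry
     * describes, or NULL if the page lives in highmem and would need a
     * temporary mapping first. */
    static void *direct_vaddr_or_null(struct scatterlist *sg)
    {
        struct page *page = sg_page(sg);

        return PageHighMem(page) ? NULL : page_address(page) + sg->offset;
    }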
ablkcipher.c
   221  walk->src.offset = offset_in_page(walk->in.offset);  in ablkcipher_next_fast()
   223  walk->dst.offset = offset_in_page(walk->out.offset);  in ablkcipher_next_fast()
   266  walk->src.offset = ((unsigned long)src & (PAGE_SIZE - 1));  in ablkcipher_walk_next()
   267  walk->dst.offset = ((unsigned long)dst & (PAGE_SIZE - 1));  in ablkcipher_walk_next()
md4.c
   204  const unsigned int offset = mctx->byte_count & 0x3f;  in md4_final() local
   205  char *p = (char *)mctx->block + offset;  in md4_final()
   206  int padding = 56 - (offset + 1);  in md4_final()
eseqiv.c
    96  vsrc = PageHighMem(srcp) ? NULL : page_address(srcp) + osrc->offset;  in eseqiv_givencrypt()
    97  vdst = PageHighMem(dstp) ? NULL : page_address(dstp) + odst->offset;  in eseqiv_givencrypt()
algif_hash.c
   110  int offset, size_t size, int flags)  in hash_sendpage() argument
   119  sg_set_page(ctx->sgl.sg, page, size, offset);  in hash_sendpage()
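hash_sendpage() receives a page/offset/size triple from the splice machinery and simply points a one-entry scatterlist at it. A kernel-context fragment (not standalone-runnable) showing the shape of that setup; sg_over_page() is a made-up wrapper name:

    #include <linux/scatterlist.h>

    /* Point a one-entry scatterlist at 'size' bytes starting 'offset'
     * bytes into 'page'. sg_init_table() terminates the list;
     * sg_set_page() fills in the page, length and offset fields. */
    static void sg_over_page(struct scatterlist *sg, struct page *page,
                             unsigned int size, unsigned int offset)
    {
        sg_init_table(sg, 1);
        sg_set_page(sg, page, size, offset);
    }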
authenc.c
   329  vdst = PageHighMem(dstp) ? NULL : page_address(dstp) + dst->offset;  in crypto_authenc_genicv()
   342  sg_set_page(asg, sg_page(assoc), assoc->length, assoc->offset);  in crypto_authenc_genicv()
   483  vsrc = PageHighMem(srcp) ? NULL : page_address(srcp) + src->offset;  in crypto_authenc_iverify()
   496  sg_set_page(asg, sg_page(assoc), assoc->length, assoc->offset);  in crypto_authenc_iverify()
algapi.c
   869  void *__crypto_dequeue_request(struct crypto_queue *queue, unsigned int offset)  in __crypto_dequeue_request() argument
   885  offset;  in __crypto_dequeue_request()
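The offset argument to __crypto_dequeue_request() lets the caller recover its own larger request structure from the generic entry stored on the queue, container_of()-style byte arithmetic. A runnable userspace analogue with made-up structure names:

    #include <stddef.h>
    #include <stdio.h>

    /* A larger request structure embeds a generic base; offsetof() lets
     * byte arithmetic step between the two views of the same object. */
    struct base_req  { int flags; };
    struct outer_req { char iv[16]; struct base_req base; };

    int main(void)
    {
        struct outer_req req = { .base.flags = 42 };
        struct base_req *b = &req.base;

        /* Step back from the member to the containing structure. */
        struct outer_req *o =
            (struct outer_req *)((char *)b - offsetof(struct outer_req, base));

        printf("%d\n", o->base.flags);      /* prints 42 */
        return 0;
    }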
gcm.c
  1151  vsrc = PageHighMem(srcp) ? NULL : page_address(srcp) + src->offset;  in crypto_rfc4543_crypt()
  1161  req->assoc->offset);  in crypto_rfc4543_crypt()
testmgr.c
   603  if (WARN_ON(sg[k - 1].offset +  in __test_aead()
/crypto/async_tx/
async_pq.c
    50  const unsigned char *scfs, unsigned int offset, int disks,  in do_async_gen_syndrome() argument
    70  dma_dest[0] = dma_map_page(dma->dev, P(blocks, disks), offset,  in do_async_gen_syndrome()
    75  dma_dest[1] = dma_map_page(dma->dev, Q(blocks, disks), offset,  in do_async_gen_syndrome()
    86  dma_src[idx] = dma_map_page(dma->dev, blocks[i], offset, len,  in do_async_gen_syndrome()
   149  do_sync_gen_syndrome(struct page **blocks, unsigned int offset, int disks,  in do_sync_gen_syndrome() argument
   165  srcs[i] = page_address(blocks[i]) + offset;  in do_sync_gen_syndrome()
   197  async_gen_syndrome(struct page **blocks, unsigned int offset, int disks,  in async_gen_syndrome() argument
   217  is_dma_pq_aligned(device, offset, 0, len)) {  in async_gen_syndrome()
   221  return do_async_gen_syndrome(chan, blocks, raid6_gfexp, offset,  in async_gen_syndrome()
   233  BUG_ON(len + offset > PAGE_SIZE);  in async_gen_syndrome()
   [all …]
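do_async_gen_syndrome() maps every source and destination page at the same intra-page offset, which is why async_gen_syndrome() can insist on BUG_ON(len + offset > PAGE_SIZE): a single dma_map_page() call can only describe bytes within one page. A kernel-context fragment (not standalone-runnable) reducing the pattern to its core; map_block() is a made-up helper name:

    #include <linux/dma-mapping.h>

    /* Map 'len' bytes starting 'offset' bytes into 'page' for a
     * device-bound DMA transfer; offset + len must stay within the page. */
    static dma_addr_t map_block(struct device *dev, struct page *page,
                                unsigned int offset, size_t len)
    {
        return dma_map_page(dev, page, offset, len, DMA_TO_DEVICE);
    }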
async_xor.c
    37  unsigned int offset, int src_cnt, size_t len, dma_addr_t *dma_src,  in do_async_xor() argument
    52  dma_dest = dma_map_page(dma->dev, dest, offset, len, DMA_BIDIRECTIONAL);  in do_async_xor()
    61  dma_src[xor_src_cnt++] = dma_map_page(dma->dev, src_list[i], offset,  in do_async_xor()
   126  do_sync_xor(struct page *dest, struct page **src_list, unsigned int offset,  in do_sync_xor() argument
   143  srcs[xor_src_cnt++] = page_address(src_list[i]) + offset;  in do_sync_xor()
   146  dest_buf = page_address(dest) + offset;  in do_sync_xor()
   186  async_xor(struct page *dest, struct page **src_list, unsigned int offset,  in async_xor() argument
   201  if (dma_src && chan && is_dma_xor_aligned(chan->device, offset, 0, len)) {  in async_xor()
   205  return do_async_xor(chan, dest, src_list, offset, src_cnt, len,  in async_xor()
   224  do_sync_xor(dest, src_list, offset, src_cnt, len, submit);  in async_xor()
   [all …]
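When no DMA channel is available, do_sync_xor() converts each page to a plain pointer with page_address() + offset (lines 143/146) and XORs on the CPU. A runnable userspace analogue where ordinary arrays stand in for pages:

    #include <stdio.h>

    /* XOR 'len' bytes of each source into 'dest', all at a common offset,
     * the way the sync fallback operates on mapped pages. */
    static void xor_blocks_demo(unsigned char *dest, unsigned char **srcs,
                                int src_cnt, unsigned int offset, size_t len)
    {
        for (size_t i = 0; i < len; i++) {
            unsigned char v = 0;
            for (int s = 0; s < src_cnt; s++)
                v ^= srcs[s][offset + i];
            dest[offset + i] = v;
        }
    }

    int main(void)
    {
        unsigned char a[8] = { 1, 2, 3, 4, 5, 6, 7, 8 };
        unsigned char b[8] = { 8, 7, 6, 5, 4, 3, 2, 1 };
        unsigned char d[8] = { 0 };
        unsigned char *srcs[] = { a, b };

        xor_blocks_demo(d, srcs, 2, 4, 4);  /* XOR bytes 4..7 only */
        printf("%d %d %d %d\n", d[4], d[5], d[6], d[7]);  /* 1 5 5 9 */
        return 0;
    }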
async_memset.c
    43  async_memset(struct page *dest, int val, unsigned int offset, size_t len,  in async_memset() argument
    51  if (device && is_dma_fill_aligned(device, offset, 0, len)) {  in async_memset()
    59  dma_dest = dma_map_page(device->dev, dest, offset, len,  in async_memset()
    73  dest_buf = page_address(dest) + offset;  in async_memset()