
Searched refs:offset (Results 1 – 20 of 20) sorted by relevance

/crypto/

cts.c
61 unsigned offset; member
110 unsigned int offset; in cts_cbc_encrypt() local
113 offset = rctx->offset; in cts_cbc_encrypt()
114 lastn = req->cryptlen - offset; in cts_cbc_encrypt()
116 sg = scatterwalk_ffwd(rctx->sg, req->dst, offset - bsize); in cts_cbc_encrypt()
120 scatterwalk_map_and_copy(d, req->src, offset, lastn, 0); in cts_cbc_encrypt()
155 unsigned int offset; in crypto_cts_encrypt() local
171 offset = rounddown(nbytes - 1, bsize); in crypto_cts_encrypt()
172 rctx->offset = offset; in crypto_cts_encrypt()
177 offset, req->iv); in crypto_cts_encrypt()
[all …]
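
The crypto_cts_encrypt() hits above locate the ciphertext-stealing tail by rounding the request length down to a block boundary. A minimal userspace sketch of that arithmetic, with the rounddown() macro mirroring the kernel's and hypothetical example values:

    #include <stdio.h>

    /* Mirrors the kernel's rounddown(): round x down to a multiple of y. */
    #define rounddown(x, y) (((x) / (y)) * (y))

    int main(void)
    {
        unsigned int bsize = 16;  /* cipher block size, e.g. AES */
        unsigned int nbytes = 37; /* hypothetical request length */

        /* As at line 171 above: nbytes - 1 ensures that an exact multiple
         * of bsize still leaves a full final block to steal from. */
        unsigned int offset = rounddown(nbytes - 1, bsize);
        unsigned int lastn = nbytes - offset; /* tail length, cf. line 114 */

        printf("offset=%u lastn=%u\n", offset, lastn); /* offset=32 lastn=5 */
        return 0;
    }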

ahash.c
43 unsigned int offset = walk->offset; in hash_walk_next() local
45 ((unsigned int)(PAGE_SIZE)) - offset); in hash_walk_next()
51 walk->data += offset; in hash_walk_next()
53 if (offset & alignmask) { in hash_walk_next()
54 unsigned int unaligned = alignmask + 1 - (offset & alignmask); in hash_walk_next()
69 walk->offset = sg->offset; in hash_walk_new_entry()
70 walk->pg = sg_page(walk->sg) + (walk->offset >> PAGE_SHIFT); in hash_walk_new_entry()
71 walk->offset = offset_in_page(walk->offset); in hash_walk_new_entry()
85 walk->data -= walk->offset; in crypto_hash_walk_done()
87 if (walk->entrylen && (walk->offset & alignmask) && !err) { in crypto_hash_walk_done()
[all …]
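
hash_walk_next() above uses alignmask arithmetic to work out how many bytes must be consumed before the walk pointer becomes aligned again. The computation in isolation, as a standalone helper rather than the kernel API:

    /* For an alignment of (alignmask + 1), which must be a power of two:
     * bytes from `offset` to the next aligned boundary, cf. line 54 above. */
    static unsigned int bytes_to_alignment(unsigned int offset,
                                           unsigned int alignmask)
    {
        if (offset & alignmask)
            return alignmask + 1 - (offset & alignmask);
        return 0; /* already aligned */
    }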

ablkcipher.c
206 walk->src.offset = offset_in_page(walk->in.offset); in ablkcipher_next_fast()
208 walk->dst.offset = offset_in_page(walk->out.offset); in ablkcipher_next_fast()
251 walk->src.offset = ((unsigned long)src & (PAGE_SIZE - 1)); in ablkcipher_walk_next()
252 walk->dst.offset = ((unsigned long)dst & (PAGE_SIZE - 1)); in ablkcipher_walk_next()

blkcipher.c
195 walk->src.phys.offset = offset_in_page(walk->in.offset); in blkcipher_next_fast()
197 walk->dst.phys.offset = offset_in_page(walk->out.offset); in blkcipher_next_fast()
202 diff = walk->src.phys.offset - walk->dst.phys.offset; in blkcipher_next_fast()
263 walk->src.phys.offset &= PAGE_SIZE - 1; in blkcipher_walk_next()
264 walk->dst.phys.offset &= PAGE_SIZE - 1; in blkcipher_walk_next()
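
Both files above reduce an address or offset to its position within a page. offset_in_page() and the explicit `& (PAGE_SIZE - 1)` mask are the same computation whenever PAGE_SIZE is a power of two; a userspace sketch with PAGE_SIZE hard-coded for illustration:

    #include <stdio.h>

    #define PAGE_SIZE 4096UL
    /* Same computation as the kernel's offset_in_page(). */
    #define offset_in_page(p) ((unsigned long)(p) & (PAGE_SIZE - 1))

    int main(void)
    {
        unsigned long addr = 0x12345678;
        /* The two spellings seen above agree. */
        printf("%lu %lu\n", offset_in_page(addr),
               addr & (PAGE_SIZE - 1)); /* prints: 1656 1656 */
        return 0;
    }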

af_alg.c
527 unsigned int af_alg_count_tsgl(struct sock *sk, size_t bytes, size_t offset) in af_alg_count_tsgl() argument
545 if (offset >= sg[i].length) { in af_alg_count_tsgl()
546 offset -= sg[i].length; in af_alg_count_tsgl()
551 bytes_count = sg[i].length - offset; in af_alg_count_tsgl()
553 offset = 0; in af_alg_count_tsgl()
617 sg[i].offset + dst_offset); in af_alg_pull_tsgl()
624 sg[i].offset += plen; in af_alg_pull_tsgl()
874 PAGE_SIZE - sg->offset - sg->length); in af_alg_sendmsg()
877 sg->offset + sg->length, in af_alg_sendmsg()
883 ctx->merge = (sg->offset + sg->length) & in af_alg_sendmsg()
[all …]
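
af_alg_count_tsgl() above first skips whole scatterlist entries while the starting offset still covers them, then counts what it can take from each. A simplified stand-in over a plain array of segment lengths (not the kernel scatterlist API):

    #include <stddef.h>

    static size_t count_segments(const size_t *len, size_t n,
                                 size_t bytes, size_t offset)
    {
        size_t i, count = 0;

        for (i = 0; i < n && bytes; i++) {
            if (offset >= len[i]) {         /* entry fully skipped, cf. line 545 */
                offset -= len[i];
                continue;
            }
            size_t avail = len[i] - offset; /* cf. line 551 */
            offset = 0;                     /* only the first entry is partial */
            bytes -= avail < bytes ? avail : bytes;
            count++;
        }
        return count;
    }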

skcipher.c
55 offset_in_page(walk->offset); in skcipher_map()
322 walk->src.phys.offset = offset_in_page(walk->in.offset); in skcipher_next_fast()
324 walk->dst.phys.offset = offset_in_page(walk->out.offset); in skcipher_next_fast()
329 diff = walk->src.phys.offset - walk->dst.phys.offset; in skcipher_next_fast()
366 if (unlikely((walk->in.offset | walk->out.offset) & walk->alignmask)) { in skcipher_walk_next()
390 walk->src.phys.offset &= PAGE_SIZE - 1; in skcipher_walk_next()
391 walk->dst.phys.offset &= PAGE_SIZE - 1; in skcipher_walk_next()
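
Line 366 above checks the alignment of the source and destination offsets in a single expression: OR-ing them and masking once is nonzero iff either offset is misaligned. As a standalone predicate:

    /* Nonzero iff in_off or out_off is not aligned to (alignmask + 1). */
    static unsigned int either_misaligned(unsigned int in_off,
                                          unsigned int out_off,
                                          unsigned int alignmask)
    {
        return (in_off | out_off) & alignmask;
    }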

xcbc.c
142 unsigned int offset = 0; in crypto_xcbc_digest_final() local
155 offset += bs; in crypto_xcbc_digest_final()
159 crypto_xor(prev, consts + offset, bs); in crypto_xcbc_digest_final()
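
crypto_xcbc_digest_final() above (like crypto_cmac_digest_final() further down) keeps two block-sized sub-keys back to back in `consts`; advancing `offset` by the block size selects the second one when the final block is partial and had to be padded. A sketch of the selection, with a hypothetical helper name:

    /* consts holds two sub-keys of bs bytes each; pick the second when the
     * final block was padded, as the `offset += bs` above does. */
    static const unsigned char *select_subkey(const unsigned char *consts,
                                              unsigned int bs,
                                              int final_block_partial)
    {
        return consts + (final_block_partial ? bs : 0);
    }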

testmgr.c
240 unsigned int offset; member
298 .src_divs = { { .proportion_of_total = 10000, .offset = 1 } },
305 .offset = 1,
321 { .proportion_of_total = 1900, .offset = 33 },
322 { .proportion_of_total = 3300, .offset = 7 },
323 { .proportion_of_total = 4800, .offset = 18 },
332 .offset = PAGE_SIZE - 32
335 .offset = PAGE_SIZE - 7
356 .src_divs = { { .proportion_of_total = 10000, .offset = 1 } },
363 .offset = 1,
[all …]
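
The proportion_of_total fields above appear to be parts per 10000 of the test buffer (the divisions at lines 321-323 sum to exactly 10000), with offset shifting where a division starts. Assuming that reading, a division's byte count would be roughly:

    /* Bytes covered by one division of a total_len-byte buffer; any
     * rounding remainder would go to the final division. */
    static unsigned int div_bytes(unsigned int total_len,
                                  unsigned int proportion_of_total)
    {
        return (unsigned int)((unsigned long long)total_len *
                              proportion_of_total / 10000);
    }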

shash.c
298 unsigned int offset; in shash_ahash_digest() local
302 (sg = req->src, offset = sg->offset, in shash_ahash_digest()
303 nbytes <= min(sg->length, ((unsigned int)(PAGE_SIZE)) - offset))) { in shash_ahash_digest()
307 err = crypto_shash_digest(desc, data + offset, nbytes, in shash_ahash_digest()

cmac.c
176 unsigned int offset = 0; in crypto_cmac_digest_final() local
189 offset += bs; in crypto_cmac_digest_final()
193 crypto_xor(prev, consts + offset, bs); in crypto_cmac_digest_final()

algif_hash.c
132 int offset, size_t size, int flags) in hash_sendpage() argument
144 sg_set_page(ctx->sgl.sg, page, size, offset); in hash_sendpage()
345 int offset, size_t size, int flags) in hash_sendpage_nokey() argument
353 return hash_sendpage(sock, page, offset, size, flags); in hash_sendpage_nokey()

md4.c
187 const unsigned int offset = mctx->byte_count & 0x3f; in md4_final() local
188 char *p = (char *)mctx->block + offset; in md4_final()
189 int padding = 56 - (offset + 1); in md4_final()

md5.c
180 const unsigned int offset = mctx->byte_count & 0x3f; in md5_final() local
181 char *p = (char *)mctx->block + offset; in md5_final()
182 int padding = 56 - (offset + 1); in md5_final()
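
md4_final() and md5_final() above share the classic Merkle-Damgard padding arithmetic: after the 0x80 terminator, enough zero bytes are added so that exactly 8 bytes remain for the bit-length field in a 64-byte block, and a negative result means the padding spills into an extra block. The same computation standalone:

    #include <stdio.h>

    static int padding_bytes(unsigned long long byte_count)
    {
        unsigned int offset = byte_count & 0x3f; /* position in 64-byte block */
        int padding = 56 - (int)(offset + 1);    /* zeros after the 0x80 byte */
        if (padding < 0)
            padding += 64;                       /* spill into an extra block */
        return padding;
    }

    int main(void)
    {
        printf("%d %d %d\n", padding_bytes(0), padding_bytes(55),
               padding_bytes(56)); /* prints: 55 0 63 */
        return 0;
    }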

xts.c
167 int offset = req->cryptlen & ~(XTS_BLOCK_SIZE - 1); in cts_final() local
175 offset - XTS_BLOCK_SIZE); in cts_final()
179 scatterwalk_map_and_copy(b, req->src, offset, tail, 0); in cts_final()
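
cts_final() in xts.c rounds cryptlen down to a multiple of XTS_BLOCK_SIZE with a mask rather than a division: for a power-of-two size, `x & ~(size - 1)` is the bitwise form of the rounddown() seen in cts.c above.

    /* Equivalent to rounddown(x, size) when size is a power of two. */
    static unsigned int round_down_pow2(unsigned int x, unsigned int size)
    {
        return x & ~(size - 1); /* e.g. 37 & ~15 == 32 for size == 16 */
    }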

scatterwalk.c
88 sg_set_page(dst, sg_page(src), src->length - len, src->offset + len); in scatterwalk_ffwd()

algif_skcipher.c
259 int offset, size_t size, int flags) in skcipher_sendpage_nokey() argument
267 return af_alg_sendpage(sock, page, offset, size, flags); in skcipher_sendpage_nokey()

algif_aead.c
428 int offset, size_t size, int flags) in aead_sendpage_nokey() argument
436 return af_alg_sendpage(sock, page, offset, size, flags); in aead_sendpage_nokey()

essiv.c
248 req->src->offset); in essiv_aead_crypt()
/crypto/async_tx/

async_pq.c
107 do_sync_gen_syndrome(struct page **blocks, unsigned int offset, int disks, in do_sync_gen_syndrome() argument
124 srcs[i] = page_address(blocks[i]) + offset; in do_sync_gen_syndrome()
163 async_gen_syndrome(struct page **blocks, unsigned int offset, int disks, in async_gen_syndrome() argument
182 is_dma_pq_aligned(device, offset, 0, len)) { in async_gen_syndrome()
199 unmap->addr[j] = dma_map_page(device->dev, blocks[i], offset, in async_gen_syndrome()
213 offset, len, DMA_BIDIRECTIONAL); in async_gen_syndrome()
222 offset, len, DMA_BIDIRECTIONAL); in async_gen_syndrome()
243 BUG_ON(len + offset > PAGE_SIZE); in async_gen_syndrome()
247 BUG_ON(len + offset > PAGE_SIZE); in async_gen_syndrome()
249 do_sync_gen_syndrome(blocks, offset, disks, len, submit); in async_gen_syndrome()
[all …]
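
The BUG_ON checks at lines 243 and 247 above enforce that a buffer addressed as page_address(page) + offset cannot cross its page boundary. A userspace stand-in for the same assertion:

    #include <assert.h>

    #define PAGE_SIZE 4096U

    /* len bytes starting at `offset` must fit within a single page. */
    static void check_in_page(unsigned int offset, unsigned int len)
    {
        assert(len + offset <= PAGE_SIZE);
    }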

async_xor.c
100 do_sync_xor(struct page *dest, struct page **src_list, unsigned int offset, in do_sync_xor() argument
117 srcs[xor_src_cnt++] = page_address(src_list[i]) + offset; in do_sync_xor()
120 dest_buf = page_address(dest) + offset; in do_sync_xor()
160 async_xor(struct page *dest, struct page **src_list, unsigned int offset, in async_xor() argument
174 if (unmap && is_dma_xor_aligned(device, offset, 0, len)) { in async_xor()
187 offset, len, DMA_TO_DEVICE); in async_xor()
191 unmap->addr[j] = dma_map_page(device->dev, dest, offset, len, in async_xor()
216 do_sync_xor(dest, src_list, offset, src_cnt, len, submit); in async_xor()
223 static int page_is_zero(struct page *p, unsigned int offset, size_t len) in page_is_zero() argument
225 return !memchr_inv(page_address(p) + offset, 0, len); in page_is_zero()
[all …]
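
page_is_zero() above leans on memchr_inv(), which returns NULL when every byte in the range equals the given value. A portable userspace sketch of the same zero test; the memcmp-against-shifted-self trick is an illustration, not the kernel implementation:

    #include <string.h>
    #include <stddef.h>

    /* True iff all len bytes at p are zero: check the first byte, then
     * compare every remaining byte against its predecessor. */
    static int buf_is_zero(const unsigned char *p, size_t len)
    {
        return len == 0 || (p[0] == 0 && memcmp(p, p + 1, len - 1) == 0);
    }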