Lines Matching "multi-block" (arch/x86/crypto/glue_helper.c)
1 // SPDX-License-Identifier: GPL-2.0-or-later
3 * Shared glue code for 128bit block ciphers
5 * Copyright © 2012-2013 Jussi Kivilinna <jussi.kivilinna@iki.fi>
10 * (C) Copyright IBM Corp. 2007 - Joy Latten <latten@us.ibm.com>
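
Every function below iterates the same descriptor: a table of cipher routines ordered from the widest SIMD batch down to a single block. A reconstruction of that descriptor's shape, paraphrased from the fields visible in the matches (num_funcs, fpu_blocks_limit, funcs[i].num_blocks, fn_u.*) rather than quoted from the real glue_helper.h:

typedef unsigned char u8;
typedef void (*common_glue_func_t)(void *ctx, u8 *dst, const u8 *src);

struct common_glue_func_entry {
        unsigned int num_blocks;        /* blocks processed per call */
        union {
                common_glue_func_t ecb; /* the real header also carries
                                           cbc/ctr/xts variants that take
                                           an extra IV/tweak argument */
        } fn_u;
};

struct common_glue_ctx {
        unsigned int num_funcs;
        int fpu_blocks_limit;           /* -1: FPU never needed */
        struct common_glue_func_entry funcs[];
};
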
39 fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit, in glue_ecb_req_128bit()
41 for (i = 0; i < gctx->num_funcs; i++) { in glue_ecb_req_128bit()
42 func_bytes = bsize * gctx->funcs[i].num_blocks; in glue_ecb_req_128bit()
47 /* Process multi-block batch */ in glue_ecb_req_128bit()
49 gctx->funcs[i].fn_u.ecb(ctx, dst, src); in glue_ecb_req_128bit()
52 nbytes -= func_bytes; in glue_ecb_req_128bit()
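
glue_ecb_req_128bit() enables the FPU once per walk chunk (glue_fpu_begin), then lets each funcs[] entry consume as many of its num_blocks-sized batches as still fit before falling through to the next, narrower entry. A minimal user-space model of that dispatch loop, with hypothetical names rather than the kernel API:

#include <stddef.h>

#define BSIZE 16        /* 128-bit block */

struct batch_fn {
        unsigned int num_blocks;        /* blocks handled per call */
        void (*ecb)(void *ctx, unsigned char *dst, const unsigned char *src);
};

/* funcs[] is ordered widest batch first, ending with a 1-block entry. */
static size_t ecb_walk(void *ctx, const struct batch_fn *funcs,
                       unsigned int num_funcs, unsigned char *dst,
                       const unsigned char *src, size_t nbytes)
{
        unsigned int i;

        for (i = 0; i < num_funcs; i++) {
                size_t func_bytes = (size_t)BSIZE * funcs[i].num_blocks;

                if (nbytes < func_bytes)
                        continue;

                /* Process multi-block batch */
                do {
                        funcs[i].ecb(ctx, dst, src);
                        src += func_bytes;
                        dst += func_bytes;
                        nbytes -= func_bytes;
                } while (nbytes >= func_bytes);

                if (nbytes < BSIZE)
                        break;
        }
        return nbytes;  /* 0 once every full block is consumed */
}
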
88 nbytes -= bsize; in glue_cbc_encrypt_req_128bit()
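
Only one line of glue_cbc_encrypt_req_128bit() matches, and for a reason: CBC encryption cannot be batched, since block i's input is plaintext i xored with ciphertext i-1, so the encrypt path always advances a single block at a time. A sketch of that serial walk (fn_128 is a hypothetical one-block encryptor):

#include <string.h>

#define BSIZE 16        /* 128-bit block */

/* C[i] = E(P[i] xor C[i-1]); iv enters as C[-1], leaves as the last C. */
static void cbc_encrypt_walk(void *ctx,
                             void (*fn_128)(void *ctx, unsigned char *dst,
                                            const unsigned char *src),
                             unsigned char *dst, const unsigned char *src,
                             size_t nbytes, unsigned char iv[BSIZE])
{
        unsigned int i;

        while (nbytes >= BSIZE) {
                for (i = 0; i < BSIZE; i++)
                        iv[i] ^= src[i];        /* P[i] xor C[i-1] */
                fn_128(ctx, dst, iv);           /* encrypt the chained block */
                memcpy(iv, dst, BSIZE);         /* C[i] chains into block i+1 */
                src += BSIZE;
                dst += BSIZE;
                nbytes -= BSIZE;
        }
}
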
117 fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit, in glue_cbc_decrypt_req_128bit()
119 /* Start of the last block. */ in glue_cbc_decrypt_req_128bit()
120 src += nbytes / bsize - 1; in glue_cbc_decrypt_req_128bit()
121 dst += nbytes / bsize - 1; in glue_cbc_decrypt_req_128bit()
125 for (i = 0; i < gctx->num_funcs; i++) { in glue_cbc_decrypt_req_128bit()
126 num_blocks = gctx->funcs[i].num_blocks; in glue_cbc_decrypt_req_128bit()
132 /* Process multi-block batch */ in glue_cbc_decrypt_req_128bit()
134 src -= num_blocks - 1; in glue_cbc_decrypt_req_128bit()
135 dst -= num_blocks - 1; in glue_cbc_decrypt_req_128bit()
137 gctx->funcs[i].fn_u.cbc(ctx, (u8 *)dst, in glue_cbc_decrypt_req_128bit()
140 nbytes -= func_bytes; in glue_cbc_decrypt_req_128bit()
144 u128_xor(dst, dst, --src); in glue_cbc_decrypt_req_128bit()
145 dst--; in glue_cbc_decrypt_req_128bit()
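
Decryption carries no such dependency, so glue_cbc_decrypt_req_128bit() seeks to the last block and walks backwards: a SIMD helper can decrypt num_blocks ciphertexts at once and xor each with the preceding ciphertext block, and starting from the tail keeps in-place (dst == src) requests correct, since each step reads only ciphertext that has not yet been overwritten. A one-block-at-a-time model of the same reverse walk:

#include <string.h>

#define BSIZE 16        /* 128-bit block */

static void cbc_decrypt_walk(void *ctx,
                             void (*fn_128)(void *ctx, unsigned char *dst,
                                            const unsigned char *src),
                             unsigned char *dst, const unsigned char *src,
                             size_t nbytes, unsigned char iv[BSIZE])
{
        size_t blocks = nbytes / BSIZE;
        unsigned char last[BSIZE];
        unsigned int i;

        if (!blocks)
                return;

        /* Start of the last block, as in the matches above. */
        src += (blocks - 1) * BSIZE;
        dst += (blocks - 1) * BSIZE;
        memcpy(last, src, BSIZE);       /* final ciphertext is the next IV */

        while (blocks-- > 1) {
                fn_128(ctx, dst, src);          /* P[i] = D(C[i]) ... */
                src -= BSIZE;
                for (i = 0; i < BSIZE; i++)
                        dst[i] ^= src[i];       /* ... xor C[i-1] */
                dst -= BSIZE;
        }

        fn_128(ctx, dst, src);                  /* first block ... */
        for (i = 0; i < BSIZE; i++)
                dst[i] ^= iv[i];                /* ... xors the original IV */
        memcpy(iv, last, BSIZE);
}
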
178 fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit, in glue_ctr_req_128bit()
183 for (i = 0; i < gctx->num_funcs; i++) { in glue_ctr_req_128bit()
184 num_blocks = gctx->funcs[i].num_blocks; in glue_ctr_req_128bit()
190 /* Process multi-block batch */ in glue_ctr_req_128bit()
192 gctx->funcs[i].fn_u.ctr(ctx, (u8 *)dst, in glue_ctr_req_128bit()
197 nbytes -= func_bytes; in glue_ctr_req_128bit()
216 gctx->funcs[gctx->num_funcs - 1].fn_u.ctr(ctx, (u8 *)&tmp, in glue_ctr_req_128bit()
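
glue_ctr_req_128bit() batches keystream generation the same way; the last match is the tail path, where fewer than a full block remains, so the final single-block entry encrypts the counter into a stack tmp and only the leftover bytes are xored out. A standalone model using a big-endian counter (a simplification: the kernel tracks the counter as an le128 and the per-cipher helpers handle byte order):

#include <stddef.h>

#define BSIZE 16        /* 128-bit block */

/* Big-endian 128-bit counter increment. */
static void ctr_inc(unsigned char ctrblk[BSIZE])
{
        int i;

        for (i = BSIZE - 1; i >= 0; i--)
                if (++ctrblk[i])
                        break;
}

static void ctr_walk(void *ctx,
                     void (*encrypt_block)(void *ctx, unsigned char *dst,
                                           const unsigned char *src),
                     unsigned char *dst, const unsigned char *src,
                     size_t nbytes, unsigned char ctrblk[BSIZE])
{
        unsigned char tmp[BSIZE];
        size_t i;

        while (nbytes >= BSIZE) {
                encrypt_block(ctx, tmp, ctrblk);        /* keystream block */
                ctr_inc(ctrblk);
                for (i = 0; i < BSIZE; i++)
                        dst[i] = src[i] ^ tmp[i];
                src += BSIZE;
                dst += BSIZE;
                nbytes -= BSIZE;
        }

        if (nbytes) {   /* partial tail: use only nbytes of the keystream */
                encrypt_block(ctx, tmp, ctrblk);
                ctr_inc(ctrblk);
                for (i = 0; i < nbytes; i++)
                        dst[i] = src[i] ^ tmp[i];
        }
}
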
234 unsigned int nbytes = walk->nbytes; in __glue_xts_req_128bit()
235 u128 *src = walk->src.virt.addr; in __glue_xts_req_128bit()
236 u128 *dst = walk->dst.virt.addr; in __glue_xts_req_128bit()
240 /* Process multi-block batch */ in __glue_xts_req_128bit()
241 for (i = 0; i < gctx->num_funcs; i++) { in __glue_xts_req_128bit()
242 num_blocks = gctx->funcs[i].num_blocks; in __glue_xts_req_128bit()
247 gctx->funcs[i].fn_u.xts(ctx, (u8 *)dst, in __glue_xts_req_128bit()
249 walk->iv); in __glue_xts_req_128bit()
253 nbytes -= func_bytes; in __glue_xts_req_128bit()
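
__glue_xts_req_128bit() hands walk->iv (the running tweak T) to each batch routine, which consumes and advances it; between consecutive blocks the tweak is multiplied by x in GF(2^128). A sketch of that doubling step, assuming the tweak is held as two host-order words loaded from the little-endian block (the shape of the kernel's gf128mul_x_ble()):

#include <stdint.h>

/*
 * t[0] holds tweak bytes 0-7, t[1] bytes 8-15 (little-endian block).
 * Reduction polynomial: x^128 + x^7 + x^2 + x + 1, i.e. constant 0x87.
 */
static void xts_tweak_next(uint64_t t[2])
{
        uint64_t carry = t[1] >> 63;            /* bit shifted out of bit 127 */

        t[1] = (t[1] << 1) | (t[0] >> 63);
        t[0] = (t[0] << 1) ^ (carry * 0x87);    /* fold the carry back in */
}
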
270 const bool cts = (req->cryptlen % XTS_BLOCK_SIZE); in glue_xts_req_128bit()
278 if (req->cryptlen < XTS_BLOCK_SIZE) in glue_xts_req_128bit()
279 return -EINVAL; in glue_xts_req_128bit()
284 tail = req->cryptlen % XTS_BLOCK_SIZE + XTS_BLOCK_SIZE; in glue_xts_req_128bit()
290 skcipher_request_set_crypt(&subreq, req->src, req->dst, in glue_xts_req_128bit()
291 req->cryptlen - tail, req->iv); in glue_xts_req_128bit()
301 fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit, in glue_xts_req_128bit()
316 u8 *next_tweak, *final_tweak = req->iv; in glue_xts_req_128bit()
321 dst = src = scatterwalk_ffwd(s, req->src, req->cryptlen); in glue_xts_req_128bit()
322 if (req->dst != req->src) in glue_xts_req_128bit()
323 dst = scatterwalk_ffwd(d, req->dst, req->cryptlen); in glue_xts_req_128bit()
326 next_tweak = memcpy(b, req->iv, XTS_BLOCK_SIZE); in glue_xts_req_128bit()
329 next_tweak = req->iv; in glue_xts_req_128bit()
342 memcpy(b + 1, b, tail - XTS_BLOCK_SIZE); in glue_xts_req_128bit()
344 tail - XTS_BLOCK_SIZE, 0); in glue_xts_req_128bit()
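
glue_xts_req_128bit() adds ciphertext stealing on top: a request shorter than one block is rejected with -EINVAL, and when cryptlen is not block-aligned the last full block plus the partial remainder (the tail) are withheld from the bulk pass and finished by the scatterwalk_ffwd()/memcpy() epilogue seen above, which steals the trailing bytes of the last full ciphertext block to pad the short one. The length split, isolated:

#include <stddef.h>

#define XTS_BLOCK_SIZE 16

static int xts_split(size_t cryptlen, size_t *bulk, size_t *tail)
{
        if (cryptlen < XTS_BLOCK_SIZE)
                return -1;                      /* kernel: -EINVAL */

        /* Hold back one full block plus the partial block, if any. */
        *tail = (cryptlen % XTS_BLOCK_SIZE) ?
                cryptlen % XTS_BLOCK_SIZE + XTS_BLOCK_SIZE : 0;
        *bulk = cryptlen - *tail;
        return 0;
}
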
370 /* CC <- T xor C */ in glue_xts_crypt_128bit_one()
373 /* PP <- D(Key2,CC) */ in glue_xts_crypt_128bit_one()
376 /* P <- T xor PP */ in glue_xts_crypt_128bit_one()
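
The closing comments come from glue_xts_crypt_128bit_one(), the per-block primitive behind the whole XTS path: xor the tweak in, run the single-block cipher, xor the tweak out again. The same shape in standalone C, around a hypothetical one-block callback:

#define XTS_BLOCK_SIZE 16

static void xts_one_block(void *ctx,
                          void (*fn)(void *ctx, unsigned char *dst,
                                     const unsigned char *src),
                          unsigned char *dst, const unsigned char *src,
                          const unsigned char tweak[XTS_BLOCK_SIZE])
{
        unsigned char buf[XTS_BLOCK_SIZE];
        unsigned int i;

        for (i = 0; i < XTS_BLOCK_SIZE; i++)
                buf[i] = src[i] ^ tweak[i];     /* CC <- T xor C */

        fn(ctx, buf, buf);                      /* PP <- D(Key2, CC) */

        for (i = 0; i < XTS_BLOCK_SIZE; i++)
                dst[i] = buf[i] ^ tweak[i];     /* P <- T xor PP */
}
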