// SPDX-License-Identifier: GPL-2.0-only
/*
 * AES GCM routines supporting the Power 7+ Nest Accelerators driver
 *
 * Copyright (C) 2012 International Business Machines Inc.
 *
 * Author: Kent Yoder <yoder1@us.ibm.com>
 */

#include <crypto/internal/aead.h>
#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <crypto/gcm.h>
#include <crypto/scatterwalk.h>
#include <linux/module.h>
#include <linux/types.h>
#include <asm/vio.h>

#include "nx_csbcpb.h"
#include "nx.h"

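/*
 * Load the AES key into both the GCM co-processor block and the GCA
 * (AAD hashing) block, selecting the matching key-size properties.
 */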
static int gcm_aes_nx_set_key(struct crypto_aead *tfm,
			      const u8           *in_key,
			      unsigned int        key_len)
{
	struct nx_crypto_ctx *nx_ctx = crypto_aead_ctx(tfm);
	struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
	struct nx_csbcpb *csbcpb_aead = nx_ctx->csbcpb_aead;

	nx_ctx_init(nx_ctx, HCOP_FC_AES);

	switch (key_len) {
	case AES_KEYSIZE_128:
		NX_CPB_SET_KEY_SIZE(csbcpb, NX_KS_AES_128);
		NX_CPB_SET_KEY_SIZE(csbcpb_aead, NX_KS_AES_128);
		nx_ctx->ap = &nx_ctx->props[NX_PROPS_AES_128];
		break;
	case AES_KEYSIZE_192:
		NX_CPB_SET_KEY_SIZE(csbcpb, NX_KS_AES_192);
		NX_CPB_SET_KEY_SIZE(csbcpb_aead, NX_KS_AES_192);
		nx_ctx->ap = &nx_ctx->props[NX_PROPS_AES_192];
		break;
	case AES_KEYSIZE_256:
		NX_CPB_SET_KEY_SIZE(csbcpb, NX_KS_AES_256);
		NX_CPB_SET_KEY_SIZE(csbcpb_aead, NX_KS_AES_256);
		nx_ctx->ap = &nx_ctx->props[NX_PROPS_AES_256];
		break;
	default:
		return -EINVAL;
	}

	csbcpb->cpb.hdr.mode = NX_MODE_AES_GCM;
	memcpy(csbcpb->cpb.aes_gcm.key, in_key, key_len);

	csbcpb_aead->cpb.hdr.mode = NX_MODE_AES_GCA;
	memcpy(csbcpb_aead->cpb.aes_gca.key, in_key, key_len);

	return 0;
}

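/*
 * RFC 4106 keys carry a 4-byte nonce salt appended to the AES key;
 * split it off and stash it for IV construction at request time.
 */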
static int gcm4106_aes_nx_set_key(struct crypto_aead *tfm,
				  const u8           *in_key,
				  unsigned int        key_len)
{
	struct nx_crypto_ctx *nx_ctx = crypto_aead_ctx(tfm);
	char *nonce = nx_ctx->priv.gcm.nonce;
	int rc;

	if (key_len < 4)
		return -EINVAL;

	key_len -= 4;

	rc = gcm_aes_nx_set_key(tfm, in_key, key_len);
	if (rc)
		goto out;

	memcpy(nonce, in_key + key_len, 4);
out:
	return rc;
}

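/* RFC 4106 permits only 8-, 12-, or 16-byte authentication tags. */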
static int gcm4106_aes_nx_setauthsize(struct crypto_aead *tfm,
				      unsigned int authsize)
{
	switch (authsize) {
	case 8:
	case 12:
	case 16:
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

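/*
 * Hash the associated data with the GCA co-processor operation, chaining
 * intermediate results across h-calls as the scatter/gather limits
 * require. AAD of one block or less is copied out directly so the main
 * GCM operation can consume it in-line.
 */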
static int nx_gca(struct nx_crypto_ctx  *nx_ctx,
		  struct aead_request   *req,
		  u8                    *out,
		  unsigned int assoclen)
{
	int rc;
	struct nx_csbcpb *csbcpb_aead = nx_ctx->csbcpb_aead;
	struct scatter_walk walk;
	struct nx_sg *nx_sg = nx_ctx->in_sg;
	unsigned int nbytes = assoclen;
	unsigned int processed = 0, to_process;
	unsigned int max_sg_len;

	if (nbytes <= AES_BLOCK_SIZE) {
		scatterwalk_start(&walk, req->src);
		scatterwalk_copychunks(out, &walk, nbytes, SCATTERWALK_FROM_SG);
		scatterwalk_done(&walk, SCATTERWALK_FROM_SG, 0);
		return 0;
	}

	NX_CPB_FDM(csbcpb_aead) &= ~NX_FDM_CONTINUATION;

	/* page_limit: number of sg entries that fit on one page */
	max_sg_len = min_t(u64, nx_driver.of.max_sg_len/sizeof(struct nx_sg),
			   nx_ctx->ap->sglen);
	max_sg_len = min_t(u64, max_sg_len,
			   nx_ctx->ap->databytelen/NX_PAGE_SIZE);

	do {
		/*
		 * to_process: the data chunk to process in this update.
		 * This value is bound by sg list limits.
		 */
		to_process = min_t(u64, nbytes - processed,
				   nx_ctx->ap->databytelen);
		to_process = min_t(u64, to_process,
				   NX_PAGE_SIZE * (max_sg_len - 1));

		nx_sg = nx_walk_and_build(nx_ctx->in_sg, max_sg_len,
					  req->src, processed, &to_process);

		if ((to_process + processed) < nbytes)
			NX_CPB_FDM(csbcpb_aead) |= NX_FDM_INTERMEDIATE;
		else
			NX_CPB_FDM(csbcpb_aead) &= ~NX_FDM_INTERMEDIATE;

		nx_ctx->op_aead.inlen = (nx_ctx->in_sg - nx_sg)
					* sizeof(struct nx_sg);

		rc = nx_hcall_sync(nx_ctx, &nx_ctx->op_aead,
				req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
		if (rc)
			return rc;

		memcpy(csbcpb_aead->cpb.aes_gca.in_pat,
				csbcpb_aead->cpb.aes_gca.out_pat,
				AES_BLOCK_SIZE);
		NX_CPB_FDM(csbcpb_aead) |= NX_FDM_CONTINUATION;

		atomic_inc(&(nx_ctx->stats->aes_ops));
		atomic64_add(assoclen, &(nx_ctx->stats->aes_bytes));

		processed += to_process;
	} while (processed < nbytes);

	memcpy(out, csbcpb_aead->cpb.aes_gca.out_pat, AES_BLOCK_SIZE);

	return rc;
}

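/*
 * Handle requests with associated data but no payload by running the
 * hardware in GMAC mode over the AAD, chaining state between h-calls.
 */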
static int gmac(struct aead_request *req, const u8 *iv, unsigned int assoclen)
{
	int rc;
	struct nx_crypto_ctx *nx_ctx =
		crypto_aead_ctx(crypto_aead_reqtfm(req));
	struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
	struct nx_sg *nx_sg;
	unsigned int nbytes = assoclen;
	unsigned int processed = 0, to_process;
	unsigned int max_sg_len;

	/* Set GMAC mode */
	csbcpb->cpb.hdr.mode = NX_MODE_AES_GMAC;

	NX_CPB_FDM(csbcpb) &= ~NX_FDM_CONTINUATION;

	/* page_limit: number of sg entries that fit on one page */
	max_sg_len = min_t(u64, nx_driver.of.max_sg_len/sizeof(struct nx_sg),
			   nx_ctx->ap->sglen);
	max_sg_len = min_t(u64, max_sg_len,
			   nx_ctx->ap->databytelen/NX_PAGE_SIZE);

	/* Copy IV */
	memcpy(csbcpb->cpb.aes_gcm.iv_or_cnt, iv, AES_BLOCK_SIZE);

	do {
		/*
		 * to_process: the data chunk to process in this update.
		 * This value is bound by sg list limits.
		 */
		to_process = min_t(u64, nbytes - processed,
				   nx_ctx->ap->databytelen);
		to_process = min_t(u64, to_process,
				   NX_PAGE_SIZE * (max_sg_len - 1));

		nx_sg = nx_walk_and_build(nx_ctx->in_sg, max_sg_len,
					  req->src, processed, &to_process);

		if ((to_process + processed) < nbytes)
			NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
		else
			NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;

		nx_ctx->op.inlen = (nx_ctx->in_sg - nx_sg)
					* sizeof(struct nx_sg);

		csbcpb->cpb.aes_gcm.bit_length_data = 0;
		csbcpb->cpb.aes_gcm.bit_length_aad = 8 * nbytes;

		rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
				req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
		if (rc)
			goto out;

		memcpy(csbcpb->cpb.aes_gcm.in_pat_or_aad,
			csbcpb->cpb.aes_gcm.out_pat_or_mac, AES_BLOCK_SIZE);
		memcpy(csbcpb->cpb.aes_gcm.in_s0,
			csbcpb->cpb.aes_gcm.out_s0, AES_BLOCK_SIZE);

		NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;

		atomic_inc(&(nx_ctx->stats->aes_ops));
		atomic64_add(assoclen, &(nx_ctx->stats->aes_bytes));

		processed += to_process;
	} while (processed < nbytes);

out:
	/* Restore GCM mode */
	csbcpb->cpb.hdr.mode = NX_MODE_AES_GCM;
	return rc;
}

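/*
 * With no payload and no AAD, the GCM tag reduces to the encrypted
 * initial counter block, so compute it with a single AES-ECB operation.
 */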
static int gcm_empty(struct aead_request *req, const u8 *iv, int enc)
{
	int rc;
	struct nx_crypto_ctx *nx_ctx =
		crypto_aead_ctx(crypto_aead_reqtfm(req));
	struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
	char out[AES_BLOCK_SIZE];
	struct nx_sg *in_sg, *out_sg;
	int len;

	/* For scenarios where the input message is zero length, AES CTR mode
	 * may be used. Set the source data to be a single block (16B) of all
	 * zeros, and set the input IV value to be the same as the GMAC IV
	 * value. - nx_wb 4.8.1.3 */

	/* Change to ECB mode */
	csbcpb->cpb.hdr.mode = NX_MODE_AES_ECB;
	memcpy(csbcpb->cpb.aes_ecb.key, csbcpb->cpb.aes_gcm.key,
			sizeof(csbcpb->cpb.aes_ecb.key));
	if (enc)
		NX_CPB_FDM(csbcpb) |= NX_FDM_ENDE_ENCRYPT;
	else
		NX_CPB_FDM(csbcpb) &= ~NX_FDM_ENDE_ENCRYPT;

	len = AES_BLOCK_SIZE;

	/* Encrypt the counter/IV */
	in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *) iv,
				 &len, nx_ctx->ap->sglen);

	if (len != AES_BLOCK_SIZE)
		return -EINVAL;

	len = sizeof(out);
	out_sg = nx_build_sg_list(nx_ctx->out_sg, (u8 *) out, &len,
				  nx_ctx->ap->sglen);

	if (len != sizeof(out))
		return -EINVAL;

	nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg);
	nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);

	rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
			   req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
	if (rc)
		goto out;
	atomic_inc(&(nx_ctx->stats->aes_ops));

	/* Copy out the auth tag */
	memcpy(csbcpb->cpb.aes_gcm.out_pat_or_mac, out,
			crypto_aead_authsize(crypto_aead_reqtfm(req)));
out:
	/* Restore GCM mode */
	csbcpb->cpb.hdr.mode = NX_MODE_AES_GCM;

	/*
	 * The ECB key occupies the same CPB region as the GCM AAD and
	 * counter, so it is safe to simply zero it out.
	 */
	memset(csbcpb->cpb.aes_ecb.key, 0, sizeof(csbcpb->cpb.aes_ecb.key));

	return rc;
}

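/*
 * Core GCM routine: hash the AAD, walk the payload in hardware-sized
 * chunks while chaining the intermediate MAC state between h-calls, then
 * emit the tag on encrypt or compare it in constant time on decrypt.
 */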
static int gcm_aes_nx_crypt(struct aead_request *req, int enc,
			    unsigned int assoclen)
{
	struct nx_crypto_ctx *nx_ctx =
		crypto_aead_ctx(crypto_aead_reqtfm(req));
	struct nx_gcm_rctx *rctx = aead_request_ctx(req);
	struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
	unsigned int nbytes = req->cryptlen;
	unsigned int processed = 0, to_process;
	unsigned long irq_flags;
	int rc = -EINVAL;

	spin_lock_irqsave(&nx_ctx->lock, irq_flags);

	/* initialize the counter */
	*(u32 *)&rctx->iv[NX_GCM_CTR_OFFSET] = 1;

	if (nbytes == 0) {
		if (assoclen == 0)
			rc = gcm_empty(req, rctx->iv, enc);
		else
			rc = gmac(req, rctx->iv, assoclen);
		if (rc)
			goto out;
		else
			goto mac;
	}

	/* Process associated data */
	csbcpb->cpb.aes_gcm.bit_length_aad = assoclen * 8;
	if (assoclen) {
		rc = nx_gca(nx_ctx, req, csbcpb->cpb.aes_gcm.in_pat_or_aad,
			    assoclen);
		if (rc)
			goto out;
	}

	/* Set flags for encryption */
	NX_CPB_FDM(csbcpb) &= ~NX_FDM_CONTINUATION;
	if (enc) {
		NX_CPB_FDM(csbcpb) |= NX_FDM_ENDE_ENCRYPT;
	} else {
		NX_CPB_FDM(csbcpb) &= ~NX_FDM_ENDE_ENCRYPT;
		nbytes -= crypto_aead_authsize(crypto_aead_reqtfm(req));
	}

	do {
		to_process = nbytes - processed;

		csbcpb->cpb.aes_gcm.bit_length_data = nbytes * 8;
		rc = nx_build_sg_lists(nx_ctx, rctx->iv, req->dst,
				       req->src, &to_process,
				       processed + req->assoclen,
				       csbcpb->cpb.aes_gcm.iv_or_cnt);

		if (rc)
			goto out;

		if ((to_process + processed) < nbytes)
			NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
		else
			NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;

		rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
				   req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
		if (rc)
			goto out;

		memcpy(rctx->iv, csbcpb->cpb.aes_gcm.out_cnt, AES_BLOCK_SIZE);
		memcpy(csbcpb->cpb.aes_gcm.in_pat_or_aad,
			csbcpb->cpb.aes_gcm.out_pat_or_mac, AES_BLOCK_SIZE);
		memcpy(csbcpb->cpb.aes_gcm.in_s0,
			csbcpb->cpb.aes_gcm.out_s0, AES_BLOCK_SIZE);

		NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;

		atomic_inc(&(nx_ctx->stats->aes_ops));
		atomic64_add(be32_to_cpu(csbcpb->csb.processed_byte_count),
			     &(nx_ctx->stats->aes_bytes));

		processed += to_process;
	} while (processed < nbytes);

mac:
	if (enc) {
		/* copy out the auth tag */
		scatterwalk_map_and_copy(
			csbcpb->cpb.aes_gcm.out_pat_or_mac,
			req->dst, req->assoclen + nbytes,
			crypto_aead_authsize(crypto_aead_reqtfm(req)),
			SCATTERWALK_TO_SG);
	} else {
		u8 *itag = nx_ctx->priv.gcm.iauth_tag;
		u8 *otag = csbcpb->cpb.aes_gcm.out_pat_or_mac;

		scatterwalk_map_and_copy(
			itag, req->src, req->assoclen + nbytes,
			crypto_aead_authsize(crypto_aead_reqtfm(req)),
			SCATTERWALK_FROM_SG);
		rc = crypto_memneq(itag, otag,
			    crypto_aead_authsize(crypto_aead_reqtfm(req))) ?
		     -EBADMSG : 0;
	}
out:
	spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
	return rc;
}

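/* gcm(aes): the 12-byte IV comes directly from the request. */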
static int gcm_aes_nx_encrypt(struct aead_request *req)
{
	struct nx_gcm_rctx *rctx = aead_request_ctx(req);
	char *iv = rctx->iv;

	memcpy(iv, req->iv, GCM_AES_IV_SIZE);

	return gcm_aes_nx_crypt(req, 1, req->assoclen);
}

static int gcm_aes_nx_decrypt(struct aead_request *req)
{
	struct nx_gcm_rctx *rctx = aead_request_ctx(req);
	char *iv = rctx->iv;

	memcpy(iv, req->iv, GCM_AES_IV_SIZE);

	return gcm_aes_nx_crypt(req, 0, req->assoclen);
}

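/*
 * rfc4106(gcm(aes)): build the IV from the 4-byte nonce salt saved at
 * setkey time plus the 8-byte per-request IV, which the crypto API also
 * counts in req->assoclen.
 */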
static int gcm4106_aes_nx_encrypt(struct aead_request *req)
{
	struct nx_crypto_ctx *nx_ctx =
		crypto_aead_ctx(crypto_aead_reqtfm(req));
	struct nx_gcm_rctx *rctx = aead_request_ctx(req);
	char *iv = rctx->iv;
	char *nonce = nx_ctx->priv.gcm.nonce;

	memcpy(iv, nonce, NX_GCM4106_NONCE_LEN);
	memcpy(iv + NX_GCM4106_NONCE_LEN, req->iv, 8);

	if (req->assoclen < 8)
		return -EINVAL;

	return gcm_aes_nx_crypt(req, 1, req->assoclen - 8);
}

static int gcm4106_aes_nx_decrypt(struct aead_request *req)
{
	struct nx_crypto_ctx *nx_ctx =
		crypto_aead_ctx(crypto_aead_reqtfm(req));
	struct nx_gcm_rctx *rctx = aead_request_ctx(req);
	char *iv = rctx->iv;
	char *nonce = nx_ctx->priv.gcm.nonce;

	memcpy(iv, nonce, NX_GCM4106_NONCE_LEN);
	memcpy(iv + NX_GCM4106_NONCE_LEN, req->iv, 8);

	if (req->assoclen < 8)
		return -EINVAL;

	return gcm_aes_nx_crypt(req, 0, req->assoclen - 8);
}

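/* Algorithm definitions registered by the NX driver core. */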
struct aead_alg nx_gcm_aes_alg = {
	.base = {
		.cra_name        = "gcm(aes)",
		.cra_driver_name = "gcm-aes-nx",
		.cra_priority    = 300,
		.cra_blocksize   = 1,
		.cra_ctxsize     = sizeof(struct nx_crypto_ctx),
		.cra_module      = THIS_MODULE,
	},
	.init        = nx_crypto_ctx_aes_gcm_init,
	.exit        = nx_crypto_ctx_aead_exit,
	.ivsize      = GCM_AES_IV_SIZE,
	.maxauthsize = AES_BLOCK_SIZE,
	.setkey      = gcm_aes_nx_set_key,
	.encrypt     = gcm_aes_nx_encrypt,
	.decrypt     = gcm_aes_nx_decrypt,
};

struct aead_alg nx_gcm4106_aes_alg = {
	.base = {
		.cra_name        = "rfc4106(gcm(aes))",
		.cra_driver_name = "rfc4106-gcm-aes-nx",
		.cra_priority    = 300,
		.cra_blocksize   = 1,
		.cra_ctxsize     = sizeof(struct nx_crypto_ctx),
		.cra_module      = THIS_MODULE,
	},
	.init        = nx_crypto_ctx_aes_gcm_init,
	.exit        = nx_crypto_ctx_aead_exit,
	.ivsize      = GCM_RFC4106_IV_SIZE,
	.maxauthsize = AES_BLOCK_SIZE,
	.setkey      = gcm4106_aes_nx_set_key,
	.setauthsize = gcm4106_aes_nx_setauthsize,
	.encrypt     = gcm4106_aes_nx_encrypt,
	.decrypt     = gcm4106_aes_nx_decrypt,
};