// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
/*
 * caam - Freescale FSL CAAM support for Public Key Cryptography
 *
 * Copyright 2016 Freescale Semiconductor, Inc.
 * Copyright 2018-2019 NXP
 *
 * There is no Shared Descriptor for PKC, so the Job Descriptor must carry
 * all the desired key parameters, input and output pointers.
 */
#include "compat.h"
#include "regs.h"
#include "intern.h"
#include "jr.h"
#include "error.h"
#include "desc_constr.h"
#include "sg_sw_sec4.h"
#include "caampkc.h"

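/*
 * Each protocol job descriptor built here is a HEADER command, the
 * hardware Protocol Data Block (PDB) and an OPERATION command - hence
 * 2 * CAAM_CMD_SZ plus the size of the corresponding PDB.
 */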
#define DESC_RSA_PUB_LEN	(2 * CAAM_CMD_SZ + SIZEOF_RSA_PUB_PDB)
#define DESC_RSA_PRIV_F1_LEN	(2 * CAAM_CMD_SZ + \
				 SIZEOF_RSA_PRIV_F1_PDB)
#define DESC_RSA_PRIV_F2_LEN	(2 * CAAM_CMD_SZ + \
				 SIZEOF_RSA_PRIV_F2_PDB)
#define DESC_RSA_PRIV_F3_LEN	(2 * CAAM_CMD_SZ + \
				 SIZEOF_RSA_PRIV_F3_PDB)
#define CAAM_RSA_MAX_INPUT_SIZE	512 /* for a 4096-bit modulus */

/* buffer filled with zeros, used for padding */
static u8 *zero_buffer;

/*
 * variable used to avoid double free of resources in case
 * algorithm registration was unsuccessful
 */
static bool init_done;

struct caam_akcipher_alg {
	struct akcipher_alg akcipher;
	bool registered;
};

static void rsa_io_unmap(struct device *dev, struct rsa_edesc *edesc,
			 struct akcipher_request *req)
{
	struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);

	dma_unmap_sg(dev, req->dst, edesc->dst_nents, DMA_FROM_DEVICE);
	dma_unmap_sg(dev, req_ctx->fixup_src, edesc->src_nents, DMA_TO_DEVICE);

	if (edesc->sec4_sg_bytes)
		dma_unmap_single(dev, edesc->sec4_sg_dma, edesc->sec4_sg_bytes,
				 DMA_TO_DEVICE);
}

static void rsa_pub_unmap(struct device *dev, struct rsa_edesc *edesc,
			  struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct rsa_pub_pdb *pdb = &edesc->pdb.pub;

	dma_unmap_single(dev, pdb->n_dma, key->n_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->e_dma, key->e_sz, DMA_TO_DEVICE);
}

static void rsa_priv_f1_unmap(struct device *dev, struct rsa_edesc *edesc,
			      struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct rsa_priv_f1_pdb *pdb = &edesc->pdb.priv_f1;

	dma_unmap_single(dev, pdb->n_dma, key->n_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->d_dma, key->d_sz, DMA_TO_DEVICE);
}

static void rsa_priv_f2_unmap(struct device *dev, struct rsa_edesc *edesc,
			      struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct rsa_priv_f2_pdb *pdb = &edesc->pdb.priv_f2;
	size_t p_sz = key->p_sz;
	size_t q_sz = key->q_sz;

	dma_unmap_single(dev, pdb->d_dma, key->d_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->p_dma, p_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_BIDIRECTIONAL);
	dma_unmap_single(dev, pdb->tmp2_dma, q_sz, DMA_BIDIRECTIONAL);
}

static void rsa_priv_f3_unmap(struct device *dev, struct rsa_edesc *edesc,
			      struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct rsa_priv_f3_pdb *pdb = &edesc->pdb.priv_f3;
	size_t p_sz = key->p_sz;
	size_t q_sz = key->q_sz;

	dma_unmap_single(dev, pdb->p_dma, p_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->dp_dma, p_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->dq_dma, q_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->c_dma, p_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_BIDIRECTIONAL);
	dma_unmap_single(dev, pdb->tmp2_dma, q_sz, DMA_BIDIRECTIONAL);
}

/* RSA Job Completion handler */
static void rsa_pub_done(struct device *dev, u32 *desc, u32 err, void *context)
{
	struct akcipher_request *req = context;
	struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);
	struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
	struct rsa_edesc *edesc;
	int ecode = 0;
	bool has_bklog;

	if (err)
		ecode = caam_jr_strstatus(dev, err);

	edesc = req_ctx->edesc;
	has_bklog = edesc->bklog;

	rsa_pub_unmap(dev, edesc, req);
	rsa_io_unmap(dev, edesc, req);
	kfree(edesc);

	/*
	 * If no backlog flag, the completion of the request is done
	 * by CAAM, not crypto engine.
	 */
	if (!has_bklog)
		akcipher_request_complete(req, ecode);
	else
		crypto_finalize_akcipher_request(jrp->engine, req, ecode);
}

static void rsa_priv_f_done(struct device *dev, u32 *desc, u32 err,
			    void *context)
{
	struct akcipher_request *req = context;
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);
	struct rsa_edesc *edesc;
	int ecode = 0;
	bool has_bklog;

	if (err)
		ecode = caam_jr_strstatus(dev, err);

	edesc = req_ctx->edesc;
	has_bklog = edesc->bklog;

	switch (key->priv_form) {
	case FORM1:
		rsa_priv_f1_unmap(dev, edesc, req);
		break;
	case FORM2:
		rsa_priv_f2_unmap(dev, edesc, req);
		break;
	case FORM3:
		rsa_priv_f3_unmap(dev, edesc, req);
	}

	rsa_io_unmap(dev, edesc, req);
	kfree(edesc);

	/*
	 * If no backlog flag, the completion of the request is done
	 * by CAAM, not crypto engine.
	 */
	if (!has_bklog)
		akcipher_request_complete(req, ecode);
	else
		crypto_finalize_akcipher_request(jrp->engine, req, ecode);
}

/**
 * caam_rsa_count_leading_zeros - Count the leading zero bytes to strip
 *                                from a given scatterlist
 *
 * @sgl   : scatterlist to count zeros from
 * @nbytes: maximum number of zeros, in bytes, to strip
 * @flags : operation flags
 */
static int caam_rsa_count_leading_zeros(struct scatterlist *sgl,
					unsigned int nbytes,
					unsigned int flags)
{
	struct sg_mapping_iter miter;
	int lzeros, ents;
	unsigned int len;
	unsigned int tbytes = nbytes;
	const u8 *buff;

	ents = sg_nents_for_len(sgl, nbytes);
	if (ents < 0)
		return ents;

	sg_miter_start(&miter, sgl, ents, SG_MITER_FROM_SG | flags);

	lzeros = 0;
	len = 0;
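	/*
	 * Walk the scatterlist one mapped segment at a time: skip zero
	 * bytes inside the current segment, stop at the first non-zero
	 * byte, and map the next segment once the current one runs out.
	 */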
	while (nbytes > 0) {
		/* do not strip more than given bytes */
		while (len && !*buff && lzeros < nbytes) {
			lzeros++;
			len--;
			buff++;
		}

		if (len && *buff)
			break;

		if (!sg_miter_next(&miter))
			break;

		buff = miter.addr;
		len = miter.length;

		nbytes -= lzeros;
		lzeros = 0;
	}

	miter.consumed = lzeros;
	sg_miter_stop(&miter);
	nbytes -= lzeros;

	return tbytes - nbytes;
}

static struct rsa_edesc *rsa_edesc_alloc(struct akcipher_request *req,
					 size_t desclen)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct device *dev = ctx->dev;
	struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);
	struct caam_rsa_key *key = &ctx->key;
	struct rsa_edesc *edesc;
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		       GFP_KERNEL : GFP_ATOMIC;
	int sg_flags = (flags == GFP_ATOMIC) ? SG_MITER_ATOMIC : 0;
	int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes;
	int src_nents, dst_nents;
	int mapped_src_nents, mapped_dst_nents;
	unsigned int diff_size = 0;
	int lzeros;

	if (req->src_len > key->n_sz) {
		/*
		 * strip leading zeros and
		 * return the number of zeros to skip
		 */
		lzeros = caam_rsa_count_leading_zeros(req->src, req->src_len -
						      key->n_sz, sg_flags);
		if (lzeros < 0)
			return ERR_PTR(lzeros);

		req_ctx->fixup_src = scatterwalk_ffwd(req_ctx->src, req->src,
						      lzeros);
		req_ctx->fixup_src_len = req->src_len - lzeros;
	} else {
		/*
		 * input src is less than the n key modulus,
		 * so there will be zero padding
		 */
		diff_size = key->n_sz - req->src_len;
		req_ctx->fixup_src = req->src;
		req_ctx->fixup_src_len = req->src_len;
	}

	src_nents = sg_nents_for_len(req_ctx->fixup_src,
				     req_ctx->fixup_src_len);
	dst_nents = sg_nents_for_len(req->dst, req->dst_len);

	mapped_src_nents = dma_map_sg(dev, req_ctx->fixup_src, src_nents,
				      DMA_TO_DEVICE);
	if (unlikely(!mapped_src_nents)) {
		dev_err(dev, "unable to map source\n");
		return ERR_PTR(-ENOMEM);
	}
	mapped_dst_nents = dma_map_sg(dev, req->dst, dst_nents,
				      DMA_FROM_DEVICE);
	if (unlikely(!mapped_dst_nents)) {
		dev_err(dev, "unable to map destination\n");
		goto src_fail;
	}

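	/*
	 * An input hw s/g table is needed when the source is scattered or
	 * must be left-padded with zeros; the !!diff_size term reserves one
	 * extra entry for the padding buffer.
	 */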
	if (!diff_size && mapped_src_nents == 1)
		sec4_sg_len = 0; /* no need for an input hw s/g table */
	else
		sec4_sg_len = mapped_src_nents + !!diff_size;
	sec4_sg_index = sec4_sg_len;

	if (mapped_dst_nents > 1)
		sec4_sg_len += pad_sg_nents(mapped_dst_nents);
	else
		sec4_sg_len = pad_sg_nents(sec4_sg_len);

	sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry);

	/* allocate space for base edesc, hw desc commands and link tables */
	edesc = kzalloc(sizeof(*edesc) + desclen + sec4_sg_bytes,
			GFP_DMA | flags);
	if (!edesc)
		goto dst_fail;

	edesc->sec4_sg = (void *)edesc + sizeof(*edesc) + desclen;
	if (diff_size)
		dma_to_sec4_sg_one(edesc->sec4_sg, ctx->padding_dma, diff_size,
				   0);

	if (sec4_sg_index)
		sg_to_sec4_sg_last(req_ctx->fixup_src, req_ctx->fixup_src_len,
				   edesc->sec4_sg + !!diff_size, 0);

	if (mapped_dst_nents > 1)
		sg_to_sec4_sg_last(req->dst, req->dst_len,
				   edesc->sec4_sg + sec4_sg_index, 0);

	/* Save nents for later use in Job Descriptor */
	edesc->src_nents = src_nents;
	edesc->dst_nents = dst_nents;

	req_ctx->edesc = edesc;

	if (!sec4_sg_bytes)
		return edesc;

	edesc->mapped_src_nents = mapped_src_nents;
	edesc->mapped_dst_nents = mapped_dst_nents;

	edesc->sec4_sg_dma = dma_map_single(dev, edesc->sec4_sg,
					    sec4_sg_bytes, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, edesc->sec4_sg_dma)) {
		dev_err(dev, "unable to map S/G table\n");
		goto sec4_sg_fail;
	}

	edesc->sec4_sg_bytes = sec4_sg_bytes;

	print_hex_dump_debug("caampkc sec4_sg@" __stringify(__LINE__) ": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, edesc->sec4_sg,
			     edesc->sec4_sg_bytes, 1);

	return edesc;

sec4_sg_fail:
	kfree(edesc);
dst_fail:
	dma_unmap_sg(dev, req->dst, dst_nents, DMA_FROM_DEVICE);
src_fail:
	dma_unmap_sg(dev, req_ctx->fixup_src, src_nents, DMA_TO_DEVICE);
	return ERR_PTR(-ENOMEM);
}

static int akcipher_do_one_req(struct crypto_engine *engine, void *areq)
{
	struct akcipher_request *req = container_of(areq,
						    struct akcipher_request,
						    base);
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct device *jrdev = ctx->dev;
	u32 *desc = req_ctx->edesc->hw_desc;
	int ret;

	req_ctx->edesc->bklog = true;

	ret = caam_jr_enqueue(jrdev, desc, req_ctx->akcipher_op_done, req);

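	/*
	 * -ENOSPC is passed back up so the crypto engine can retry the
	 * request later, but only when the engine advertises retry support.
	 */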
	if (ret == -ENOSPC && engine->retry_support)
		return ret;

	if (ret != -EINPROGRESS) {
		rsa_pub_unmap(jrdev, req_ctx->edesc, req);
		rsa_io_unmap(jrdev, req_ctx->edesc, req);
		kfree(req_ctx->edesc);
	} else {
		ret = 0;
	}

	return ret;
}

static int set_rsa_pub_pdb(struct akcipher_request *req,
			   struct rsa_edesc *edesc)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct device *dev = ctx->dev;
	struct rsa_pub_pdb *pdb = &edesc->pdb.pub;
	int sec4_sg_index = 0;

	pdb->n_dma = dma_map_single(dev, key->n, key->n_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->n_dma)) {
		dev_err(dev, "Unable to map RSA modulus memory\n");
		return -ENOMEM;
	}

	pdb->e_dma = dma_map_single(dev, key->e, key->e_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->e_dma)) {
		dev_err(dev, "Unable to map RSA public exponent memory\n");
		dma_unmap_single(dev, pdb->n_dma, key->n_sz, DMA_TO_DEVICE);
		return -ENOMEM;
	}

	if (edesc->mapped_src_nents > 1) {
		pdb->sgf |= RSA_PDB_SGF_F;
		pdb->f_dma = edesc->sec4_sg_dma;
		sec4_sg_index += edesc->mapped_src_nents;
	} else {
		pdb->f_dma = sg_dma_address(req_ctx->fixup_src);
	}

	if (edesc->mapped_dst_nents > 1) {
		pdb->sgf |= RSA_PDB_SGF_G;
		pdb->g_dma = edesc->sec4_sg_dma +
			     sec4_sg_index * sizeof(struct sec4_sg_entry);
	} else {
		pdb->g_dma = sg_dma_address(req->dst);
	}

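	/* Record the exponent and modulus lengths in the PDB header word. */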
	pdb->sgf |= (key->e_sz << RSA_PDB_E_SHIFT) | key->n_sz;
	pdb->f_len = req_ctx->fixup_src_len;

	return 0;
}

static int set_rsa_priv_f1_pdb(struct akcipher_request *req,
			       struct rsa_edesc *edesc)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct device *dev = ctx->dev;
	struct rsa_priv_f1_pdb *pdb = &edesc->pdb.priv_f1;
	int sec4_sg_index = 0;

	pdb->n_dma = dma_map_single(dev, key->n, key->n_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->n_dma)) {
		dev_err(dev, "Unable to map modulus memory\n");
		return -ENOMEM;
	}

	pdb->d_dma = dma_map_single(dev, key->d, key->d_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->d_dma)) {
		dev_err(dev, "Unable to map RSA private exponent memory\n");
		dma_unmap_single(dev, pdb->n_dma, key->n_sz, DMA_TO_DEVICE);
		return -ENOMEM;
	}

	if (edesc->mapped_src_nents > 1) {
		pdb->sgf |= RSA_PRIV_PDB_SGF_G;
		pdb->g_dma = edesc->sec4_sg_dma;
		sec4_sg_index += edesc->mapped_src_nents;

	} else {
		struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);

		pdb->g_dma = sg_dma_address(req_ctx->fixup_src);
	}

	if (edesc->mapped_dst_nents > 1) {
		pdb->sgf |= RSA_PRIV_PDB_SGF_F;
		pdb->f_dma = edesc->sec4_sg_dma +
			     sec4_sg_index * sizeof(struct sec4_sg_entry);
	} else {
		pdb->f_dma = sg_dma_address(req->dst);
	}

	pdb->sgf |= (key->d_sz << RSA_PDB_D_SHIFT) | key->n_sz;

	return 0;
}

static int set_rsa_priv_f2_pdb(struct akcipher_request *req,
			       struct rsa_edesc *edesc)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct device *dev = ctx->dev;
	struct rsa_priv_f2_pdb *pdb = &edesc->pdb.priv_f2;
	int sec4_sg_index = 0;
	size_t p_sz = key->p_sz;
	size_t q_sz = key->q_sz;

	pdb->d_dma = dma_map_single(dev, key->d, key->d_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->d_dma)) {
		dev_err(dev, "Unable to map RSA private exponent memory\n");
		return -ENOMEM;
	}

	pdb->p_dma = dma_map_single(dev, key->p, p_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->p_dma)) {
		dev_err(dev, "Unable to map RSA prime factor p memory\n");
		goto unmap_d;
	}

	pdb->q_dma = dma_map_single(dev, key->q, q_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->q_dma)) {
		dev_err(dev, "Unable to map RSA prime factor q memory\n");
		goto unmap_p;
	}

	pdb->tmp1_dma = dma_map_single(dev, key->tmp1, p_sz, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, pdb->tmp1_dma)) {
		dev_err(dev, "Unable to map RSA tmp1 memory\n");
		goto unmap_q;
	}

	pdb->tmp2_dma = dma_map_single(dev, key->tmp2, q_sz, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, pdb->tmp2_dma)) {
		dev_err(dev, "Unable to map RSA tmp2 memory\n");
		goto unmap_tmp1;
	}

	if (edesc->mapped_src_nents > 1) {
		pdb->sgf |= RSA_PRIV_PDB_SGF_G;
		pdb->g_dma = edesc->sec4_sg_dma;
		sec4_sg_index += edesc->mapped_src_nents;
	} else {
		struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);

		pdb->g_dma = sg_dma_address(req_ctx->fixup_src);
	}

	if (edesc->mapped_dst_nents > 1) {
		pdb->sgf |= RSA_PRIV_PDB_SGF_F;
		pdb->f_dma = edesc->sec4_sg_dma +
			     sec4_sg_index * sizeof(struct sec4_sg_entry);
	} else {
		pdb->f_dma = sg_dma_address(req->dst);
	}

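	/* Pack the d/n and q/p component lengths into the PDB words. */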
	pdb->sgf |= (key->d_sz << RSA_PDB_D_SHIFT) | key->n_sz;
	pdb->p_q_len = (q_sz << RSA_PDB_Q_SHIFT) | p_sz;

	return 0;

unmap_tmp1:
	dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_BIDIRECTIONAL);
unmap_q:
	dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE);
unmap_p:
	dma_unmap_single(dev, pdb->p_dma, p_sz, DMA_TO_DEVICE);
unmap_d:
	dma_unmap_single(dev, pdb->d_dma, key->d_sz, DMA_TO_DEVICE);

	return -ENOMEM;
}

static int set_rsa_priv_f3_pdb(struct akcipher_request *req,
			       struct rsa_edesc *edesc)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct device *dev = ctx->dev;
	struct rsa_priv_f3_pdb *pdb = &edesc->pdb.priv_f3;
	int sec4_sg_index = 0;
	size_t p_sz = key->p_sz;
	size_t q_sz = key->q_sz;

	pdb->p_dma = dma_map_single(dev, key->p, p_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->p_dma)) {
		dev_err(dev, "Unable to map RSA prime factor p memory\n");
		return -ENOMEM;
	}

	pdb->q_dma = dma_map_single(dev, key->q, q_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->q_dma)) {
		dev_err(dev, "Unable to map RSA prime factor q memory\n");
		goto unmap_p;
	}

	pdb->dp_dma = dma_map_single(dev, key->dp, p_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->dp_dma)) {
		dev_err(dev, "Unable to map RSA exponent dp memory\n");
		goto unmap_q;
	}

	pdb->dq_dma = dma_map_single(dev, key->dq, q_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->dq_dma)) {
		dev_err(dev, "Unable to map RSA exponent dq memory\n");
		goto unmap_dp;
	}

	pdb->c_dma = dma_map_single(dev, key->qinv, p_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->c_dma)) {
		dev_err(dev, "Unable to map RSA CRT coefficient qinv memory\n");
		goto unmap_dq;
	}

	pdb->tmp1_dma = dma_map_single(dev, key->tmp1, p_sz, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, pdb->tmp1_dma)) {
		dev_err(dev, "Unable to map RSA tmp1 memory\n");
		goto unmap_qinv;
	}

	pdb->tmp2_dma = dma_map_single(dev, key->tmp2, q_sz, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, pdb->tmp2_dma)) {
		dev_err(dev, "Unable to map RSA tmp2 memory\n");
		goto unmap_tmp1;
	}

	if (edesc->mapped_src_nents > 1) {
		pdb->sgf |= RSA_PRIV_PDB_SGF_G;
		pdb->g_dma = edesc->sec4_sg_dma;
		sec4_sg_index += edesc->mapped_src_nents;
	} else {
		struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);

		pdb->g_dma = sg_dma_address(req_ctx->fixup_src);
	}

	if (edesc->mapped_dst_nents > 1) {
		pdb->sgf |= RSA_PRIV_PDB_SGF_F;
		pdb->f_dma = edesc->sec4_sg_dma +
			     sec4_sg_index * sizeof(struct sec4_sg_entry);
	} else {
		pdb->f_dma = sg_dma_address(req->dst);
	}

	pdb->sgf |= key->n_sz;
	pdb->p_q_len = (q_sz << RSA_PDB_Q_SHIFT) | p_sz;

	return 0;

unmap_tmp1:
	dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_BIDIRECTIONAL);
unmap_qinv:
	dma_unmap_single(dev, pdb->c_dma, p_sz, DMA_TO_DEVICE);
unmap_dq:
	dma_unmap_single(dev, pdb->dq_dma, q_sz, DMA_TO_DEVICE);
unmap_dp:
	dma_unmap_single(dev, pdb->dp_dma, p_sz, DMA_TO_DEVICE);
unmap_q:
	dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE);
unmap_p:
	dma_unmap_single(dev, pdb->p_dma, p_sz, DMA_TO_DEVICE);

	return -ENOMEM;
}

static int akcipher_enqueue_req(struct device *jrdev,
				void (*cbk)(struct device *jrdev, u32 *desc,
					    u32 err, void *context),
				struct akcipher_request *req)
{
	struct caam_drv_private_jr *jrpriv = dev_get_drvdata(jrdev);
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);
	struct rsa_edesc *edesc = req_ctx->edesc;
	u32 *desc = edesc->hw_desc;
	int ret;

	req_ctx->akcipher_op_done = cbk;
	/*
	 * Only backlog requests are sent to the crypto engine, since the
	 * others can be handled by CAAM directly if it is free, especially
	 * since the JR has up to 1024 entries (more than the 10 entries of
	 * the crypto engine).
	 */
	if (req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)
		ret = crypto_transfer_akcipher_request_to_engine(jrpriv->engine,
								 req);
	else
		ret = caam_jr_enqueue(jrdev, desc, cbk, req);

	if ((ret != -EINPROGRESS) && (ret != -EBUSY)) {
		switch (key->priv_form) {
		case FORM1:
			rsa_priv_f1_unmap(jrdev, edesc, req);
			break;
		case FORM2:
			rsa_priv_f2_unmap(jrdev, edesc, req);
			break;
		case FORM3:
			rsa_priv_f3_unmap(jrdev, edesc, req);
			break;
		default:
			rsa_pub_unmap(jrdev, edesc, req);
		}
		rsa_io_unmap(jrdev, edesc, req);
		kfree(edesc);
	}

	return ret;
}

static int caam_rsa_enc(struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct device *jrdev = ctx->dev;
	struct rsa_edesc *edesc;
	int ret;

	if (unlikely(!key->n || !key->e))
		return -EINVAL;

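	/*
	 * The output buffer must fit the full modulus; report the required
	 * length back through dst_len before failing with -EOVERFLOW.
	 */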
	if (req->dst_len < key->n_sz) {
		req->dst_len = key->n_sz;
		dev_err(jrdev, "Output buffer length less than parameter n\n");
		return -EOVERFLOW;
	}

	/* Allocate extended descriptor */
	edesc = rsa_edesc_alloc(req, DESC_RSA_PUB_LEN);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* Set RSA Encrypt Protocol Data Block */
	ret = set_rsa_pub_pdb(req, edesc);
	if (ret)
		goto init_fail;

	/* Initialize Job Descriptor */
	init_rsa_pub_desc(edesc->hw_desc, &edesc->pdb.pub);

	return akcipher_enqueue_req(jrdev, rsa_pub_done, req);

init_fail:
	rsa_io_unmap(jrdev, edesc, req);
	kfree(edesc);
	return ret;
}

static int caam_rsa_dec_priv_f1(struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct device *jrdev = ctx->dev;
	struct rsa_edesc *edesc;
	int ret;

	/* Allocate extended descriptor */
	edesc = rsa_edesc_alloc(req, DESC_RSA_PRIV_F1_LEN);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* Set RSA Decrypt Protocol Data Block - Private Key Form #1 */
	ret = set_rsa_priv_f1_pdb(req, edesc);
	if (ret)
		goto init_fail;

	/* Initialize Job Descriptor */
	init_rsa_priv_f1_desc(edesc->hw_desc, &edesc->pdb.priv_f1);

	return akcipher_enqueue_req(jrdev, rsa_priv_f_done, req);

init_fail:
	rsa_io_unmap(jrdev, edesc, req);
	kfree(edesc);
	return ret;
}

static int caam_rsa_dec_priv_f2(struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct device *jrdev = ctx->dev;
	struct rsa_edesc *edesc;
	int ret;

	/* Allocate extended descriptor */
	edesc = rsa_edesc_alloc(req, DESC_RSA_PRIV_F2_LEN);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* Set RSA Decrypt Protocol Data Block - Private Key Form #2 */
	ret = set_rsa_priv_f2_pdb(req, edesc);
	if (ret)
		goto init_fail;

	/* Initialize Job Descriptor */
	init_rsa_priv_f2_desc(edesc->hw_desc, &edesc->pdb.priv_f2);

	return akcipher_enqueue_req(jrdev, rsa_priv_f_done, req);

init_fail:
	rsa_io_unmap(jrdev, edesc, req);
	kfree(edesc);
	return ret;
}

static int caam_rsa_dec_priv_f3(struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct device *jrdev = ctx->dev;
	struct rsa_edesc *edesc;
	int ret;

	/* Allocate extended descriptor */
	edesc = rsa_edesc_alloc(req, DESC_RSA_PRIV_F3_LEN);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* Set RSA Decrypt Protocol Data Block - Private Key Form #3 */
	ret = set_rsa_priv_f3_pdb(req, edesc);
	if (ret)
		goto init_fail;

	/* Initialize Job Descriptor */
	init_rsa_priv_f3_desc(edesc->hw_desc, &edesc->pdb.priv_f3);

	return akcipher_enqueue_req(jrdev, rsa_priv_f_done, req);

init_fail:
	rsa_io_unmap(jrdev, edesc, req);
	kfree(edesc);
	return ret;
}

static int caam_rsa_dec(struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;
	int ret;

	if (unlikely(!key->n || !key->d))
		return -EINVAL;

	if (req->dst_len < key->n_sz) {
		req->dst_len = key->n_sz;
		dev_err(ctx->dev, "Output buffer length less than parameter n\n");
		return -EOVERFLOW;
	}

	if (key->priv_form == FORM3)
		ret = caam_rsa_dec_priv_f3(req);
	else if (key->priv_form == FORM2)
		ret = caam_rsa_dec_priv_f2(req);
	else
		ret = caam_rsa_dec_priv_f1(req);

	return ret;
}

static void caam_rsa_free_key(struct caam_rsa_key *key)
{
	kfree_sensitive(key->d);
	kfree_sensitive(key->p);
	kfree_sensitive(key->q);
	kfree_sensitive(key->dp);
	kfree_sensitive(key->dq);
	kfree_sensitive(key->qinv);
	kfree_sensitive(key->tmp1);
	kfree_sensitive(key->tmp2);
	kfree(key->e);
	kfree(key->n);
	memset(key, 0, sizeof(*key));
}

static void caam_rsa_drop_leading_zeros(const u8 **ptr, size_t *nbytes)
{
	while (!**ptr && *nbytes) {
		(*ptr)++;
		(*nbytes)--;
	}
}

/**
 * caam_read_rsa_crt - Used for reading dP, dQ, qInv CRT members.
 * dP, dQ and qInv may decode to fewer bytes than the corresponding p, q prime
 * factors, as BER encoding requires that the minimum number of bytes be used
 * to encode an integer. The decoded dP, dQ, qInv values therefore have to be
 * zero-padded to the appropriate length.
 *
 * @ptr   : pointer to {dP, dQ, qInv} CRT member
 * @nbytes: length in bytes of {dP, dQ, qInv} CRT member
 * @dstlen: length in bytes of corresponding p or q prime factor
 */
static u8 *caam_read_rsa_crt(const u8 *ptr, size_t nbytes, size_t dstlen)
{
	u8 *dst;

	caam_rsa_drop_leading_zeros(&ptr, &nbytes);
	if (!nbytes)
		return NULL;

	dst = kzalloc(dstlen, GFP_DMA | GFP_KERNEL);
	if (!dst)
		return NULL;

	memcpy(dst + (dstlen - nbytes), ptr, nbytes);

	return dst;
}

/**
 * caam_read_raw_data - Read a raw byte stream as a positive integer.
 * The function skips the buffer's leading zeros, copies the remaining data
 * to a buffer allocated in the GFP_DMA | GFP_KERNEL zone and returns
 * the address of the new buffer.
 *
 * @buf   : The data to read
 * @nbytes: The amount of data to read
 */
static inline u8 *caam_read_raw_data(const u8 *buf, size_t *nbytes)
{

	caam_rsa_drop_leading_zeros(&buf, nbytes);
	if (!*nbytes)
		return NULL;

	return kmemdup(buf, *nbytes, GFP_DMA | GFP_KERNEL);
}

static int caam_rsa_check_key_length(unsigned int len)
{
	if (len > 4096)
		return -EINVAL;
	return 0;
}

static int caam_rsa_set_pub_key(struct crypto_akcipher *tfm, const void *key,
				unsigned int keylen)
{
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct rsa_key raw_key = {NULL};
	struct caam_rsa_key *rsa_key = &ctx->key;
	int ret;

	/* Free the old RSA key if any */
	caam_rsa_free_key(rsa_key);

	ret = rsa_parse_pub_key(&raw_key, key, keylen);
	if (ret)
		return ret;

	/* Copy key in DMA zone */
	rsa_key->e = kmemdup(raw_key.e, raw_key.e_sz, GFP_DMA | GFP_KERNEL);
	if (!rsa_key->e)
		goto err;

	/*
	 * Skip leading zeros and copy the positive integer to a buffer
	 * allocated in the GFP_DMA | GFP_KERNEL zone. The decryption descriptor
	 * expects a positive integer for the RSA modulus and uses its length as
	 * decryption output length.
	 */
	rsa_key->n = caam_read_raw_data(raw_key.n, &raw_key.n_sz);
	if (!rsa_key->n)
		goto err;

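	/* n_sz is in bytes; the key-length check expects bits. */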
	if (caam_rsa_check_key_length(raw_key.n_sz << 3)) {
		caam_rsa_free_key(rsa_key);
		return -EINVAL;
	}

	rsa_key->e_sz = raw_key.e_sz;
	rsa_key->n_sz = raw_key.n_sz;

	return 0;
err:
	caam_rsa_free_key(rsa_key);
	return -ENOMEM;
}

static void caam_rsa_set_priv_key_form(struct caam_rsa_ctx *ctx,
				       struct rsa_key *raw_key)
{
	struct caam_rsa_key *rsa_key = &ctx->key;
	size_t p_sz = raw_key->p_sz;
	size_t q_sz = raw_key->q_sz;

	rsa_key->p = caam_read_raw_data(raw_key->p, &p_sz);
	if (!rsa_key->p)
		return;
	rsa_key->p_sz = p_sz;

	rsa_key->q = caam_read_raw_data(raw_key->q, &q_sz);
	if (!rsa_key->q)
		goto free_p;
	rsa_key->q_sz = q_sz;

	rsa_key->tmp1 = kzalloc(raw_key->p_sz, GFP_DMA | GFP_KERNEL);
	if (!rsa_key->tmp1)
		goto free_q;

	rsa_key->tmp2 = kzalloc(raw_key->q_sz, GFP_DMA | GFP_KERNEL);
	if (!rsa_key->tmp2)
		goto free_tmp1;

	rsa_key->priv_form = FORM2;

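	/*
	 * Everything needed for FORM2 is in place; the key is upgraded to
	 * FORM3 only if all three CRT members can be read as well.
	 */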
	rsa_key->dp = caam_read_rsa_crt(raw_key->dp, raw_key->dp_sz, p_sz);
	if (!rsa_key->dp)
		goto free_tmp2;

	rsa_key->dq = caam_read_rsa_crt(raw_key->dq, raw_key->dq_sz, q_sz);
	if (!rsa_key->dq)
		goto free_dp;

	rsa_key->qinv = caam_read_rsa_crt(raw_key->qinv, raw_key->qinv_sz,
					  q_sz);
	if (!rsa_key->qinv)
		goto free_dq;

	rsa_key->priv_form = FORM3;

	return;

free_dq:
	kfree_sensitive(rsa_key->dq);
free_dp:
	kfree_sensitive(rsa_key->dp);
free_tmp2:
	kfree_sensitive(rsa_key->tmp2);
free_tmp1:
	kfree_sensitive(rsa_key->tmp1);
free_q:
	kfree_sensitive(rsa_key->q);
free_p:
	kfree_sensitive(rsa_key->p);
}

static int caam_rsa_set_priv_key(struct crypto_akcipher *tfm, const void *key,
				 unsigned int keylen)
{
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct rsa_key raw_key = {NULL};
	struct caam_rsa_key *rsa_key = &ctx->key;
	int ret;

	/* Free the old RSA key if any */
	caam_rsa_free_key(rsa_key);

	ret = rsa_parse_priv_key(&raw_key, key, keylen);
	if (ret)
		return ret;

	/* Copy key in DMA zone */
	rsa_key->d = kmemdup(raw_key.d, raw_key.d_sz, GFP_DMA | GFP_KERNEL);
	if (!rsa_key->d)
		goto err;

	rsa_key->e = kmemdup(raw_key.e, raw_key.e_sz, GFP_DMA | GFP_KERNEL);
	if (!rsa_key->e)
		goto err;

	/*
	 * Skip leading zeros and copy the positive integer to a buffer
	 * allocated in the GFP_DMA | GFP_KERNEL zone. The decryption descriptor
	 * expects a positive integer for the RSA modulus and uses its length as
	 * decryption output length.
	 */
	rsa_key->n = caam_read_raw_data(raw_key.n, &raw_key.n_sz);
	if (!rsa_key->n)
		goto err;

	if (caam_rsa_check_key_length(raw_key.n_sz << 3)) {
		caam_rsa_free_key(rsa_key);
		return -EINVAL;
	}

	rsa_key->d_sz = raw_key.d_sz;
	rsa_key->e_sz = raw_key.e_sz;
	rsa_key->n_sz = raw_key.n_sz;

	caam_rsa_set_priv_key_form(ctx, &raw_key);

	return 0;

err:
	caam_rsa_free_key(rsa_key);
	return -ENOMEM;
}

static unsigned int caam_rsa_max_size(struct crypto_akcipher *tfm)
{
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);

	return ctx->key.n_sz;
}

/* Per-session pkc driver context creation function */
static int caam_rsa_init_tfm(struct crypto_akcipher *tfm)
{
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);

	ctx->dev = caam_jr_alloc();

	if (IS_ERR(ctx->dev)) {
		pr_err("Job Ring Device allocation for transform failed\n");
		return PTR_ERR(ctx->dev);
	}

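	/*
	 * Map the shared zero buffer once per transform; it is prepended
	 * to inputs shorter than the modulus to zero-pad them.
	 */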
	ctx->padding_dma = dma_map_single(ctx->dev, zero_buffer,
					  CAAM_RSA_MAX_INPUT_SIZE - 1,
					  DMA_TO_DEVICE);
	if (dma_mapping_error(ctx->dev, ctx->padding_dma)) {
		dev_err(ctx->dev, "unable to map padding\n");
		caam_jr_free(ctx->dev);
		return -ENOMEM;
	}

	ctx->enginectx.op.do_one_request = akcipher_do_one_req;

	return 0;
}

/* Per-session pkc driver context cleanup function */
static void caam_rsa_exit_tfm(struct crypto_akcipher *tfm)
{
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;

	dma_unmap_single(ctx->dev, ctx->padding_dma, CAAM_RSA_MAX_INPUT_SIZE -
			 1, DMA_TO_DEVICE);
	caam_rsa_free_key(key);
	caam_jr_free(ctx->dev);
}

static struct caam_akcipher_alg caam_rsa = {
	.akcipher = {
		.encrypt = caam_rsa_enc,
		.decrypt = caam_rsa_dec,
		.set_pub_key = caam_rsa_set_pub_key,
		.set_priv_key = caam_rsa_set_priv_key,
		.max_size = caam_rsa_max_size,
		.init = caam_rsa_init_tfm,
		.exit = caam_rsa_exit_tfm,
		.reqsize = sizeof(struct caam_rsa_req_ctx),
		.base = {
			.cra_name = "rsa",
			.cra_driver_name = "rsa-caam",
			.cra_priority = 3000,
			.cra_module = THIS_MODULE,
			.cra_ctxsize = sizeof(struct caam_rsa_ctx),
		},
	}
};

/* Public Key Cryptography module initialization handler */
int caam_pkc_init(struct device *ctrldev)
{
	struct caam_drv_private *priv = dev_get_drvdata(ctrldev);
	u32 pk_inst, pkha;
	int err;
	init_done = false;

	/* Determine public key hardware accelerator presence. */
	if (priv->era < 10) {
		pk_inst = (rd_reg32(&priv->ctrl->perfmon.cha_num_ls) &
			   CHA_ID_LS_PK_MASK) >> CHA_ID_LS_PK_SHIFT;
	} else {
		pkha = rd_reg32(&priv->ctrl->vreg.pkha);
		pk_inst = pkha & CHA_VER_NUM_MASK;

		/*
		 * Newer CAAMs support partially disabled functionality. If this is the
		 * case, the number is non-zero, but this bit is set to indicate that
		 * no encryption or decryption is supported. Only signing and verifying
		 * is supported.
		 */
		if (pkha & CHA_VER_MISC_PKHA_NO_CRYPT)
			pk_inst = 0;
	}

	/* Do not register algorithms if PKHA is not present. */
	if (!pk_inst)
		return 0;

	/* allocate zero buffer, used for padding input */
	zero_buffer = kzalloc(CAAM_RSA_MAX_INPUT_SIZE - 1, GFP_DMA |
			      GFP_KERNEL);
	if (!zero_buffer)
		return -ENOMEM;

	err = crypto_register_akcipher(&caam_rsa.akcipher);

	if (err) {
		kfree(zero_buffer);
		dev_warn(ctrldev, "%s alg registration failed\n",
			 caam_rsa.akcipher.base.cra_driver_name);
	} else {
		init_done = true;
		caam_rsa.registered = true;
		dev_info(ctrldev, "caam pkc algorithms registered in /proc/crypto\n");
	}

	return err;
}

void caam_pkc_exit(void)
{
	if (!init_done)
		return;

	if (caam_rsa.registered)
		crypto_unregister_akcipher(&caam_rsa.akcipher);

	kfree(zero_buffer);
}