/*
 * caam - Freescale FSL CAAM support for Public Key Cryptography
 *
 * Copyright 2016 Freescale Semiconductor, Inc.
 *
 * There is no Shared Descriptor for PKC, so the Job Descriptor must carry
 * all the desired key parameters, input and output pointers.
 */
#include "compat.h"
#include "regs.h"
#include "intern.h"
#include "jr.h"
#include "error.h"
#include "desc_constr.h"
#include "sg_sw_sec4.h"
#include "caampkc.h"

#define DESC_RSA_PUB_LEN	(2 * CAAM_CMD_SZ + sizeof(struct rsa_pub_pdb))
#define DESC_RSA_PRIV_F1_LEN	(2 * CAAM_CMD_SZ + \
				 sizeof(struct rsa_priv_f1_pdb))
#define DESC_RSA_PRIV_F2_LEN	(2 * CAAM_CMD_SZ + \
				 sizeof(struct rsa_priv_f2_pdb))
#define DESC_RSA_PRIV_F3_LEN	(2 * CAAM_CMD_SZ + \
				 sizeof(struct rsa_priv_f3_pdb))

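/*
 * Layout note (a sketch of what the lengths above account for): each job
 * descriptor built by this driver is one CAAM_CMD_SZ header word, the
 * form-specific Protocol Data Block, and one CAAM_CMD_SZ protocol OPERATION
 * command - hence 2 * CAAM_CMD_SZ + sizeof(pdb) in every DESC_RSA_*_LEN
 * macro.
 */
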
static void rsa_io_unmap(struct device *dev, struct rsa_edesc *edesc,
			 struct akcipher_request *req)
{
	dma_unmap_sg(dev, req->dst, edesc->dst_nents, DMA_FROM_DEVICE);
	dma_unmap_sg(dev, req->src, edesc->src_nents, DMA_TO_DEVICE);

	if (edesc->sec4_sg_bytes)
		dma_unmap_single(dev, edesc->sec4_sg_dma, edesc->sec4_sg_bytes,
				 DMA_TO_DEVICE);
}

static void rsa_pub_unmap(struct device *dev, struct rsa_edesc *edesc,
			  struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct rsa_pub_pdb *pdb = &edesc->pdb.pub;

	dma_unmap_single(dev, pdb->n_dma, key->n_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->e_dma, key->e_sz, DMA_TO_DEVICE);
}

static void rsa_priv_f1_unmap(struct device *dev, struct rsa_edesc *edesc,
			      struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct rsa_priv_f1_pdb *pdb = &edesc->pdb.priv_f1;

	dma_unmap_single(dev, pdb->n_dma, key->n_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->d_dma, key->d_sz, DMA_TO_DEVICE);
}

static void rsa_priv_f2_unmap(struct device *dev, struct rsa_edesc *edesc,
			      struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct rsa_priv_f2_pdb *pdb = &edesc->pdb.priv_f2;
	size_t p_sz = key->p_sz;
	size_t q_sz = key->q_sz;

	dma_unmap_single(dev, pdb->d_dma, key->d_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->p_dma, p_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_BIDIRECTIONAL);
	dma_unmap_single(dev, pdb->tmp2_dma, q_sz, DMA_BIDIRECTIONAL);
}

static void rsa_priv_f3_unmap(struct device *dev, struct rsa_edesc *edesc,
			      struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct rsa_priv_f3_pdb *pdb = &edesc->pdb.priv_f3;
	size_t p_sz = key->p_sz;
	size_t q_sz = key->q_sz;

	dma_unmap_single(dev, pdb->p_dma, p_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->dp_dma, p_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->dq_dma, q_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->c_dma, p_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_BIDIRECTIONAL);
	dma_unmap_single(dev, pdb->tmp2_dma, q_sz, DMA_BIDIRECTIONAL);
}

/* RSA Job Completion handler */
static void rsa_pub_done(struct device *dev, u32 *desc, u32 err, void *context)
{
	struct akcipher_request *req = context;
	struct rsa_edesc *edesc;

	if (err)
		caam_jr_strstatus(dev, err);

	/* The hw descriptor is embedded in the edesc; recover the container */
	edesc = container_of(desc, struct rsa_edesc, hw_desc[0]);

	rsa_pub_unmap(dev, edesc, req);
	rsa_io_unmap(dev, edesc, req);
	kfree(edesc);

	akcipher_request_complete(req, err);
}

static void rsa_priv_f1_done(struct device *dev, u32 *desc, u32 err,
			     void *context)
{
	struct akcipher_request *req = context;
	struct rsa_edesc *edesc;

	if (err)
		caam_jr_strstatus(dev, err);

	edesc = container_of(desc, struct rsa_edesc, hw_desc[0]);

	rsa_priv_f1_unmap(dev, edesc, req);
	rsa_io_unmap(dev, edesc, req);
	kfree(edesc);

	akcipher_request_complete(req, err);
}

static void rsa_priv_f2_done(struct device *dev, u32 *desc, u32 err,
			     void *context)
{
	struct akcipher_request *req = context;
	struct rsa_edesc *edesc;

	if (err)
		caam_jr_strstatus(dev, err);

	edesc = container_of(desc, struct rsa_edesc, hw_desc[0]);

	rsa_priv_f2_unmap(dev, edesc, req);
	rsa_io_unmap(dev, edesc, req);
	kfree(edesc);

	akcipher_request_complete(req, err);
}

static void rsa_priv_f3_done(struct device *dev, u32 *desc, u32 err,
			     void *context)
{
	struct akcipher_request *req = context;
	struct rsa_edesc *edesc;

	if (err)
		caam_jr_strstatus(dev, err);

	edesc = container_of(desc, struct rsa_edesc, hw_desc[0]);

	rsa_priv_f3_unmap(dev, edesc, req);
	rsa_io_unmap(dev, edesc, req);
	kfree(edesc);

	akcipher_request_complete(req, err);
}

/*
 * Count the number of leading zero bytes in an SG list, walking it with an
 * sg_mapping_iter so non-contiguous buffers are handled. Returns the count
 * (at most nbytes) or a negative error code.
 */
static int caam_rsa_count_leading_zeros(struct scatterlist *sgl,
					unsigned int nbytes,
					unsigned int flags)
{
	struct sg_mapping_iter miter;
	int lzeros, ents;
	unsigned int len;
	unsigned int tbytes = nbytes;
	const u8 *buff;

	ents = sg_nents_for_len(sgl, nbytes);
	if (ents < 0)
		return ents;

	sg_miter_start(&miter, sgl, ents, SG_MITER_FROM_SG | flags);

	lzeros = 0;
	len = 0;
	while (nbytes > 0) {
		while (len && !*buff) {
			lzeros++;
			len--;
			buff++;
		}

		if (len && *buff)
			break;

		sg_miter_next(&miter);
		buff = miter.addr;
		len = miter.length;

		nbytes -= lzeros;
		lzeros = 0;
	}

	miter.consumed = lzeros;
	sg_miter_stop(&miter);
	nbytes -= lzeros;

	return tbytes - nbytes;
}

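/*
 * Worked example (illustrative): for an SG list holding the bytes
 * { 0x00, 0x00, 0x5a, 0x00, 0x17 } with nbytes == 5, the walk above
 * consumes the two leading zeros and stops at 0x5a (the 0x00 at index 3
 * is interior, not leading), returning 2. rsa_edesc_alloc() then shortens
 * req->src_len by that count and fast-forwards req->src past the zeros.
 */
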
static struct rsa_edesc *rsa_edesc_alloc(struct akcipher_request *req,
					 size_t desclen)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct device *dev = ctx->dev;
	struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);
	struct rsa_edesc *edesc;
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		       GFP_KERNEL : GFP_ATOMIC;
	int sg_flags = (flags == GFP_ATOMIC) ? SG_MITER_ATOMIC : 0;
	int sgc;
	int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes;
	int src_nents, dst_nents;
	int lzeros;

	lzeros = caam_rsa_count_leading_zeros(req->src, req->src_len, sg_flags);
	if (lzeros < 0)
		return ERR_PTR(lzeros);

	req->src_len -= lzeros;
	req->src = scatterwalk_ffwd(req_ctx->src, req->src, lzeros);

	src_nents = sg_nents_for_len(req->src, req->src_len);
	dst_nents = sg_nents_for_len(req->dst, req->dst_len);

	if (src_nents > 1)
		sec4_sg_len = src_nents;
	if (dst_nents > 1)
		sec4_sg_len += dst_nents;

	sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry);

	/* allocate space for base edesc, hw desc commands and link tables */
	edesc = kzalloc(sizeof(*edesc) + desclen + sec4_sg_bytes,
			GFP_DMA | flags);
	if (!edesc)
		return ERR_PTR(-ENOMEM);

	sgc = dma_map_sg(dev, req->src, src_nents, DMA_TO_DEVICE);
	if (unlikely(!sgc)) {
		dev_err(dev, "unable to map source\n");
		goto src_fail;
	}

	sgc = dma_map_sg(dev, req->dst, dst_nents, DMA_FROM_DEVICE);
	if (unlikely(!sgc)) {
		dev_err(dev, "unable to map destination\n");
		goto dst_fail;
	}

	edesc->sec4_sg = (void *)edesc + sizeof(*edesc) + desclen;

	sec4_sg_index = 0;
	if (src_nents > 1) {
		sg_to_sec4_sg_last(req->src, src_nents, edesc->sec4_sg, 0);
		sec4_sg_index += src_nents;
	}
	if (dst_nents > 1)
		sg_to_sec4_sg_last(req->dst, dst_nents,
				   edesc->sec4_sg + sec4_sg_index, 0);

	/* Save nents for later use in Job Descriptor */
	edesc->src_nents = src_nents;
	edesc->dst_nents = dst_nents;

	if (!sec4_sg_bytes)
		return edesc;

	edesc->sec4_sg_dma = dma_map_single(dev, edesc->sec4_sg,
					    sec4_sg_bytes, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, edesc->sec4_sg_dma)) {
		dev_err(dev, "unable to map S/G table\n");
		goto sec4_sg_fail;
	}

	edesc->sec4_sg_bytes = sec4_sg_bytes;

	return edesc;

sec4_sg_fail:
	dma_unmap_sg(dev, req->dst, dst_nents, DMA_FROM_DEVICE);
dst_fail:
	dma_unmap_sg(dev, req->src, src_nents, DMA_TO_DEVICE);
src_fail:
	kfree(edesc);
	return ERR_PTR(-ENOMEM);
}

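/*
 * Layout note (grounded in the allocation above): the rsa_edesc is followed
 * in the same kzalloc'd buffer by the hardware descriptor (desclen bytes)
 * and then by the sec4 S/G link table:
 *
 *	+------------------+------------------+------------------------+
 *	| struct rsa_edesc | hw_desc[desclen] | sec4_sg_len link table |
 *	+------------------+------------------+------------------------+
 *
 * which is why edesc->sec4_sg is computed as
 * (void *)edesc + sizeof(*edesc) + desclen.
 */
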
static int set_rsa_pub_pdb(struct akcipher_request *req,
			   struct rsa_edesc *edesc)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct device *dev = ctx->dev;
	struct rsa_pub_pdb *pdb = &edesc->pdb.pub;
	int sec4_sg_index = 0;

	pdb->n_dma = dma_map_single(dev, key->n, key->n_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->n_dma)) {
		dev_err(dev, "Unable to map RSA modulus memory\n");
		return -ENOMEM;
	}

	pdb->e_dma = dma_map_single(dev, key->e, key->e_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->e_dma)) {
		dev_err(dev, "Unable to map RSA public exponent memory\n");
		dma_unmap_single(dev, pdb->n_dma, key->n_sz, DMA_TO_DEVICE);
		return -ENOMEM;
	}

	if (edesc->src_nents > 1) {
		pdb->sgf |= RSA_PDB_SGF_F;
		pdb->f_dma = edesc->sec4_sg_dma;
		sec4_sg_index += edesc->src_nents;
	} else {
		pdb->f_dma = sg_dma_address(req->src);
	}

	if (edesc->dst_nents > 1) {
		pdb->sgf |= RSA_PDB_SGF_G;
		pdb->g_dma = edesc->sec4_sg_dma +
			     sec4_sg_index * sizeof(struct sec4_sg_entry);
	} else {
		pdb->g_dma = sg_dma_address(req->dst);
	}

	pdb->sgf |= (key->e_sz << RSA_PDB_E_SHIFT) | key->n_sz;
	pdb->f_len = req->src_len;

	return 0;
}

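/*
 * Worked example for the sgf packing above (illustrative sizes): for a
 * 2048-bit modulus (key->n_sz == 256) and a 3-byte public exponent
 * (key->e_sz == 3), the final OR amounts to
 *
 *	pdb->sgf |= (3 << RSA_PDB_E_SHIFT) | 256;
 *
 * i.e. e_sz in the upper bit-field and n_sz in the lower one, alongside
 * the RSA_PDB_SGF_F/G flags that tell CAAM whether f_dma/g_dma point at
 * flat buffers or at sec4 S/G tables.
 */
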
static int set_rsa_priv_f1_pdb(struct akcipher_request *req,
			       struct rsa_edesc *edesc)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct device *dev = ctx->dev;
	struct rsa_priv_f1_pdb *pdb = &edesc->pdb.priv_f1;
	int sec4_sg_index = 0;

	pdb->n_dma = dma_map_single(dev, key->n, key->n_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->n_dma)) {
		dev_err(dev, "Unable to map modulus memory\n");
		return -ENOMEM;
	}

	pdb->d_dma = dma_map_single(dev, key->d, key->d_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->d_dma)) {
		dev_err(dev, "Unable to map RSA private exponent memory\n");
		dma_unmap_single(dev, pdb->n_dma, key->n_sz, DMA_TO_DEVICE);
		return -ENOMEM;
	}

	if (edesc->src_nents > 1) {
		pdb->sgf |= RSA_PRIV_PDB_SGF_G;
		pdb->g_dma = edesc->sec4_sg_dma;
		sec4_sg_index += edesc->src_nents;
	} else {
		pdb->g_dma = sg_dma_address(req->src);
	}

	if (edesc->dst_nents > 1) {
		pdb->sgf |= RSA_PRIV_PDB_SGF_F;
		pdb->f_dma = edesc->sec4_sg_dma +
			     sec4_sg_index * sizeof(struct sec4_sg_entry);
	} else {
		pdb->f_dma = sg_dma_address(req->dst);
	}

	pdb->sgf |= (key->d_sz << RSA_PDB_D_SHIFT) | key->n_sz;

	return 0;
}

static int set_rsa_priv_f2_pdb(struct akcipher_request *req,
			       struct rsa_edesc *edesc)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct device *dev = ctx->dev;
	struct rsa_priv_f2_pdb *pdb = &edesc->pdb.priv_f2;
	int sec4_sg_index = 0;
	size_t p_sz = key->p_sz;
	size_t q_sz = key->q_sz;

	pdb->d_dma = dma_map_single(dev, key->d, key->d_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->d_dma)) {
		dev_err(dev, "Unable to map RSA private exponent memory\n");
		return -ENOMEM;
	}

	pdb->p_dma = dma_map_single(dev, key->p, p_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->p_dma)) {
		dev_err(dev, "Unable to map RSA prime factor p memory\n");
		goto unmap_d;
	}

	pdb->q_dma = dma_map_single(dev, key->q, q_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->q_dma)) {
		dev_err(dev, "Unable to map RSA prime factor q memory\n");
		goto unmap_p;
	}

	pdb->tmp1_dma = dma_map_single(dev, key->tmp1, p_sz, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, pdb->tmp1_dma)) {
		dev_err(dev, "Unable to map RSA tmp1 memory\n");
		goto unmap_q;
	}

	pdb->tmp2_dma = dma_map_single(dev, key->tmp2, q_sz, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, pdb->tmp2_dma)) {
		dev_err(dev, "Unable to map RSA tmp2 memory\n");
		goto unmap_tmp1;
	}

	if (edesc->src_nents > 1) {
		pdb->sgf |= RSA_PRIV_PDB_SGF_G;
		pdb->g_dma = edesc->sec4_sg_dma;
		sec4_sg_index += edesc->src_nents;
	} else {
		pdb->g_dma = sg_dma_address(req->src);
	}

	if (edesc->dst_nents > 1) {
		pdb->sgf |= RSA_PRIV_PDB_SGF_F;
		pdb->f_dma = edesc->sec4_sg_dma +
			     sec4_sg_index * sizeof(struct sec4_sg_entry);
	} else {
		pdb->f_dma = sg_dma_address(req->dst);
	}

	pdb->sgf |= (key->d_sz << RSA_PDB_D_SHIFT) | key->n_sz;
	pdb->p_q_len = (q_sz << RSA_PDB_Q_SHIFT) | p_sz;

	return 0;

unmap_tmp1:
	dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_BIDIRECTIONAL);
unmap_q:
	dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE);
unmap_p:
	dma_unmap_single(dev, pdb->p_dma, p_sz, DMA_TO_DEVICE);
unmap_d:
	dma_unmap_single(dev, pdb->d_dma, key->d_sz, DMA_TO_DEVICE);

	return -ENOMEM;
}

static int set_rsa_priv_f3_pdb(struct akcipher_request *req,
			       struct rsa_edesc *edesc)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct device *dev = ctx->dev;
	struct rsa_priv_f3_pdb *pdb = &edesc->pdb.priv_f3;
	int sec4_sg_index = 0;
	size_t p_sz = key->p_sz;
	size_t q_sz = key->q_sz;

	pdb->p_dma = dma_map_single(dev, key->p, p_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->p_dma)) {
		dev_err(dev, "Unable to map RSA prime factor p memory\n");
		return -ENOMEM;
	}

	pdb->q_dma = dma_map_single(dev, key->q, q_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->q_dma)) {
		dev_err(dev, "Unable to map RSA prime factor q memory\n");
		goto unmap_p;
	}

	pdb->dp_dma = dma_map_single(dev, key->dp, p_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->dp_dma)) {
		dev_err(dev, "Unable to map RSA exponent dp memory\n");
		goto unmap_q;
	}

	pdb->dq_dma = dma_map_single(dev, key->dq, q_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->dq_dma)) {
		dev_err(dev, "Unable to map RSA exponent dq memory\n");
		goto unmap_dp;
	}

	pdb->c_dma = dma_map_single(dev, key->qinv, p_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->c_dma)) {
		dev_err(dev, "Unable to map RSA CRT coefficient qinv memory\n");
		goto unmap_dq;
	}

	pdb->tmp1_dma = dma_map_single(dev, key->tmp1, p_sz, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, pdb->tmp1_dma)) {
		dev_err(dev, "Unable to map RSA tmp1 memory\n");
		goto unmap_qinv;
	}

	pdb->tmp2_dma = dma_map_single(dev, key->tmp2, q_sz, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, pdb->tmp2_dma)) {
		dev_err(dev, "Unable to map RSA tmp2 memory\n");
		goto unmap_tmp1;
	}

	if (edesc->src_nents > 1) {
		pdb->sgf |= RSA_PRIV_PDB_SGF_G;
		pdb->g_dma = edesc->sec4_sg_dma;
		sec4_sg_index += edesc->src_nents;
	} else {
		pdb->g_dma = sg_dma_address(req->src);
	}

	if (edesc->dst_nents > 1) {
		pdb->sgf |= RSA_PRIV_PDB_SGF_F;
		pdb->f_dma = edesc->sec4_sg_dma +
			     sec4_sg_index * sizeof(struct sec4_sg_entry);
	} else {
		pdb->f_dma = sg_dma_address(req->dst);
	}

	pdb->sgf |= key->n_sz;
	pdb->p_q_len = (q_sz << RSA_PDB_Q_SHIFT) | p_sz;

	return 0;

unmap_tmp1:
	dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_BIDIRECTIONAL);
unmap_qinv:
	dma_unmap_single(dev, pdb->c_dma, p_sz, DMA_TO_DEVICE);
unmap_dq:
	dma_unmap_single(dev, pdb->dq_dma, q_sz, DMA_TO_DEVICE);
unmap_dp:
	dma_unmap_single(dev, pdb->dp_dma, p_sz, DMA_TO_DEVICE);
unmap_q:
	dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE);
unmap_p:
	dma_unmap_single(dev, pdb->p_dma, p_sz, DMA_TO_DEVICE);

	return -ENOMEM;
}

static int caam_rsa_enc(struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct device *jrdev = ctx->dev;
	struct rsa_edesc *edesc;
	int ret;

	if (unlikely(!key->n || !key->e))
		return -EINVAL;

	if (req->dst_len < key->n_sz) {
		req->dst_len = key->n_sz;
		dev_err(jrdev, "Output buffer length less than parameter n\n");
		return -EOVERFLOW;
	}

	/* Allocate extended descriptor */
	edesc = rsa_edesc_alloc(req, DESC_RSA_PUB_LEN);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* Set RSA Encrypt Protocol Data Block */
	ret = set_rsa_pub_pdb(req, edesc);
	if (ret)
		goto init_fail;

	/* Initialize Job Descriptor */
	init_rsa_pub_desc(edesc->hw_desc, &edesc->pdb.pub);

	ret = caam_jr_enqueue(jrdev, edesc->hw_desc, rsa_pub_done, req);
	if (!ret)
		return -EINPROGRESS;

	rsa_pub_unmap(jrdev, edesc, req);

init_fail:
	rsa_io_unmap(jrdev, edesc, req);
	kfree(edesc);
	return ret;
}

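/*
 * Note on the flow above: caam_jr_enqueue() returning 0 means the Job Ring
 * accepted the job, so -EINPROGRESS is reported and the request completes
 * asynchronously in rsa_pub_done(). Any other return value falls through
 * to the unmap/free error path. The three decrypt entry points below follow
 * the same pattern with their respective PDB setters and done callbacks.
 */
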
static int caam_rsa_dec_priv_f1(struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct device *jrdev = ctx->dev;
	struct rsa_edesc *edesc;
	int ret;

	/* Allocate extended descriptor */
	edesc = rsa_edesc_alloc(req, DESC_RSA_PRIV_F1_LEN);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* Set RSA Decrypt Protocol Data Block - Private Key Form #1 */
	ret = set_rsa_priv_f1_pdb(req, edesc);
	if (ret)
		goto init_fail;

	/* Initialize Job Descriptor */
	init_rsa_priv_f1_desc(edesc->hw_desc, &edesc->pdb.priv_f1);

	ret = caam_jr_enqueue(jrdev, edesc->hw_desc, rsa_priv_f1_done, req);
	if (!ret)
		return -EINPROGRESS;

	rsa_priv_f1_unmap(jrdev, edesc, req);

init_fail:
	rsa_io_unmap(jrdev, edesc, req);
	kfree(edesc);
	return ret;
}

static int caam_rsa_dec_priv_f2(struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct device *jrdev = ctx->dev;
	struct rsa_edesc *edesc;
	int ret;

	/* Allocate extended descriptor */
	edesc = rsa_edesc_alloc(req, DESC_RSA_PRIV_F2_LEN);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* Set RSA Decrypt Protocol Data Block - Private Key Form #2 */
	ret = set_rsa_priv_f2_pdb(req, edesc);
	if (ret)
		goto init_fail;

	/* Initialize Job Descriptor */
	init_rsa_priv_f2_desc(edesc->hw_desc, &edesc->pdb.priv_f2);

	ret = caam_jr_enqueue(jrdev, edesc->hw_desc, rsa_priv_f2_done, req);
	if (!ret)
		return -EINPROGRESS;

	rsa_priv_f2_unmap(jrdev, edesc, req);

init_fail:
	rsa_io_unmap(jrdev, edesc, req);
	kfree(edesc);
	return ret;
}

static int caam_rsa_dec_priv_f3(struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct device *jrdev = ctx->dev;
	struct rsa_edesc *edesc;
	int ret;

	/* Allocate extended descriptor */
	edesc = rsa_edesc_alloc(req, DESC_RSA_PRIV_F3_LEN);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* Set RSA Decrypt Protocol Data Block - Private Key Form #3 */
	ret = set_rsa_priv_f3_pdb(req, edesc);
	if (ret)
		goto init_fail;

	/* Initialize Job Descriptor */
	init_rsa_priv_f3_desc(edesc->hw_desc, &edesc->pdb.priv_f3);

	ret = caam_jr_enqueue(jrdev, edesc->hw_desc, rsa_priv_f3_done, req);
	if (!ret)
		return -EINPROGRESS;

	rsa_priv_f3_unmap(jrdev, edesc, req);

init_fail:
	rsa_io_unmap(jrdev, edesc, req);
	kfree(edesc);
	return ret;
}

/*
 * Dispatch on the private key form chosen at setkey time: form 3 (CRT
 * members p, q, dP, dQ, qInv), form 2 (p, q, d) or form 1 (n, d).
 */
static int caam_rsa_dec(struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;
	int ret;

	if (unlikely(!key->n || !key->d))
		return -EINVAL;

	if (req->dst_len < key->n_sz) {
		req->dst_len = key->n_sz;
		dev_err(ctx->dev, "Output buffer length less than parameter n\n");
		return -EOVERFLOW;
	}

	if (key->priv_form == FORM3)
		ret = caam_rsa_dec_priv_f3(req);
	else if (key->priv_form == FORM2)
		ret = caam_rsa_dec_priv_f2(req);
	else
		ret = caam_rsa_dec_priv_f1(req);

	return ret;
}

static void caam_rsa_free_key(struct caam_rsa_key *key)
{
	kzfree(key->d);
	kzfree(key->p);
	kzfree(key->q);
	kzfree(key->dp);
	kzfree(key->dq);
	kzfree(key->qinv);
	kzfree(key->tmp1);
	kzfree(key->tmp2);
	kfree(key->e);
	kfree(key->n);
	memset(key, 0, sizeof(*key));
}

static void caam_rsa_drop_leading_zeros(const u8 **ptr, size_t *nbytes)
{
	while (!**ptr && *nbytes) {
		(*ptr)++;
		(*nbytes)--;
	}
}

/**
 * caam_read_rsa_crt - Used for reading dP, dQ, qInv CRT members.
 * dP, dQ and qInv may decode to fewer bytes than the corresponding p or q
 * length, as BER encoding requires the minimum number of bytes to encode an
 * integer. The decoded dP, dQ, qInv values therefore have to be zero-padded
 * to the appropriate length.
 *
 * @ptr   : pointer to {dP, dQ, qInv} CRT member
 * @nbytes: length in bytes of {dP, dQ, qInv} CRT member
 * @dstlen: length in bytes of corresponding p or q prime factor
 */
static u8 *caam_read_rsa_crt(const u8 *ptr, size_t nbytes, size_t dstlen)
{
	u8 *dst;

	caam_rsa_drop_leading_zeros(&ptr, &nbytes);
	if (!nbytes)
		return NULL;

	dst = kzalloc(dstlen, GFP_DMA | GFP_KERNEL);
	if (!dst)
		return NULL;

	memcpy(dst + (dstlen - nbytes), ptr, nbytes);

	return dst;
}

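/*
 * Example (illustrative): with dstlen == 4 and the BER-decoded member
 * { 0x01, 0x02 } (nbytes == 2), the kzalloc'd buffer already holds zeros
 * and the memcpy above lands at offset 2, yielding { 0x00, 0x00, 0x01,
 * 0x02 }, the fixed-width, left-zero-padded integer the PKHA expects.
 */
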
/**
 * caam_read_raw_data - Read a raw byte stream as a positive integer.
 * The function skips the buffer's leading zeros, copies the remaining data
 * to a buffer allocated in the GFP_DMA | GFP_KERNEL zone and returns
 * the address of the new buffer.
 *
 * @buf   : The data to read
 * @nbytes: The amount of data to read
 */
static inline u8 *caam_read_raw_data(const u8 *buf, size_t *nbytes)
{
	u8 *val;

	caam_rsa_drop_leading_zeros(&buf, nbytes);
	if (!*nbytes)
		return NULL;

	val = kzalloc(*nbytes, GFP_DMA | GFP_KERNEL);
	if (!val)
		return NULL;

	memcpy(val, buf, *nbytes);

	return val;
}

static int caam_rsa_check_key_length(unsigned int len)
{
	/* len is in bits (callers pass n_sz << 3); cap at 4096-bit moduli */
	if (len > 4096)
		return -EINVAL;
	return 0;
}

static int caam_rsa_set_pub_key(struct crypto_akcipher *tfm, const void *key,
				unsigned int keylen)
{
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct rsa_key raw_key = {NULL};
	struct caam_rsa_key *rsa_key = &ctx->key;
	int ret;

	/* Free the old RSA key if any */
	caam_rsa_free_key(rsa_key);

	ret = rsa_parse_pub_key(&raw_key, key, keylen);
	if (ret)
		return ret;

	/* Copy key in DMA zone */
	rsa_key->e = kzalloc(raw_key.e_sz, GFP_DMA | GFP_KERNEL);
	if (!rsa_key->e)
		goto err;

	/*
	 * Skip leading zeros and copy the positive integer to a buffer
	 * allocated in the GFP_DMA | GFP_KERNEL zone. The decryption descriptor
	 * expects a positive integer for the RSA modulus and uses its length as
	 * decryption output length.
	 */
	rsa_key->n = caam_read_raw_data(raw_key.n, &raw_key.n_sz);
	if (!rsa_key->n)
		goto err;

	if (caam_rsa_check_key_length(raw_key.n_sz << 3)) {
		caam_rsa_free_key(rsa_key);
		return -EINVAL;
	}

	rsa_key->e_sz = raw_key.e_sz;
	rsa_key->n_sz = raw_key.n_sz;

	memcpy(rsa_key->e, raw_key.e, raw_key.e_sz);

	return 0;
err:
	caam_rsa_free_key(rsa_key);
	return -ENOMEM;
}

/*
 * Try to upgrade the key to the fastest private key form the input allows:
 * form 2 once p, q and the scratch buffers are in place, form 3 once all
 * CRT members decode. On any allocation failure the partial members are
 * freed and the key silently stays in the previous form.
 */
static void caam_rsa_set_priv_key_form(struct caam_rsa_ctx *ctx,
				       struct rsa_key *raw_key)
{
	struct caam_rsa_key *rsa_key = &ctx->key;
	size_t p_sz = raw_key->p_sz;
	size_t q_sz = raw_key->q_sz;

	rsa_key->p = caam_read_raw_data(raw_key->p, &p_sz);
	if (!rsa_key->p)
		return;
	rsa_key->p_sz = p_sz;

	rsa_key->q = caam_read_raw_data(raw_key->q, &q_sz);
	if (!rsa_key->q)
		goto free_p;
	rsa_key->q_sz = q_sz;

	rsa_key->tmp1 = kzalloc(raw_key->p_sz, GFP_DMA | GFP_KERNEL);
	if (!rsa_key->tmp1)
		goto free_q;

	rsa_key->tmp2 = kzalloc(raw_key->q_sz, GFP_DMA | GFP_KERNEL);
	if (!rsa_key->tmp2)
		goto free_tmp1;

	rsa_key->priv_form = FORM2;

	rsa_key->dp = caam_read_rsa_crt(raw_key->dp, raw_key->dp_sz, p_sz);
	if (!rsa_key->dp)
		goto free_tmp2;

	rsa_key->dq = caam_read_rsa_crt(raw_key->dq, raw_key->dq_sz, q_sz);
	if (!rsa_key->dq)
		goto free_dp;

	/* qInv = q^(-1) mod p, so pad it to the length of p (p_sz), which
	 * is also the size set_rsa_priv_f3_pdb() maps c_dma with */
	rsa_key->qinv = caam_read_rsa_crt(raw_key->qinv, raw_key->qinv_sz,
					  p_sz);
	if (!rsa_key->qinv)
		goto free_dq;

	rsa_key->priv_form = FORM3;

	return;

free_dq:
	kzfree(rsa_key->dq);
free_dp:
	kzfree(rsa_key->dp);
free_tmp2:
	kzfree(rsa_key->tmp2);
free_tmp1:
	kzfree(rsa_key->tmp1);
free_q:
	kzfree(rsa_key->q);
free_p:
	kzfree(rsa_key->p);
}

static int caam_rsa_set_priv_key(struct crypto_akcipher *tfm, const void *key,
				 unsigned int keylen)
{
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct rsa_key raw_key = {NULL};
	struct caam_rsa_key *rsa_key = &ctx->key;
	int ret;

	/* Free the old RSA key if any */
	caam_rsa_free_key(rsa_key);

	ret = rsa_parse_priv_key(&raw_key, key, keylen);
	if (ret)
		return ret;

	/* Copy key in DMA zone */
	rsa_key->d = kzalloc(raw_key.d_sz, GFP_DMA | GFP_KERNEL);
	if (!rsa_key->d)
		goto err;

	rsa_key->e = kzalloc(raw_key.e_sz, GFP_DMA | GFP_KERNEL);
	if (!rsa_key->e)
		goto err;

	/*
	 * Skip leading zeros and copy the positive integer to a buffer
	 * allocated in the GFP_DMA | GFP_KERNEL zone. The decryption descriptor
	 * expects a positive integer for the RSA modulus and uses its length as
	 * decryption output length.
	 */
	rsa_key->n = caam_read_raw_data(raw_key.n, &raw_key.n_sz);
	if (!rsa_key->n)
		goto err;

	if (caam_rsa_check_key_length(raw_key.n_sz << 3)) {
		caam_rsa_free_key(rsa_key);
		return -EINVAL;
	}

	rsa_key->d_sz = raw_key.d_sz;
	rsa_key->e_sz = raw_key.e_sz;
	rsa_key->n_sz = raw_key.n_sz;

	memcpy(rsa_key->d, raw_key.d, raw_key.d_sz);
	memcpy(rsa_key->e, raw_key.e, raw_key.e_sz);

	caam_rsa_set_priv_key_form(ctx, &raw_key);

	return 0;

err:
	caam_rsa_free_key(rsa_key);
	return -ENOMEM;
}

static unsigned int caam_rsa_max_size(struct crypto_akcipher *tfm)
{
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);

	return ctx->key.n_sz;
}

/* Per-session pkc driver context creation function */
static int caam_rsa_init_tfm(struct crypto_akcipher *tfm)
{
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);

	ctx->dev = caam_jr_alloc();

	if (IS_ERR(ctx->dev)) {
		pr_err("Job Ring Device allocation for transform failed\n");
		return PTR_ERR(ctx->dev);
	}

	return 0;
}

/* Per-session pkc driver context cleanup function */
static void caam_rsa_exit_tfm(struct crypto_akcipher *tfm)
{
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;

	caam_rsa_free_key(key);
	caam_jr_free(ctx->dev);
}

static struct akcipher_alg caam_rsa = {
	.encrypt = caam_rsa_enc,
	.decrypt = caam_rsa_dec,
	.sign = caam_rsa_dec,
	.verify = caam_rsa_enc,
	.set_pub_key = caam_rsa_set_pub_key,
	.set_priv_key = caam_rsa_set_priv_key,
	.max_size = caam_rsa_max_size,
	.init = caam_rsa_init_tfm,
	.exit = caam_rsa_exit_tfm,
	.reqsize = sizeof(struct caam_rsa_req_ctx),
	.base = {
		.cra_name = "rsa",
		.cra_driver_name = "rsa-caam",
		.cra_priority = 3000,
		.cra_module = THIS_MODULE,
		.cra_ctxsize = sizeof(struct caam_rsa_ctx),
	},
};

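/*
 * Usage sketch (illustrative, not part of this driver): a kernel user
 * reaches this implementation through the generic akcipher API; at
 * cra_priority 3000 "rsa-caam" is preferred over the software "rsa"
 * implementation. Something along these lines, with error handling
 * omitted:
 *
 *	struct crypto_akcipher *tfm = crypto_alloc_akcipher("rsa", 0, 0);
 *	struct akcipher_request *req;
 *	DECLARE_CRYPTO_WAIT(wait);
 *	int err;
 *
 *	crypto_akcipher_set_pub_key(tfm, der_key, der_key_len);
 *	req = akcipher_request_alloc(tfm, GFP_KERNEL);
 *	akcipher_request_set_crypt(req, src_sgl, dst_sgl, src_len, dst_len);
 *	akcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
 *				      crypto_req_done, &wait);
 *	err = crypto_wait_req(crypto_akcipher_encrypt(req), &wait);
 */
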
/* Public Key Cryptography module initialization handler */
static int __init caam_pkc_init(void)
{
	struct device_node *dev_node;
	struct platform_device *pdev;
	struct device *ctrldev;
	struct caam_drv_private *priv;
	u32 cha_inst, pk_inst;
	int err;

	dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
	if (!dev_node) {
		dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
		if (!dev_node)
			return -ENODEV;
	}

	pdev = of_find_device_by_node(dev_node);
	if (!pdev) {
		of_node_put(dev_node);
		return -ENODEV;
	}

	ctrldev = &pdev->dev;
	priv = dev_get_drvdata(ctrldev);
	of_node_put(dev_node);

	/*
	 * If priv is NULL, it's probably because the caam driver wasn't
	 * properly initialized (e.g. RNG4 init failed). Thus, bail out here.
	 */
	if (!priv)
		return -ENODEV;

	/* Determine public key hardware accelerator presence. */
	cha_inst = rd_reg32(&priv->ctrl->perfmon.cha_num_ls);
	pk_inst = (cha_inst & CHA_ID_LS_PK_MASK) >> CHA_ID_LS_PK_SHIFT;

	/* Do not register algorithms if PKHA is not present. */
	if (!pk_inst)
		return -ENODEV;

	err = crypto_register_akcipher(&caam_rsa);
	if (err)
		dev_warn(ctrldev, "%s alg registration failed\n",
			 caam_rsa.base.cra_driver_name);
	else
		dev_info(ctrldev, "caam pkc algorithms registered in /proc/crypto\n");

	return err;
}

static void __exit caam_pkc_exit(void)
{
	crypto_unregister_akcipher(&caam_rsa);
}

module_init(caam_pkc_init);
module_exit(caam_pkc_exit);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_DESCRIPTION("FSL CAAM support for PKC functions of crypto API");
MODULE_AUTHOR("Freescale Semiconductor");