// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
/* Copyright(c) 2014 - 2020 Intel Corporation */
#include <linux/module.h>
#include <crypto/internal/rsa.h>
#include <crypto/internal/akcipher.h>
#include <crypto/akcipher.h>
#include <crypto/kpp.h>
#include <crypto/internal/kpp.h>
#include <crypto/dh.h>
#include <linux/dma-mapping.h>
#include <linux/fips.h>
#include <crypto/scatterwalk.h>
#include "icp_qat_fw_pke.h"
#include "adf_accel_devices.h"
#include "adf_transport.h"
#include "adf_common_drv.h"
#include "qat_crypto.h"

static DEFINE_MUTEX(algs_lock);
static unsigned int active_devs;

struct qat_rsa_input_params {
	union {
		struct {
			dma_addr_t m;
			dma_addr_t e;
			dma_addr_t n;
		} enc;
		struct {
			dma_addr_t c;
			dma_addr_t d;
			dma_addr_t n;
		} dec;
		struct {
			dma_addr_t c;
			dma_addr_t p;
			dma_addr_t q;
			dma_addr_t dp;
			dma_addr_t dq;
			dma_addr_t qinv;
		} dec_crt;
		u64 in_tab[8];
	};
} __packed __aligned(64);

struct qat_rsa_output_params {
	union {
		struct {
			dma_addr_t c;
		} enc;
		struct {
			dma_addr_t m;
		} dec;
		u64 out_tab[8];
	};
} __packed __aligned(64);

struct qat_rsa_ctx {
	char *n;
	char *e;
	char *d;
	char *p;
	char *q;
	char *dp;
	char *dq;
	char *qinv;
	dma_addr_t dma_n;
	dma_addr_t dma_e;
	dma_addr_t dma_d;
	dma_addr_t dma_p;
	dma_addr_t dma_q;
	dma_addr_t dma_dp;
	dma_addr_t dma_dq;
	dma_addr_t dma_qinv;
	unsigned int key_sz;
	bool crt_mode;
	struct qat_crypto_instance *inst;
} __packed __aligned(64);

struct qat_dh_input_params {
	union {
		struct {
			dma_addr_t b;
			dma_addr_t xa;
			dma_addr_t p;
		} in;
		struct {
			dma_addr_t xa;
			dma_addr_t p;
		} in_g2;
		u64 in_tab[8];
	};
} __packed __aligned(64);

struct qat_dh_output_params {
	union {
		dma_addr_t r;
		u64 out_tab[8];
	};
} __packed __aligned(64);

struct qat_dh_ctx {
	char *g;
	char *xa;
	char *p;
	dma_addr_t dma_g;
	dma_addr_t dma_xa;
	dma_addr_t dma_p;
	unsigned int p_size;
	bool g2;
	struct qat_crypto_instance *inst;
} __packed __aligned(64);

struct qat_asym_request {
	union {
		struct qat_rsa_input_params rsa;
		struct qat_dh_input_params dh;
	} in;
	union {
		struct qat_rsa_output_params rsa;
		struct qat_dh_output_params dh;
	} out;
	dma_addr_t phy_in;
	dma_addr_t phy_out;
	char *src_align;
	char *dst_align;
	struct icp_qat_fw_pke_request req;
	union {
		struct qat_rsa_ctx *rsa;
		struct qat_dh_ctx *dh;
	} ctx;
	union {
		struct akcipher_request *rsa;
		struct kpp_request *dh;
	} areq;
	int err;
	void (*cb)(struct icp_qat_fw_pke_resp *resp);
} __aligned(64);

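/*
 * Completion callback for a Diffie-Hellman request: unmap or free the
 * input/output buffers set up in qat_dh_compute_value(), copy the result
 * back into the caller's destination scatterlist when a bounce buffer was
 * used, and complete the kpp request with the firmware status.
 */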
static void qat_dh_cb(struct icp_qat_fw_pke_resp *resp)
{
	struct qat_asym_request *req = (void *)(__force long)resp->opaque;
	struct kpp_request *areq = req->areq.dh;
	struct device *dev = &GET_DEV(req->ctx.dh->inst->accel_dev);
	int err = ICP_QAT_FW_PKE_RESP_PKE_STAT_GET(
				resp->pke_resp_hdr.comn_resp_flags);

	err = (err == ICP_QAT_FW_COMN_STATUS_FLAG_OK) ? 0 : -EINVAL;

	if (areq->src) {
		if (req->src_align)
			dma_free_coherent(dev, req->ctx.dh->p_size,
					  req->src_align, req->in.dh.in.b);
		else
			dma_unmap_single(dev, req->in.dh.in.b,
					 req->ctx.dh->p_size, DMA_TO_DEVICE);
	}

	areq->dst_len = req->ctx.dh->p_size;
	if (req->dst_align) {
		scatterwalk_map_and_copy(req->dst_align, areq->dst, 0,
					 areq->dst_len, 1);

		dma_free_coherent(dev, req->ctx.dh->p_size, req->dst_align,
				  req->out.dh.r);
	} else {
		dma_unmap_single(dev, req->out.dh.r, req->ctx.dh->p_size,
				 DMA_FROM_DEVICE);
	}

	dma_unmap_single(dev, req->phy_in, sizeof(struct qat_dh_input_params),
			 DMA_TO_DEVICE);
	dma_unmap_single(dev, req->phy_out,
			 sizeof(struct qat_dh_output_params),
			 DMA_TO_DEVICE);

	kpp_request_complete(areq, err);
}

#define PKE_DH_1536 0x390c1a49
#define PKE_DH_G2_1536 0x2e0b1a3e
#define PKE_DH_2048 0x4d0c1a60
#define PKE_DH_G2_2048 0x3e0b1a55
#define PKE_DH_3072 0x510c1a77
#define PKE_DH_G2_3072 0x3a0b1a6c
#define PKE_DH_4096 0x690c1a8e
#define PKE_DH_G2_4096 0x4a0b1a83

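/*
 * Map the DH modulus size in bytes to the firmware PKE function ID,
 * using the dedicated g=2 variants when the generator is 2.
 */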
static unsigned long qat_dh_fn_id(unsigned int len, bool g2)
{
	unsigned int bitslen = len << 3;

	switch (bitslen) {
	case 1536:
		return g2 ? PKE_DH_G2_1536 : PKE_DH_1536;
	case 2048:
		return g2 ? PKE_DH_G2_2048 : PKE_DH_2048;
	case 3072:
		return g2 ? PKE_DH_G2_3072 : PKE_DH_3072;
	case 4096:
		return g2 ? PKE_DH_G2_4096 : PKE_DH_4096;
	default:
		return 0;
	};
}

static inline struct qat_dh_ctx *qat_dh_get_params(struct crypto_kpp *tfm)
{
	return kpp_tfm_ctx(tfm);
}

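/*
 * Build and submit a DH request. The same routine serves both
 * generate_public_key (no req->src; the base is g, or the g=2 shortcut)
 * and compute_shared_secret (req->src holds the peer's public value).
 * The device expects contiguous operands of exactly p_size bytes, so
 * smaller or scattered buffers are bounced through coherent DMA
 * allocations before the message is posted to the PKE ring.
 */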
static int qat_dh_compute_value(struct kpp_request *req)
{
	struct crypto_kpp *tfm = crypto_kpp_reqtfm(req);
	struct qat_dh_ctx *ctx = kpp_tfm_ctx(tfm);
	struct qat_crypto_instance *inst = ctx->inst;
	struct device *dev = &GET_DEV(inst->accel_dev);
	struct qat_asym_request *qat_req =
			PTR_ALIGN(kpp_request_ctx(req), 64);
	struct icp_qat_fw_pke_request *msg = &qat_req->req;
	int ret, ctr = 0;
	int n_input_params = 0;

	if (unlikely(!ctx->xa))
		return -EINVAL;

	if (req->dst_len < ctx->p_size) {
		req->dst_len = ctx->p_size;
		return -EOVERFLOW;
	}
	memset(msg, '\0', sizeof(*msg));
	ICP_QAT_FW_PKE_HDR_VALID_FLAG_SET(msg->pke_hdr,
					  ICP_QAT_FW_COMN_REQ_FLAG_SET);

	msg->pke_hdr.cd_pars.func_id = qat_dh_fn_id(ctx->p_size,
						    !req->src && ctx->g2);
	if (unlikely(!msg->pke_hdr.cd_pars.func_id))
		return -EINVAL;

	qat_req->cb = qat_dh_cb;
	qat_req->ctx.dh = ctx;
	qat_req->areq.dh = req;
	msg->pke_hdr.service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_PKE;
	msg->pke_hdr.comn_req_flags =
		ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_PTR_TYPE_FLAT,
					    QAT_COMN_CD_FLD_TYPE_64BIT_ADR);

	/*
	 * If no source is provided use g as base
	 */
	if (req->src) {
		qat_req->in.dh.in.xa = ctx->dma_xa;
		qat_req->in.dh.in.p = ctx->dma_p;
		n_input_params = 3;
	} else {
		if (ctx->g2) {
			qat_req->in.dh.in_g2.xa = ctx->dma_xa;
			qat_req->in.dh.in_g2.p = ctx->dma_p;
			n_input_params = 2;
		} else {
			qat_req->in.dh.in.b = ctx->dma_g;
			qat_req->in.dh.in.xa = ctx->dma_xa;
			qat_req->in.dh.in.p = ctx->dma_p;
			n_input_params = 3;
		}
	}

	ret = -ENOMEM;
	if (req->src) {
		/*
		 * src can be of any size in valid range, but HW expects it to
		 * be the same as modulo p so in case it is different we need
		 * to allocate a new buf and copy src data.
		 * In other case we just need to map the user provided buffer.
		 * Also need to make sure that it is in contiguous buffer.
		 */
		if (sg_is_last(req->src) && req->src_len == ctx->p_size) {
			qat_req->src_align = NULL;
			qat_req->in.dh.in.b = dma_map_single(dev,
							     sg_virt(req->src),
							     req->src_len,
							     DMA_TO_DEVICE);
			if (unlikely(dma_mapping_error(dev,
						       qat_req->in.dh.in.b)))
				return ret;

		} else {
			int shift = ctx->p_size - req->src_len;

			qat_req->src_align = dma_alloc_coherent(dev,
								ctx->p_size,
								&qat_req->in.dh.in.b,
								GFP_KERNEL);
			if (unlikely(!qat_req->src_align))
				return ret;

			scatterwalk_map_and_copy(qat_req->src_align + shift,
						 req->src, 0, req->src_len, 0);
		}
	}
	/*
	 * dst can be of any size in valid range, but HW expects it to be the
	 * same as modulo m so in case it is different we need to allocate a
	 * new buf and copy src data.
	 * In other case we just need to map the user provided buffer.
	 * Also need to make sure that it is in contiguous buffer.
	 */
	if (sg_is_last(req->dst) && req->dst_len == ctx->p_size) {
		qat_req->dst_align = NULL;
		qat_req->out.dh.r = dma_map_single(dev, sg_virt(req->dst),
						   req->dst_len,
						   DMA_FROM_DEVICE);

		if (unlikely(dma_mapping_error(dev, qat_req->out.dh.r)))
			goto unmap_src;

	} else {
		qat_req->dst_align = dma_alloc_coherent(dev, ctx->p_size,
							&qat_req->out.dh.r,
							GFP_KERNEL);
		if (unlikely(!qat_req->dst_align))
			goto unmap_src;
	}

	qat_req->in.dh.in_tab[n_input_params] = 0;
	qat_req->out.dh.out_tab[1] = 0;
	/* Mapping in.in.b or in.in_g2.xa is the same */
	qat_req->phy_in = dma_map_single(dev, &qat_req->in.dh.in.b,
					 sizeof(struct qat_dh_input_params),
					 DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev, qat_req->phy_in)))
		goto unmap_dst;

	qat_req->phy_out = dma_map_single(dev, &qat_req->out.dh.r,
					  sizeof(struct qat_dh_output_params),
					  DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev, qat_req->phy_out)))
		goto unmap_in_params;

	msg->pke_mid.src_data_addr = qat_req->phy_in;
	msg->pke_mid.dest_data_addr = qat_req->phy_out;
	msg->pke_mid.opaque = (u64)(__force long)qat_req;
	msg->input_param_count = n_input_params;
	msg->output_param_count = 1;

	do {
		ret = adf_send_message(ctx->inst->pke_tx, (u32 *)msg);
	} while (ret == -EBUSY && ctr++ < 100);

	if (!ret)
		return -EINPROGRESS;

	if (!dma_mapping_error(dev, qat_req->phy_out))
		dma_unmap_single(dev, qat_req->phy_out,
				 sizeof(struct qat_dh_output_params),
				 DMA_TO_DEVICE);
unmap_in_params:
	if (!dma_mapping_error(dev, qat_req->phy_in))
		dma_unmap_single(dev, qat_req->phy_in,
				 sizeof(struct qat_dh_input_params),
				 DMA_TO_DEVICE);
unmap_dst:
	if (qat_req->dst_align)
		dma_free_coherent(dev, ctx->p_size, qat_req->dst_align,
				  qat_req->out.dh.r);
	else
		if (!dma_mapping_error(dev, qat_req->out.dh.r))
			dma_unmap_single(dev, qat_req->out.dh.r, ctx->p_size,
					 DMA_FROM_DEVICE);
unmap_src:
	if (req->src) {
		if (qat_req->src_align)
			dma_free_coherent(dev, ctx->p_size, qat_req->src_align,
					  qat_req->in.dh.in.b);
		else
			if (!dma_mapping_error(dev, qat_req->in.dh.in.b))
				dma_unmap_single(dev, qat_req->in.dh.in.b,
						 ctx->p_size,
						 DMA_TO_DEVICE);
	}
	return ret;
}

static int qat_dh_check_params_length(unsigned int p_len)
{
	switch (p_len) {
	case 1536:
	case 2048:
	case 3072:
	case 4096:
		return 0;
	}
	return -EINVAL;
}

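/*
 * Copy the DH domain parameters into DMA-coherent buffers. A generator
 * equal to 2 is not copied; it selects the g=2 firmware functions via
 * ctx->g2 instead.
 */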
static int qat_dh_set_params(struct qat_dh_ctx *ctx, struct dh *params)
{
	struct qat_crypto_instance *inst = ctx->inst;
	struct device *dev = &GET_DEV(inst->accel_dev);

	if (qat_dh_check_params_length(params->p_size << 3))
		return -EINVAL;

	ctx->p_size = params->p_size;
	ctx->p = dma_alloc_coherent(dev, ctx->p_size, &ctx->dma_p, GFP_KERNEL);
	if (!ctx->p)
		return -ENOMEM;
	memcpy(ctx->p, params->p, ctx->p_size);

	/* If g equals 2 don't copy it */
	if (params->g_size == 1 && *(char *)params->g == 0x02) {
		ctx->g2 = true;
		return 0;
	}

	ctx->g = dma_alloc_coherent(dev, ctx->p_size, &ctx->dma_g, GFP_KERNEL);
	if (!ctx->g)
		return -ENOMEM;
	memcpy(ctx->g + (ctx->p_size - params->g_size), params->g,
	       params->g_size);

	return 0;
}

static void qat_dh_clear_ctx(struct device *dev, struct qat_dh_ctx *ctx)
{
	if (ctx->g) {
		dma_free_coherent(dev, ctx->p_size, ctx->g, ctx->dma_g);
		ctx->g = NULL;
	}
	if (ctx->xa) {
		dma_free_coherent(dev, ctx->p_size, ctx->xa, ctx->dma_xa);
		ctx->xa = NULL;
	}
	if (ctx->p) {
		dma_free_coherent(dev, ctx->p_size, ctx->p, ctx->dma_p);
		ctx->p = NULL;
	}
	ctx->p_size = 0;
	ctx->g2 = false;
}

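/*
 * Decode the DH key blob, install the new parameters and copy the
 * private key (right-aligned to p_size) into a coherent buffer. Any
 * previously installed secret is released first.
 */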
static int qat_dh_set_secret(struct crypto_kpp *tfm, const void *buf,
			     unsigned int len)
{
	struct qat_dh_ctx *ctx = kpp_tfm_ctx(tfm);
	struct device *dev = &GET_DEV(ctx->inst->accel_dev);
	struct dh params;
	int ret;

	if (crypto_dh_decode_key(buf, len, &params) < 0)
		return -EINVAL;

	/* Free old secret if any */
	qat_dh_clear_ctx(dev, ctx);

	ret = qat_dh_set_params(ctx, &params);
	if (ret < 0)
		goto err_clear_ctx;

	ctx->xa = dma_alloc_coherent(dev, ctx->p_size, &ctx->dma_xa,
				     GFP_KERNEL);
	if (!ctx->xa) {
		ret = -ENOMEM;
		goto err_clear_ctx;
	}
	memcpy(ctx->xa + (ctx->p_size - params.key_size), params.key,
	       params.key_size);

	return 0;

err_clear_ctx:
	qat_dh_clear_ctx(dev, ctx);
	return ret;
}

static unsigned int qat_dh_max_size(struct crypto_kpp *tfm)
{
	struct qat_dh_ctx *ctx = kpp_tfm_ctx(tfm);

	return ctx->p_size;
}

static int qat_dh_init_tfm(struct crypto_kpp *tfm)
{
	struct qat_dh_ctx *ctx = kpp_tfm_ctx(tfm);
	struct qat_crypto_instance *inst =
			qat_crypto_get_instance_node(get_current_node());

	if (!inst)
		return -EINVAL;

	ctx->p_size = 0;
	ctx->g2 = false;
	ctx->inst = inst;
	return 0;
}

static void qat_dh_exit_tfm(struct crypto_kpp *tfm)
{
	struct qat_dh_ctx *ctx = kpp_tfm_ctx(tfm);
	struct device *dev = &GET_DEV(ctx->inst->accel_dev);

	qat_dh_clear_ctx(dev, ctx);
	qat_crypto_put_instance(ctx->inst);
}

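/*
 * Completion callback for an RSA request: release the source and
 * destination buffers mapped in qat_rsa_enc()/qat_rsa_dec(), copy the
 * result out of the bounce buffer if one was used, and complete the
 * akcipher request with the firmware status.
 */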
static void qat_rsa_cb(struct icp_qat_fw_pke_resp *resp)
{
	struct qat_asym_request *req = (void *)(__force long)resp->opaque;
	struct akcipher_request *areq = req->areq.rsa;
	struct device *dev = &GET_DEV(req->ctx.rsa->inst->accel_dev);
	int err = ICP_QAT_FW_PKE_RESP_PKE_STAT_GET(
				resp->pke_resp_hdr.comn_resp_flags);

	err = (err == ICP_QAT_FW_COMN_STATUS_FLAG_OK) ? 0 : -EINVAL;

	if (req->src_align)
		dma_free_coherent(dev, req->ctx.rsa->key_sz, req->src_align,
				  req->in.rsa.enc.m);
	else
		dma_unmap_single(dev, req->in.rsa.enc.m, req->ctx.rsa->key_sz,
				 DMA_TO_DEVICE);

	areq->dst_len = req->ctx.rsa->key_sz;
	if (req->dst_align) {
		scatterwalk_map_and_copy(req->dst_align, areq->dst, 0,
					 areq->dst_len, 1);

		dma_free_coherent(dev, req->ctx.rsa->key_sz, req->dst_align,
				  req->out.rsa.enc.c);
	} else {
		dma_unmap_single(dev, req->out.rsa.enc.c, req->ctx.rsa->key_sz,
				 DMA_FROM_DEVICE);
	}

	dma_unmap_single(dev, req->phy_in, sizeof(struct qat_rsa_input_params),
			 DMA_TO_DEVICE);
	dma_unmap_single(dev, req->phy_out,
			 sizeof(struct qat_rsa_output_params),
			 DMA_TO_DEVICE);

	akcipher_request_complete(areq, err);
}

void qat_alg_asym_callback(void *_resp)
{
	struct icp_qat_fw_pke_resp *resp = _resp;
	struct qat_asym_request *areq = (void *)(__force long)resp->opaque;

	areq->cb(resp);
}

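/*
 * Firmware PKE function IDs for RSA, selected by modulus size: EP for
 * public-key operations, DP1 for private-key operations with (n, d),
 * and DP2 for private-key operations in CRT form.
 */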
#define PKE_RSA_EP_512 0x1c161b21
#define PKE_RSA_EP_1024 0x35111bf7
#define PKE_RSA_EP_1536 0x4d111cdc
#define PKE_RSA_EP_2048 0x6e111dba
#define PKE_RSA_EP_3072 0x7d111ea3
#define PKE_RSA_EP_4096 0xa5101f7e

static unsigned long qat_rsa_enc_fn_id(unsigned int len)
{
	unsigned int bitslen = len << 3;

	switch (bitslen) {
	case 512:
		return PKE_RSA_EP_512;
	case 1024:
		return PKE_RSA_EP_1024;
	case 1536:
		return PKE_RSA_EP_1536;
	case 2048:
		return PKE_RSA_EP_2048;
	case 3072:
		return PKE_RSA_EP_3072;
	case 4096:
		return PKE_RSA_EP_4096;
	default:
		return 0;
	};
}

#define PKE_RSA_DP1_512 0x1c161b3c
#define PKE_RSA_DP1_1024 0x35111c12
#define PKE_RSA_DP1_1536 0x4d111cf7
#define PKE_RSA_DP1_2048 0x6e111dda
#define PKE_RSA_DP1_3072 0x7d111ebe
#define PKE_RSA_DP1_4096 0xa5101f98

static unsigned long qat_rsa_dec_fn_id(unsigned int len)
{
	unsigned int bitslen = len << 3;

	switch (bitslen) {
	case 512:
		return PKE_RSA_DP1_512;
	case 1024:
		return PKE_RSA_DP1_1024;
	case 1536:
		return PKE_RSA_DP1_1536;
	case 2048:
		return PKE_RSA_DP1_2048;
	case 3072:
		return PKE_RSA_DP1_3072;
	case 4096:
		return PKE_RSA_DP1_4096;
	default:
		return 0;
	};
}

#define PKE_RSA_DP2_512 0x1c131b57
#define PKE_RSA_DP2_1024 0x26131c2d
#define PKE_RSA_DP2_1536 0x45111d12
#define PKE_RSA_DP2_2048 0x59121dfa
#define PKE_RSA_DP2_3072 0x81121ed9
#define PKE_RSA_DP2_4096 0xb1111fb2

static unsigned long qat_rsa_dec_fn_id_crt(unsigned int len)
{
	unsigned int bitslen = len << 3;

	switch (bitslen) {
	case 512:
		return PKE_RSA_DP2_512;
	case 1024:
		return PKE_RSA_DP2_1024;
	case 1536:
		return PKE_RSA_DP2_1536;
	case 2048:
		return PKE_RSA_DP2_2048;
	case 3072:
		return PKE_RSA_DP2_3072;
	case 4096:
		return PKE_RSA_DP2_4096;
	default:
		return 0;
	};
}

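/*
 * RSA public-key operation (m^e mod n). Source and destination must be
 * contiguous buffers of exactly key_sz bytes, so shorter or scattered
 * buffers are copied through coherent bounce buffers before the request
 * is posted to the PKE ring.
 */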
static int qat_rsa_enc(struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct qat_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct qat_crypto_instance *inst = ctx->inst;
	struct device *dev = &GET_DEV(inst->accel_dev);
	struct qat_asym_request *qat_req =
			PTR_ALIGN(akcipher_request_ctx(req), 64);
	struct icp_qat_fw_pke_request *msg = &qat_req->req;
	int ret, ctr = 0;

	if (unlikely(!ctx->n || !ctx->e))
		return -EINVAL;

	if (req->dst_len < ctx->key_sz) {
		req->dst_len = ctx->key_sz;
		return -EOVERFLOW;
	}
	memset(msg, '\0', sizeof(*msg));
	ICP_QAT_FW_PKE_HDR_VALID_FLAG_SET(msg->pke_hdr,
					  ICP_QAT_FW_COMN_REQ_FLAG_SET);
	msg->pke_hdr.cd_pars.func_id = qat_rsa_enc_fn_id(ctx->key_sz);
	if (unlikely(!msg->pke_hdr.cd_pars.func_id))
		return -EINVAL;

	qat_req->cb = qat_rsa_cb;
	qat_req->ctx.rsa = ctx;
	qat_req->areq.rsa = req;
	msg->pke_hdr.service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_PKE;
	msg->pke_hdr.comn_req_flags =
		ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_PTR_TYPE_FLAT,
					    QAT_COMN_CD_FLD_TYPE_64BIT_ADR);

	qat_req->in.rsa.enc.e = ctx->dma_e;
	qat_req->in.rsa.enc.n = ctx->dma_n;
	ret = -ENOMEM;

	/*
	 * src can be of any size in valid range, but HW expects it to be the
	 * same as modulo n so in case it is different we need to allocate a
	 * new buf and copy src data.
	 * In other case we just need to map the user provided buffer.
	 * Also need to make sure that it is in contiguous buffer.
	 */
	if (sg_is_last(req->src) && req->src_len == ctx->key_sz) {
		qat_req->src_align = NULL;
		qat_req->in.rsa.enc.m = dma_map_single(dev, sg_virt(req->src),
						   req->src_len, DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(dev, qat_req->in.rsa.enc.m)))
			return ret;

	} else {
		int shift = ctx->key_sz - req->src_len;

		qat_req->src_align = dma_alloc_coherent(dev, ctx->key_sz,
							&qat_req->in.rsa.enc.m,
							GFP_KERNEL);
		if (unlikely(!qat_req->src_align))
			return ret;

		scatterwalk_map_and_copy(qat_req->src_align + shift, req->src,
					 0, req->src_len, 0);
	}
	if (sg_is_last(req->dst) && req->dst_len == ctx->key_sz) {
		qat_req->dst_align = NULL;
		qat_req->out.rsa.enc.c = dma_map_single(dev, sg_virt(req->dst),
							req->dst_len,
							DMA_FROM_DEVICE);

		if (unlikely(dma_mapping_error(dev, qat_req->out.rsa.enc.c)))
			goto unmap_src;

	} else {
		qat_req->dst_align = dma_alloc_coherent(dev, ctx->key_sz,
							&qat_req->out.rsa.enc.c,
							GFP_KERNEL);
		if (unlikely(!qat_req->dst_align))
			goto unmap_src;

	}
	qat_req->in.rsa.in_tab[3] = 0;
	qat_req->out.rsa.out_tab[1] = 0;
	qat_req->phy_in = dma_map_single(dev, &qat_req->in.rsa.enc.m,
					 sizeof(struct qat_rsa_input_params),
					 DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev, qat_req->phy_in)))
		goto unmap_dst;

	qat_req->phy_out = dma_map_single(dev, &qat_req->out.rsa.enc.c,
					  sizeof(struct qat_rsa_output_params),
					  DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev, qat_req->phy_out)))
		goto unmap_in_params;

	msg->pke_mid.src_data_addr = qat_req->phy_in;
	msg->pke_mid.dest_data_addr = qat_req->phy_out;
	msg->pke_mid.opaque = (u64)(__force long)qat_req;
	msg->input_param_count = 3;
	msg->output_param_count = 1;
	do {
		ret = adf_send_message(ctx->inst->pke_tx, (u32 *)msg);
	} while (ret == -EBUSY && ctr++ < 100);

	if (!ret)
		return -EINPROGRESS;

	if (!dma_mapping_error(dev, qat_req->phy_out))
		dma_unmap_single(dev, qat_req->phy_out,
				 sizeof(struct qat_rsa_output_params),
				 DMA_TO_DEVICE);
unmap_in_params:
	if (!dma_mapping_error(dev, qat_req->phy_in))
		dma_unmap_single(dev, qat_req->phy_in,
				 sizeof(struct qat_rsa_input_params),
				 DMA_TO_DEVICE);
unmap_dst:
	if (qat_req->dst_align)
		dma_free_coherent(dev, ctx->key_sz, qat_req->dst_align,
				  qat_req->out.rsa.enc.c);
	else
		if (!dma_mapping_error(dev, qat_req->out.rsa.enc.c))
			dma_unmap_single(dev, qat_req->out.rsa.enc.c,
					 ctx->key_sz, DMA_FROM_DEVICE);
unmap_src:
	if (qat_req->src_align)
		dma_free_coherent(dev, ctx->key_sz, qat_req->src_align,
				  qat_req->in.rsa.enc.m);
	else
		if (!dma_mapping_error(dev, qat_req->in.rsa.enc.m))
			dma_unmap_single(dev, qat_req->in.rsa.enc.m,
					 ctx->key_sz, DMA_TO_DEVICE);
	return ret;
}

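/*
 * RSA private-key operation. Uses the CRT form (p, q, dp, dq, qinv)
 * when the full private key was provided, otherwise falls back to the
 * plain (n, d) form.
 */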
static int qat_rsa_dec(struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct qat_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct qat_crypto_instance *inst = ctx->inst;
	struct device *dev = &GET_DEV(inst->accel_dev);
	struct qat_asym_request *qat_req =
			PTR_ALIGN(akcipher_request_ctx(req), 64);
	struct icp_qat_fw_pke_request *msg = &qat_req->req;
	int ret, ctr = 0;

	if (unlikely(!ctx->n || !ctx->d))
		return -EINVAL;

	if (req->dst_len < ctx->key_sz) {
		req->dst_len = ctx->key_sz;
		return -EOVERFLOW;
	}
	memset(msg, '\0', sizeof(*msg));
	ICP_QAT_FW_PKE_HDR_VALID_FLAG_SET(msg->pke_hdr,
					  ICP_QAT_FW_COMN_REQ_FLAG_SET);
	msg->pke_hdr.cd_pars.func_id = ctx->crt_mode ?
		qat_rsa_dec_fn_id_crt(ctx->key_sz) :
		qat_rsa_dec_fn_id(ctx->key_sz);
	if (unlikely(!msg->pke_hdr.cd_pars.func_id))
		return -EINVAL;

	qat_req->cb = qat_rsa_cb;
	qat_req->ctx.rsa = ctx;
	qat_req->areq.rsa = req;
	msg->pke_hdr.service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_PKE;
	msg->pke_hdr.comn_req_flags =
		ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_PTR_TYPE_FLAT,
					    QAT_COMN_CD_FLD_TYPE_64BIT_ADR);

	if (ctx->crt_mode) {
		qat_req->in.rsa.dec_crt.p = ctx->dma_p;
		qat_req->in.rsa.dec_crt.q = ctx->dma_q;
		qat_req->in.rsa.dec_crt.dp = ctx->dma_dp;
		qat_req->in.rsa.dec_crt.dq = ctx->dma_dq;
		qat_req->in.rsa.dec_crt.qinv = ctx->dma_qinv;
	} else {
		qat_req->in.rsa.dec.d = ctx->dma_d;
		qat_req->in.rsa.dec.n = ctx->dma_n;
	}
	ret = -ENOMEM;

	/*
	 * src can be of any size in valid range, but HW expects it to be the
	 * same as modulo n so in case it is different we need to allocate a
	 * new buf and copy src data.
	 * In other case we just need to map the user provided buffer.
	 * Also need to make sure that it is in contiguous buffer.
	 */
	if (sg_is_last(req->src) && req->src_len == ctx->key_sz) {
		qat_req->src_align = NULL;
		qat_req->in.rsa.dec.c = dma_map_single(dev, sg_virt(req->src),
						   req->src_len, DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(dev, qat_req->in.rsa.dec.c)))
			return ret;

	} else {
		int shift = ctx->key_sz - req->src_len;

		qat_req->src_align = dma_alloc_coherent(dev, ctx->key_sz,
							&qat_req->in.rsa.dec.c,
							GFP_KERNEL);
		if (unlikely(!qat_req->src_align))
			return ret;

		scatterwalk_map_and_copy(qat_req->src_align + shift, req->src,
					 0, req->src_len, 0);
	}
	if (sg_is_last(req->dst) && req->dst_len == ctx->key_sz) {
		qat_req->dst_align = NULL;
		qat_req->out.rsa.dec.m = dma_map_single(dev, sg_virt(req->dst),
						    req->dst_len,
						    DMA_FROM_DEVICE);

		if (unlikely(dma_mapping_error(dev, qat_req->out.rsa.dec.m)))
			goto unmap_src;

	} else {
		qat_req->dst_align = dma_alloc_coherent(dev, ctx->key_sz,
							&qat_req->out.rsa.dec.m,
							GFP_KERNEL);
		if (unlikely(!qat_req->dst_align))
			goto unmap_src;

	}

	if (ctx->crt_mode)
		qat_req->in.rsa.in_tab[6] = 0;
	else
		qat_req->in.rsa.in_tab[3] = 0;
	qat_req->out.rsa.out_tab[1] = 0;
	qat_req->phy_in = dma_map_single(dev, &qat_req->in.rsa.dec.c,
					 sizeof(struct qat_rsa_input_params),
					 DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev, qat_req->phy_in)))
		goto unmap_dst;

	qat_req->phy_out = dma_map_single(dev, &qat_req->out.rsa.dec.m,
					  sizeof(struct qat_rsa_output_params),
					  DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev, qat_req->phy_out)))
		goto unmap_in_params;

	msg->pke_mid.src_data_addr = qat_req->phy_in;
	msg->pke_mid.dest_data_addr = qat_req->phy_out;
	msg->pke_mid.opaque = (u64)(__force long)qat_req;
	if (ctx->crt_mode)
		msg->input_param_count = 6;
	else
		msg->input_param_count = 3;

	msg->output_param_count = 1;
	do {
		ret = adf_send_message(ctx->inst->pke_tx, (u32 *)msg);
	} while (ret == -EBUSY && ctr++ < 100);

	if (!ret)
		return -EINPROGRESS;

	if (!dma_mapping_error(dev, qat_req->phy_out))
		dma_unmap_single(dev, qat_req->phy_out,
				 sizeof(struct qat_rsa_output_params),
				 DMA_TO_DEVICE);
unmap_in_params:
	if (!dma_mapping_error(dev, qat_req->phy_in))
		dma_unmap_single(dev, qat_req->phy_in,
				 sizeof(struct qat_rsa_input_params),
				 DMA_TO_DEVICE);
unmap_dst:
	if (qat_req->dst_align)
		dma_free_coherent(dev, ctx->key_sz, qat_req->dst_align,
				  qat_req->out.rsa.dec.m);
	else
		if (!dma_mapping_error(dev, qat_req->out.rsa.dec.m))
			dma_unmap_single(dev, qat_req->out.rsa.dec.m,
					 ctx->key_sz, DMA_FROM_DEVICE);
unmap_src:
	if (qat_req->src_align)
		dma_free_coherent(dev, ctx->key_sz, qat_req->src_align,
				  qat_req->in.rsa.dec.c);
	else
		if (!dma_mapping_error(dev, qat_req->in.rsa.dec.c))
			dma_unmap_single(dev, qat_req->in.rsa.dec.c,
					 ctx->key_sz, DMA_TO_DEVICE);
	return ret;
}

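/*
 * Key-component setters: leading zero bytes are stripped, key_sz is
 * derived from the modulus length, and each component is stored
 * right-aligned in a DMA-coherent buffer so the device always sees
 * fixed-size operands.
 */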
static int qat_rsa_set_n(struct qat_rsa_ctx *ctx, const char *value,
			 size_t vlen)
{
	struct qat_crypto_instance *inst = ctx->inst;
	struct device *dev = &GET_DEV(inst->accel_dev);
	const char *ptr = value;
	int ret;

	while (!*ptr && vlen) {
		ptr++;
		vlen--;
	}

	ctx->key_sz = vlen;
	ret = -EINVAL;
	/* invalid key size provided */
	if (!qat_rsa_enc_fn_id(ctx->key_sz))
		goto err;

	ret = -ENOMEM;
	ctx->n = dma_alloc_coherent(dev, ctx->key_sz, &ctx->dma_n, GFP_KERNEL);
	if (!ctx->n)
		goto err;

	memcpy(ctx->n, ptr, ctx->key_sz);
	return 0;
err:
	ctx->key_sz = 0;
	ctx->n = NULL;
	return ret;
}

static int qat_rsa_set_e(struct qat_rsa_ctx *ctx, const char *value,
			 size_t vlen)
{
	struct qat_crypto_instance *inst = ctx->inst;
	struct device *dev = &GET_DEV(inst->accel_dev);
	const char *ptr = value;

	while (!*ptr && vlen) {
		ptr++;
		vlen--;
	}

	if (!ctx->key_sz || !vlen || vlen > ctx->key_sz) {
		ctx->e = NULL;
		return -EINVAL;
	}

	ctx->e = dma_alloc_coherent(dev, ctx->key_sz, &ctx->dma_e, GFP_KERNEL);
	if (!ctx->e)
		return -ENOMEM;

	memcpy(ctx->e + (ctx->key_sz - vlen), ptr, vlen);
	return 0;
}

static int qat_rsa_set_d(struct qat_rsa_ctx *ctx, const char *value,
			 size_t vlen)
{
	struct qat_crypto_instance *inst = ctx->inst;
	struct device *dev = &GET_DEV(inst->accel_dev);
	const char *ptr = value;
	int ret;

	while (!*ptr && vlen) {
		ptr++;
		vlen--;
	}

	ret = -EINVAL;
	if (!ctx->key_sz || !vlen || vlen > ctx->key_sz)
		goto err;

	ret = -ENOMEM;
	ctx->d = dma_alloc_coherent(dev, ctx->key_sz, &ctx->dma_d, GFP_KERNEL);
	if (!ctx->d)
		goto err;

	memcpy(ctx->d + (ctx->key_sz - vlen), ptr, vlen);
	return 0;
err:
	ctx->d = NULL;
	return ret;
}

static void qat_rsa_drop_leading_zeros(const char **ptr, unsigned int *len)
{
	while (!**ptr && *len) {
		(*ptr)++;
		(*len)--;
	}
}

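/*
 * Install the CRT components (p, q, dp, dq, qinv), each right-aligned
 * in a half-key-size coherent buffer. This is best effort: if any
 * component is missing or an allocation fails, everything allocated so
 * far is zeroed and freed and the context falls back to non-CRT mode.
 */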
static void qat_rsa_setkey_crt(struct qat_rsa_ctx *ctx, struct rsa_key *rsa_key)
{
	struct qat_crypto_instance *inst = ctx->inst;
	struct device *dev = &GET_DEV(inst->accel_dev);
	const char *ptr;
	unsigned int len;
	unsigned int half_key_sz = ctx->key_sz / 2;

	/* p */
	ptr = rsa_key->p;
	len = rsa_key->p_sz;
	qat_rsa_drop_leading_zeros(&ptr, &len);
	if (!len)
		goto err;
	ctx->p = dma_alloc_coherent(dev, half_key_sz, &ctx->dma_p, GFP_KERNEL);
	if (!ctx->p)
		goto err;
	memcpy(ctx->p + (half_key_sz - len), ptr, len);

	/* q */
	ptr = rsa_key->q;
	len = rsa_key->q_sz;
	qat_rsa_drop_leading_zeros(&ptr, &len);
	if (!len)
		goto free_p;
	ctx->q = dma_alloc_coherent(dev, half_key_sz, &ctx->dma_q, GFP_KERNEL);
	if (!ctx->q)
		goto free_p;
	memcpy(ctx->q + (half_key_sz - len), ptr, len);

	/* dp */
	ptr = rsa_key->dp;
	len = rsa_key->dp_sz;
	qat_rsa_drop_leading_zeros(&ptr, &len);
	if (!len)
		goto free_q;
	ctx->dp = dma_alloc_coherent(dev, half_key_sz, &ctx->dma_dp,
				     GFP_KERNEL);
	if (!ctx->dp)
		goto free_q;
	memcpy(ctx->dp + (half_key_sz - len), ptr, len);

	/* dq */
	ptr = rsa_key->dq;
	len = rsa_key->dq_sz;
	qat_rsa_drop_leading_zeros(&ptr, &len);
	if (!len)
		goto free_dp;
	ctx->dq = dma_alloc_coherent(dev, half_key_sz, &ctx->dma_dq,
				     GFP_KERNEL);
	if (!ctx->dq)
		goto free_dp;
	memcpy(ctx->dq + (half_key_sz - len), ptr, len);

	/* qinv */
	ptr = rsa_key->qinv;
	len = rsa_key->qinv_sz;
	qat_rsa_drop_leading_zeros(&ptr, &len);
	if (!len)
		goto free_dq;
	ctx->qinv = dma_alloc_coherent(dev, half_key_sz, &ctx->dma_qinv,
				       GFP_KERNEL);
	if (!ctx->qinv)
		goto free_dq;
	memcpy(ctx->qinv + (half_key_sz - len), ptr, len);

	ctx->crt_mode = true;
	return;

free_dq:
	memset(ctx->dq, '\0', half_key_sz);
	dma_free_coherent(dev, half_key_sz, ctx->dq, ctx->dma_dq);
	ctx->dq = NULL;
free_dp:
	memset(ctx->dp, '\0', half_key_sz);
	dma_free_coherent(dev, half_key_sz, ctx->dp, ctx->dma_dp);
	ctx->dp = NULL;
free_q:
	memset(ctx->q, '\0', half_key_sz);
	dma_free_coherent(dev, half_key_sz, ctx->q, ctx->dma_q);
	ctx->q = NULL;
free_p:
	memset(ctx->p, '\0', half_key_sz);
	dma_free_coherent(dev, half_key_sz, ctx->p, ctx->dma_p);
	ctx->p = NULL;
err:
	ctx->crt_mode = false;
}

static void qat_rsa_clear_ctx(struct device *dev, struct qat_rsa_ctx *ctx)
{
	unsigned int half_key_sz = ctx->key_sz / 2;

	/* Free the old key if any */
	if (ctx->n)
		dma_free_coherent(dev, ctx->key_sz, ctx->n, ctx->dma_n);
	if (ctx->e)
		dma_free_coherent(dev, ctx->key_sz, ctx->e, ctx->dma_e);
	if (ctx->d) {
		memset(ctx->d, '\0', ctx->key_sz);
		dma_free_coherent(dev, ctx->key_sz, ctx->d, ctx->dma_d);
	}
	if (ctx->p) {
		memset(ctx->p, '\0', half_key_sz);
		dma_free_coherent(dev, half_key_sz, ctx->p, ctx->dma_p);
	}
	if (ctx->q) {
		memset(ctx->q, '\0', half_key_sz);
		dma_free_coherent(dev, half_key_sz, ctx->q, ctx->dma_q);
	}
	if (ctx->dp) {
		memset(ctx->dp, '\0', half_key_sz);
		dma_free_coherent(dev, half_key_sz, ctx->dp, ctx->dma_dp);
	}
	if (ctx->dq) {
		memset(ctx->dq, '\0', half_key_sz);
		dma_free_coherent(dev, half_key_sz, ctx->dq, ctx->dma_dq);
	}
	if (ctx->qinv) {
		memset(ctx->qinv, '\0', half_key_sz);
		dma_free_coherent(dev, half_key_sz, ctx->qinv, ctx->dma_qinv);
	}

	ctx->n = NULL;
	ctx->e = NULL;
	ctx->d = NULL;
	ctx->p = NULL;
	ctx->q = NULL;
	ctx->dp = NULL;
	ctx->dq = NULL;
	ctx->qinv = NULL;
	ctx->crt_mode = false;
	ctx->key_sz = 0;
}

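/*
 * Parse the RSA key blob, replacing any previously installed key. For
 * private keys the CRT components are installed opportunistically; on
 * any failure the context is cleared again.
 */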
static int qat_rsa_setkey(struct crypto_akcipher *tfm, const void *key,
			  unsigned int keylen, bool private)
{
	struct qat_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct device *dev = &GET_DEV(ctx->inst->accel_dev);
	struct rsa_key rsa_key;
	int ret;

	qat_rsa_clear_ctx(dev, ctx);

	if (private)
		ret = rsa_parse_priv_key(&rsa_key, key, keylen);
	else
		ret = rsa_parse_pub_key(&rsa_key, key, keylen);
	if (ret < 0)
		goto free;

	ret = qat_rsa_set_n(ctx, rsa_key.n, rsa_key.n_sz);
	if (ret < 0)
		goto free;
	ret = qat_rsa_set_e(ctx, rsa_key.e, rsa_key.e_sz);
	if (ret < 0)
		goto free;
	if (private) {
		ret = qat_rsa_set_d(ctx, rsa_key.d, rsa_key.d_sz);
		if (ret < 0)
			goto free;
		qat_rsa_setkey_crt(ctx, &rsa_key);
	}

	if (!ctx->n || !ctx->e) {
		/* invalid key provided */
		ret = -EINVAL;
		goto free;
	}
	if (private && !ctx->d) {
		/* invalid private key provided */
		ret = -EINVAL;
		goto free;
	}

	return 0;
free:
	qat_rsa_clear_ctx(dev, ctx);
	return ret;
}

static int qat_rsa_setpubkey(struct crypto_akcipher *tfm, const void *key,
			     unsigned int keylen)
{
	return qat_rsa_setkey(tfm, key, keylen, false);
}

static int qat_rsa_setprivkey(struct crypto_akcipher *tfm, const void *key,
			      unsigned int keylen)
{
	return qat_rsa_setkey(tfm, key, keylen, true);
}

static unsigned int qat_rsa_max_size(struct crypto_akcipher *tfm)
{
	struct qat_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);

	return ctx->key_sz;
}

static int qat_rsa_init_tfm(struct crypto_akcipher *tfm)
{
	struct qat_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct qat_crypto_instance *inst =
			qat_crypto_get_instance_node(get_current_node());

	if (!inst)
		return -EINVAL;

	ctx->key_sz = 0;
	ctx->inst = inst;
	return 0;
}

static void qat_rsa_exit_tfm(struct crypto_akcipher *tfm)
{
	struct qat_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct device *dev = &GET_DEV(ctx->inst->accel_dev);

	if (ctx->n)
		dma_free_coherent(dev, ctx->key_sz, ctx->n, ctx->dma_n);
	if (ctx->e)
		dma_free_coherent(dev, ctx->key_sz, ctx->e, ctx->dma_e);
	if (ctx->d) {
		memset(ctx->d, '\0', ctx->key_sz);
		dma_free_coherent(dev, ctx->key_sz, ctx->d, ctx->dma_d);
	}
	qat_crypto_put_instance(ctx->inst);
	ctx->n = NULL;
	ctx->e = NULL;
	ctx->d = NULL;
}

static struct akcipher_alg rsa = {
	.encrypt = qat_rsa_enc,
	.decrypt = qat_rsa_dec,
	.set_pub_key = qat_rsa_setpubkey,
	.set_priv_key = qat_rsa_setprivkey,
	.max_size = qat_rsa_max_size,
	.init = qat_rsa_init_tfm,
	.exit = qat_rsa_exit_tfm,
	.reqsize = sizeof(struct qat_asym_request) + 64,
	.base = {
		.cra_name = "rsa",
		.cra_driver_name = "qat-rsa",
		.cra_priority = 1000,
		.cra_module = THIS_MODULE,
		.cra_ctxsize = sizeof(struct qat_rsa_ctx),
	},
};

static struct kpp_alg dh = {
	.set_secret = qat_dh_set_secret,
	.generate_public_key = qat_dh_compute_value,
	.compute_shared_secret = qat_dh_compute_value,
	.max_size = qat_dh_max_size,
	.init = qat_dh_init_tfm,
	.exit = qat_dh_exit_tfm,
	.reqsize = sizeof(struct qat_asym_request) + 64,
	.base = {
		.cra_name = "dh",
		.cra_driver_name = "qat-dh",
		.cra_priority = 1000,
		.cra_module = THIS_MODULE,
		.cra_ctxsize = sizeof(struct qat_dh_ctx),
	},
};

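/*
 * Register the "qat-rsa" and "qat-dh" algorithms with the crypto API
 * when the first accelerator comes up and unregister them when the last
 * one goes away, using active_devs as a reference count under algs_lock.
 */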
int qat_asym_algs_register(void)
{
	int ret = 0;

	mutex_lock(&algs_lock);
	if (++active_devs == 1) {
		rsa.base.cra_flags = 0;
		ret = crypto_register_akcipher(&rsa);
		if (ret)
			goto unlock;
		ret = crypto_register_kpp(&dh);
	}
unlock:
	mutex_unlock(&algs_lock);
	return ret;
}

void qat_asym_algs_unregister(void)
{
	mutex_lock(&algs_lock);
	if (--active_devs == 0) {
		crypto_unregister_akcipher(&rsa);
		crypto_unregister_kpp(&dh);
	}
	mutex_unlock(&algs_lock);
}