// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019 HiSilicon Limited. */

#include <crypto/aes.h>
#include <crypto/aead.h>
#include <crypto/algapi.h>
#include <crypto/authenc.h>
#include <crypto/des.h>
#include <crypto/hash.h>
#include <crypto/internal/aead.h>
#include <crypto/internal/des.h>
#include <crypto/sha1.h>
#include <crypto/sha2.h>
#include <crypto/skcipher.h>
#include <crypto/xts.h>
#include <linux/crypto.h>
#include <linux/dma-mapping.h>
#include <linux/idr.h>

#include "sec.h"
#include "sec_crypto.h"

#define SEC_PRIORITY		4001
#define SEC_XTS_MIN_KEY_SIZE	(2 * AES_MIN_KEY_SIZE)
#define SEC_XTS_MID_KEY_SIZE	(3 * AES_MIN_KEY_SIZE)
#define SEC_XTS_MAX_KEY_SIZE	(2 * AES_MAX_KEY_SIZE)
#define SEC_DES3_2KEY_SIZE	(2 * DES_KEY_SIZE)
#define SEC_DES3_3KEY_SIZE	(3 * DES_KEY_SIZE)

/* SEC sqe(bd) bit field offsets and masks */
#define SEC_DE_OFFSET		1
#define SEC_CIPHER_OFFSET	4
#define SEC_SCENE_OFFSET	3
#define SEC_DST_SGL_OFFSET	2
#define SEC_SRC_SGL_OFFSET	7
#define SEC_CKEY_OFFSET		9
#define SEC_CMODE_OFFSET	12
#define SEC_AKEY_OFFSET		5
#define SEC_AEAD_ALG_OFFSET	11
#define SEC_AUTH_OFFSET		6

#define SEC_DE_OFFSET_V3	9
#define SEC_SCENE_OFFSET_V3	5
#define SEC_CKEY_OFFSET_V3	13
#define SEC_SRC_SGL_OFFSET_V3	11
#define SEC_DST_SGL_OFFSET_V3	14
#define SEC_CALG_OFFSET_V3	4
#define SEC_AKEY_OFFSET_V3	9
#define SEC_MAC_OFFSET_V3	4
#define SEC_AUTH_ALG_OFFSET_V3	15
#define SEC_CIPHER_AUTH_V3	0xbf
#define SEC_AUTH_CIPHER_V3	0x40
#define SEC_FLAG_OFFSET		7
#define SEC_FLAG_MASK		0x0780
#define SEC_TYPE_MASK		0x0F
#define SEC_DONE_MASK		0x0001
#define SEC_ICV_MASK		0x000E
#define SEC_SQE_LEN_RATE_MASK	0x3

#define SEC_TOTAL_IV_SZ		(SEC_IV_SIZE * QM_Q_DEPTH)
#define SEC_SGL_SGE_NR		128
#define SEC_CIPHER_AUTH		0xfe
#define SEC_AUTH_CIPHER		0x1
#define SEC_MAX_MAC_LEN		64
#define SEC_MAX_AAD_LEN		65535
#define SEC_TOTAL_MAC_SZ	(SEC_MAX_MAC_LEN * QM_Q_DEPTH)

#define SEC_PBUF_SZ		512
#define SEC_PBUF_IV_OFFSET	SEC_PBUF_SZ
#define SEC_PBUF_MAC_OFFSET	(SEC_PBUF_SZ + SEC_IV_SIZE)
#define SEC_PBUF_PKG		(SEC_PBUF_SZ + SEC_IV_SIZE +	\
			SEC_MAX_MAC_LEN * 2)
#define SEC_PBUF_NUM		(PAGE_SIZE / SEC_PBUF_PKG)
#define SEC_PBUF_PAGE_NUM	(QM_Q_DEPTH / SEC_PBUF_NUM)
#define SEC_PBUF_LEFT_SZ	(SEC_PBUF_PKG * (QM_Q_DEPTH -	\
			SEC_PBUF_PAGE_NUM * SEC_PBUF_NUM))
#define SEC_TOTAL_PBUF_SZ	(PAGE_SIZE * SEC_PBUF_PAGE_NUM +	\
			SEC_PBUF_LEFT_SZ)
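
/*
 * Worked example of the pbuf sizing above (illustrative only; it assumes
 * 4 KiB pages, SEC_IV_SIZE == 24 and QM_Q_DEPTH == 1024, the values used
 * by the matching sec.h/qm.h headers):
 *
 *	SEC_PBUF_PKG      = 512 + 24 + 64 * 2      = 664 bytes
 *	SEC_PBUF_NUM      = 4096 / 664             = 6 pkgs per page
 *	SEC_PBUF_PAGE_NUM = 1024 / 6               = 170 full pages
 *	SEC_PBUF_LEFT_SZ  = 664 * (1024 - 170 * 6) = 2656 bytes
 *	SEC_TOTAL_PBUF_SZ = 4096 * 170 + 2656      = 698976 bytes
 *
 * i.e. one contiguous DMA allocation holds a <data|IV|MAC> package for
 * every slot in the queue, packed page by page so that no package
 * straddles a page boundary.
 */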

#define SEC_SQE_LEN_RATE	4
#define SEC_SQE_CFLAG		2
#define SEC_SQE_AEAD_FLAG	3
#define SEC_SQE_DONE		0x1
#define SEC_ICV_ERR		0x2
#define MIN_MAC_LEN		4
#define MAC_LEN_MASK		0x1U
#define MAX_INPUT_DATA_LEN	0xFFFE00
#define BITS_MASK		0xFF
#define BYTE_BITS		0x8
#define SEC_XTS_NAME_SZ		0x3
#define IV_CM_CAL_NUM		2
#define IV_CL_MASK		0x7
#define IV_CL_MIN		2
#define IV_CL_MID		4
#define IV_CL_MAX		8
#define IV_FLAGS_OFFSET		0x6
#define IV_CM_OFFSET		0x3
#define IV_LAST_BYTE1		1
#define IV_LAST_BYTE2		2
#define IV_LAST_BYTE_MASK	0xFF
#define IV_CTR_INIT		0x1
#define IV_BYTE_OFFSET		0x8

/* Get an en/de-cipher queue cyclically to balance load over queues of TFM */
static inline int sec_alloc_queue_id(struct sec_ctx *ctx, struct sec_req *req)
{
	if (req->c_req.encrypt)
		return (u32)atomic_inc_return(&ctx->enc_qcyclic) %
				 ctx->hlf_q_num;

	return (u32)atomic_inc_return(&ctx->dec_qcyclic) % ctx->hlf_q_num +
				 ctx->hlf_q_num;
}

static inline void sec_free_queue_id(struct sec_ctx *ctx, struct sec_req *req)
{
	if (req->c_req.encrypt)
		atomic_dec(&ctx->enc_qcyclic);
	else
		atomic_dec(&ctx->dec_qcyclic);
}

static int sec_alloc_req_id(struct sec_req *req, struct sec_qp_ctx *qp_ctx)
{
	int req_id;

	spin_lock_bh(&qp_ctx->req_lock);

	req_id = idr_alloc_cyclic(&qp_ctx->req_idr, NULL,
				  0, QM_Q_DEPTH, GFP_ATOMIC);
	spin_unlock_bh(&qp_ctx->req_lock);
	if (unlikely(req_id < 0)) {
		dev_err(req->ctx->dev, "alloc req id fail!\n");
		return req_id;
	}

	req->qp_ctx = qp_ctx;
	qp_ctx->req_list[req_id] = req;

	return req_id;
}

static void sec_free_req_id(struct sec_req *req)
{
	struct sec_qp_ctx *qp_ctx = req->qp_ctx;
	int req_id = req->req_id;

	if (unlikely(req_id < 0 || req_id >= QM_Q_DEPTH)) {
		dev_err(req->ctx->dev, "free request id invalid!\n");
		return;
	}

	qp_ctx->req_list[req_id] = NULL;
	req->qp_ctx = NULL;

	spin_lock_bh(&qp_ctx->req_lock);
	idr_remove(&qp_ctx->req_idr, req_id);
	spin_unlock_bh(&qp_ctx->req_lock);
}

static u8 pre_parse_finished_bd(struct bd_status *status, void *resp)
{
	struct sec_sqe *bd = resp;

	status->done = le16_to_cpu(bd->type2.done_flag) & SEC_DONE_MASK;
	status->icv = (le16_to_cpu(bd->type2.done_flag) & SEC_ICV_MASK) >> 1;
	status->flag = (le16_to_cpu(bd->type2.done_flag) &
					SEC_FLAG_MASK) >> SEC_FLAG_OFFSET;
	status->tag = le16_to_cpu(bd->type2.tag);
	status->err_type = bd->type2.error_type;

	return bd->type_cipher_auth & SEC_TYPE_MASK;
}

static u8 pre_parse_finished_bd3(struct bd_status *status, void *resp)
{
	struct sec_sqe3 *bd3 = resp;

	status->done = le16_to_cpu(bd3->done_flag) & SEC_DONE_MASK;
	status->icv = (le16_to_cpu(bd3->done_flag) & SEC_ICV_MASK) >> 1;
	status->flag = (le16_to_cpu(bd3->done_flag) &
					SEC_FLAG_MASK) >> SEC_FLAG_OFFSET;
	status->tag = le64_to_cpu(bd3->tag);
	status->err_type = bd3->error_type;

	return le32_to_cpu(bd3->bd_param) & SEC_TYPE_MASK;
}

static int sec_cb_status_check(struct sec_req *req,
			       struct bd_status *status)
{
	struct sec_ctx *ctx = req->ctx;

	if (unlikely(req->err_type || status->done != SEC_SQE_DONE)) {
		dev_err_ratelimited(ctx->dev, "err_type[%d], done[%u]\n",
				    req->err_type, status->done);
		return -EIO;
	}

	if (unlikely(ctx->alg_type == SEC_SKCIPHER)) {
		if (unlikely(status->flag != SEC_SQE_CFLAG)) {
			dev_err_ratelimited(ctx->dev, "flag[%u]\n",
					    status->flag);
			return -EIO;
		}
	} else if (unlikely(ctx->alg_type == SEC_AEAD)) {
		if (unlikely(status->flag != SEC_SQE_AEAD_FLAG ||
			     status->icv == SEC_ICV_ERR)) {
			dev_err_ratelimited(ctx->dev,
					    "flag[%u], icv[%u]\n",
					    status->flag, status->icv);
			return -EBADMSG;
		}
	}

	return 0;
}

static void sec_req_cb(struct hisi_qp *qp, void *resp)
{
	struct sec_qp_ctx *qp_ctx = qp->qp_ctx;
	struct sec_dfx *dfx = &qp_ctx->ctx->sec->debug.dfx;
	u8 type_supported = qp_ctx->ctx->type_supported;
	struct bd_status status;
	struct sec_ctx *ctx;
	struct sec_req *req;
	int err;
	u8 type;

	if (type_supported == SEC_BD_TYPE2) {
		type = pre_parse_finished_bd(&status, resp);
		req = qp_ctx->req_list[status.tag];
	} else {
		type = pre_parse_finished_bd3(&status, resp);
		req = (void *)(uintptr_t)status.tag;
	}

	if (unlikely(type != type_supported)) {
		atomic64_inc(&dfx->err_bd_cnt);
		pr_err("err bd type [%d]\n", type);
		return;
	}

	if (unlikely(!req)) {
		atomic64_inc(&dfx->invalid_req_cnt);
		atomic_inc(&qp->qp_status.used);
		return;
	}

	req->err_type = status.err_type;
	ctx = req->ctx;
	err = sec_cb_status_check(req, &status);
	if (err)
		atomic64_inc(&dfx->done_flag_cnt);

	atomic64_inc(&dfx->recv_cnt);

	ctx->req_op->buf_unmap(ctx, req);

	ctx->req_op->callback(ctx, req, err);
}

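/*
 * Queue one BD on the hardware queue. Return convention, as implemented
 * below: -EINPROGRESS when the BD was accepted, -EBUSY when the queue is
 * soft-full (the request is parked on the software backlog if it set
 * CRYPTO_TFM_REQ_MAY_BACKLOG, otherwise bounced back to the caller), and
 * -ENOBUFS when the hardware queue itself rejected the BD.
 */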
static int sec_bd_send(struct sec_ctx *ctx, struct sec_req *req)
{
	struct sec_qp_ctx *qp_ctx = req->qp_ctx;
	int ret;

	if (ctx->fake_req_limit <=
	    atomic_read(&qp_ctx->qp->qp_status.used) &&
	    !(req->flag & CRYPTO_TFM_REQ_MAY_BACKLOG))
		return -EBUSY;

	spin_lock_bh(&qp_ctx->req_lock);
	ret = hisi_qp_send(qp_ctx->qp, &req->sec_sqe);

	if (ctx->fake_req_limit <=
	    atomic_read(&qp_ctx->qp->qp_status.used) && !ret) {
		list_add_tail(&req->backlog_head, &qp_ctx->backlog);
		atomic64_inc(&ctx->sec->debug.dfx.send_cnt);
		atomic64_inc(&ctx->sec->debug.dfx.send_busy_cnt);
		spin_unlock_bh(&qp_ctx->req_lock);
		return -EBUSY;
	}
	spin_unlock_bh(&qp_ctx->req_lock);

	if (unlikely(ret == -EBUSY))
		return -ENOBUFS;

	if (likely(!ret)) {
		ret = -EINPROGRESS;
		atomic64_inc(&ctx->sec->debug.dfx.send_cnt);
	}

	return ret;
}

/* Get DMA memory resources */
static int sec_alloc_civ_resource(struct device *dev, struct sec_alg_res *res)
{
	int i;

	res->c_ivin = dma_alloc_coherent(dev, SEC_TOTAL_IV_SZ,
					 &res->c_ivin_dma, GFP_KERNEL);
	if (!res->c_ivin)
		return -ENOMEM;

	for (i = 1; i < QM_Q_DEPTH; i++) {
		res[i].c_ivin_dma = res->c_ivin_dma + i * SEC_IV_SIZE;
		res[i].c_ivin = res->c_ivin + i * SEC_IV_SIZE;
	}

	return 0;
}

static void sec_free_civ_resource(struct device *dev, struct sec_alg_res *res)
{
	if (res->c_ivin)
		dma_free_coherent(dev, SEC_TOTAL_IV_SZ,
				  res->c_ivin, res->c_ivin_dma);
}

static int sec_alloc_aiv_resource(struct device *dev, struct sec_alg_res *res)
{
	int i;

	res->a_ivin = dma_alloc_coherent(dev, SEC_TOTAL_IV_SZ,
					 &res->a_ivin_dma, GFP_KERNEL);
	if (!res->a_ivin)
		return -ENOMEM;

	for (i = 1; i < QM_Q_DEPTH; i++) {
		res[i].a_ivin_dma = res->a_ivin_dma + i * SEC_IV_SIZE;
		res[i].a_ivin = res->a_ivin + i * SEC_IV_SIZE;
	}

	return 0;
}

static void sec_free_aiv_resource(struct device *dev, struct sec_alg_res *res)
{
	if (res->a_ivin)
		dma_free_coherent(dev, SEC_TOTAL_IV_SZ,
				  res->a_ivin, res->a_ivin_dma);
}

static int sec_alloc_mac_resource(struct device *dev, struct sec_alg_res *res)
{
	int i;

	res->out_mac = dma_alloc_coherent(dev, SEC_TOTAL_MAC_SZ << 1,
					  &res->out_mac_dma, GFP_KERNEL);
	if (!res->out_mac)
		return -ENOMEM;

	for (i = 1; i < QM_Q_DEPTH; i++) {
		res[i].out_mac_dma = res->out_mac_dma +
				     i * (SEC_MAX_MAC_LEN << 1);
		res[i].out_mac = res->out_mac + i * (SEC_MAX_MAC_LEN << 1);
	}

	return 0;
}

static void sec_free_mac_resource(struct device *dev, struct sec_alg_res *res)
{
	if (res->out_mac)
		dma_free_coherent(dev, SEC_TOTAL_MAC_SZ << 1,
				  res->out_mac, res->out_mac_dma);
}

static void sec_free_pbuf_resource(struct device *dev, struct sec_alg_res *res)
{
	if (res->pbuf)
		dma_free_coherent(dev, SEC_TOTAL_PBUF_SZ,
				  res->pbuf, res->pbuf_dma);
}

/*
 * To improve performance, a pre-allocated pbuffer is used for small
 * packets (< 512 bytes) instead of mapping them through the IOMMU
 * for every request.
 */
static int sec_alloc_pbuf_resource(struct device *dev, struct sec_alg_res *res)
{
	int pbuf_page_offset;
	int i, j, k;

	res->pbuf = dma_alloc_coherent(dev, SEC_TOTAL_PBUF_SZ,
				       &res->pbuf_dma, GFP_KERNEL);
	if (!res->pbuf)
		return -ENOMEM;

	/*
	 * A SEC_PBUF_PKG holds the data pbuf, the IV and the
	 * out_mac: <SEC_PBUF|SEC_IV|SEC_MAC>.
	 * Every page holds SEC_PBUF_NUM packages, and the sec_qp_ctx
	 * needs QM_Q_DEPTH of them in total, so SEC_PBUF_PAGE_NUM full
	 * pages plus a remainder make up SEC_TOTAL_PBUF_SZ.
	 */
	for (i = 0; i <= SEC_PBUF_PAGE_NUM; i++) {
		pbuf_page_offset = PAGE_SIZE * i;
		for (j = 0; j < SEC_PBUF_NUM; j++) {
			k = i * SEC_PBUF_NUM + j;
			if (k == QM_Q_DEPTH)
				break;
			res[k].pbuf = res->pbuf +
				j * SEC_PBUF_PKG + pbuf_page_offset;
			res[k].pbuf_dma = res->pbuf_dma +
				j * SEC_PBUF_PKG + pbuf_page_offset;
		}
	}

	return 0;
}

static int sec_alg_resource_alloc(struct sec_ctx *ctx,
				  struct sec_qp_ctx *qp_ctx)
{
	struct sec_alg_res *res = qp_ctx->res;
	struct device *dev = ctx->dev;
	int ret;

	ret = sec_alloc_civ_resource(dev, res);
	if (ret)
		return ret;

	if (ctx->alg_type == SEC_AEAD) {
		ret = sec_alloc_aiv_resource(dev, res);
		if (ret)
			goto alloc_aiv_fail;

		ret = sec_alloc_mac_resource(dev, res);
		if (ret)
			goto alloc_mac_fail;
	}
	if (ctx->pbuf_supported) {
		ret = sec_alloc_pbuf_resource(dev, res);
		if (ret) {
			dev_err(dev, "fail to alloc pbuf dma resource!\n");
			goto alloc_pbuf_fail;
		}
	}

	return 0;

alloc_pbuf_fail:
	if (ctx->alg_type == SEC_AEAD)
		sec_free_mac_resource(dev, qp_ctx->res);
alloc_mac_fail:
	if (ctx->alg_type == SEC_AEAD)
		sec_free_aiv_resource(dev, res);
alloc_aiv_fail:
	sec_free_civ_resource(dev, res);
	return ret;
}

static void sec_alg_resource_free(struct sec_ctx *ctx,
				  struct sec_qp_ctx *qp_ctx)
{
	struct device *dev = ctx->dev;

	sec_free_civ_resource(dev, qp_ctx->res);

	if (ctx->pbuf_supported)
		sec_free_pbuf_resource(dev, qp_ctx->res);
	if (ctx->alg_type == SEC_AEAD)
		sec_free_mac_resource(dev, qp_ctx->res);
}

static int sec_create_qp_ctx(struct hisi_qm *qm, struct sec_ctx *ctx,
			     int qp_ctx_id, int alg_type)
{
	struct device *dev = ctx->dev;
	struct sec_qp_ctx *qp_ctx;
	struct hisi_qp *qp;
	int ret = -ENOMEM;

	qp_ctx = &ctx->qp_ctx[qp_ctx_id];
	qp = ctx->qps[qp_ctx_id];
	qp->req_type = 0;
	qp->qp_ctx = qp_ctx;
	qp_ctx->qp = qp;
	qp_ctx->ctx = ctx;

	qp->req_cb = sec_req_cb;

	spin_lock_init(&qp_ctx->req_lock);
	idr_init(&qp_ctx->req_idr);
	INIT_LIST_HEAD(&qp_ctx->backlog);

	qp_ctx->c_in_pool = hisi_acc_create_sgl_pool(dev, QM_Q_DEPTH,
						     SEC_SGL_SGE_NR);
	if (IS_ERR(qp_ctx->c_in_pool)) {
		dev_err(dev, "fail to create sgl pool for input!\n");
		goto err_destroy_idr;
	}

	qp_ctx->c_out_pool = hisi_acc_create_sgl_pool(dev, QM_Q_DEPTH,
						      SEC_SGL_SGE_NR);
	if (IS_ERR(qp_ctx->c_out_pool)) {
		dev_err(dev, "fail to create sgl pool for output!\n");
		goto err_free_c_in_pool;
	}

	ret = sec_alg_resource_alloc(ctx, qp_ctx);
	if (ret)
		goto err_free_c_out_pool;

	ret = hisi_qm_start_qp(qp, 0);
	if (ret < 0)
		goto err_queue_free;

	return 0;

err_queue_free:
	sec_alg_resource_free(ctx, qp_ctx);
err_free_c_out_pool:
	hisi_acc_free_sgl_pool(dev, qp_ctx->c_out_pool);
err_free_c_in_pool:
	hisi_acc_free_sgl_pool(dev, qp_ctx->c_in_pool);
err_destroy_idr:
	idr_destroy(&qp_ctx->req_idr);
	return ret;
}

static void sec_release_qp_ctx(struct sec_ctx *ctx,
			       struct sec_qp_ctx *qp_ctx)
{
	struct device *dev = ctx->dev;

	hisi_qm_stop_qp(qp_ctx->qp);
	sec_alg_resource_free(ctx, qp_ctx);

	hisi_acc_free_sgl_pool(dev, qp_ctx->c_out_pool);
	hisi_acc_free_sgl_pool(dev, qp_ctx->c_in_pool);

	idr_destroy(&qp_ctx->req_idr);
}

static int sec_ctx_base_init(struct sec_ctx *ctx)
{
	struct sec_dev *sec;
	int i, ret;

	ctx->qps = sec_create_qps();
	if (!ctx->qps) {
		pr_err("Can not create sec qps!\n");
		return -ENODEV;
	}

	sec = container_of(ctx->qps[0]->qm, struct sec_dev, qm);
	ctx->sec = sec;
	ctx->dev = &sec->qm.pdev->dev;
	ctx->hlf_q_num = sec->ctx_q_num >> 1;

	ctx->pbuf_supported = ctx->sec->iommu_used;

	/* Half of queue depth is taken as fake requests limit in the queue. */
	ctx->fake_req_limit = QM_Q_DEPTH >> 1;
	ctx->qp_ctx = kcalloc(sec->ctx_q_num, sizeof(struct sec_qp_ctx),
			      GFP_KERNEL);
	if (!ctx->qp_ctx) {
		ret = -ENOMEM;
		goto err_destroy_qps;
	}

	for (i = 0; i < sec->ctx_q_num; i++) {
		ret = sec_create_qp_ctx(&sec->qm, ctx, i, 0);
		if (ret)
			goto err_sec_release_qp_ctx;
	}

	return 0;

err_sec_release_qp_ctx:
	for (i = i - 1; i >= 0; i--)
		sec_release_qp_ctx(ctx, &ctx->qp_ctx[i]);
	kfree(ctx->qp_ctx);
err_destroy_qps:
	sec_destroy_qps(ctx->qps, sec->ctx_q_num);
	return ret;
}

static void sec_ctx_base_uninit(struct sec_ctx *ctx)
{
	int i;

	for (i = 0; i < ctx->sec->ctx_q_num; i++)
		sec_release_qp_ctx(ctx, &ctx->qp_ctx[i]);

	sec_destroy_qps(ctx->qps, ctx->sec->ctx_q_num);
	kfree(ctx->qp_ctx);
}

static int sec_cipher_init(struct sec_ctx *ctx)
{
	struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;

	c_ctx->c_key = dma_alloc_coherent(ctx->dev, SEC_MAX_KEY_SIZE,
					  &c_ctx->c_key_dma, GFP_KERNEL);
	if (!c_ctx->c_key)
		return -ENOMEM;

	return 0;
}

static void sec_cipher_uninit(struct sec_ctx *ctx)
{
	struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;

	memzero_explicit(c_ctx->c_key, SEC_MAX_KEY_SIZE);
	dma_free_coherent(ctx->dev, SEC_MAX_KEY_SIZE,
			  c_ctx->c_key, c_ctx->c_key_dma);
}

static int sec_auth_init(struct sec_ctx *ctx)
{
	struct sec_auth_ctx *a_ctx = &ctx->a_ctx;

	a_ctx->a_key = dma_alloc_coherent(ctx->dev, SEC_MAX_AKEY_SIZE,
					  &a_ctx->a_key_dma, GFP_KERNEL);
	if (!a_ctx->a_key)
		return -ENOMEM;

	return 0;
}

static void sec_auth_uninit(struct sec_ctx *ctx)
{
	struct sec_auth_ctx *a_ctx = &ctx->a_ctx;

	memzero_explicit(a_ctx->a_key, SEC_MAX_AKEY_SIZE);
	dma_free_coherent(ctx->dev, SEC_MAX_AKEY_SIZE,
			  a_ctx->a_key, a_ctx->a_key_dma);
}

static int sec_skcipher_fbtfm_init(struct crypto_skcipher *tfm)
{
	const char *alg = crypto_tfm_alg_name(&tfm->base);
	struct sec_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;

	c_ctx->fallback = false;
	if (likely(strncmp(alg, "xts", SEC_XTS_NAME_SZ)))
		return 0;

	c_ctx->fbtfm = crypto_alloc_sync_skcipher(alg, 0,
						  CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(c_ctx->fbtfm)) {
		pr_err("failed to alloc fallback tfm!\n");
		return PTR_ERR(c_ctx->fbtfm);
	}

	return 0;
}

static int sec_skcipher_init(struct crypto_skcipher *tfm)
{
	struct sec_ctx *ctx = crypto_skcipher_ctx(tfm);
	int ret;

	ctx->alg_type = SEC_SKCIPHER;
	crypto_skcipher_set_reqsize(tfm, sizeof(struct sec_req));
	ctx->c_ctx.ivsize = crypto_skcipher_ivsize(tfm);
	if (ctx->c_ctx.ivsize > SEC_IV_SIZE) {
		pr_err("get error skcipher iv size!\n");
		return -EINVAL;
	}

	ret = sec_ctx_base_init(ctx);
	if (ret)
		return ret;

	ret = sec_cipher_init(ctx);
	if (ret)
		goto err_cipher_init;

	ret = sec_skcipher_fbtfm_init(tfm);
	if (ret)
		goto err_fbtfm_init;

	return 0;

err_fbtfm_init:
	sec_cipher_uninit(ctx);
err_cipher_init:
	sec_ctx_base_uninit(ctx);
	return ret;
}

static void sec_skcipher_uninit(struct crypto_skcipher *tfm)
{
	struct sec_ctx *ctx = crypto_skcipher_ctx(tfm);

	if (ctx->c_ctx.fbtfm)
		crypto_free_sync_skcipher(ctx->c_ctx.fbtfm);

	sec_cipher_uninit(ctx);
	sec_ctx_base_uninit(ctx);
}

static int sec_skcipher_3des_setkey(struct crypto_skcipher *tfm, const u8 *key,
				    const u32 keylen,
				    const enum sec_cmode c_mode)
{
	struct sec_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;
	int ret;

	ret = verify_skcipher_des3_key(tfm, key);
	if (ret)
		return ret;

	switch (keylen) {
	case SEC_DES3_2KEY_SIZE:
		c_ctx->c_key_len = SEC_CKEY_3DES_2KEY;
		break;
	case SEC_DES3_3KEY_SIZE:
		c_ctx->c_key_len = SEC_CKEY_3DES_3KEY;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int sec_skcipher_aes_sm4_setkey(struct sec_cipher_ctx *c_ctx,
				       const u32 keylen,
				       const enum sec_cmode c_mode)
{
	if (c_mode == SEC_CMODE_XTS) {
		switch (keylen) {
		case SEC_XTS_MIN_KEY_SIZE:
			c_ctx->c_key_len = SEC_CKEY_128BIT;
			break;
		case SEC_XTS_MID_KEY_SIZE:
			c_ctx->fallback = true;
			break;
		case SEC_XTS_MAX_KEY_SIZE:
			c_ctx->c_key_len = SEC_CKEY_256BIT;
			break;
		default:
			pr_err("hisi_sec2: xts mode key error!\n");
			return -EINVAL;
		}
	} else {
		if (c_ctx->c_alg == SEC_CALG_SM4 &&
		    keylen != AES_KEYSIZE_128) {
			pr_err("hisi_sec2: sm4 key error!\n");
			return -EINVAL;
		} else {
			switch (keylen) {
			case AES_KEYSIZE_128:
				c_ctx->c_key_len = SEC_CKEY_128BIT;
				break;
			case AES_KEYSIZE_192:
				c_ctx->c_key_len = SEC_CKEY_192BIT;
				break;
			case AES_KEYSIZE_256:
				c_ctx->c_key_len = SEC_CKEY_256BIT;
				break;
			default:
				pr_err("hisi_sec2: aes key error!\n");
				return -EINVAL;
			}
		}
	}

	return 0;
}

static int sec_skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key,
			       const u32 keylen, const enum sec_calg c_alg,
			       const enum sec_cmode c_mode)
{
	struct sec_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;
	struct device *dev = ctx->dev;
	int ret;

	if (c_mode == SEC_CMODE_XTS) {
		ret = xts_verify_key(tfm, key, keylen);
		if (ret) {
			dev_err(dev, "xts mode key err!\n");
			return ret;
		}
	}

	c_ctx->c_alg = c_alg;
	c_ctx->c_mode = c_mode;

	switch (c_alg) {
	case SEC_CALG_3DES:
		ret = sec_skcipher_3des_setkey(tfm, key, keylen, c_mode);
		break;
	case SEC_CALG_AES:
	case SEC_CALG_SM4:
		ret = sec_skcipher_aes_sm4_setkey(c_ctx, keylen, c_mode);
		break;
	default:
		return -EINVAL;
	}

	if (ret) {
		dev_err(dev, "set sec key err!\n");
		return ret;
	}

	memcpy(c_ctx->c_key, key, keylen);
	if (c_ctx->fallback) {
		ret = crypto_sync_skcipher_setkey(c_ctx->fbtfm, key, keylen);
		if (ret) {
			dev_err(dev, "failed to set fallback skcipher key!\n");
			return ret;
		}
	}
	return 0;
}

#define GEN_SEC_SETKEY_FUNC(name, c_alg, c_mode)			\
static int sec_setkey_##name(struct crypto_skcipher *tfm, const u8 *key,\
	u32 keylen)							\
{									\
	return sec_skcipher_setkey(tfm, key, keylen, c_alg, c_mode);	\
}
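
/*
 * For reference, GEN_SEC_SETKEY_FUNC(aes_cbc, SEC_CALG_AES, SEC_CMODE_CBC)
 * below expands (after preprocessing) to:
 *
 *	static int sec_setkey_aes_cbc(struct crypto_skcipher *tfm,
 *				      const u8 *key, u32 keylen)
 *	{
 *		return sec_skcipher_setkey(tfm, key, keylen,
 *					   SEC_CALG_AES, SEC_CMODE_CBC);
 *	}
 *
 * These generated helpers are what the driver wires up as the
 * per-algorithm skcipher .setkey callbacks.
 */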

GEN_SEC_SETKEY_FUNC(aes_ecb, SEC_CALG_AES, SEC_CMODE_ECB)
GEN_SEC_SETKEY_FUNC(aes_cbc, SEC_CALG_AES, SEC_CMODE_CBC)
GEN_SEC_SETKEY_FUNC(aes_xts, SEC_CALG_AES, SEC_CMODE_XTS)
GEN_SEC_SETKEY_FUNC(aes_ofb, SEC_CALG_AES, SEC_CMODE_OFB)
GEN_SEC_SETKEY_FUNC(aes_cfb, SEC_CALG_AES, SEC_CMODE_CFB)
GEN_SEC_SETKEY_FUNC(aes_ctr, SEC_CALG_AES, SEC_CMODE_CTR)
GEN_SEC_SETKEY_FUNC(3des_ecb, SEC_CALG_3DES, SEC_CMODE_ECB)
GEN_SEC_SETKEY_FUNC(3des_cbc, SEC_CALG_3DES, SEC_CMODE_CBC)
GEN_SEC_SETKEY_FUNC(sm4_xts, SEC_CALG_SM4, SEC_CMODE_XTS)
GEN_SEC_SETKEY_FUNC(sm4_cbc, SEC_CALG_SM4, SEC_CMODE_CBC)
GEN_SEC_SETKEY_FUNC(sm4_ofb, SEC_CALG_SM4, SEC_CMODE_OFB)
GEN_SEC_SETKEY_FUNC(sm4_cfb, SEC_CALG_SM4, SEC_CMODE_CFB)
GEN_SEC_SETKEY_FUNC(sm4_ctr, SEC_CALG_SM4, SEC_CMODE_CTR)

static int sec_cipher_pbuf_map(struct sec_ctx *ctx, struct sec_req *req,
			       struct scatterlist *src)
{
	struct sec_aead_req *a_req = &req->aead_req;
	struct aead_request *aead_req = a_req->aead_req;
	struct sec_cipher_req *c_req = &req->c_req;
	struct sec_qp_ctx *qp_ctx = req->qp_ctx;
	struct device *dev = ctx->dev;
	int copy_size, pbuf_length;
	int req_id = req->req_id;
	struct crypto_aead *tfm;
	size_t authsize;
	u8 *mac_offset;

	if (ctx->alg_type == SEC_AEAD)
		copy_size = aead_req->cryptlen + aead_req->assoclen;
	else
		copy_size = c_req->c_len;

	pbuf_length = sg_copy_to_buffer(src, sg_nents(src),
			qp_ctx->res[req_id].pbuf, copy_size);
	if (unlikely(pbuf_length != copy_size)) {
		dev_err(dev, "copy src data to pbuf error!\n");
		return -EINVAL;
	}
	if (!c_req->encrypt && ctx->alg_type == SEC_AEAD) {
		tfm = crypto_aead_reqtfm(aead_req);
		authsize = crypto_aead_authsize(tfm);
		mac_offset = qp_ctx->res[req_id].pbuf + copy_size - authsize;
		memcpy(a_req->out_mac, mac_offset, authsize);
	}

	req->in_dma = qp_ctx->res[req_id].pbuf_dma;
	c_req->c_out_dma = req->in_dma;

	return 0;
}

static void sec_cipher_pbuf_unmap(struct sec_ctx *ctx, struct sec_req *req,
				  struct scatterlist *dst)
{
	struct aead_request *aead_req = req->aead_req.aead_req;
	struct sec_cipher_req *c_req = &req->c_req;
	struct sec_qp_ctx *qp_ctx = req->qp_ctx;
	int copy_size, pbuf_length;
	int req_id = req->req_id;

	if (ctx->alg_type == SEC_AEAD)
		copy_size = c_req->c_len + aead_req->assoclen;
	else
		copy_size = c_req->c_len;

	pbuf_length = sg_copy_from_buffer(dst, sg_nents(dst),
			qp_ctx->res[req_id].pbuf, copy_size);
	if (unlikely(pbuf_length != copy_size))
		dev_err(ctx->dev, "copy pbuf data to dst error!\n");
}

static int sec_aead_mac_init(struct sec_aead_req *req)
{
	struct aead_request *aead_req = req->aead_req;
	struct crypto_aead *tfm = crypto_aead_reqtfm(aead_req);
	size_t authsize = crypto_aead_authsize(tfm);
	u8 *mac_out = req->out_mac;
	struct scatterlist *sgl = aead_req->src;
	size_t copy_size;
	off_t skip_size;

	/* Copy input mac */
	skip_size = aead_req->assoclen + aead_req->cryptlen - authsize;
	copy_size = sg_pcopy_to_buffer(sgl, sg_nents(sgl), mac_out,
				       authsize, skip_size);
	if (unlikely(copy_size != authsize))
		return -EINVAL;

	return 0;
}

static int sec_cipher_map(struct sec_ctx *ctx, struct sec_req *req,
			  struct scatterlist *src, struct scatterlist *dst)
{
	struct sec_cipher_req *c_req = &req->c_req;
	struct sec_aead_req *a_req = &req->aead_req;
	struct sec_qp_ctx *qp_ctx = req->qp_ctx;
	struct sec_alg_res *res = &qp_ctx->res[req->req_id];
	struct device *dev = ctx->dev;
	int ret;

	if (req->use_pbuf) {
		c_req->c_ivin = res->pbuf + SEC_PBUF_IV_OFFSET;
		c_req->c_ivin_dma = res->pbuf_dma + SEC_PBUF_IV_OFFSET;
		if (ctx->alg_type == SEC_AEAD) {
			a_req->a_ivin = res->a_ivin;
			a_req->a_ivin_dma = res->a_ivin_dma;
			a_req->out_mac = res->pbuf + SEC_PBUF_MAC_OFFSET;
			a_req->out_mac_dma = res->pbuf_dma +
					SEC_PBUF_MAC_OFFSET;
		}
		ret = sec_cipher_pbuf_map(ctx, req, src);

		return ret;
	}
	c_req->c_ivin = res->c_ivin;
	c_req->c_ivin_dma = res->c_ivin_dma;
	if (ctx->alg_type == SEC_AEAD) {
		a_req->a_ivin = res->a_ivin;
		a_req->a_ivin_dma = res->a_ivin_dma;
		a_req->out_mac = res->out_mac;
		a_req->out_mac_dma = res->out_mac_dma;
	}

	req->in = hisi_acc_sg_buf_map_to_hw_sgl(dev, src,
						qp_ctx->c_in_pool,
						req->req_id,
						&req->in_dma);
	if (IS_ERR(req->in)) {
		dev_err(dev, "fail to dma map input sgl buffers!\n");
		return PTR_ERR(req->in);
	}

	if (!c_req->encrypt && ctx->alg_type == SEC_AEAD) {
		ret = sec_aead_mac_init(a_req);
		if (unlikely(ret)) {
			dev_err(dev, "fail to init mac data for ICV!\n");
			return ret;
		}
	}

	if (dst == src) {
		c_req->c_out = req->in;
		c_req->c_out_dma = req->in_dma;
	} else {
		c_req->c_out = hisi_acc_sg_buf_map_to_hw_sgl(dev, dst,
							     qp_ctx->c_out_pool,
							     req->req_id,
							     &c_req->c_out_dma);

		if (IS_ERR(c_req->c_out)) {
			dev_err(dev, "fail to dma map output sgl buffers!\n");
			hisi_acc_sg_buf_unmap(dev, src, req->in);
			return PTR_ERR(c_req->c_out);
		}
	}

	return 0;
}

static void sec_cipher_unmap(struct sec_ctx *ctx, struct sec_req *req,
			     struct scatterlist *src, struct scatterlist *dst)
{
	struct sec_cipher_req *c_req = &req->c_req;
	struct device *dev = ctx->dev;

	if (req->use_pbuf) {
		sec_cipher_pbuf_unmap(ctx, req, dst);
	} else {
		if (dst != src)
			hisi_acc_sg_buf_unmap(dev, src, req->in);

		hisi_acc_sg_buf_unmap(dev, dst, c_req->c_out);
	}
}

static int sec_skcipher_sgl_map(struct sec_ctx *ctx, struct sec_req *req)
{
	struct skcipher_request *sq = req->c_req.sk_req;

	return sec_cipher_map(ctx, req, sq->src, sq->dst);
}

static void sec_skcipher_sgl_unmap(struct sec_ctx *ctx, struct sec_req *req)
{
	struct skcipher_request *sq = req->c_req.sk_req;

	sec_cipher_unmap(ctx, req, sq->src, sq->dst);
}

static int sec_aead_aes_set_key(struct sec_cipher_ctx *c_ctx,
				struct crypto_authenc_keys *keys)
{
	switch (keys->enckeylen) {
	case AES_KEYSIZE_128:
		c_ctx->c_key_len = SEC_CKEY_128BIT;
		break;
	case AES_KEYSIZE_192:
		c_ctx->c_key_len = SEC_CKEY_192BIT;
		break;
	case AES_KEYSIZE_256:
		c_ctx->c_key_len = SEC_CKEY_256BIT;
		break;
	default:
		pr_err("hisi_sec2: aead aes key error!\n");
		return -EINVAL;
	}
	memcpy(c_ctx->c_key, keys->enckey, keys->enckeylen);

	return 0;
}

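/*
 * Load the authentication key. Following the usual HMAC convention
 * (RFC 2104), a key longer than the hash block size is first digested
 * down to digestsize bytes; shorter keys are used as-is.
 */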
static int sec_aead_auth_set_key(struct sec_auth_ctx *ctx,
				 struct crypto_authenc_keys *keys)
{
	struct crypto_shash *hash_tfm = ctx->hash_tfm;
	int blocksize, digestsize, ret;

	if (!keys->authkeylen) {
		pr_err("hisi_sec2: aead auth key error!\n");
		return -EINVAL;
	}

	blocksize = crypto_shash_blocksize(hash_tfm);
	digestsize = crypto_shash_digestsize(hash_tfm);
	if (keys->authkeylen > blocksize) {
		ret = crypto_shash_tfm_digest(hash_tfm, keys->authkey,
					      keys->authkeylen, ctx->a_key);
		if (ret) {
			pr_err("hisi_sec2: aead auth digest error!\n");
			return -EINVAL;
		}
		ctx->a_key_len = digestsize;
	} else {
		memcpy(ctx->a_key, keys->authkey, keys->authkeylen);
		ctx->a_key_len = keys->authkeylen;
	}

	return 0;
}

static int sec_aead_setauthsize(struct crypto_aead *aead, unsigned int authsize)
{
	struct crypto_tfm *tfm = crypto_aead_tfm(aead);
	struct sec_ctx *ctx = crypto_tfm_ctx(tfm);
	struct sec_auth_ctx *a_ctx = &ctx->a_ctx;

	if (unlikely(a_ctx->fallback_aead_tfm))
		return crypto_aead_setauthsize(a_ctx->fallback_aead_tfm, authsize);

	return 0;
}

static int sec_aead_fallback_setkey(struct sec_auth_ctx *a_ctx,
				    struct crypto_aead *tfm, const u8 *key,
				    unsigned int keylen)
{
	crypto_aead_clear_flags(a_ctx->fallback_aead_tfm, CRYPTO_TFM_REQ_MASK);
	crypto_aead_set_flags(a_ctx->fallback_aead_tfm,
			      crypto_aead_get_flags(tfm) & CRYPTO_TFM_REQ_MASK);
	return crypto_aead_setkey(a_ctx->fallback_aead_tfm, key, keylen);
}

static int sec_aead_setkey(struct crypto_aead *tfm, const u8 *key,
			   const u32 keylen, const enum sec_hash_alg a_alg,
			   const enum sec_calg c_alg,
			   const enum sec_mac_len mac_len,
			   const enum sec_cmode c_mode)
{
	struct sec_ctx *ctx = crypto_aead_ctx(tfm);
	struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;
	struct sec_auth_ctx *a_ctx = &ctx->a_ctx;
	struct device *dev = ctx->dev;
	struct crypto_authenc_keys keys;
	int ret;

	ctx->a_ctx.a_alg = a_alg;
	ctx->c_ctx.c_alg = c_alg;
	ctx->a_ctx.mac_len = mac_len;
	c_ctx->c_mode = c_mode;

	if (c_mode == SEC_CMODE_CCM || c_mode == SEC_CMODE_GCM) {
		ret = sec_skcipher_aes_sm4_setkey(c_ctx, keylen, c_mode);
		if (ret) {
			dev_err(dev, "set sec aes ccm cipher key err!\n");
			return ret;
		}
		memcpy(c_ctx->c_key, key, keylen);

		if (unlikely(a_ctx->fallback_aead_tfm)) {
			ret = sec_aead_fallback_setkey(a_ctx, tfm, key, keylen);
			if (ret)
				return ret;
		}

		return 0;
	}

	if (crypto_authenc_extractkeys(&keys, key, keylen))
		goto bad_key;

	ret = sec_aead_aes_set_key(c_ctx, &keys);
	if (ret) {
		dev_err(dev, "set sec cipher key err!\n");
		goto bad_key;
	}

	ret = sec_aead_auth_set_key(&ctx->a_ctx, &keys);
	if (ret) {
		dev_err(dev, "set sec auth key err!\n");
		goto bad_key;
	}

	if ((ctx->a_ctx.mac_len & SEC_SQE_LEN_RATE_MASK) ||
	    (ctx->a_ctx.a_key_len & SEC_SQE_LEN_RATE_MASK)) {
		dev_err(dev, "MAC or AUTH key length error!\n");
		goto bad_key;
	}

	return 0;

bad_key:
	memzero_explicit(&keys, sizeof(struct crypto_authenc_keys));
	return -EINVAL;
}

#define GEN_SEC_AEAD_SETKEY_FUNC(name, aalg, calg, maclen, cmode)	\
static int sec_setkey_##name(struct crypto_aead *tfm, const u8 *key,	\
	u32 keylen)							\
{									\
	return sec_aead_setkey(tfm, key, keylen, aalg, calg, maclen, cmode);\
}

GEN_SEC_AEAD_SETKEY_FUNC(aes_cbc_sha1, SEC_A_HMAC_SHA1,
			 SEC_CALG_AES, SEC_HMAC_SHA1_MAC, SEC_CMODE_CBC)
GEN_SEC_AEAD_SETKEY_FUNC(aes_cbc_sha256, SEC_A_HMAC_SHA256,
			 SEC_CALG_AES, SEC_HMAC_SHA256_MAC, SEC_CMODE_CBC)
GEN_SEC_AEAD_SETKEY_FUNC(aes_cbc_sha512, SEC_A_HMAC_SHA512,
			 SEC_CALG_AES, SEC_HMAC_SHA512_MAC, SEC_CMODE_CBC)
GEN_SEC_AEAD_SETKEY_FUNC(aes_ccm, 0, SEC_CALG_AES,
			 SEC_HMAC_CCM_MAC, SEC_CMODE_CCM)
GEN_SEC_AEAD_SETKEY_FUNC(aes_gcm, 0, SEC_CALG_AES,
			 SEC_HMAC_GCM_MAC, SEC_CMODE_GCM)
GEN_SEC_AEAD_SETKEY_FUNC(sm4_ccm, 0, SEC_CALG_SM4,
			 SEC_HMAC_CCM_MAC, SEC_CMODE_CCM)
GEN_SEC_AEAD_SETKEY_FUNC(sm4_gcm, 0, SEC_CALG_SM4,
			 SEC_HMAC_GCM_MAC, SEC_CMODE_GCM)

static int sec_aead_sgl_map(struct sec_ctx *ctx, struct sec_req *req)
{
	struct aead_request *aq = req->aead_req.aead_req;

	return sec_cipher_map(ctx, req, aq->src, aq->dst);
}

static void sec_aead_sgl_unmap(struct sec_ctx *ctx, struct sec_req *req)
{
	struct aead_request *aq = req->aead_req.aead_req;

	sec_cipher_unmap(ctx, req, aq->src, aq->dst);
}

static int sec_request_transfer(struct sec_ctx *ctx, struct sec_req *req)
{
	int ret;

	ret = ctx->req_op->buf_map(ctx, req);
	if (unlikely(ret))
		return ret;

	ctx->req_op->do_transfer(ctx, req);

	ret = ctx->req_op->bd_fill(ctx, req);
	if (unlikely(ret))
		goto unmap_req_buf;

	return ret;

unmap_req_buf:
	ctx->req_op->buf_unmap(ctx, req);
	return ret;
}

static void sec_request_untransfer(struct sec_ctx *ctx, struct sec_req *req)
{
	ctx->req_op->buf_unmap(ctx, req);
}

static void sec_skcipher_copy_iv(struct sec_ctx *ctx, struct sec_req *req)
{
	struct skcipher_request *sk_req = req->c_req.sk_req;
	struct sec_cipher_req *c_req = &req->c_req;

	memcpy(c_req->c_ivin, sk_req->iv, ctx->c_ctx.ivsize);
}

static int sec_skcipher_bd_fill(struct sec_ctx *ctx, struct sec_req *req)
{
	struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;
	struct sec_cipher_req *c_req = &req->c_req;
	struct sec_sqe *sec_sqe = &req->sec_sqe;
	u8 scene, sa_type, da_type;
	u8 bd_type, cipher;
	u8 de = 0;

	memset(sec_sqe, 0, sizeof(struct sec_sqe));

	sec_sqe->type2.c_key_addr = cpu_to_le64(c_ctx->c_key_dma);
	sec_sqe->type2.c_ivin_addr = cpu_to_le64(c_req->c_ivin_dma);
	sec_sqe->type2.data_src_addr = cpu_to_le64(req->in_dma);
	sec_sqe->type2.data_dst_addr = cpu_to_le64(c_req->c_out_dma);

	sec_sqe->type2.icvw_kmode |= cpu_to_le16(((u16)c_ctx->c_mode) <<
						SEC_CMODE_OFFSET);
	sec_sqe->type2.c_alg = c_ctx->c_alg;
	sec_sqe->type2.icvw_kmode |= cpu_to_le16(((u16)c_ctx->c_key_len) <<
						SEC_CKEY_OFFSET);

	bd_type = SEC_BD_TYPE2;
	if (c_req->encrypt)
		cipher = SEC_CIPHER_ENC << SEC_CIPHER_OFFSET;
	else
		cipher = SEC_CIPHER_DEC << SEC_CIPHER_OFFSET;
	sec_sqe->type_cipher_auth = bd_type | cipher;

	/* Set destination and source address type */
	if (req->use_pbuf) {
		sa_type = SEC_PBUF << SEC_SRC_SGL_OFFSET;
		da_type = SEC_PBUF << SEC_DST_SGL_OFFSET;
	} else {
		sa_type = SEC_SGL << SEC_SRC_SGL_OFFSET;
		da_type = SEC_SGL << SEC_DST_SGL_OFFSET;
	}

	sec_sqe->sdm_addr_type |= da_type;
	scene = SEC_COMM_SCENE << SEC_SCENE_OFFSET;
	if (req->in_dma != c_req->c_out_dma)
		de = 0x1 << SEC_DE_OFFSET;

	sec_sqe->sds_sa_type = (de | scene | sa_type);

	sec_sqe->type2.clen_ivhlen |= cpu_to_le32(c_req->c_len);
	sec_sqe->type2.tag = cpu_to_le16((u16)req->req_id);

	return 0;
}

static int sec_skcipher_bd_fill_v3(struct sec_ctx *ctx, struct sec_req *req)
{
	struct sec_sqe3 *sec_sqe3 = &req->sec_sqe3;
	struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;
	struct sec_cipher_req *c_req = &req->c_req;
	u32 bd_param = 0;
	u16 cipher;

	memset(sec_sqe3, 0, sizeof(struct sec_sqe3));

	sec_sqe3->c_key_addr = cpu_to_le64(c_ctx->c_key_dma);
	sec_sqe3->no_scene.c_ivin_addr = cpu_to_le64(c_req->c_ivin_dma);
	sec_sqe3->data_src_addr = cpu_to_le64(req->in_dma);
	sec_sqe3->data_dst_addr = cpu_to_le64(c_req->c_out_dma);

	sec_sqe3->c_mode_alg = ((u8)c_ctx->c_alg << SEC_CALG_OFFSET_V3) |
						c_ctx->c_mode;
	sec_sqe3->c_icv_key |= cpu_to_le16(((u16)c_ctx->c_key_len) <<
						SEC_CKEY_OFFSET_V3);

	if (c_req->encrypt)
		cipher = SEC_CIPHER_ENC;
	else
		cipher = SEC_CIPHER_DEC;
	sec_sqe3->c_icv_key |= cpu_to_le16(cipher);

	if (req->use_pbuf) {
		bd_param |= SEC_PBUF << SEC_SRC_SGL_OFFSET_V3;
		bd_param |= SEC_PBUF << SEC_DST_SGL_OFFSET_V3;
	} else {
		bd_param |= SEC_SGL << SEC_SRC_SGL_OFFSET_V3;
		bd_param |= SEC_SGL << SEC_DST_SGL_OFFSET_V3;
	}

	bd_param |= SEC_COMM_SCENE << SEC_SCENE_OFFSET_V3;
	if (req->in_dma != c_req->c_out_dma)
		bd_param |= 0x1 << SEC_DE_OFFSET_V3;

	bd_param |= SEC_BD_TYPE3;
	sec_sqe3->bd_param = cpu_to_le32(bd_param);

	sec_sqe3->c_len_ivin |= cpu_to_le32(c_req->c_len);
	sec_sqe3->tag = cpu_to_le64(req);

	return 0;
}

/* increment counter (128-bit int) */
static void ctr_iv_inc(__u8 *counter, __u8 bits, __u32 nums)
{
	do {
		--bits;
		nums += counter[bits];
		counter[bits] = nums & BITS_MASK;
		nums >>= BYTE_BITS;
	} while (bits && nums);
}

static void sec_update_iv(struct sec_req *req, enum sec_alg_type alg_type)
{
	struct aead_request *aead_req = req->aead_req.aead_req;
	struct skcipher_request *sk_req = req->c_req.sk_req;
	u32 iv_size = req->ctx->c_ctx.ivsize;
	struct scatterlist *sgl;
	unsigned int cryptlen;
	size_t sz;
	u8 *iv;

	if (req->c_req.encrypt)
		sgl = alg_type == SEC_SKCIPHER ? sk_req->dst : aead_req->dst;
	else
		sgl = alg_type == SEC_SKCIPHER ? sk_req->src : aead_req->src;

	if (alg_type == SEC_SKCIPHER) {
		iv = sk_req->iv;
		cryptlen = sk_req->cryptlen;
	} else {
		iv = aead_req->iv;
		cryptlen = aead_req->cryptlen;
	}

	if (req->ctx->c_ctx.c_mode == SEC_CMODE_CBC) {
		sz = sg_pcopy_to_buffer(sgl, sg_nents(sgl), iv, iv_size,
					cryptlen - iv_size);
		if (unlikely(sz != iv_size))
			dev_err(req->ctx->dev, "copy output iv error!\n");
	} else {
		sz = cryptlen / iv_size;
		if (cryptlen % iv_size)
			sz += 1;
		ctr_iv_inc(iv, iv_size, sz);
	}
}

static struct sec_req *sec_back_req_clear(struct sec_ctx *ctx,
					  struct sec_qp_ctx *qp_ctx)
{
	struct sec_req *backlog_req = NULL;

	spin_lock_bh(&qp_ctx->req_lock);
	if (ctx->fake_req_limit >=
	    atomic_read(&qp_ctx->qp->qp_status.used) &&
	    !list_empty(&qp_ctx->backlog)) {
		backlog_req = list_first_entry(&qp_ctx->backlog,
				typeof(*backlog_req), backlog_head);
		list_del(&backlog_req->backlog_head);
	}
	spin_unlock_bh(&qp_ctx->req_lock);

	return backlog_req;
}

static void sec_skcipher_callback(struct sec_ctx *ctx, struct sec_req *req,
				  int err)
{
	struct skcipher_request *sk_req = req->c_req.sk_req;
	struct sec_qp_ctx *qp_ctx = req->qp_ctx;
	struct skcipher_request *backlog_sk_req;
	struct sec_req *backlog_req;

	sec_free_req_id(req);

	/* Output the updated IV after encryption in CBC/CTR mode */
	if (!err && (ctx->c_ctx.c_mode == SEC_CMODE_CBC ||
	    ctx->c_ctx.c_mode == SEC_CMODE_CTR) && req->c_req.encrypt)
		sec_update_iv(req, SEC_SKCIPHER);

	while (1) {
		backlog_req = sec_back_req_clear(ctx, qp_ctx);
		if (!backlog_req)
			break;

		backlog_sk_req = backlog_req->c_req.sk_req;
		backlog_sk_req->base.complete(&backlog_sk_req->base,
						-EINPROGRESS);
		atomic64_inc(&ctx->sec->debug.dfx.recv_busy_cnt);
	}

	sk_req->base.complete(&sk_req->base, err);
}

static void set_aead_auth_iv(struct sec_ctx *ctx, struct sec_req *req)
{
	struct aead_request *aead_req = req->aead_req.aead_req;
	struct sec_cipher_req *c_req = &req->c_req;
	struct sec_aead_req *a_req = &req->aead_req;
	size_t authsize = ctx->a_ctx.mac_len;
	u32 data_size = aead_req->cryptlen;
	u8 flage = 0;
	u8 cm, cl;

	/* the specification has been checked in aead_iv_demension_check() */
	cl = c_req->c_ivin[0] + 1;
	c_req->c_ivin[ctx->c_ctx.ivsize - cl] = 0x00;
	memset(&c_req->c_ivin[ctx->c_ctx.ivsize - cl], 0, cl);
	c_req->c_ivin[ctx->c_ctx.ivsize - IV_LAST_BYTE1] = IV_CTR_INIT;

	/* the low 3 bits hold L' */
	flage |= c_req->c_ivin[0] & IV_CL_MASK;

	/* M' goes in bits 3~5 and the Adata flag in bit 6 */
	cm = (authsize - IV_CM_CAL_NUM) / IV_CM_CAL_NUM;
	flage |= cm << IV_CM_OFFSET;
	if (aead_req->assoclen)
		flage |= 0x01 << IV_FLAGS_OFFSET;

	memcpy(a_req->a_ivin, c_req->c_ivin, ctx->c_ctx.ivsize);
	a_req->a_ivin[0] = flage;

	/*
	 * The last 32 bits hold the counter's initial value, but the
	 * nonce extends into the first 16 of them, so only the tail
	 * 16 bits are filled with the cipher length.
	 */
	if (!c_req->encrypt)
		data_size = aead_req->cryptlen - authsize;

	a_req->a_ivin[ctx->c_ctx.ivsize - IV_LAST_BYTE1] =
			data_size & IV_LAST_BYTE_MASK;
	data_size >>= IV_BYTE_OFFSET;
	a_req->a_ivin[ctx->c_ctx.ivsize - IV_LAST_BYTE2] =
			data_size & IV_LAST_BYTE_MASK;
}
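
/*
 * For orientation (per RFC 3610, which the flag packing above follows):
 * the CCM B0 flags byte is laid out as
 *
 *	bit 7: reserved (0)
 *	bit 6: Adata (1 if associated data is present)
 *	bits 5..3: M' = (authsize - 2) / 2
 *	bits 2..0: L' = L - 1 (length-field width minus one)
 *
 * and the counter blocks reuse only the L' bits, with the counter itself
 * starting at 1, which is what IV_CTR_INIT seeds above.
 */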

static void sec_aead_set_iv(struct sec_ctx *ctx, struct sec_req *req)
{
	struct aead_request *aead_req = req->aead_req.aead_req;
	struct crypto_aead *tfm = crypto_aead_reqtfm(aead_req);
	size_t authsize = crypto_aead_authsize(tfm);
	struct sec_cipher_req *c_req = &req->c_req;
	struct sec_aead_req *a_req = &req->aead_req;

	memcpy(c_req->c_ivin, aead_req->iv, ctx->c_ctx.ivsize);

	if (ctx->c_ctx.c_mode == SEC_CMODE_CCM) {
		/*
		 * CCM 16Byte Cipher_IV: {1B_Flag,13B_IV,2B_counter},
		 * the counter must be set to 0x01
		 */
		ctx->a_ctx.mac_len = authsize;
		/* CCM 16Byte Auth_IV: {1B_AFlag,13B_IV,2B_Ptext_length} */
		set_aead_auth_iv(ctx, req);
	}

	/* GCM 12Byte Cipher_IV == Auth_IV */
	if (ctx->c_ctx.c_mode == SEC_CMODE_GCM) {
		ctx->a_ctx.mac_len = authsize;
		memcpy(a_req->a_ivin, c_req->c_ivin, SEC_AIV_SIZE);
	}
}

static void sec_auth_bd_fill_xcm(struct sec_auth_ctx *ctx, int dir,
				 struct sec_req *req, struct sec_sqe *sec_sqe)
{
	struct sec_aead_req *a_req = &req->aead_req;
	struct aead_request *aq = a_req->aead_req;

	/* C_ICV_Len is MAC size, 0x4 ~ 0x10 */
	sec_sqe->type2.icvw_kmode |= cpu_to_le16((u16)ctx->mac_len);

	/* mode set to CCM/GCM, don't set {A_Alg, AKey_Len, MAC_Len} */
	sec_sqe->type2.a_key_addr = sec_sqe->type2.c_key_addr;
	sec_sqe->type2.a_ivin_addr = cpu_to_le64(a_req->a_ivin_dma);
	sec_sqe->type_cipher_auth |= SEC_NO_AUTH << SEC_AUTH_OFFSET;

	if (dir)
		sec_sqe->sds_sa_type &= SEC_CIPHER_AUTH;
	else
		sec_sqe->sds_sa_type |= SEC_AUTH_CIPHER;

	sec_sqe->type2.alen_ivllen = cpu_to_le32(aq->assoclen);
	sec_sqe->type2.auth_src_offset = cpu_to_le16(0x0);
	sec_sqe->type2.cipher_src_offset = cpu_to_le16((u16)aq->assoclen);

	sec_sqe->type2.mac_addr = cpu_to_le64(a_req->out_mac_dma);
}

static void sec_auth_bd_fill_xcm_v3(struct sec_auth_ctx *ctx, int dir,
				    struct sec_req *req, struct sec_sqe3 *sqe3)
{
	struct sec_aead_req *a_req = &req->aead_req;
	struct aead_request *aq = a_req->aead_req;

	/* C_ICV_Len is MAC size, 0x4 ~ 0x10 */
	sqe3->c_icv_key |= cpu_to_le16((u16)ctx->mac_len << SEC_MAC_OFFSET_V3);

	/* mode set to CCM/GCM, don't set {A_Alg, AKey_Len, MAC_Len} */
	sqe3->a_key_addr = sqe3->c_key_addr;
	sqe3->auth_ivin.a_ivin_addr = cpu_to_le64(a_req->a_ivin_dma);
	sqe3->auth_mac_key |= SEC_NO_AUTH;

	if (dir)
		sqe3->huk_iv_seq &= SEC_CIPHER_AUTH_V3;
	else
		sqe3->huk_iv_seq |= SEC_AUTH_CIPHER_V3;

	sqe3->a_len_key = cpu_to_le32(aq->assoclen);
	sqe3->auth_src_offset = cpu_to_le16(0x0);
	sqe3->cipher_src_offset = cpu_to_le16((u16)aq->assoclen);
	sqe3->mac_addr = cpu_to_le64(a_req->out_mac_dma);
}

static void sec_auth_bd_fill_ex(struct sec_auth_ctx *ctx, int dir,
				struct sec_req *req, struct sec_sqe *sec_sqe)
{
	struct sec_aead_req *a_req = &req->aead_req;
	struct sec_cipher_req *c_req = &req->c_req;
	struct aead_request *aq = a_req->aead_req;

	sec_sqe->type2.a_key_addr = cpu_to_le64(ctx->a_key_dma);

	sec_sqe->type2.mac_key_alg =
			cpu_to_le32(ctx->mac_len / SEC_SQE_LEN_RATE);

	sec_sqe->type2.mac_key_alg |=
			cpu_to_le32((u32)((ctx->a_key_len) /
			SEC_SQE_LEN_RATE) << SEC_AKEY_OFFSET);

	sec_sqe->type2.mac_key_alg |=
			cpu_to_le32((u32)(ctx->a_alg) << SEC_AEAD_ALG_OFFSET);

	if (dir) {
		sec_sqe->type_cipher_auth |= SEC_AUTH_TYPE1 << SEC_AUTH_OFFSET;
		sec_sqe->sds_sa_type &= SEC_CIPHER_AUTH;
	} else {
		sec_sqe->type_cipher_auth |= SEC_AUTH_TYPE2 << SEC_AUTH_OFFSET;
		sec_sqe->sds_sa_type |= SEC_AUTH_CIPHER;
	}
	sec_sqe->type2.alen_ivllen = cpu_to_le32(c_req->c_len + aq->assoclen);

	sec_sqe->type2.cipher_src_offset = cpu_to_le16((u16)aq->assoclen);

	sec_sqe->type2.mac_addr = cpu_to_le64(a_req->out_mac_dma);
}

static int sec_aead_bd_fill(struct sec_ctx *ctx, struct sec_req *req)
{
	struct sec_auth_ctx *auth_ctx = &ctx->a_ctx;
	struct sec_sqe *sec_sqe = &req->sec_sqe;
	int ret;

	ret = sec_skcipher_bd_fill(ctx, req);
	if (unlikely(ret)) {
		dev_err(ctx->dev, "skcipher bd fill is error!\n");
		return ret;
	}

	if (ctx->c_ctx.c_mode == SEC_CMODE_CCM ||
	    ctx->c_ctx.c_mode == SEC_CMODE_GCM)
		sec_auth_bd_fill_xcm(auth_ctx, req->c_req.encrypt, req, sec_sqe);
	else
		sec_auth_bd_fill_ex(auth_ctx, req->c_req.encrypt, req, sec_sqe);

	return 0;
}

static void sec_auth_bd_fill_ex_v3(struct sec_auth_ctx *ctx, int dir,
				   struct sec_req *req, struct sec_sqe3 *sqe3)
{
	struct sec_aead_req *a_req = &req->aead_req;
	struct sec_cipher_req *c_req = &req->c_req;
	struct aead_request *aq = a_req->aead_req;

	sqe3->a_key_addr = cpu_to_le64(ctx->a_key_dma);

	sqe3->auth_mac_key |=
			cpu_to_le32((u32)(ctx->mac_len /
			SEC_SQE_LEN_RATE) << SEC_MAC_OFFSET_V3);

	sqe3->auth_mac_key |=
			cpu_to_le32((u32)(ctx->a_key_len /
			SEC_SQE_LEN_RATE) << SEC_AKEY_OFFSET_V3);

	sqe3->auth_mac_key |=
			cpu_to_le32((u32)(ctx->a_alg) << SEC_AUTH_ALG_OFFSET_V3);

	if (dir) {
		sqe3->auth_mac_key |= cpu_to_le32((u32)SEC_AUTH_TYPE1);
		sqe3->huk_iv_seq &= SEC_CIPHER_AUTH_V3;
	} else {
		sqe3->auth_mac_key |= cpu_to_le32((u32)SEC_AUTH_TYPE2);
		sqe3->huk_iv_seq |= SEC_AUTH_CIPHER_V3;
	}
	sqe3->a_len_key = cpu_to_le32(c_req->c_len + aq->assoclen);

	sqe3->cipher_src_offset = cpu_to_le16((u16)aq->assoclen);

	sqe3->mac_addr = cpu_to_le64(a_req->out_mac_dma);
}

static int sec_aead_bd_fill_v3(struct sec_ctx *ctx, struct sec_req *req)
{
	struct sec_auth_ctx *auth_ctx = &ctx->a_ctx;
	struct sec_sqe3 *sec_sqe3 = &req->sec_sqe3;
	int ret;

	ret = sec_skcipher_bd_fill_v3(ctx, req);
	if (unlikely(ret)) {
		dev_err(ctx->dev, "skcipher bd3 fill is error!\n");
		return ret;
	}

	if (ctx->c_ctx.c_mode == SEC_CMODE_CCM ||
	    ctx->c_ctx.c_mode == SEC_CMODE_GCM)
		sec_auth_bd_fill_xcm_v3(auth_ctx, req->c_req.encrypt,
					req, sec_sqe3);
	else
		sec_auth_bd_fill_ex_v3(auth_ctx, req->c_req.encrypt,
				       req, sec_sqe3);

	return 0;
}

static void sec_aead_callback(struct sec_ctx *c, struct sec_req *req, int err)
{
	struct aead_request *a_req = req->aead_req.aead_req;
	struct crypto_aead *tfm = crypto_aead_reqtfm(a_req);
	struct sec_aead_req *aead_req = &req->aead_req;
	struct sec_cipher_req *c_req = &req->c_req;
	size_t authsize = crypto_aead_authsize(tfm);
	struct sec_qp_ctx *qp_ctx = req->qp_ctx;
	struct aead_request *backlog_aead_req;
	struct sec_req *backlog_req;
	size_t sz;

	if (!err && c->c_ctx.c_mode == SEC_CMODE_CBC && c_req->encrypt)
		sec_update_iv(req, SEC_AEAD);

	/* Copy output mac */
	if (!err && c_req->encrypt) {
		struct scatterlist *sgl = a_req->dst;

		sz = sg_pcopy_from_buffer(sgl, sg_nents(sgl),
					  aead_req->out_mac,
					  authsize, a_req->cryptlen +
					  a_req->assoclen);

		if (unlikely(sz != authsize)) {
			dev_err(c->dev, "copy out mac err!\n");
			err = -EINVAL;
		}
	}

	sec_free_req_id(req);

	while (1) {
		backlog_req = sec_back_req_clear(c, qp_ctx);
		if (!backlog_req)
			break;

		backlog_aead_req = backlog_req->aead_req.aead_req;
		backlog_aead_req->base.complete(&backlog_aead_req->base,
						-EINPROGRESS);
		atomic64_inc(&c->sec->debug.dfx.recv_busy_cnt);
	}

	a_req->base.complete(&a_req->base, err);
}
sec_request_uninit(struct sec_ctx * ctx,struct sec_req * req)1696 static void sec_request_uninit(struct sec_ctx *ctx, struct sec_req *req)
1697 {
1698 sec_free_req_id(req);
1699 sec_free_queue_id(ctx, req);
1700 }
1701
sec_request_init(struct sec_ctx * ctx,struct sec_req * req)1702 static int sec_request_init(struct sec_ctx *ctx, struct sec_req *req)
1703 {
1704 struct sec_qp_ctx *qp_ctx;
1705 int queue_id;
1706
1707 /* To load balance */
1708 queue_id = sec_alloc_queue_id(ctx, req);
1709 qp_ctx = &ctx->qp_ctx[queue_id];
1710
1711 req->req_id = sec_alloc_req_id(req, qp_ctx);
1712 if (unlikely(req->req_id < 0)) {
1713 sec_free_queue_id(ctx, req);
1714 return req->req_id;
1715 }
1716
1717 return 0;
1718 }
1719
sec_process(struct sec_ctx * ctx,struct sec_req * req)1720 static int sec_process(struct sec_ctx *ctx, struct sec_req *req)
1721 {
1722 struct sec_cipher_req *c_req = &req->c_req;
1723 int ret;
1724
1725 ret = sec_request_init(ctx, req);
1726 if (unlikely(ret))
1727 return ret;
1728
1729 ret = sec_request_transfer(ctx, req);
1730 if (unlikely(ret))
1731 goto err_uninit_req;
1732
	/* Update the output IV before decryption in CBC/CTR mode */
	if (!req->c_req.encrypt && (ctx->c_ctx.c_mode == SEC_CMODE_CBC ||
	    ctx->c_ctx.c_mode == SEC_CMODE_CTR))
		sec_update_iv(req, ctx->alg_type);

	ret = ctx->req_op->bd_send(ctx, req);
	if (unlikely((ret != -EBUSY && ret != -EINPROGRESS) ||
		(ret == -EBUSY && !(req->flag & CRYPTO_TFM_REQ_MAY_BACKLOG)))) {
		dev_err_ratelimited(ctx->dev, "send sec request failed!\n");
		goto err_send_req;
	}

	return ret;

err_send_req:
	/* On failure, restore the caller's IV from the saved copy */
	if (ctx->c_ctx.c_mode == SEC_CMODE_CBC && !req->c_req.encrypt) {
		if (ctx->alg_type == SEC_SKCIPHER)
			memcpy(req->c_req.sk_req->iv, c_req->c_ivin,
			       ctx->c_ctx.ivsize);
		else
			memcpy(req->aead_req.aead_req->iv, c_req->c_ivin,
			       ctx->c_ctx.ivsize);
	}

	sec_request_untransfer(ctx, req);
err_uninit_req:
	sec_request_uninit(ctx, req);
	return ret;
}

static const struct sec_req_op sec_skcipher_req_ops = {
	.buf_map	= sec_skcipher_sgl_map,
	.buf_unmap	= sec_skcipher_sgl_unmap,
	.do_transfer	= sec_skcipher_copy_iv,
	.bd_fill	= sec_skcipher_bd_fill,
	.bd_send	= sec_bd_send,
	.callback	= sec_skcipher_callback,
	.process	= sec_process,
};

static const struct sec_req_op sec_aead_req_ops = {
	.buf_map	= sec_aead_sgl_map,
	.buf_unmap	= sec_aead_sgl_unmap,
	.do_transfer	= sec_aead_set_iv,
	.bd_fill	= sec_aead_bd_fill,
	.bd_send	= sec_bd_send,
	.callback	= sec_aead_callback,
	.process	= sec_process,
};

static const struct sec_req_op sec_skcipher_req_ops_v3 = {
	.buf_map	= sec_skcipher_sgl_map,
	.buf_unmap	= sec_skcipher_sgl_unmap,
	.do_transfer	= sec_skcipher_copy_iv,
	.bd_fill	= sec_skcipher_bd_fill_v3,
	.bd_send	= sec_bd_send,
	.callback	= sec_skcipher_callback,
	.process	= sec_process,
};

static const struct sec_req_op sec_aead_req_ops_v3 = {
	.buf_map	= sec_aead_sgl_map,
	.buf_unmap	= sec_aead_sgl_unmap,
	.do_transfer	= sec_aead_set_iv,
	.bd_fill	= sec_aead_bd_fill_v3,
	.bd_send	= sec_bd_send,
	.callback	= sec_aead_callback,
	.process	= sec_process,
};
1803
1804 static int sec_skcipher_ctx_init(struct crypto_skcipher *tfm)
1805 {
1806 struct sec_ctx *ctx = crypto_skcipher_ctx(tfm);
1807 int ret;
1808
1809 ret = sec_skcipher_init(tfm);
1810 if (ret)
1811 return ret;
1812
1813 if (ctx->sec->qm.ver < QM_HW_V3) {
1814 ctx->type_supported = SEC_BD_TYPE2;
1815 ctx->req_op = &sec_skcipher_req_ops;
1816 } else {
1817 ctx->type_supported = SEC_BD_TYPE3;
1818 ctx->req_op = &sec_skcipher_req_ops_v3;
1819 }
1820
1821 return ret;
1822 }
1823
1824 static void sec_skcipher_ctx_exit(struct crypto_skcipher *tfm)
1825 {
1826 sec_skcipher_uninit(tfm);
1827 }
1828
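/*
 * Common AEAD transform setup: validate the IV size, initialise the base
 * context, pick the request ops matching the hardware BD format, then
 * bring up the auth and cipher halves. On failure, tear-down runs in
 * reverse order of initialisation.
 */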
1829 static int sec_aead_init(struct crypto_aead *tfm)
1830 {
1831 struct sec_ctx *ctx = crypto_aead_ctx(tfm);
1832 int ret;
1833
1834 crypto_aead_set_reqsize(tfm, sizeof(struct sec_req));
1835 ctx->alg_type = SEC_AEAD;
1836 ctx->c_ctx.ivsize = crypto_aead_ivsize(tfm);
1837 if (ctx->c_ctx.ivsize < SEC_AIV_SIZE ||
1838 ctx->c_ctx.ivsize > SEC_IV_SIZE) {
1839 pr_err("get error aead iv size!\n");
1840 return -EINVAL;
1841 }
1842
1843 ret = sec_ctx_base_init(ctx);
1844 if (ret)
1845 return ret;
1846 if (ctx->sec->qm.ver < QM_HW_V3) {
1847 ctx->type_supported = SEC_BD_TYPE2;
1848 ctx->req_op = &sec_aead_req_ops;
1849 } else {
1850 ctx->type_supported = SEC_BD_TYPE3;
1851 ctx->req_op = &sec_aead_req_ops_v3;
1852 }
1853
1854 ret = sec_auth_init(ctx);
1855 if (ret)
1856 goto err_auth_init;
1857
1858 ret = sec_cipher_init(ctx);
1859 if (ret)
1860 goto err_cipher_init;
1861
1862 return ret;
1863
1864 err_cipher_init:
1865 sec_auth_uninit(ctx);
1866 err_auth_init:
1867 sec_ctx_base_uninit(ctx);
1868 return ret;
1869 }
1870
1871 static void sec_aead_exit(struct crypto_aead *tfm)
1872 {
1873 struct sec_ctx *ctx = crypto_aead_ctx(tfm);
1874
1875 sec_cipher_uninit(ctx);
1876 sec_auth_uninit(ctx);
1877 sec_ctx_base_uninit(ctx);
1878 }
1879
1880 static int sec_aead_ctx_init(struct crypto_aead *tfm, const char *hash_name)
1881 {
1882 struct sec_ctx *ctx = crypto_aead_ctx(tfm);
1883 struct sec_auth_ctx *auth_ctx = &ctx->a_ctx;
1884 int ret;
1885
1886 ret = sec_aead_init(tfm);
1887 if (ret) {
1888 pr_err("hisi_sec2: aead init error!\n");
1889 return ret;
1890 }
1891
1892 auth_ctx->hash_tfm = crypto_alloc_shash(hash_name, 0, 0);
1893 if (IS_ERR(auth_ctx->hash_tfm)) {
1894 dev_err(ctx->dev, "aead alloc shash error!\n");
1895 sec_aead_exit(tfm);
1896 return PTR_ERR(auth_ctx->hash_tfm);
1897 }
1898
1899 return 0;
1900 }
1901
1902 static void sec_aead_ctx_exit(struct crypto_aead *tfm)
1903 {
1904 struct sec_ctx *ctx = crypto_aead_ctx(tfm);
1905
1906 crypto_free_shash(ctx->a_ctx.hash_tfm);
1907 sec_aead_exit(tfm);
1908 }
1909
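/*
 * CCM/GCM ("xcm") contexts additionally allocate an asynchronous fallback
 * AEAD tfm. The fallback is used for inputs the hardware cannot handle,
 * e.g. zero-length payloads on Kunpeng920 (see sec_aead_param_check()).
 */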
1910 static int sec_aead_xcm_ctx_init(struct crypto_aead *tfm)
1911 {
1912 struct aead_alg *alg = crypto_aead_alg(tfm);
1913 struct sec_ctx *ctx = crypto_aead_ctx(tfm);
1914 struct sec_auth_ctx *a_ctx = &ctx->a_ctx;
1915 const char *aead_name = alg->base.cra_name;
1916 int ret;
1917
1918 ret = sec_aead_init(tfm);
1919 if (ret) {
1920 dev_err(ctx->dev, "hisi_sec2: aead xcm init error!\n");
1921 return ret;
1922 }
1923
1924 a_ctx->fallback_aead_tfm = crypto_alloc_aead(aead_name, 0,
1925 CRYPTO_ALG_NEED_FALLBACK |
1926 CRYPTO_ALG_ASYNC);
1927 if (IS_ERR(a_ctx->fallback_aead_tfm)) {
1928 dev_err(ctx->dev, "aead driver alloc fallback tfm error!\n");
1929 sec_aead_exit(tfm);
1930 return PTR_ERR(a_ctx->fallback_aead_tfm);
1931 }
1932 a_ctx->fallback = false;
1933
1934 return 0;
1935 }
1936
1937 static void sec_aead_xcm_ctx_exit(struct crypto_aead *tfm)
1938 {
1939 struct sec_ctx *ctx = crypto_aead_ctx(tfm);
1940
1941 crypto_free_aead(ctx->a_ctx.fallback_aead_tfm);
1942 sec_aead_exit(tfm);
1943 }
1944
1945 static int sec_aead_sha1_ctx_init(struct crypto_aead *tfm)
1946 {
1947 return sec_aead_ctx_init(tfm, "sha1");
1948 }
1949
1950 static int sec_aead_sha256_ctx_init(struct crypto_aead *tfm)
1951 {
1952 return sec_aead_ctx_init(tfm, "sha256");
1953 }
1954
1955 static int sec_aead_sha512_ctx_init(struct crypto_aead *tfm)
1956 {
1957 return sec_aead_ctx_init(tfm, "sha512");
1958 }
1959
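/*
 * Per-mode length constraints enforced below: XTS needs at least one AES
 * block, ECB/CBC need a multiple of the AES block size, and the stream
 * modes (CFB/OFB/CTR) are only offloaded on hardware v3 or later.
 */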
1961 static int sec_skcipher_cryptlen_check(struct sec_ctx *ctx,
1962 struct sec_req *sreq)
1963 {
1964 u32 cryptlen = sreq->c_req.sk_req->cryptlen;
1965 struct device *dev = ctx->dev;
1966 u8 c_mode = ctx->c_ctx.c_mode;
1967 int ret = 0;
1968
1969 switch (c_mode) {
1970 case SEC_CMODE_XTS:
1971 if (unlikely(cryptlen < AES_BLOCK_SIZE)) {
1972 dev_err(dev, "skcipher XTS mode input length error!\n");
1973 ret = -EINVAL;
1974 }
1975 break;
1976 case SEC_CMODE_ECB:
1977 case SEC_CMODE_CBC:
1978 if (unlikely(cryptlen & (AES_BLOCK_SIZE - 1))) {
1979 dev_err(dev, "skcipher AES input length error!\n");
1980 ret = -EINVAL;
1981 }
1982 break;
1983 case SEC_CMODE_CFB:
1984 case SEC_CMODE_OFB:
1985 case SEC_CMODE_CTR:
1986 if (unlikely(ctx->sec->qm.ver < QM_HW_V3)) {
1987 dev_err(dev, "skcipher HW version error!\n");
1988 ret = -EINVAL;
1989 }
1990 break;
1991 default:
1992 ret = -EINVAL;
1993 }
1994
1995 return ret;
1996 }
1997
1998 static int sec_skcipher_param_check(struct sec_ctx *ctx, struct sec_req *sreq)
1999 {
2000 struct skcipher_request *sk_req = sreq->c_req.sk_req;
2001 struct device *dev = ctx->dev;
2002 u8 c_alg = ctx->c_ctx.c_alg;
2003
2004 if (unlikely(!sk_req->src || !sk_req->dst ||
2005 sk_req->cryptlen > MAX_INPUT_DATA_LEN)) {
2006 dev_err(dev, "skcipher input param error!\n");
2007 return -EINVAL;
2008 }
2009 sreq->c_req.c_len = sk_req->cryptlen;
2010
2011 if (ctx->pbuf_supported && sk_req->cryptlen <= SEC_PBUF_SZ)
2012 sreq->use_pbuf = true;
2013 else
2014 sreq->use_pbuf = false;
2015
2016 if (c_alg == SEC_CALG_3DES) {
2017 if (unlikely(sk_req->cryptlen & (DES3_EDE_BLOCK_SIZE - 1))) {
2018 dev_err(dev, "skcipher 3des input length error!\n");
2019 return -EINVAL;
2020 }
2021 return 0;
2022 } else if (c_alg == SEC_CALG_AES || c_alg == SEC_CALG_SM4) {
2023 return sec_skcipher_cryptlen_check(ctx, sreq);
2024 }
2025
2026 dev_err(dev, "skcipher algorithm error!\n");
2027
2028 return -EINVAL;
2029 }
2030
2031 static int sec_skcipher_soft_crypto(struct sec_ctx *ctx,
2032 struct skcipher_request *sreq, bool encrypt)
2033 {
2034 struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;
2035 struct device *dev = ctx->dev;
2036 int ret;
2037
2038 SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, c_ctx->fbtfm);
2039
2040 if (!c_ctx->fbtfm) {
2041 dev_err(dev, "failed to check fallback tfm\n");
2042 return -EINVAL;
2043 }
2044
2045 skcipher_request_set_sync_tfm(subreq, c_ctx->fbtfm);
2046
2047 /* The software fallback must run in synchronous mode */
2048 skcipher_request_set_callback(subreq, sreq->base.flags,
2049 NULL, NULL);
2050 skcipher_request_set_crypt(subreq, sreq->src, sreq->dst,
2051 sreq->cryptlen, sreq->iv);
2052 if (encrypt)
2053 ret = crypto_skcipher_encrypt(subreq);
2054 else
2055 ret = crypto_skcipher_decrypt(subreq);
2056
2057 skcipher_request_zero(subreq);
2058
2059 return ret;
2060 }
2061
2062 static int sec_skcipher_crypto(struct skcipher_request *sk_req, bool encrypt)
2063 {
2064 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(sk_req);
2065 struct sec_req *req = skcipher_request_ctx(sk_req);
2066 struct sec_ctx *ctx = crypto_skcipher_ctx(tfm);
2067 int ret;
2068
2069 if (!sk_req->cryptlen) {
2070 if (ctx->c_ctx.c_mode == SEC_CMODE_XTS)
2071 return -EINVAL;
2072 return 0;
2073 }
2074
2075 req->flag = sk_req->base.flags;
2076 req->c_req.sk_req = sk_req;
2077 req->c_req.encrypt = encrypt;
2078 req->ctx = ctx;
2079
2080 ret = sec_skcipher_param_check(ctx, req);
2081 if (unlikely(ret))
2082 return -EINVAL;
2083
2084 if (unlikely(ctx->c_ctx.fallback))
2085 return sec_skcipher_soft_crypto(ctx, sk_req, encrypt);
2086
2087 return ctx->req_op->process(ctx, req);
2088 }
2089
2090 static int sec_skcipher_encrypt(struct skcipher_request *sk_req)
2091 {
2092 return sec_skcipher_crypto(sk_req, true);
2093 }
2094
2095 static int sec_skcipher_decrypt(struct skcipher_request *sk_req)
2096 {
2097 return sec_skcipher_crypto(sk_req, false);
2098 }
2099
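/*
 * Template for the skcipher algorithm table entries. Every algorithm is
 * registered with the "hisi_sec_" driver-name prefix and advertises
 * CRYPTO_ALG_NEED_FALLBACK, since requests the hardware cannot service
 * are handed to the software implementation (see sec_skcipher_crypto()).
 */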
2100 #define SEC_SKCIPHER_GEN_ALG(sec_cra_name, sec_set_key, sec_min_key_size, \
2101 sec_max_key_size, ctx_init, ctx_exit, blk_size, iv_size)\
2102 {\
2103 .base = {\
2104 .cra_name = sec_cra_name,\
2105 .cra_driver_name = "hisi_sec_"sec_cra_name,\
2106 .cra_priority = SEC_PRIORITY,\
2107 .cra_flags = CRYPTO_ALG_ASYNC |\
2108 CRYPTO_ALG_ALLOCATES_MEMORY |\
2109 CRYPTO_ALG_NEED_FALLBACK,\
2110 .cra_blocksize = blk_size,\
2111 .cra_ctxsize = sizeof(struct sec_ctx),\
2112 .cra_module = THIS_MODULE,\
2113 },\
2114 .init = ctx_init,\
2115 .exit = ctx_exit,\
2116 .setkey = sec_set_key,\
2117 .decrypt = sec_skcipher_decrypt,\
2118 .encrypt = sec_skcipher_encrypt,\
2119 .min_keysize = sec_min_key_size,\
2120 .max_keysize = sec_max_key_size,\
2121 .ivsize = iv_size,\
2122 },
2123
2124 #define SEC_SKCIPHER_ALG(name, key_func, min_key_size, \
2125 max_key_size, blk_size, iv_size) \
2126 SEC_SKCIPHER_GEN_ALG(name, key_func, min_key_size, max_key_size, \
2127 sec_skcipher_ctx_init, sec_skcipher_ctx_exit, blk_size, iv_size)
2128
2129 static struct skcipher_alg sec_skciphers[] = {
2130 SEC_SKCIPHER_ALG("ecb(aes)", sec_setkey_aes_ecb,
2131 AES_MIN_KEY_SIZE, AES_MAX_KEY_SIZE,
2132 AES_BLOCK_SIZE, 0)
2133
2134 SEC_SKCIPHER_ALG("cbc(aes)", sec_setkey_aes_cbc,
2135 AES_MIN_KEY_SIZE, AES_MAX_KEY_SIZE,
2136 AES_BLOCK_SIZE, AES_BLOCK_SIZE)
2137
2138 SEC_SKCIPHER_ALG("xts(aes)", sec_setkey_aes_xts,
2139 SEC_XTS_MIN_KEY_SIZE, SEC_XTS_MAX_KEY_SIZE,
2140 AES_BLOCK_SIZE, AES_BLOCK_SIZE)
2141
2142 SEC_SKCIPHER_ALG("ecb(des3_ede)", sec_setkey_3des_ecb,
2143 SEC_DES3_3KEY_SIZE, SEC_DES3_3KEY_SIZE,
2144 DES3_EDE_BLOCK_SIZE, 0)
2145
2146 SEC_SKCIPHER_ALG("cbc(des3_ede)", sec_setkey_3des_cbc,
2147 SEC_DES3_3KEY_SIZE, SEC_DES3_3KEY_SIZE,
2148 DES3_EDE_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE)
2149
2150 SEC_SKCIPHER_ALG("xts(sm4)", sec_setkey_sm4_xts,
2151 SEC_XTS_MIN_KEY_SIZE, SEC_XTS_MIN_KEY_SIZE,
2152 AES_BLOCK_SIZE, AES_BLOCK_SIZE)
2153
2154 SEC_SKCIPHER_ALG("cbc(sm4)", sec_setkey_sm4_cbc,
2155 AES_MIN_KEY_SIZE, AES_MIN_KEY_SIZE,
2156 AES_BLOCK_SIZE, AES_BLOCK_SIZE)
2157 };
2158
2159 static struct skcipher_alg sec_skciphers_v3[] = {
2160 SEC_SKCIPHER_ALG("ofb(aes)", sec_setkey_aes_ofb,
2161 AES_MIN_KEY_SIZE, AES_MAX_KEY_SIZE,
2162 SEC_MIN_BLOCK_SZ, AES_BLOCK_SIZE)
2163
2164 SEC_SKCIPHER_ALG("cfb(aes)", sec_setkey_aes_cfb,
2165 AES_MIN_KEY_SIZE, AES_MAX_KEY_SIZE,
2166 SEC_MIN_BLOCK_SZ, AES_BLOCK_SIZE)
2167
2168 SEC_SKCIPHER_ALG("ctr(aes)", sec_setkey_aes_ctr,
2169 AES_MIN_KEY_SIZE, AES_MAX_KEY_SIZE,
2170 SEC_MIN_BLOCK_SZ, AES_BLOCK_SIZE)
2171
2172 SEC_SKCIPHER_ALG("ofb(sm4)", sec_setkey_sm4_ofb,
2173 AES_MIN_KEY_SIZE, AES_MIN_KEY_SIZE,
2174 SEC_MIN_BLOCK_SZ, AES_BLOCK_SIZE)
2175
2176 SEC_SKCIPHER_ALG("cfb(sm4)", sec_setkey_sm4_cfb,
2177 AES_MIN_KEY_SIZE, AES_MIN_KEY_SIZE,
2178 SEC_MIN_BLOCK_SZ, AES_BLOCK_SIZE)
2179
2180 SEC_SKCIPHER_ALG("ctr(sm4)", sec_setkey_sm4_ctr,
2181 AES_MIN_KEY_SIZE, AES_MIN_KEY_SIZE,
2182 SEC_MIN_BLOCK_SZ, AES_BLOCK_SIZE)
2183 };
2184
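/*
 * CCM IV sanity check. iv[0] holds L' = L - 1, where L is the size in
 * octets of the CCM length field, so cl = iv[0] + 1 must lie in [2, 8].
 * For cl < 4 the payload length must also fit in cl bytes: e.g.
 * iv[0] = 1 gives cl = 2, capping cryptlen at 2^16 - 1.
 */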
2185 static int aead_iv_dimension_check(struct aead_request *aead_req)
2186 {
2187 u8 cl;
2188
2189 cl = aead_req->iv[0] + 1;
2190 if (cl < IV_CL_MIN || cl > IV_CL_MAX)
2191 return -EINVAL;
2192
2193 if (cl < IV_CL_MID && aead_req->cryptlen >> (BYTE_BITS * cl))
2194 return -EOVERFLOW;
2195
2196 return 0;
2197 }
2198
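/*
 * Validate the AEAD request against hardware limits: total input and AAD
 * lengths, the MAC length (GCM needs at least 8 bytes, written here as
 * DES_BLOCK_SIZE; CCM needs an even MAC of at least 4 bytes), and the
 * CCM IV layout. For decryption the MAC is excluded from c_len.
 */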
2199 static int sec_aead_spec_check(struct sec_ctx *ctx, struct sec_req *sreq)
2200 {
2201 struct aead_request *req = sreq->aead_req.aead_req;
2202 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2203 size_t authsize = crypto_aead_authsize(tfm);
2204 u8 c_mode = ctx->c_ctx.c_mode;
2205 struct device *dev = ctx->dev;
2206 int ret;
2207
2208 if (unlikely(req->cryptlen + req->assoclen > MAX_INPUT_DATA_LEN ||
2209 req->assoclen > SEC_MAX_AAD_LEN)) {
2210 dev_err(dev, "aead input spec error!\n");
2211 return -EINVAL;
2212 }
2213
2214 if (unlikely((c_mode == SEC_CMODE_GCM && authsize < DES_BLOCK_SIZE) ||
2215 (c_mode == SEC_CMODE_CCM && (authsize < MIN_MAC_LEN ||
2216 authsize & MAC_LEN_MASK)))) {
2217 dev_err(dev, "aead input mac length error!\n");
2218 return -EINVAL;
2219 }
2220
2221 if (c_mode == SEC_CMODE_CCM) {
2222 ret = aead_iv_dimension_check(req);
2223 if (ret) {
2224 dev_err(dev, "aead input iv param error!\n");
2225 return ret;
2226 }
2227 }
2228
2229 if (sreq->c_req.encrypt)
2230 sreq->c_req.c_len = req->cryptlen;
2231 else
2232 sreq->c_req.c_len = req->cryptlen - authsize;
2233 if (c_mode == SEC_CMODE_CBC) {
2234 if (unlikely(sreq->c_req.c_len & (AES_BLOCK_SIZE - 1))) {
2235 dev_err(dev, "aead crypto length error!\n");
2236 return -EINVAL;
2237 }
2238 }
2239
2240 return 0;
2241 }
2242
2243 static int sec_aead_param_check(struct sec_ctx *ctx, struct sec_req *sreq)
2244 {
2245 struct aead_request *req = sreq->aead_req.aead_req;
2246 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2247 size_t authsize = crypto_aead_authsize(tfm);
2248 struct device *dev = ctx->dev;
2249 u8 c_alg = ctx->c_ctx.c_alg;
2250
2251 if (unlikely(!req->src || !req->dst)) {
2252 dev_err(dev, "aead input param error!\n");
2253 return -EINVAL;
2254 }
2255
2256 if (ctx->sec->qm.ver == QM_HW_V2) {
2257 if (unlikely(!req->cryptlen || (!sreq->c_req.encrypt &&
2258 req->cryptlen <= authsize))) {
2259 dev_err(dev, "Kunpeng920 not support 0 length!\n");
2260 ctx->a_ctx.fallback = true;
2261 return -EINVAL;
2262 }
2263 }
2264
2265 /* Support AES or SM4 */
2266 if (unlikely(c_alg != SEC_CALG_AES && c_alg != SEC_CALG_SM4)) {
2267 dev_err(dev, "aead crypto alg error!\n");
2268 return -EINVAL;
2269 }
2270
2271 if (unlikely(sec_aead_spec_check(ctx, sreq)))
2272 return -EINVAL;
2273
2274 if (ctx->pbuf_supported && (req->cryptlen + req->assoclen) <=
2275 SEC_PBUF_SZ)
2276 sreq->use_pbuf = true;
2277 else
2278 sreq->use_pbuf = false;
2279
2280 return 0;
2281 }
2282
2283 static int sec_aead_soft_crypto(struct sec_ctx *ctx,
2284 struct aead_request *aead_req,
2285 bool encrypt)
2286 {
2287 struct sec_auth_ctx *a_ctx = &ctx->a_ctx;
2288 struct device *dev = ctx->dev;
2289 struct aead_request *subreq;
2290 int ret;
2291
2292 /* Kunpeng920 AEAD modes do not support zero-size input, so a fallback tfm is required */
2293 if (!a_ctx->fallback_aead_tfm) {
2294 dev_err(dev, "aead fallback tfm is NULL!\n");
2295 return -EINVAL;
2296 }
2297
2298 subreq = aead_request_alloc(a_ctx->fallback_aead_tfm, GFP_KERNEL);
2299 if (!subreq)
2300 return -ENOMEM;
2301
2302 aead_request_set_tfm(subreq, a_ctx->fallback_aead_tfm);
2303 aead_request_set_callback(subreq, aead_req->base.flags,
2304 aead_req->base.complete, aead_req->base.data);
2305 aead_request_set_crypt(subreq, aead_req->src, aead_req->dst,
2306 aead_req->cryptlen, aead_req->iv);
2307 aead_request_set_ad(subreq, aead_req->assoclen);
2308
2309 if (encrypt)
2310 ret = crypto_aead_encrypt(subreq);
2311 else
2312 ret = crypto_aead_decrypt(subreq);
2313 aead_request_free(subreq);
2314
2315 return ret;
2316 }
2317
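/*
 * AEAD entry point: on a parameter-check failure the request is either
 * rejected, or redirected to the software implementation when the
 * fallback flag was set (zero-length input on Kunpeng920).
 */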
2318 static int sec_aead_crypto(struct aead_request *a_req, bool encrypt)
2319 {
2320 struct crypto_aead *tfm = crypto_aead_reqtfm(a_req);
2321 struct sec_req *req = aead_request_ctx(a_req);
2322 struct sec_ctx *ctx = crypto_aead_ctx(tfm);
2323 int ret;
2324
2325 req->flag = a_req->base.flags;
2326 req->aead_req.aead_req = a_req;
2327 req->c_req.encrypt = encrypt;
2328 req->ctx = ctx;
2329
2330 ret = sec_aead_param_check(ctx, req);
2331 if (unlikely(ret)) {
2332 if (ctx->a_ctx.fallback)
2333 return sec_aead_soft_crypto(ctx, a_req, encrypt);
2334 return -EINVAL;
2335 }
2336
2337 return ctx->req_op->process(ctx, req);
2338 }
2339
2340 static int sec_aead_encrypt(struct aead_request *a_req)
2341 {
2342 return sec_aead_crypto(a_req, true);
2343 }
2344
2345 static int sec_aead_decrypt(struct aead_request *a_req)
2346 {
2347 return sec_aead_crypto(a_req, false);
2348 }
2349
2350 #define SEC_AEAD_ALG(sec_cra_name, sec_set_key, ctx_init,\
2351 ctx_exit, blk_size, iv_size, max_authsize)\
2352 {\
2353 .base = {\
2354 .cra_name = sec_cra_name,\
2355 .cra_driver_name = "hisi_sec_"sec_cra_name,\
2356 .cra_priority = SEC_PRIORITY,\
2357 .cra_flags = CRYPTO_ALG_ASYNC |\
2358 CRYPTO_ALG_ALLOCATES_MEMORY |\
2359 CRYPTO_ALG_NEED_FALLBACK,\
2360 .cra_blocksize = blk_size,\
2361 .cra_ctxsize = sizeof(struct sec_ctx),\
2362 .cra_module = THIS_MODULE,\
2363 },\
2364 .init = ctx_init,\
2365 .exit = ctx_exit,\
2366 .setkey = sec_set_key,\
2367 .setauthsize = sec_aead_setauthsize,\
2368 .decrypt = sec_aead_decrypt,\
2369 .encrypt = sec_aead_encrypt,\
2370 .ivsize = iv_size,\
2371 .maxauthsize = max_authsize,\
2372 }
2373
2374 static struct aead_alg sec_aeads[] = {
2375 SEC_AEAD_ALG("authenc(hmac(sha1),cbc(aes))",
2376 sec_setkey_aes_cbc_sha1, sec_aead_sha1_ctx_init,
2377 sec_aead_ctx_exit, AES_BLOCK_SIZE,
2378 AES_BLOCK_SIZE, SHA1_DIGEST_SIZE),
2379
2380 SEC_AEAD_ALG("authenc(hmac(sha256),cbc(aes))",
2381 sec_setkey_aes_cbc_sha256, sec_aead_sha256_ctx_init,
2382 sec_aead_ctx_exit, AES_BLOCK_SIZE,
2383 AES_BLOCK_SIZE, SHA256_DIGEST_SIZE),
2384
2385 SEC_AEAD_ALG("authenc(hmac(sha512),cbc(aes))",
2386 sec_setkey_aes_cbc_sha512, sec_aead_sha512_ctx_init,
2387 sec_aead_ctx_exit, AES_BLOCK_SIZE,
2388 AES_BLOCK_SIZE, SHA512_DIGEST_SIZE),
2389
2390 SEC_AEAD_ALG("ccm(aes)", sec_setkey_aes_ccm, sec_aead_xcm_ctx_init,
2391 sec_aead_xcm_ctx_exit, SEC_MIN_BLOCK_SZ,
2392 AES_BLOCK_SIZE, AES_BLOCK_SIZE),
2393
2394 SEC_AEAD_ALG("gcm(aes)", sec_setkey_aes_gcm, sec_aead_xcm_ctx_init,
2395 sec_aead_xcm_ctx_exit, SEC_MIN_BLOCK_SZ,
2396 SEC_AIV_SIZE, AES_BLOCK_SIZE)
2397 };
2398
2399 static struct aead_alg sec_aeads_v3[] = {
2400 SEC_AEAD_ALG("ccm(sm4)", sec_setkey_sm4_ccm, sec_aead_xcm_ctx_init,
2401 sec_aead_xcm_ctx_exit, SEC_MIN_BLOCK_SZ,
2402 AES_BLOCK_SIZE, AES_BLOCK_SIZE),
2403
2404 SEC_AEAD_ALG("gcm(sm4)", sec_setkey_sm4_gcm, sec_aead_xcm_ctx_init,
2405 sec_aead_xcm_ctx_exit, SEC_MIN_BLOCK_SZ,
2406 SEC_AIV_SIZE, AES_BLOCK_SIZE)
2407 };
2408
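/*
 * Registration order: base skciphers, v3-only skciphers, base AEADs,
 * v3-only AEADs. The v3 tables are registered only on hardware newer
 * than QM_HW_V2, and any failure unregisters everything already
 * registered, in reverse order.
 */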
2409 int sec_register_to_crypto(struct hisi_qm *qm)
2410 {
2411 int ret;
2412
2413 /* To avoid repeated registration */
2414 ret = crypto_register_skciphers(sec_skciphers,
2415 ARRAY_SIZE(sec_skciphers));
2416 if (ret)
2417 return ret;
2418
2419 if (qm->ver > QM_HW_V2) {
2420 ret = crypto_register_skciphers(sec_skciphers_v3,
2421 ARRAY_SIZE(sec_skciphers_v3));
2422 if (ret)
2423 goto reg_skcipher_fail;
2424 }
2425
2426 ret = crypto_register_aeads(sec_aeads, ARRAY_SIZE(sec_aeads));
2427 if (ret)
2428 goto reg_aead_fail;
2429 if (qm->ver > QM_HW_V2) {
2430 ret = crypto_register_aeads(sec_aeads_v3, ARRAY_SIZE(sec_aeads_v3));
2431 if (ret)
2432 goto reg_aead_v3_fail;
2433 }
2434 return ret;
2435
2436 reg_aead_v3_fail:
2437 crypto_unregister_aeads(sec_aeads, ARRAY_SIZE(sec_aeads));
2438 reg_aead_fail:
2439 if (qm->ver > QM_HW_V2)
2440 crypto_unregister_skciphers(sec_skciphers_v3,
2441 ARRAY_SIZE(sec_skciphers_v3));
2442 reg_skcipher_fail:
2443 crypto_unregister_skciphers(sec_skciphers,
2444 ARRAY_SIZE(sec_skciphers));
2445 return ret;
2446 }
2447
2448 void sec_unregister_from_crypto(struct hisi_qm *qm)
2449 {
2450 if (qm->ver > QM_HW_V2)
2451 crypto_unregister_aeads(sec_aeads_v3,
2452 ARRAY_SIZE(sec_aeads_v3));
2453 crypto_unregister_aeads(sec_aeads, ARRAY_SIZE(sec_aeads));
2454
2455 if (qm->ver > QM_HW_V2)
2456 crypto_unregister_skciphers(sec_skciphers_v3,
2457 ARRAY_SIZE(sec_skciphers_v3));
2458 crypto_unregister_skciphers(sec_skciphers,
2459 ARRAY_SIZE(sec_skciphers));
2460 }
2461