// SPDX-License-Identifier: GPL-2.0-only

/*
 * Copyright (C) 2016 Cavium, Inc.
 */

#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <crypto/authenc.h>
#include <crypto/internal/des.h>
#include <crypto/xts.h>
#include <linux/crypto.h>
#include <linux/err.h>
#include <linux/list.h>
#include <linux/scatterlist.h>

#include "cptvf.h"
#include "cptvf_algs.h"

struct cpt_device_handle {
	void *cdev[MAX_DEVICES];
	u32 dev_count;
};

static struct cpt_device_handle dev_handle;

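/*
 * Completion callback registered with the CPT request layer: it hands the
 * hardware completion status back to the crypto API by completing the
 * originating asynchronous request.
 */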
static void cvm_callback(u32 status, void *arg)
{
	struct crypto_async_request *req = (struct crypto_async_request *)arg;

	req->complete(req, !status);
}

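/*
 * The helpers below build the CPT gather (in[]) and scatter (out[])
 * component lists for a request; req.dlen and rlen accumulate the total
 * input and output byte counts as entries are added.
 */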
static inline void update_input_iv(struct cpt_request_info *req_info,
				   u8 *iv, u32 enc_iv_len,
				   u32 *argcnt)
{
	/* Setting the IV information */
	req_info->in[*argcnt].vptr = (void *)iv;
	req_info->in[*argcnt].size = enc_iv_len;
	req_info->req.dlen += enc_iv_len;

	++(*argcnt);
}

static inline void update_output_iv(struct cpt_request_info *req_info,
				    u8 *iv, u32 enc_iv_len,
				    u32 *argcnt)
{
	/* Setting the IV information */
	req_info->out[*argcnt].vptr = (void *)iv;
	req_info->out[*argcnt].size = enc_iv_len;
	req_info->rlen += enc_iv_len;

	++(*argcnt);
}

static inline void update_input_data(struct cpt_request_info *req_info,
				     struct scatterlist *inp_sg,
				     u32 nbytes, u32 *argcnt)
{
	req_info->req.dlen += nbytes;

	while (nbytes) {
		u32 len = min(nbytes, inp_sg->length);
		u8 *ptr = sg_virt(inp_sg);

		req_info->in[*argcnt].vptr = (void *)ptr;
		req_info->in[*argcnt].size = len;
		nbytes -= len;

		++(*argcnt);
		++inp_sg;
	}
}

static inline void update_output_data(struct cpt_request_info *req_info,
				      struct scatterlist *outp_sg,
				      u32 nbytes, u32 *argcnt)
{
	req_info->rlen += nbytes;

	while (nbytes) {
		u32 len = min(nbytes, outp_sg->length);
		u8 *ptr = sg_virt(outp_sg);

		req_info->out[*argcnt].vptr = (void *)ptr;
		req_info->out[*argcnt].size = len;
		nbytes -= len;
		++(*argcnt);
		++outp_sg;
	}
}

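/*
 * Fill in the request control fields and the per-request FC context (fctx):
 * cipher type, key type and key material. The offset control word and the
 * FC context itself become the first two gather-list entries.
 */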
static inline u32 create_ctx_hdr(struct skcipher_request *req, u32 enc,
				 u32 *argcnt)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct cvm_enc_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct cvm_req_ctx *rctx = skcipher_request_ctx(req);
	struct fc_context *fctx = &rctx->fctx;
	u32 enc_iv_len = crypto_skcipher_ivsize(tfm);
	struct cpt_request_info *req_info = &rctx->cpt_req;
	__be64 *ctrl_flags = NULL;
	__be64 *offset_control;

	req_info->ctrl.s.grp = 0;
	req_info->ctrl.s.dma_mode = DMA_GATHER_SCATTER;
	req_info->ctrl.s.se_req = SE_CORE_REQ;

	req_info->req.opcode.s.major = MAJOR_OP_FC |
				       DMA_MODE_FLAG(DMA_GATHER_SCATTER);
	if (enc)
		req_info->req.opcode.s.minor = 2;
	else
		req_info->req.opcode.s.minor = 3;

	req_info->req.param1 = req->cryptlen; /* Encryption data length */
	req_info->req.param2 = 0; /* Auth data length */

	fctx->enc.enc_ctrl.e.enc_cipher = ctx->cipher_type;
	fctx->enc.enc_ctrl.e.aes_key = ctx->key_type;
	fctx->enc.enc_ctrl.e.iv_source = FROM_DPTR;

	if (ctx->cipher_type == AES_XTS)
		memcpy(fctx->enc.encr_key, ctx->enc_key, ctx->key_len * 2);
	else
		memcpy(fctx->enc.encr_key, ctx->enc_key, ctx->key_len);
	ctrl_flags = (__be64 *)&fctx->enc.enc_ctrl.flags;
	*ctrl_flags = cpu_to_be64(fctx->enc.enc_ctrl.flags);

	offset_control = (__be64 *)&rctx->control_word;
	*offset_control = cpu_to_be64(((u64)(enc_iv_len) << 16));
	/* Store the packet data information in the first 8 bytes of the
	 * offset control word.
	 */
	req_info->in[*argcnt].vptr = (u8 *)offset_control;
	req_info->in[*argcnt].size = CONTROL_WORD_LEN;
	req_info->req.dlen += CONTROL_WORD_LEN;
	++(*argcnt);

	req_info->in[*argcnt].vptr = (u8 *)fctx;
	req_info->in[*argcnt].size = sizeof(struct fc_context);
	req_info->req.dlen += sizeof(struct fc_context);

	++(*argcnt);

	return 0;
}

static inline u32 create_input_list(struct skcipher_request *req, u32 enc,
				    u32 enc_iv_len)
{
	struct cvm_req_ctx *rctx = skcipher_request_ctx(req);
	struct cpt_request_info *req_info = &rctx->cpt_req;
	u32 argcnt = 0;

	create_ctx_hdr(req, enc, &argcnt);
	update_input_iv(req_info, req->iv, enc_iv_len, &argcnt);
	update_input_data(req_info, req->src, req->cryptlen, &argcnt);
	req_info->incnt = argcnt;

	return 0;
}

static inline void store_cb_info(struct skcipher_request *req,
				 struct cpt_request_info *req_info)
{
	req_info->callback = (void *)cvm_callback;
	req_info->callback_arg = (void *)&req->base;
}

static inline void create_output_list(struct skcipher_request *req,
				      u32 enc_iv_len)
{
	struct cvm_req_ctx *rctx = skcipher_request_ctx(req);
	struct cpt_request_info *req_info = &rctx->cpt_req;
	u32 argcnt = 0;

	/* OUTPUT buffer processing
	 * The AES encryption/decryption output is received in the
	 * following format:
	 *
	 * |-------- IV --------|---- ENCRYPTED/DECRYPTED DATA ----|
	 * |  enc_iv_len bytes  |       req->cryptlen bytes        |
	 */
	/* Reading IV information */
	update_output_iv(req_info, req->iv, enc_iv_len, &argcnt);
	update_output_data(req_info, req->dst, req->cryptlen, &argcnt);
	req_info->outcnt = argcnt;
}

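/*
 * Common encrypt/decrypt path: build the input and output component lists,
 * record the completion callback, and submit the request to the CPT VF
 * indexed by the current CPU. Returning -EINPROGRESS tells the crypto API
 * that completion is asynchronous.
 */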
static inline int cvm_enc_dec(struct skcipher_request *req, u32 enc)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct cvm_req_ctx *rctx = skcipher_request_ctx(req);
	u32 enc_iv_len = crypto_skcipher_ivsize(tfm);
	struct fc_context *fctx = &rctx->fctx;
	struct cpt_request_info *req_info = &rctx->cpt_req;
	void *cdev = NULL;
	int status;

	memset(req_info, 0, sizeof(struct cpt_request_info));
	req_info->may_sleep = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) != 0;
	memset(fctx, 0, sizeof(struct fc_context));
	create_input_list(req, enc, enc_iv_len);
	create_output_list(req, enc_iv_len);
	store_cb_info(req, req_info);
	cdev = dev_handle.cdev[smp_processor_id()];
	status = cptvf_do_request(cdev, req_info);
	/* The request is sent asynchronously; once it completes, the
	 * driver notifies us through the registered callback.
	 */

	if (status)
		return status;
	else
		return -EINPROGRESS;
}

static int cvm_encrypt(struct skcipher_request *req)
{
	return cvm_enc_dec(req, true);
}

static int cvm_decrypt(struct skcipher_request *req)
{
	return cvm_enc_dec(req, false);
}

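/*
 * XTS keys arrive as two concatenated half-keys; store them at offset 0 and
 * at KEY2_OFFSET in the context key buffer. key_type reflects the AES
 * variant of each half, so a 32-byte XTS key maps to AES_128_BIT and a
 * 64-byte key to AES_256_BIT.
 */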
static int cvm_xts_setkey(struct crypto_skcipher *cipher, const u8 *key,
			  u32 keylen)
{
	struct crypto_tfm *tfm = crypto_skcipher_tfm(cipher);
	struct cvm_enc_ctx *ctx = crypto_tfm_ctx(tfm);
	int err;
	const u8 *key1 = key;
	const u8 *key2 = key + (keylen / 2);

	err = xts_check_key(tfm, key, keylen);
	if (err)
		return err;
	ctx->key_len = keylen;
	memcpy(ctx->enc_key, key1, keylen / 2);
	memcpy(ctx->enc_key + KEY2_OFFSET, key2, keylen / 2);
	ctx->cipher_type = AES_XTS;
	switch (ctx->key_len) {
	case 32:
		ctx->key_type = AES_128_BIT;
		break;
	case 64:
		ctx->key_type = AES_256_BIT;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

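/*
 * Map the AES key length to the corresponding key_type. For DES3_CBC the
 * key_type field is cleared, presumably because it only describes AES key
 * sizes to the hardware.
 */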
static int cvm_validate_keylen(struct cvm_enc_ctx *ctx, u32 keylen)
{
	if ((keylen == 16) || (keylen == 24) || (keylen == 32)) {
		ctx->key_len = keylen;
		switch (ctx->key_len) {
		case 16:
			ctx->key_type = AES_128_BIT;
			break;
		case 24:
			ctx->key_type = AES_192_BIT;
			break;
		case 32:
			ctx->key_type = AES_256_BIT;
			break;
		default:
			return -EINVAL;
		}

		if (ctx->cipher_type == DES3_CBC)
			ctx->key_type = 0;

		return 0;
	}

	return -EINVAL;
}

static int cvm_setkey(struct crypto_skcipher *cipher, const u8 *key,
		      u32 keylen, u8 cipher_type)
{
	struct crypto_tfm *tfm = crypto_skcipher_tfm(cipher);
	struct cvm_enc_ctx *ctx = crypto_tfm_ctx(tfm);

	ctx->cipher_type = cipher_type;
	if (!cvm_validate_keylen(ctx, keylen)) {
		memcpy(ctx->enc_key, key, keylen);
		return 0;
	} else {
		return -EINVAL;
	}
}

static int cvm_cbc_aes_setkey(struct crypto_skcipher *cipher, const u8 *key,
			      u32 keylen)
{
	return cvm_setkey(cipher, key, keylen, AES_CBC);
}

static int cvm_ecb_aes_setkey(struct crypto_skcipher *cipher, const u8 *key,
			      u32 keylen)
{
	return cvm_setkey(cipher, key, keylen, AES_ECB);
}

static int cvm_cfb_aes_setkey(struct crypto_skcipher *cipher, const u8 *key,
			      u32 keylen)
{
	return cvm_setkey(cipher, key, keylen, AES_CFB);
}

static int cvm_cbc_des3_setkey(struct crypto_skcipher *cipher, const u8 *key,
			       u32 keylen)
{
	return verify_skcipher_des3_key(cipher, key) ?:
	       cvm_setkey(cipher, key, keylen, DES3_CBC);
}

static int cvm_ecb_des3_setkey(struct crypto_skcipher *cipher, const u8 *key,
			       u32 keylen)
{
	return verify_skcipher_des3_key(cipher, key) ?:
	       cvm_setkey(cipher, key, keylen, DES3_ECB);
}

static int cvm_enc_dec_init(struct crypto_skcipher *tfm)
{
	crypto_skcipher_set_reqsize(tfm, sizeof(struct cvm_req_ctx));

	return 0;
}

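/*
 * Algorithm registrations. All entries are marked CRYPTO_ALG_ASYNC and use
 * a priority of 4001, which is intended to rank the CPT implementations
 * above lower-priority software equivalents.
 */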
static struct skcipher_alg algs[] = { {
	.base.cra_flags = CRYPTO_ALG_ASYNC |
			  CRYPTO_ALG_ALLOCATES_MEMORY,
	.base.cra_blocksize = AES_BLOCK_SIZE,
	.base.cra_ctxsize = sizeof(struct cvm_enc_ctx),
	.base.cra_alignmask = 7,
	.base.cra_priority = 4001,
	.base.cra_name = "xts(aes)",
	.base.cra_driver_name = "cavium-xts-aes",
	.base.cra_module = THIS_MODULE,

	.ivsize = AES_BLOCK_SIZE,
	.min_keysize = 2 * AES_MIN_KEY_SIZE,
	.max_keysize = 2 * AES_MAX_KEY_SIZE,
	.setkey = cvm_xts_setkey,
	.encrypt = cvm_encrypt,
	.decrypt = cvm_decrypt,
	.init = cvm_enc_dec_init,
}, {
	.base.cra_flags = CRYPTO_ALG_ASYNC |
			  CRYPTO_ALG_ALLOCATES_MEMORY,
	.base.cra_blocksize = AES_BLOCK_SIZE,
	.base.cra_ctxsize = sizeof(struct cvm_enc_ctx),
	.base.cra_alignmask = 7,
	.base.cra_priority = 4001,
	.base.cra_name = "cbc(aes)",
	.base.cra_driver_name = "cavium-cbc-aes",
	.base.cra_module = THIS_MODULE,

	.ivsize = AES_BLOCK_SIZE,
	.min_keysize = AES_MIN_KEY_SIZE,
	.max_keysize = AES_MAX_KEY_SIZE,
	.setkey = cvm_cbc_aes_setkey,
	.encrypt = cvm_encrypt,
	.decrypt = cvm_decrypt,
	.init = cvm_enc_dec_init,
}, {
	.base.cra_flags = CRYPTO_ALG_ASYNC |
			  CRYPTO_ALG_ALLOCATES_MEMORY,
	.base.cra_blocksize = AES_BLOCK_SIZE,
	.base.cra_ctxsize = sizeof(struct cvm_enc_ctx),
	.base.cra_alignmask = 7,
	.base.cra_priority = 4001,
	.base.cra_name = "ecb(aes)",
	.base.cra_driver_name = "cavium-ecb-aes",
	.base.cra_module = THIS_MODULE,

	.min_keysize = AES_MIN_KEY_SIZE,
	.max_keysize = AES_MAX_KEY_SIZE,
	.setkey = cvm_ecb_aes_setkey,
	.encrypt = cvm_encrypt,
	.decrypt = cvm_decrypt,
	.init = cvm_enc_dec_init,
}, {
	.base.cra_flags = CRYPTO_ALG_ASYNC |
			  CRYPTO_ALG_ALLOCATES_MEMORY,
	.base.cra_blocksize = AES_BLOCK_SIZE,
	.base.cra_ctxsize = sizeof(struct cvm_enc_ctx),
	.base.cra_alignmask = 7,
	.base.cra_priority = 4001,
	.base.cra_name = "cfb(aes)",
	.base.cra_driver_name = "cavium-cfb-aes",
	.base.cra_module = THIS_MODULE,

	.ivsize = AES_BLOCK_SIZE,
	.min_keysize = AES_MIN_KEY_SIZE,
	.max_keysize = AES_MAX_KEY_SIZE,
	.setkey = cvm_cfb_aes_setkey,
	.encrypt = cvm_encrypt,
	.decrypt = cvm_decrypt,
	.init = cvm_enc_dec_init,
}, {
	.base.cra_flags = CRYPTO_ALG_ASYNC |
			  CRYPTO_ALG_ALLOCATES_MEMORY,
	.base.cra_blocksize = DES3_EDE_BLOCK_SIZE,
	.base.cra_ctxsize = sizeof(struct cvm_des3_ctx),
	.base.cra_alignmask = 7,
	.base.cra_priority = 4001,
	.base.cra_name = "cbc(des3_ede)",
	.base.cra_driver_name = "cavium-cbc-des3_ede",
	.base.cra_module = THIS_MODULE,

	.min_keysize = DES3_EDE_KEY_SIZE,
	.max_keysize = DES3_EDE_KEY_SIZE,
	.ivsize = DES_BLOCK_SIZE,
	.setkey = cvm_cbc_des3_setkey,
	.encrypt = cvm_encrypt,
	.decrypt = cvm_decrypt,
	.init = cvm_enc_dec_init,
}, {
	.base.cra_flags = CRYPTO_ALG_ASYNC |
			  CRYPTO_ALG_ALLOCATES_MEMORY,
	.base.cra_blocksize = DES3_EDE_BLOCK_SIZE,
	.base.cra_ctxsize = sizeof(struct cvm_des3_ctx),
	.base.cra_alignmask = 7,
	.base.cra_priority = 4001,
	.base.cra_name = "ecb(des3_ede)",
	.base.cra_driver_name = "cavium-ecb-des3_ede",
	.base.cra_module = THIS_MODULE,

	.min_keysize = DES3_EDE_KEY_SIZE,
	.max_keysize = DES3_EDE_KEY_SIZE,
	.ivsize = DES_BLOCK_SIZE,
	.setkey = cvm_ecb_des3_setkey,
	.encrypt = cvm_encrypt,
	.decrypt = cvm_decrypt,
	.init = cvm_enc_dec_init,
} };

static inline int cav_register_algs(void)
{
	return crypto_register_skciphers(algs, ARRAY_SIZE(algs));
}

static inline void cav_unregister_algs(void)
{
	crypto_unregister_skciphers(algs, ARRAY_SIZE(algs));
}

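/*
 * Called once per probed VF. The algorithms are only registered when the
 * fourth VF shows up (dev_count == 3), presumably so that enough devices
 * exist for the per-CPU indexing done in cvm_enc_dec().
 */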
int cvm_crypto_init(struct cpt_vf *cptvf)
{
	struct pci_dev *pdev = cptvf->pdev;
	u32 dev_count;

	dev_count = dev_handle.dev_count;
	dev_handle.cdev[dev_count] = cptvf;
	dev_handle.dev_count++;

	if (dev_count == 3) {
		if (cav_register_algs()) {
			dev_err(&pdev->dev, "Error in registering crypto algorithms\n");
			return -EINVAL;
		}
	}

	return 0;
}

void cvm_crypto_exit(void)
{
	u32 dev_count;

	dev_count = --dev_handle.dev_count;
	if (!dev_count)
		cav_unregister_algs();
}