1 /*
2 This file is provided under a dual BSD/GPLv2 license. When using or
3 redistributing this file, you may do so under either license.
4
5 GPL LICENSE SUMMARY
6 Copyright(c) 2014 Intel Corporation.
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of version 2 of the GNU General Public License as
9 published by the Free Software Foundation.
10
11 This program is distributed in the hope that it will be useful, but
12 WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 General Public License for more details.
15
16 Contact Information:
17 qat-linux@intel.com
18
19 BSD LICENSE
20 Copyright(c) 2014 Intel Corporation.
21 Redistribution and use in source and binary forms, with or without
22 modification, are permitted provided that the following conditions
23 are met:
24
25 * Redistributions of source code must retain the above copyright
26 notice, this list of conditions and the following disclaimer.
27 * Redistributions in binary form must reproduce the above copyright
28 notice, this list of conditions and the following disclaimer in
29 the documentation and/or other materials provided with the
30 distribution.
31 * Neither the name of Intel Corporation nor the names of its
32 contributors may be used to endorse or promote products derived
33 from this software without specific prior written permission.
34
35 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
36 "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
37 LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
38 A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
39 OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
40 SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
41 LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
42 DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
43 THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
44 (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
45 OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
46 */
47 #include <linux/module.h>
48 #include <linux/slab.h>
49 #include <linux/crypto.h>
50 #include <crypto/internal/aead.h>
51 #include <crypto/aes.h>
52 #include <crypto/sha.h>
53 #include <crypto/hash.h>
54 #include <crypto/hmac.h>
55 #include <crypto/algapi.h>
56 #include <crypto/authenc.h>
57 #include <linux/dma-mapping.h>
58 #include "adf_accel_devices.h"
59 #include "adf_transport.h"
60 #include "adf_common_drv.h"
61 #include "qat_crypto.h"
62 #include "icp_qat_hw.h"
63 #include "icp_qat_fw.h"
64 #include "icp_qat_fw_la.h"
65
66 #define QAT_AES_HW_CONFIG_ENC(alg, mode) \
67 ICP_QAT_HW_CIPHER_CONFIG_BUILD(mode, alg, \
68 ICP_QAT_HW_CIPHER_NO_CONVERT, \
69 ICP_QAT_HW_CIPHER_ENCRYPT)
70
71 #define QAT_AES_HW_CONFIG_DEC(alg, mode) \
72 ICP_QAT_HW_CIPHER_CONFIG_BUILD(mode, alg, \
73 ICP_QAT_HW_CIPHER_KEY_CONVERT, \
74 ICP_QAT_HW_CIPHER_DECRYPT)
75
76 static DEFINE_MUTEX(algs_lock);
77 static unsigned int active_devs;
78
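/*
 * Flat buffer-list format passed to the firmware in place of a kernel
 * scatterlist: a 64-byte aligned header followed by an array of
 * {len, addr} buffer descriptors, one per DMA-mapped segment.  Built by
 * qat_alg_sgl_to_bufl() and torn down by qat_alg_free_bufl().
 */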
79 struct qat_alg_buf {
80 uint32_t len;
81 uint32_t resrvd;
82 uint64_t addr;
83 } __packed;
84
85 struct qat_alg_buf_list {
86 uint64_t resrvd;
87 uint32_t num_bufs;
88 uint32_t num_mapped_bufs;
89 struct qat_alg_buf bufers[];
90 } __packed __aligned(64);
91
92 /* Common content descriptor */
93 struct qat_alg_cd {
94 union {
95 struct qat_enc { /* Encrypt content desc */
96 struct icp_qat_hw_cipher_algo_blk cipher;
97 struct icp_qat_hw_auth_algo_blk hash;
98 } qat_enc_cd;
99 struct qat_dec { /* Decrypt content desc */
100 struct icp_qat_hw_auth_algo_blk hash;
101 struct icp_qat_hw_cipher_algo_blk cipher;
102 } qat_dec_cd;
103 };
104 } __aligned(64);
105
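/*
 * Per-transform AEAD context: DMA-coherent encrypt/decrypt content
 * descriptors, prebuilt firmware request templates that get copied and
 * patched for each request, and the software shash used for the HMAC
 * precomputes.
 */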
106 struct qat_alg_aead_ctx {
107 struct qat_alg_cd *enc_cd;
108 struct qat_alg_cd *dec_cd;
109 dma_addr_t enc_cd_paddr;
110 dma_addr_t dec_cd_paddr;
111 struct icp_qat_fw_la_bulk_req enc_fw_req;
112 struct icp_qat_fw_la_bulk_req dec_fw_req;
113 struct crypto_shash *hash_tfm;
114 enum icp_qat_hw_auth_algo qat_hash_alg;
115 struct qat_crypto_instance *inst;
116 };
117
118 struct qat_alg_ablkcipher_ctx {
119 struct icp_qat_hw_cipher_algo_blk *enc_cd;
120 struct icp_qat_hw_cipher_algo_blk *dec_cd;
121 dma_addr_t enc_cd_paddr;
122 dma_addr_t dec_cd_paddr;
123 struct icp_qat_fw_la_bulk_req enc_fw_req;
124 struct icp_qat_fw_la_bulk_req dec_fw_req;
125 struct qat_crypto_instance *inst;
126 struct crypto_tfm *tfm;
127 spinlock_t lock; /* protects qat_alg_ablkcipher_ctx struct */
128 };
129
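/* Size of the hardware hash state1 block for the given auth algorithm. */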
130 static int qat_get_inter_state_size(enum icp_qat_hw_auth_algo qat_hash_alg)
131 {
132 switch (qat_hash_alg) {
133 case ICP_QAT_HW_AUTH_ALGO_SHA1:
134 return ICP_QAT_HW_SHA1_STATE1_SZ;
135 case ICP_QAT_HW_AUTH_ALGO_SHA256:
136 return ICP_QAT_HW_SHA256_STATE1_SZ;
137 case ICP_QAT_HW_AUTH_ALGO_SHA512:
138 return ICP_QAT_HW_SHA512_STATE1_SZ;
139 default:
140 return -EFAULT;
141 }
142 return -EFAULT;
143 }
144
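/*
 * HMAC precompute: derive the inner and outer hash states in software so
 * the hardware only has to continue the hash over the data.  The key
 * (hashed first if longer than a block) is xored with the ipad/opad
 * constants, one block of each is run through the shash, and the
 * exported intermediate states are byte-swapped to big endian into
 * state1 (inner) and state1 + rounded state size (outer) of the auth
 * block.
 */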
145 static int qat_alg_do_precomputes(struct icp_qat_hw_auth_algo_blk *hash,
146 struct qat_alg_aead_ctx *ctx,
147 const uint8_t *auth_key,
148 unsigned int auth_keylen)
149 {
150 SHASH_DESC_ON_STACK(shash, ctx->hash_tfm);
151 struct sha1_state sha1;
152 struct sha256_state sha256;
153 struct sha512_state sha512;
154 int block_size = crypto_shash_blocksize(ctx->hash_tfm);
155 int digest_size = crypto_shash_digestsize(ctx->hash_tfm);
156 char ipad[block_size];
157 char opad[block_size];
158 __be32 *hash_state_out;
159 __be64 *hash512_state_out;
160 int i, offset;
161
162 memset(ipad, 0, block_size);
163 memset(opad, 0, block_size);
164 shash->tfm = ctx->hash_tfm;
165 shash->flags = 0x0;
166
167 if (auth_keylen > block_size) {
168 int ret = crypto_shash_digest(shash, auth_key,
169 auth_keylen, ipad);
170 if (ret)
171 return ret;
172
173 memcpy(opad, ipad, digest_size);
174 } else {
175 memcpy(ipad, auth_key, auth_keylen);
176 memcpy(opad, auth_key, auth_keylen);
177 }
178
179 for (i = 0; i < block_size; i++) {
180 char *ipad_ptr = ipad + i;
181 char *opad_ptr = opad + i;
182 *ipad_ptr ^= HMAC_IPAD_VALUE;
183 *opad_ptr ^= HMAC_OPAD_VALUE;
184 }
185
186 if (crypto_shash_init(shash))
187 return -EFAULT;
188
189 if (crypto_shash_update(shash, ipad, block_size))
190 return -EFAULT;
191
192 hash_state_out = (__be32 *)hash->sha.state1;
193 hash512_state_out = (__be64 *)hash_state_out;
194
195 switch (ctx->qat_hash_alg) {
196 case ICP_QAT_HW_AUTH_ALGO_SHA1:
197 if (crypto_shash_export(shash, &sha1))
198 return -EFAULT;
199 for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
200 *hash_state_out = cpu_to_be32(*(sha1.state + i));
201 break;
202 case ICP_QAT_HW_AUTH_ALGO_SHA256:
203 if (crypto_shash_export(shash, &sha256))
204 return -EFAULT;
205 for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
206 *hash_state_out = cpu_to_be32(*(sha256.state + i));
207 break;
208 case ICP_QAT_HW_AUTH_ALGO_SHA512:
209 if (crypto_shash_export(shash, &sha512))
210 return -EFAULT;
211 for (i = 0; i < digest_size >> 3; i++, hash512_state_out++)
212 *hash512_state_out = cpu_to_be64(*(sha512.state + i));
213 break;
214 default:
215 return -EFAULT;
216 }
217
218 if (crypto_shash_init(shash))
219 return -EFAULT;
220
221 if (crypto_shash_update(shash, opad, block_size))
222 return -EFAULT;
223
224 offset = round_up(qat_get_inter_state_size(ctx->qat_hash_alg), 8);
225 hash_state_out = (__be32 *)(hash->sha.state1 + offset);
226 hash512_state_out = (__be64 *)hash_state_out;
227
228 switch (ctx->qat_hash_alg) {
229 case ICP_QAT_HW_AUTH_ALGO_SHA1:
230 if (crypto_shash_export(shash, &sha1))
231 return -EFAULT;
232 for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
233 *hash_state_out = cpu_to_be32(*(sha1.state + i));
234 break;
235 case ICP_QAT_HW_AUTH_ALGO_SHA256:
236 if (crypto_shash_export(shash, &sha256))
237 return -EFAULT;
238 for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
239 *hash_state_out = cpu_to_be32(*(sha256.state + i));
240 break;
241 case ICP_QAT_HW_AUTH_ALGO_SHA512:
242 if (crypto_shash_export(shash, &sha512))
243 return -EFAULT;
244 for (i = 0; i < digest_size >> 3; i++, hash512_state_out++)
245 *hash512_state_out = cpu_to_be64(*(sha512.state + i));
246 break;
247 default:
248 return -EFAULT;
249 }
250 memzero_explicit(ipad, block_size);
251 memzero_explicit(opad, block_size);
252 return 0;
253 }
254
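/*
 * Fill the request header fields common to all sessions: LA service,
 * SGL pointer type, 16-byte IV field, no partial packets, no protocol
 * offload and no stateful updates.
 */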
255 static void qat_alg_init_common_hdr(struct icp_qat_fw_comn_req_hdr *header)
256 {
257 header->hdr_flags =
258 ICP_QAT_FW_COMN_HDR_FLAGS_BUILD(ICP_QAT_FW_COMN_REQ_FLAG_SET);
259 header->service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_LA;
260 header->comn_req_flags =
261 ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_CD_FLD_TYPE_64BIT_ADR,
262 QAT_COMN_PTR_TYPE_SGL);
263 ICP_QAT_FW_LA_PARTIAL_SET(header->serv_specif_flags,
264 ICP_QAT_FW_LA_PARTIAL_NONE);
265 ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(header->serv_specif_flags,
266 ICP_QAT_FW_CIPH_IV_16BYTE_DATA);
267 ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
268 ICP_QAT_FW_LA_NO_PROTO);
269 ICP_QAT_FW_LA_UPDATE_STATE_SET(header->serv_specif_flags,
270 ICP_QAT_FW_LA_NO_UPDATE_STATE);
271 }
272
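/*
 * Build the encrypt session.  The content descriptor holds the cipher
 * config and AES key with the auth setup and precomputed HMAC states
 * right behind them; the request template chains the cipher slice into
 * the auth slice (CIPHER_HASH) and asks for the digest to be returned
 * in the data buffer.
 */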
273 static int qat_alg_aead_init_enc_session(struct crypto_aead *aead_tfm,
274 int alg,
275 struct crypto_authenc_keys *keys,
276 int mode)
277 {
278 struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(aead_tfm);
279 unsigned int digestsize = crypto_aead_authsize(aead_tfm);
280 struct qat_enc *enc_ctx = &ctx->enc_cd->qat_enc_cd;
281 struct icp_qat_hw_cipher_algo_blk *cipher = &enc_ctx->cipher;
282 struct icp_qat_hw_auth_algo_blk *hash =
283 (struct icp_qat_hw_auth_algo_blk *)((char *)enc_ctx +
284 sizeof(struct icp_qat_hw_auth_setup) + keys->enckeylen);
285 struct icp_qat_fw_la_bulk_req *req_tmpl = &ctx->enc_fw_req;
286 struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
287 struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
288 void *ptr = &req_tmpl->cd_ctrl;
289 struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl = ptr;
290 struct icp_qat_fw_auth_cd_ctrl_hdr *hash_cd_ctrl = ptr;
291
292 /* CD setup */
293 cipher->aes.cipher_config.val = QAT_AES_HW_CONFIG_ENC(alg, mode);
294 memcpy(cipher->aes.key, keys->enckey, keys->enckeylen);
295 hash->sha.inner_setup.auth_config.config =
296 ICP_QAT_HW_AUTH_CONFIG_BUILD(ICP_QAT_HW_AUTH_MODE1,
297 ctx->qat_hash_alg, digestsize);
298 hash->sha.inner_setup.auth_counter.counter =
299 cpu_to_be32(crypto_shash_blocksize(ctx->hash_tfm));
300
301 if (qat_alg_do_precomputes(hash, ctx, keys->authkey, keys->authkeylen))
302 return -EFAULT;
303
304 /* Request setup */
305 qat_alg_init_common_hdr(header);
306 header->service_cmd_id = ICP_QAT_FW_LA_CMD_CIPHER_HASH;
307 ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(header->serv_specif_flags,
308 ICP_QAT_FW_LA_DIGEST_IN_BUFFER);
309 ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
310 ICP_QAT_FW_LA_RET_AUTH_RES);
311 ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
312 ICP_QAT_FW_LA_NO_CMP_AUTH_RES);
313 cd_pars->u.s.content_desc_addr = ctx->enc_cd_paddr;
314 cd_pars->u.s.content_desc_params_sz = sizeof(struct qat_alg_cd) >> 3;
315
316 /* Cipher CD config setup */
317 cipher_cd_ctrl->cipher_key_sz = keys->enckeylen >> 3;
318 cipher_cd_ctrl->cipher_state_sz = AES_BLOCK_SIZE >> 3;
319 cipher_cd_ctrl->cipher_cfg_offset = 0;
320 ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
321 ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_AUTH);
322 /* Auth CD config setup */
323 hash_cd_ctrl->hash_cfg_offset = ((char *)hash - (char *)cipher) >> 3;
324 hash_cd_ctrl->hash_flags = ICP_QAT_FW_AUTH_HDR_FLAG_NO_NESTED;
325 hash_cd_ctrl->inner_res_sz = digestsize;
326 hash_cd_ctrl->final_sz = digestsize;
327
328 switch (ctx->qat_hash_alg) {
329 case ICP_QAT_HW_AUTH_ALGO_SHA1:
330 hash_cd_ctrl->inner_state1_sz =
331 round_up(ICP_QAT_HW_SHA1_STATE1_SZ, 8);
332 hash_cd_ctrl->inner_state2_sz =
333 round_up(ICP_QAT_HW_SHA1_STATE2_SZ, 8);
334 break;
335 case ICP_QAT_HW_AUTH_ALGO_SHA256:
336 hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA256_STATE1_SZ;
337 hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA256_STATE2_SZ;
338 break;
339 case ICP_QAT_HW_AUTH_ALGO_SHA512:
340 hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA512_STATE1_SZ;
341 hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA512_STATE2_SZ;
342 break;
343 default:
344 break;
345 }
346 hash_cd_ctrl->inner_state2_offset = hash_cd_ctrl->hash_cfg_offset +
347 ((sizeof(struct icp_qat_hw_auth_setup) +
348 round_up(hash_cd_ctrl->inner_state1_sz, 8)) >> 3);
349 ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_AUTH);
350 ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);
351 return 0;
352 }
353
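/*
 * Build the decrypt session.  Here the auth block comes first in the
 * content descriptor and the cipher config and key follow it; the
 * request template runs HASH_CIPHER with CMP_AUTH_RES set so the
 * hardware verifies the ICV instead of returning it.
 */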
354 static int qat_alg_aead_init_dec_session(struct crypto_aead *aead_tfm,
355 int alg,
356 struct crypto_authenc_keys *keys,
357 int mode)
358 {
359 struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(aead_tfm);
360 unsigned int digestsize = crypto_aead_authsize(aead_tfm);
361 struct qat_dec *dec_ctx = &ctx->dec_cd->qat_dec_cd;
362 struct icp_qat_hw_auth_algo_blk *hash = &dec_ctx->hash;
363 struct icp_qat_hw_cipher_algo_blk *cipher =
364 (struct icp_qat_hw_cipher_algo_blk *)((char *)dec_ctx +
365 sizeof(struct icp_qat_hw_auth_setup) +
366 roundup(crypto_shash_digestsize(ctx->hash_tfm), 8) * 2);
367 struct icp_qat_fw_la_bulk_req *req_tmpl = &ctx->dec_fw_req;
368 struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
369 struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
370 void *ptr = &req_tmpl->cd_ctrl;
371 struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl = ptr;
372 struct icp_qat_fw_auth_cd_ctrl_hdr *hash_cd_ctrl = ptr;
373 struct icp_qat_fw_la_auth_req_params *auth_param =
374 (struct icp_qat_fw_la_auth_req_params *)
375 ((char *)&req_tmpl->serv_specif_rqpars +
376 sizeof(struct icp_qat_fw_la_cipher_req_params));
377
378 /* CD setup */
379 cipher->aes.cipher_config.val = QAT_AES_HW_CONFIG_DEC(alg, mode);
380 memcpy(cipher->aes.key, keys->enckey, keys->enckeylen);
381 hash->sha.inner_setup.auth_config.config =
382 ICP_QAT_HW_AUTH_CONFIG_BUILD(ICP_QAT_HW_AUTH_MODE1,
383 ctx->qat_hash_alg,
384 digestsize);
385 hash->sha.inner_setup.auth_counter.counter =
386 cpu_to_be32(crypto_shash_blocksize(ctx->hash_tfm));
387
388 if (qat_alg_do_precomputes(hash, ctx, keys->authkey, keys->authkeylen))
389 return -EFAULT;
390
391 /* Request setup */
392 qat_alg_init_common_hdr(header);
393 header->service_cmd_id = ICP_QAT_FW_LA_CMD_HASH_CIPHER;
394 ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(header->serv_specif_flags,
395 ICP_QAT_FW_LA_DIGEST_IN_BUFFER);
396 ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
397 ICP_QAT_FW_LA_NO_RET_AUTH_RES);
398 ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
399 ICP_QAT_FW_LA_CMP_AUTH_RES);
400 cd_pars->u.s.content_desc_addr = ctx->dec_cd_paddr;
401 cd_pars->u.s.content_desc_params_sz = sizeof(struct qat_alg_cd) >> 3;
402
403 /* Cipher CD config setup */
404 cipher_cd_ctrl->cipher_key_sz = keys->enckeylen >> 3;
405 cipher_cd_ctrl->cipher_state_sz = AES_BLOCK_SIZE >> 3;
406 cipher_cd_ctrl->cipher_cfg_offset =
407 (sizeof(struct icp_qat_hw_auth_setup) +
408 roundup(crypto_shash_digestsize(ctx->hash_tfm), 8) * 2) >> 3;
409 ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
410 ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);
411
412 /* Auth CD config setup */
413 hash_cd_ctrl->hash_cfg_offset = 0;
414 hash_cd_ctrl->hash_flags = ICP_QAT_FW_AUTH_HDR_FLAG_NO_NESTED;
415 hash_cd_ctrl->inner_res_sz = digestsize;
416 hash_cd_ctrl->final_sz = digestsize;
417
418 switch (ctx->qat_hash_alg) {
419 case ICP_QAT_HW_AUTH_ALGO_SHA1:
420 hash_cd_ctrl->inner_state1_sz =
421 round_up(ICP_QAT_HW_SHA1_STATE1_SZ, 8);
422 hash_cd_ctrl->inner_state2_sz =
423 round_up(ICP_QAT_HW_SHA1_STATE2_SZ, 8);
424 break;
425 case ICP_QAT_HW_AUTH_ALGO_SHA256:
426 hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA256_STATE1_SZ;
427 hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA256_STATE2_SZ;
428 break;
429 case ICP_QAT_HW_AUTH_ALGO_SHA512:
430 hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA512_STATE1_SZ;
431 hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA512_STATE2_SZ;
432 break;
433 default:
434 break;
435 }
436
437 hash_cd_ctrl->inner_state2_offset = hash_cd_ctrl->hash_cfg_offset +
438 ((sizeof(struct icp_qat_hw_auth_setup) +
439 round_up(hash_cd_ctrl->inner_state1_sz, 8)) >> 3);
440 auth_param->auth_res_sz = digestsize;
441 ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_AUTH);
442 ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
443 return 0;
444 }
445
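/*
 * Cipher-only setup shared by the encrypt and decrypt paths: copy the
 * key into the content descriptor and build a plain CIPHER request
 * template whose output goes straight to DRAM.
 */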
446 static void qat_alg_ablkcipher_init_com(struct qat_alg_ablkcipher_ctx *ctx,
447 struct icp_qat_fw_la_bulk_req *req,
448 struct icp_qat_hw_cipher_algo_blk *cd,
449 const uint8_t *key, unsigned int keylen)
450 {
451 struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req->cd_pars;
452 struct icp_qat_fw_comn_req_hdr *header = &req->comn_hdr;
453 struct icp_qat_fw_cipher_cd_ctrl_hdr *cd_ctrl = (void *)&req->cd_ctrl;
454
455 memcpy(cd->aes.key, key, keylen);
456 qat_alg_init_common_hdr(header);
457 header->service_cmd_id = ICP_QAT_FW_LA_CMD_CIPHER;
458 cd_pars->u.s.content_desc_params_sz =
459 sizeof(struct icp_qat_hw_cipher_algo_blk) >> 3;
460 /* Cipher CD config setup */
461 cd_ctrl->cipher_key_sz = keylen >> 3;
462 cd_ctrl->cipher_state_sz = AES_BLOCK_SIZE >> 3;
463 cd_ctrl->cipher_cfg_offset = 0;
464 ICP_QAT_FW_COMN_CURR_ID_SET(cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
465 ICP_QAT_FW_COMN_NEXT_ID_SET(cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);
466 }
467
468 static void qat_alg_ablkcipher_init_enc(struct qat_alg_ablkcipher_ctx *ctx,
469 int alg, const uint8_t *key,
470 unsigned int keylen, int mode)
471 {
472 struct icp_qat_hw_cipher_algo_blk *enc_cd = ctx->enc_cd;
473 struct icp_qat_fw_la_bulk_req *req = &ctx->enc_fw_req;
474 struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req->cd_pars;
475
476 qat_alg_ablkcipher_init_com(ctx, req, enc_cd, key, keylen);
477 cd_pars->u.s.content_desc_addr = ctx->enc_cd_paddr;
478 enc_cd->aes.cipher_config.val = QAT_AES_HW_CONFIG_ENC(alg, mode);
479 }
480
481 static void qat_alg_ablkcipher_init_dec(struct qat_alg_ablkcipher_ctx *ctx,
482 int alg, const uint8_t *key,
483 unsigned int keylen, int mode)
484 {
485 struct icp_qat_hw_cipher_algo_blk *dec_cd = ctx->dec_cd;
486 struct icp_qat_fw_la_bulk_req *req = &ctx->dec_fw_req;
487 struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req->cd_pars;
488
489 qat_alg_ablkcipher_init_com(ctx, req, dec_cd, key, keylen);
490 cd_pars->u.s.content_desc_addr = ctx->dec_cd_paddr;
491
492 if (mode != ICP_QAT_HW_CIPHER_CTR_MODE)
493 dec_cd->aes.cipher_config.val =
494 QAT_AES_HW_CONFIG_DEC(alg, mode);
495 else
496 dec_cd->aes.cipher_config.val =
497 QAT_AES_HW_CONFIG_ENC(alg, mode);
498 }
499
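/*
 * Map the key length onto a hardware AES algorithm id.  XTS keys carry
 * two AES keys, so only the doubled 128- and 256-bit lengths are
 * accepted in that mode.
 */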
500 static int qat_alg_validate_key(int key_len, int *alg, int mode)
501 {
502 if (mode != ICP_QAT_HW_CIPHER_XTS_MODE) {
503 switch (key_len) {
504 case AES_KEYSIZE_128:
505 *alg = ICP_QAT_HW_CIPHER_ALGO_AES128;
506 break;
507 case AES_KEYSIZE_192:
508 *alg = ICP_QAT_HW_CIPHER_ALGO_AES192;
509 break;
510 case AES_KEYSIZE_256:
511 *alg = ICP_QAT_HW_CIPHER_ALGO_AES256;
512 break;
513 default:
514 return -EINVAL;
515 }
516 } else {
517 switch (key_len) {
518 case AES_KEYSIZE_128 << 1:
519 *alg = ICP_QAT_HW_CIPHER_ALGO_AES128;
520 break;
521 case AES_KEYSIZE_256 << 1:
522 *alg = ICP_QAT_HW_CIPHER_ALGO_AES256;
523 break;
524 default:
525 return -EINVAL;
526 }
527 }
528 return 0;
529 }
530
531 static int qat_alg_aead_init_sessions(struct crypto_aead *tfm, const u8 *key,
532 unsigned int keylen, int mode)
533 {
534 struct crypto_authenc_keys keys;
535 int alg;
536
537 if (crypto_authenc_extractkeys(&keys, key, keylen))
538 goto bad_key;
539
540 if (qat_alg_validate_key(keys.enckeylen, &alg, mode))
541 goto bad_key;
542
543 if (qat_alg_aead_init_enc_session(tfm, alg, &keys, mode))
544 goto error;
545
546 if (qat_alg_aead_init_dec_session(tfm, alg, &keys, mode))
547 goto error;
548
549 return 0;
550 bad_key:
551 crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
552 return -EINVAL;
553 error:
554 return -EFAULT;
555 }
556
557 static int qat_alg_ablkcipher_init_sessions(struct qat_alg_ablkcipher_ctx *ctx,
558 const uint8_t *key,
559 unsigned int keylen,
560 int mode)
561 {
562 int alg;
563
564 if (qat_alg_validate_key(keylen, &alg, mode))
565 goto bad_key;
566
567 qat_alg_ablkcipher_init_enc(ctx, alg, key, keylen, mode);
568 qat_alg_ablkcipher_init_dec(ctx, alg, key, keylen, mode);
569 return 0;
570 bad_key:
571 crypto_tfm_set_flags(ctx->tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
572 return -EINVAL;
573 }
574
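/*
 * setkey for the authenc() templates: on first use grab a crypto
 * instance on the current node and allocate DMA-coherent content
 * descriptors, on rekey just clear the old descriptors and request
 * templates, then (re)build the AES-CBC sessions.
 */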
575 static int qat_alg_aead_setkey(struct crypto_aead *tfm, const uint8_t *key,
576 unsigned int keylen)
577 {
578 struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm);
579 struct device *dev;
580
581 if (ctx->enc_cd) {
582 /* rekeying */
583 dev = &GET_DEV(ctx->inst->accel_dev);
584 memset(ctx->enc_cd, 0, sizeof(*ctx->enc_cd));
585 memset(ctx->dec_cd, 0, sizeof(*ctx->dec_cd));
586 memset(&ctx->enc_fw_req, 0, sizeof(ctx->enc_fw_req));
587 memset(&ctx->dec_fw_req, 0, sizeof(ctx->dec_fw_req));
588 } else {
589 /* new key */
590 int node = get_current_node();
591 struct qat_crypto_instance *inst =
592 qat_crypto_get_instance_node(node);
593 if (!inst) {
594 return -EINVAL;
595 }
596
597 dev = &GET_DEV(inst->accel_dev);
598 ctx->inst = inst;
599 ctx->enc_cd = dma_zalloc_coherent(dev, sizeof(*ctx->enc_cd),
600 &ctx->enc_cd_paddr,
601 GFP_ATOMIC);
602 if (!ctx->enc_cd) {
603 return -ENOMEM;
604 }
605 ctx->dec_cd = dma_zalloc_coherent(dev, sizeof(*ctx->dec_cd),
606 &ctx->dec_cd_paddr,
607 GFP_ATOMIC);
608 if (!ctx->dec_cd) {
609 goto out_free_enc;
610 }
611 }
612 if (qat_alg_aead_init_sessions(tfm, key, keylen,
613 ICP_QAT_HW_CIPHER_CBC_MODE))
614 goto out_free_all;
615
616 return 0;
617
618 out_free_all:
619 memset(ctx->dec_cd, 0, sizeof(struct qat_alg_cd));
620 dma_free_coherent(dev, sizeof(struct qat_alg_cd),
621 ctx->dec_cd, ctx->dec_cd_paddr);
622 ctx->dec_cd = NULL;
623 out_free_enc:
624 memset(ctx->enc_cd, 0, sizeof(struct qat_alg_cd));
625 dma_free_coherent(dev, sizeof(struct qat_alg_cd),
626 ctx->enc_cd, ctx->enc_cd_paddr);
627 ctx->enc_cd = NULL;
628 return -ENOMEM;
629 }
630
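/*
 * Undo qat_alg_sgl_to_bufl(): unmap every data segment and the buffer
 * list itself, and for out-of-place requests do the same for the
 * separate destination list (only the entries that were mapped).
 */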
631 static void qat_alg_free_bufl(struct qat_crypto_instance *inst,
632 struct qat_crypto_request *qat_req)
633 {
634 struct device *dev = &GET_DEV(inst->accel_dev);
635 struct qat_alg_buf_list *bl = qat_req->buf.bl;
636 struct qat_alg_buf_list *blout = qat_req->buf.blout;
637 dma_addr_t blp = qat_req->buf.blp;
638 dma_addr_t blpout = qat_req->buf.bloutp;
639 size_t sz = qat_req->buf.sz;
640 size_t sz_out = qat_req->buf.sz_out;
641 int i;
642
643 for (i = 0; i < bl->num_bufs; i++)
644 dma_unmap_single(dev, bl->bufers[i].addr,
645 bl->bufers[i].len, DMA_BIDIRECTIONAL);
646
647 dma_unmap_single(dev, blp, sz, DMA_TO_DEVICE);
648 kfree(bl);
649 if (blp != blpout) {
650 /* If out of place operation dma unmap only data */
651 int bufless = blout->num_bufs - blout->num_mapped_bufs;
652
653 for (i = bufless; i < blout->num_bufs; i++) {
654 dma_unmap_single(dev, blout->bufers[i].addr,
655 blout->bufers[i].len,
656 DMA_BIDIRECTIONAL);
657 }
658 dma_unmap_single(dev, blpout, sz_out, DMA_TO_DEVICE);
659 kfree(blout);
660 }
661 }
662
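/*
 * DMA-map the source (and, for out-of-place requests, the destination)
 * scatterlist and describe it in the flat qat_alg_buf_list format.
 * Zero-length entries are skipped; for in-place requests the
 * destination pointer just reuses the source list.
 */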
663 static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
664 struct scatterlist *sgl,
665 struct scatterlist *sglout,
666 struct qat_crypto_request *qat_req)
667 {
668 struct device *dev = &GET_DEV(inst->accel_dev);
669 int i, sg_nctr = 0;
670 int n = sg_nents(sgl);
671 struct qat_alg_buf_list *bufl;
672 struct qat_alg_buf_list *buflout = NULL;
673 dma_addr_t blp;
674 dma_addr_t bloutp = 0;
675 struct scatterlist *sg;
676 size_t sz_out, sz = sizeof(struct qat_alg_buf_list) +
677 ((1 + n) * sizeof(struct qat_alg_buf));
678
679 if (unlikely(!n))
680 return -EINVAL;
681
682 bufl = kzalloc_node(sz, GFP_ATOMIC,
683 dev_to_node(&GET_DEV(inst->accel_dev)));
684 if (unlikely(!bufl))
685 return -ENOMEM;
686
687 blp = dma_map_single(dev, bufl, sz, DMA_TO_DEVICE);
688 if (unlikely(dma_mapping_error(dev, blp)))
689 goto err_in;
690
691 for_each_sg(sgl, sg, n, i) {
692 int y = sg_nctr;
693
694 if (!sg->length)
695 continue;
696
697 bufl->bufers[y].addr = dma_map_single(dev, sg_virt(sg),
698 sg->length,
699 DMA_BIDIRECTIONAL);
700 bufl->bufers[y].len = sg->length;
701 if (unlikely(dma_mapping_error(dev, bufl->bufers[y].addr)))
702 goto err_in;
703 sg_nctr++;
704 }
705 bufl->num_bufs = sg_nctr;
706 qat_req->buf.bl = bufl;
707 qat_req->buf.blp = blp;
708 qat_req->buf.sz = sz;
709 /* Handle out of place operation */
710 if (sgl != sglout) {
711 struct qat_alg_buf *bufers;
712
713 n = sg_nents(sglout);
714 sz_out = sizeof(struct qat_alg_buf_list) +
715 ((1 + n) * sizeof(struct qat_alg_buf));
716 sg_nctr = 0;
717 buflout = kzalloc_node(sz_out, GFP_ATOMIC,
718 dev_to_node(&GET_DEV(inst->accel_dev)));
719 if (unlikely(!buflout))
720 goto err_in;
721 bloutp = dma_map_single(dev, buflout, sz_out, DMA_TO_DEVICE);
722 if (unlikely(dma_mapping_error(dev, bloutp)))
723 goto err_out;
724 bufers = buflout->bufers;
725 for_each_sg(sglout, sg, n, i) {
726 int y = sg_nctr;
727
728 if (!sg->length)
729 continue;
730
731 bufers[y].addr = dma_map_single(dev, sg_virt(sg),
732 sg->length,
733 DMA_BIDIRECTIONAL);
734 if (unlikely(dma_mapping_error(dev, bufers[y].addr)))
735 goto err_out;
736 bufers[y].len = sg->length;
737 sg_nctr++;
738 }
739 buflout->num_bufs = sg_nctr;
740 buflout->num_mapped_bufs = sg_nctr;
741 qat_req->buf.blout = buflout;
742 qat_req->buf.bloutp = bloutp;
743 qat_req->buf.sz_out = sz_out;
744 } else {
745 /* Otherwise set the src and dst to the same address */
746 qat_req->buf.bloutp = qat_req->buf.blp;
747 qat_req->buf.sz_out = 0;
748 }
749 return 0;
750
751 err_out:
752 n = sg_nents(sglout);
753 for (i = 0; i < n; i++)
754 if (!dma_mapping_error(dev, buflout->bufers[i].addr))
755 dma_unmap_single(dev, buflout->bufers[i].addr,
756 buflout->bufers[i].len,
757 DMA_BIDIRECTIONAL);
758 if (!dma_mapping_error(dev, bloutp))
759 dma_unmap_single(dev, bloutp, sz_out, DMA_TO_DEVICE);
760 kfree(buflout);
761
762 err_in:
763 n = sg_nents(sgl);
764 for (i = 0; i < n; i++)
765 if (!dma_mapping_error(dev, bufl->bufers[i].addr))
766 dma_unmap_single(dev, bufl->bufers[i].addr,
767 bufl->bufers[i].len,
768 DMA_BIDIRECTIONAL);
769
770 if (!dma_mapping_error(dev, blp))
771 dma_unmap_single(dev, blp, sz, DMA_TO_DEVICE);
772 kfree(bufl);
773
774 dev_err(dev, "Failed to map buf for dma\n");
775 return -ENOMEM;
776 }
777
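/*
 * Response path: qat_alg_callback() recovers the originating request
 * from the response's opaque field and dispatches to the per-algorithm
 * callback, which unmaps the buffers, turns a bad firmware status into
 * -EBADMSG (AEAD) or -EINVAL (ablkcipher) and completes the request.
 */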
778 static void qat_aead_alg_callback(struct icp_qat_fw_la_resp *qat_resp,
779 struct qat_crypto_request *qat_req)
780 {
781 struct qat_alg_aead_ctx *ctx = qat_req->aead_ctx;
782 struct qat_crypto_instance *inst = ctx->inst;
783 struct aead_request *areq = qat_req->aead_req;
784 uint8_t stat_field = qat_resp->comn_resp.comn_status;
785 int res = 0, qat_res = ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(stat_field);
786
787 qat_alg_free_bufl(inst, qat_req);
788 if (unlikely(qat_res != ICP_QAT_FW_COMN_STATUS_FLAG_OK))
789 res = -EBADMSG;
790 areq->base.complete(&areq->base, res);
791 }
792
793 static void qat_ablkcipher_alg_callback(struct icp_qat_fw_la_resp *qat_resp,
794 struct qat_crypto_request *qat_req)
795 {
796 struct qat_alg_ablkcipher_ctx *ctx = qat_req->ablkcipher_ctx;
797 struct qat_crypto_instance *inst = ctx->inst;
798 struct ablkcipher_request *areq = qat_req->ablkcipher_req;
799 uint8_t stat_field = qat_resp->comn_resp.comn_status;
800 int res = 0, qat_res = ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(stat_field);
801
802 qat_alg_free_bufl(inst, qat_req);
803 if (unlikely(qat_res != ICP_QAT_FW_COMN_STATUS_FLAG_OK))
804 res = -EINVAL;
805 areq->base.complete(&areq->base, res);
806 }
807
808 void qat_alg_callback(void *resp)
809 {
810 struct icp_qat_fw_la_resp *qat_resp = resp;
811 struct qat_crypto_request *qat_req =
812 (void *)(__force long)qat_resp->opaque_data;
813
814 qat_req->cb(qat_resp, qat_req);
815 }
816
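/*
 * AEAD decrypt: copy the prebuilt template, point it at the mapped
 * buffer lists, set the cipher region to cryptlen minus the ICV and
 * authenticate assoclen + cipher_length from offset 0.  Submission is
 * retried a few times if the ring is full; -EBUSY is returned if it
 * stays full, otherwise the request completes asynchronously.
 */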
817 static int qat_alg_aead_dec(struct aead_request *areq)
818 {
819 struct crypto_aead *aead_tfm = crypto_aead_reqtfm(areq);
820 struct crypto_tfm *tfm = crypto_aead_tfm(aead_tfm);
821 struct qat_alg_aead_ctx *ctx = crypto_tfm_ctx(tfm);
822 struct qat_crypto_request *qat_req = aead_request_ctx(areq);
823 struct icp_qat_fw_la_cipher_req_params *cipher_param;
824 struct icp_qat_fw_la_auth_req_params *auth_param;
825 struct icp_qat_fw_la_bulk_req *msg;
826 int digest_size = crypto_aead_authsize(aead_tfm);
827 int ret, ctr = 0;
828
829 ret = qat_alg_sgl_to_bufl(ctx->inst, areq->src, areq->dst, qat_req);
830 if (unlikely(ret))
831 return ret;
832
833 msg = &qat_req->req;
834 *msg = ctx->dec_fw_req;
835 qat_req->aead_ctx = ctx;
836 qat_req->aead_req = areq;
837 qat_req->cb = qat_aead_alg_callback;
838 qat_req->req.comn_mid.opaque_data = (uint64_t)(__force long)qat_req;
839 qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
840 qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
841 cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
842 cipher_param->cipher_length = areq->cryptlen - digest_size;
843 cipher_param->cipher_offset = areq->assoclen;
844 memcpy(cipher_param->u.cipher_IV_array, areq->iv, AES_BLOCK_SIZE);
845 auth_param = (void *)((uint8_t *)cipher_param + sizeof(*cipher_param));
846 auth_param->auth_off = 0;
847 auth_param->auth_len = areq->assoclen + cipher_param->cipher_length;
848 do {
849 ret = adf_send_message(ctx->inst->sym_tx, (uint32_t *)msg);
850 } while (ret == -EAGAIN && ctr++ < 10);
851
852 if (ret == -EAGAIN) {
853 qat_alg_free_bufl(ctx->inst, qat_req);
854 return -EBUSY;
855 }
856 return -EINPROGRESS;
857 }
858
859 static int qat_alg_aead_enc(struct aead_request *areq)
860 {
861 struct crypto_aead *aead_tfm = crypto_aead_reqtfm(areq);
862 struct crypto_tfm *tfm = crypto_aead_tfm(aead_tfm);
863 struct qat_alg_aead_ctx *ctx = crypto_tfm_ctx(tfm);
864 struct qat_crypto_request *qat_req = aead_request_ctx(areq);
865 struct icp_qat_fw_la_cipher_req_params *cipher_param;
866 struct icp_qat_fw_la_auth_req_params *auth_param;
867 struct icp_qat_fw_la_bulk_req *msg;
868 uint8_t *iv = areq->iv;
869 int ret, ctr = 0;
870
871 ret = qat_alg_sgl_to_bufl(ctx->inst, areq->src, areq->dst, qat_req);
872 if (unlikely(ret))
873 return ret;
874
875 msg = &qat_req->req;
876 *msg = ctx->enc_fw_req;
877 qat_req->aead_ctx = ctx;
878 qat_req->aead_req = areq;
879 qat_req->cb = qat_aead_alg_callback;
880 qat_req->req.comn_mid.opaque_data = (uint64_t)(__force long)qat_req;
881 qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
882 qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
883 cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
884 auth_param = (void *)((uint8_t *)cipher_param + sizeof(*cipher_param));
885
886 memcpy(cipher_param->u.cipher_IV_array, iv, AES_BLOCK_SIZE);
887 cipher_param->cipher_length = areq->cryptlen;
888 cipher_param->cipher_offset = areq->assoclen;
889
890 auth_param->auth_off = 0;
891 auth_param->auth_len = areq->assoclen + areq->cryptlen;
892
893 do {
894 ret = adf_send_message(ctx->inst->sym_tx, (uint32_t *)msg);
895 } while (ret == -EAGAIN && ctr++ < 10);
896
897 if (ret == -EAGAIN) {
898 qat_alg_free_bufl(ctx->inst, qat_req);
899 return -EBUSY;
900 }
901 return -EINPROGRESS;
902 }
903
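/*
 * ablkcipher setkey: same allocate-or-clear logic as the AEAD setkey,
 * but the content descriptor pointers are guarded by ctx->lock and the
 * cipher mode (CBC/CTR/XTS) is supplied by the per-mode wrappers below.
 */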
904 static int qat_alg_ablkcipher_setkey(struct crypto_ablkcipher *tfm,
905 const u8 *key, unsigned int keylen,
906 int mode)
907 {
908 struct qat_alg_ablkcipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);
909 struct device *dev;
910
911 spin_lock(&ctx->lock);
912 if (ctx->enc_cd) {
913 /* rekeying */
914 dev = &GET_DEV(ctx->inst->accel_dev);
915 memset(ctx->enc_cd, 0, sizeof(*ctx->enc_cd));
916 memset(ctx->dec_cd, 0, sizeof(*ctx->dec_cd));
917 memset(&ctx->enc_fw_req, 0, sizeof(ctx->enc_fw_req));
918 memset(&ctx->dec_fw_req, 0, sizeof(ctx->dec_fw_req));
919 } else {
920 /* new key */
921 int node = get_current_node();
922 struct qat_crypto_instance *inst =
923 qat_crypto_get_instance_node(node);
924 if (!inst) {
925 spin_unlock(&ctx->lock);
926 return -EINVAL;
927 }
928
929 dev = &GET_DEV(inst->accel_dev);
930 ctx->inst = inst;
931 ctx->enc_cd = dma_zalloc_coherent(dev, sizeof(*ctx->enc_cd),
932 &ctx->enc_cd_paddr,
933 GFP_ATOMIC);
934 if (!ctx->enc_cd) {
935 spin_unlock(&ctx->lock);
936 return -ENOMEM;
937 }
938 ctx->dec_cd = dma_zalloc_coherent(dev, sizeof(*ctx->dec_cd),
939 &ctx->dec_cd_paddr,
940 GFP_ATOMIC);
941 if (!ctx->dec_cd) {
942 spin_unlock(&ctx->lock);
943 goto out_free_enc;
944 }
945 }
946 spin_unlock(&ctx->lock);
947 if (qat_alg_ablkcipher_init_sessions(ctx, key, keylen, mode))
948 goto out_free_all;
949
950 return 0;
951
952 out_free_all:
953 memset(ctx->dec_cd, 0, sizeof(*ctx->dec_cd));
954 dma_free_coherent(dev, sizeof(*ctx->dec_cd),
955 ctx->dec_cd, ctx->dec_cd_paddr);
956 ctx->dec_cd = NULL;
957 out_free_enc:
958 memset(ctx->enc_cd, 0, sizeof(*ctx->enc_cd));
959 dma_free_coherent(dev, sizeof(*ctx->enc_cd),
960 ctx->enc_cd, ctx->enc_cd_paddr);
961 ctx->enc_cd = NULL;
962 return -ENOMEM;
963 }
964
965 static int qat_alg_ablkcipher_cbc_setkey(struct crypto_ablkcipher *tfm,
966 const u8 *key, unsigned int keylen)
967 {
968 return qat_alg_ablkcipher_setkey(tfm, key, keylen,
969 ICP_QAT_HW_CIPHER_CBC_MODE);
970 }
971
972 static int qat_alg_ablkcipher_ctr_setkey(struct crypto_ablkcipher *tfm,
973 const u8 *key, unsigned int keylen)
974 {
975 return qat_alg_ablkcipher_setkey(tfm, key, keylen,
976 ICP_QAT_HW_CIPHER_CTR_MODE);
977 }
978
979 static int qat_alg_ablkcipher_xts_setkey(struct crypto_ablkcipher *tfm,
980 const u8 *key, unsigned int keylen)
981 {
982 return qat_alg_ablkcipher_setkey(tfm, key, keylen,
983 ICP_QAT_HW_CIPHER_XTS_MODE);
984 }
985
986 static int qat_alg_ablkcipher_encrypt(struct ablkcipher_request *req)
987 {
988 struct crypto_ablkcipher *atfm = crypto_ablkcipher_reqtfm(req);
989 struct crypto_tfm *tfm = crypto_ablkcipher_tfm(atfm);
990 struct qat_alg_ablkcipher_ctx *ctx = crypto_tfm_ctx(tfm);
991 struct qat_crypto_request *qat_req = ablkcipher_request_ctx(req);
992 struct icp_qat_fw_la_cipher_req_params *cipher_param;
993 struct icp_qat_fw_la_bulk_req *msg;
994 int ret, ctr = 0;
995
996 ret = qat_alg_sgl_to_bufl(ctx->inst, req->src, req->dst, qat_req);
997 if (unlikely(ret))
998 return ret;
999
1000 msg = &qat_req->req;
1001 *msg = ctx->enc_fw_req;
1002 qat_req->ablkcipher_ctx = ctx;
1003 qat_req->ablkcipher_req = req;
1004 qat_req->cb = qat_ablkcipher_alg_callback;
1005 qat_req->req.comn_mid.opaque_data = (uint64_t)(__force long)qat_req;
1006 qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
1007 qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
1008 cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
1009 cipher_param->cipher_length = req->nbytes;
1010 cipher_param->cipher_offset = 0;
1011 memcpy(cipher_param->u.cipher_IV_array, req->info, AES_BLOCK_SIZE);
1012 do {
1013 ret = adf_send_message(ctx->inst->sym_tx, (uint32_t *)msg);
1014 } while (ret == -EAGAIN && ctr++ < 10);
1015
1016 if (ret == -EAGAIN) {
1017 qat_alg_free_bufl(ctx->inst, qat_req);
1018 return -EBUSY;
1019 }
1020 return -EINPROGRESS;
1021 }
1022
1023 static int qat_alg_ablkcipher_decrypt(struct ablkcipher_request *req)
1024 {
1025 struct crypto_ablkcipher *atfm = crypto_ablkcipher_reqtfm(req);
1026 struct crypto_tfm *tfm = crypto_ablkcipher_tfm(atfm);
1027 struct qat_alg_ablkcipher_ctx *ctx = crypto_tfm_ctx(tfm);
1028 struct qat_crypto_request *qat_req = ablkcipher_request_ctx(req);
1029 struct icp_qat_fw_la_cipher_req_params *cipher_param;
1030 struct icp_qat_fw_la_bulk_req *msg;
1031 int ret, ctr = 0;
1032
1033 ret = qat_alg_sgl_to_bufl(ctx->inst, req->src, req->dst, qat_req);
1034 if (unlikely(ret))
1035 return ret;
1036
1037 msg = &qat_req->req;
1038 *msg = ctx->dec_fw_req;
1039 qat_req->ablkcipher_ctx = ctx;
1040 qat_req->ablkcipher_req = req;
1041 qat_req->cb = qat_ablkcipher_alg_callback;
1042 qat_req->req.comn_mid.opaque_data = (uint64_t)(__force long)qat_req;
1043 qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
1044 qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
1045 cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
1046 cipher_param->cipher_length = req->nbytes;
1047 cipher_param->cipher_offset = 0;
1048 memcpy(cipher_param->u.cipher_IV_array, req->info, AES_BLOCK_SIZE);
1049 do {
1050 ret = adf_send_message(ctx->inst->sym_tx, (uint32_t *)msg);
1051 } while (ret == -EAGAIN && ctr++ < 10);
1052
1053 if (ret == -EAGAIN) {
1054 qat_alg_free_bufl(ctx->inst, qat_req);
1055 return -EBUSY;
1056 }
1057 return -EINPROGRESS;
1058 }
1059
1060 static int qat_alg_aead_init(struct crypto_aead *tfm,
1061 enum icp_qat_hw_auth_algo hash,
1062 const char *hash_name)
1063 {
1064 struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm);
1065
1066 ctx->hash_tfm = crypto_alloc_shash(hash_name, 0, 0);
1067 if (IS_ERR(ctx->hash_tfm))
1068 return PTR_ERR(ctx->hash_tfm);
1069 ctx->qat_hash_alg = hash;
1070 crypto_aead_set_reqsize(tfm, sizeof(struct qat_crypto_request));
1071 return 0;
1072 }
1073
1074 static int qat_alg_aead_sha1_init(struct crypto_aead *tfm)
1075 {
1076 return qat_alg_aead_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA1, "sha1");
1077 }
1078
1079 static int qat_alg_aead_sha256_init(struct crypto_aead *tfm)
1080 {
1081 return qat_alg_aead_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA256, "sha256");
1082 }
1083
1084 static int qat_alg_aead_sha512_init(struct crypto_aead *tfm)
1085 {
1086 return qat_alg_aead_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA512, "sha512");
1087 }
1088
1089 static void qat_alg_aead_exit(struct crypto_aead *tfm)
1090 {
1091 struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm);
1092 struct qat_crypto_instance *inst = ctx->inst;
1093 struct device *dev;
1094
1095 crypto_free_shash(ctx->hash_tfm);
1096
1097 if (!inst)
1098 return;
1099
1100 dev = &GET_DEV(inst->accel_dev);
1101 if (ctx->enc_cd) {
1102 memset(ctx->enc_cd, 0, sizeof(struct qat_alg_cd));
1103 dma_free_coherent(dev, sizeof(struct qat_alg_cd),
1104 ctx->enc_cd, ctx->enc_cd_paddr);
1105 }
1106 if (ctx->dec_cd) {
1107 memset(ctx->dec_cd, 0, sizeof(struct qat_alg_cd));
1108 dma_free_coherent(dev, sizeof(struct qat_alg_cd),
1109 ctx->dec_cd, ctx->dec_cd_paddr);
1110 }
1111 qat_crypto_put_instance(inst);
1112 }
1113
1114 static int qat_alg_ablkcipher_init(struct crypto_tfm *tfm)
1115 {
1116 struct qat_alg_ablkcipher_ctx *ctx = crypto_tfm_ctx(tfm);
1117
1118 spin_lock_init(&ctx->lock);
1119 tfm->crt_ablkcipher.reqsize = sizeof(struct qat_crypto_request);
1120 ctx->tfm = tfm;
1121 return 0;
1122 }
1123
1124 static void qat_alg_ablkcipher_exit(struct crypto_tfm *tfm)
1125 {
1126 struct qat_alg_ablkcipher_ctx *ctx = crypto_tfm_ctx(tfm);
1127 struct qat_crypto_instance *inst = ctx->inst;
1128 struct device *dev;
1129
1130 if (!inst)
1131 return;
1132
1133 dev = &GET_DEV(inst->accel_dev);
1134 if (ctx->enc_cd) {
1135 memset(ctx->enc_cd, 0,
1136 sizeof(struct icp_qat_hw_cipher_algo_blk));
1137 dma_free_coherent(dev,
1138 sizeof(struct icp_qat_hw_cipher_algo_blk),
1139 ctx->enc_cd, ctx->enc_cd_paddr);
1140 }
1141 if (ctx->dec_cd) {
1142 memset(ctx->dec_cd, 0,
1143 sizeof(struct icp_qat_hw_cipher_algo_blk));
1144 dma_free_coherent(dev,
1145 sizeof(struct icp_qat_hw_cipher_algo_blk),
1146 ctx->dec_cd, ctx->dec_cd_paddr);
1147 }
1148 qat_crypto_put_instance(inst);
1149 }
1150
1151
1152 static struct aead_alg qat_aeads[] = { {
1153 .base = {
1154 .cra_name = "authenc(hmac(sha1),cbc(aes))",
1155 .cra_driver_name = "qat_aes_cbc_hmac_sha1",
1156 .cra_priority = 4001,
1157 .cra_flags = CRYPTO_ALG_ASYNC,
1158 .cra_blocksize = AES_BLOCK_SIZE,
1159 .cra_ctxsize = sizeof(struct qat_alg_aead_ctx),
1160 .cra_module = THIS_MODULE,
1161 },
1162 .init = qat_alg_aead_sha1_init,
1163 .exit = qat_alg_aead_exit,
1164 .setkey = qat_alg_aead_setkey,
1165 .decrypt = qat_alg_aead_dec,
1166 .encrypt = qat_alg_aead_enc,
1167 .ivsize = AES_BLOCK_SIZE,
1168 .maxauthsize = SHA1_DIGEST_SIZE,
1169 }, {
1170 .base = {
1171 .cra_name = "authenc(hmac(sha256),cbc(aes))",
1172 .cra_driver_name = "qat_aes_cbc_hmac_sha256",
1173 .cra_priority = 4001,
1174 .cra_flags = CRYPTO_ALG_ASYNC,
1175 .cra_blocksize = AES_BLOCK_SIZE,
1176 .cra_ctxsize = sizeof(struct qat_alg_aead_ctx),
1177 .cra_module = THIS_MODULE,
1178 },
1179 .init = qat_alg_aead_sha256_init,
1180 .exit = qat_alg_aead_exit,
1181 .setkey = qat_alg_aead_setkey,
1182 .decrypt = qat_alg_aead_dec,
1183 .encrypt = qat_alg_aead_enc,
1184 .ivsize = AES_BLOCK_SIZE,
1185 .maxauthsize = SHA256_DIGEST_SIZE,
1186 }, {
1187 .base = {
1188 .cra_name = "authenc(hmac(sha512),cbc(aes))",
1189 .cra_driver_name = "qat_aes_cbc_hmac_sha512",
1190 .cra_priority = 4001,
1191 .cra_flags = CRYPTO_ALG_ASYNC,
1192 .cra_blocksize = AES_BLOCK_SIZE,
1193 .cra_ctxsize = sizeof(struct qat_alg_aead_ctx),
1194 .cra_module = THIS_MODULE,
1195 },
1196 .init = qat_alg_aead_sha512_init,
1197 .exit = qat_alg_aead_exit,
1198 .setkey = qat_alg_aead_setkey,
1199 .decrypt = qat_alg_aead_dec,
1200 .encrypt = qat_alg_aead_enc,
1201 .ivsize = AES_BLOCK_SIZE,
1202 .maxauthsize = SHA512_DIGEST_SIZE,
1203 } };
1204
1205 static struct crypto_alg qat_algs[] = { {
1206 .cra_name = "cbc(aes)",
1207 .cra_driver_name = "qat_aes_cbc",
1208 .cra_priority = 4001,
1209 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
1210 .cra_blocksize = AES_BLOCK_SIZE,
1211 .cra_ctxsize = sizeof(struct qat_alg_ablkcipher_ctx),
1212 .cra_alignmask = 0,
1213 .cra_type = &crypto_ablkcipher_type,
1214 .cra_module = THIS_MODULE,
1215 .cra_init = qat_alg_ablkcipher_init,
1216 .cra_exit = qat_alg_ablkcipher_exit,
1217 .cra_u = {
1218 .ablkcipher = {
1219 .setkey = qat_alg_ablkcipher_cbc_setkey,
1220 .decrypt = qat_alg_ablkcipher_decrypt,
1221 .encrypt = qat_alg_ablkcipher_encrypt,
1222 .min_keysize = AES_MIN_KEY_SIZE,
1223 .max_keysize = AES_MAX_KEY_SIZE,
1224 .ivsize = AES_BLOCK_SIZE,
1225 },
1226 },
1227 }, {
1228 .cra_name = "ctr(aes)",
1229 .cra_driver_name = "qat_aes_ctr",
1230 .cra_priority = 4001,
1231 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
1232 .cra_blocksize = AES_BLOCK_SIZE,
1233 .cra_ctxsize = sizeof(struct qat_alg_ablkcipher_ctx),
1234 .cra_alignmask = 0,
1235 .cra_type = &crypto_ablkcipher_type,
1236 .cra_module = THIS_MODULE,
1237 .cra_init = qat_alg_ablkcipher_init,
1238 .cra_exit = qat_alg_ablkcipher_exit,
1239 .cra_u = {
1240 .ablkcipher = {
1241 .setkey = qat_alg_ablkcipher_ctr_setkey,
1242 .decrypt = qat_alg_ablkcipher_decrypt,
1243 .encrypt = qat_alg_ablkcipher_encrypt,
1244 .min_keysize = AES_MIN_KEY_SIZE,
1245 .max_keysize = AES_MAX_KEY_SIZE,
1246 .ivsize = AES_BLOCK_SIZE,
1247 },
1248 },
1249 }, {
1250 .cra_name = "xts(aes)",
1251 .cra_driver_name = "qat_aes_xts",
1252 .cra_priority = 4001,
1253 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
1254 .cra_blocksize = AES_BLOCK_SIZE,
1255 .cra_ctxsize = sizeof(struct qat_alg_ablkcipher_ctx),
1256 .cra_alignmask = 0,
1257 .cra_type = &crypto_ablkcipher_type,
1258 .cra_module = THIS_MODULE,
1259 .cra_init = qat_alg_ablkcipher_init,
1260 .cra_exit = qat_alg_ablkcipher_exit,
1261 .cra_u = {
1262 .ablkcipher = {
1263 .setkey = qat_alg_ablkcipher_xts_setkey,
1264 .decrypt = qat_alg_ablkcipher_decrypt,
1265 .encrypt = qat_alg_ablkcipher_encrypt,
1266 .min_keysize = 2 * AES_MIN_KEY_SIZE,
1267 .max_keysize = 2 * AES_MAX_KEY_SIZE,
1268 .ivsize = AES_BLOCK_SIZE,
1269 },
1270 },
1271 } };
1272
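/*
 * Registration is refcounted across accelerator devices: only the first
 * qat_algs_register() call registers the algorithms with the crypto API
 * and only the last qat_algs_unregister() call removes them.
 */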
1273 int qat_algs_register(void)
1274 {
1275 int ret = 0, i;
1276
1277 mutex_lock(&algs_lock);
1278 if (++active_devs != 1)
1279 goto unlock;
1280
1281 for (i = 0; i < ARRAY_SIZE(qat_algs); i++)
1282 qat_algs[i].cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC;
1283
1284 ret = crypto_register_algs(qat_algs, ARRAY_SIZE(qat_algs));
1285 if (ret)
1286 goto unlock;
1287
1288 for (i = 0; i < ARRAY_SIZE(qat_aeads); i++)
1289 qat_aeads[i].base.cra_flags = CRYPTO_ALG_ASYNC;
1290
1291 ret = crypto_register_aeads(qat_aeads, ARRAY_SIZE(qat_aeads));
1292 if (ret)
1293 goto unreg_algs;
1294
1295 unlock:
1296 mutex_unlock(&algs_lock);
1297 return ret;
1298
1299 unreg_algs:
1300 crypto_unregister_algs(qat_algs, ARRAY_SIZE(qat_algs));
1301 goto unlock;
1302 }
1303
1304 void qat_algs_unregister(void)
1305 {
1306 mutex_lock(&algs_lock);
1307 if (--active_devs != 0)
1308 goto unlock;
1309
1310 crypto_unregister_aeads(qat_aeads, ARRAY_SIZE(qat_aeads));
1311 crypto_unregister_algs(qat_algs, ARRAY_SIZE(qat_algs));
1312
1313 unlock:
1314 mutex_unlock(&algs_lock);
1315 }
1316