1 // SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
2 /* Copyright(c) 2014 - 2020 Intel Corporation */
3 #include <linux/module.h>
4 #include <linux/slab.h>
5 #include <linux/crypto.h>
6 #include <crypto/internal/aead.h>
7 #include <crypto/internal/skcipher.h>
8 #include <crypto/aes.h>
9 #include <crypto/sha.h>
10 #include <crypto/hash.h>
11 #include <crypto/hmac.h>
12 #include <crypto/algapi.h>
13 #include <crypto/authenc.h>
14 #include <crypto/xts.h>
15 #include <linux/dma-mapping.h>
16 #include "adf_accel_devices.h"
17 #include "adf_transport.h"
18 #include "adf_common_drv.h"
19 #include "qat_crypto.h"
20 #include "icp_qat_hw.h"
21 #include "icp_qat_fw.h"
22 #include "icp_qat_fw_la.h"
23 
24 #define QAT_AES_HW_CONFIG_ENC(alg, mode) \
25 	ICP_QAT_HW_CIPHER_CONFIG_BUILD(mode, alg, \
26 				       ICP_QAT_HW_CIPHER_NO_CONVERT, \
27 				       ICP_QAT_HW_CIPHER_ENCRYPT)
28 
29 #define QAT_AES_HW_CONFIG_DEC(alg, mode) \
30 	ICP_QAT_HW_CIPHER_CONFIG_BUILD(mode, alg, \
31 				       ICP_QAT_HW_CIPHER_KEY_CONVERT, \
32 				       ICP_QAT_HW_CIPHER_DECRYPT)
33 
34 static DEFINE_MUTEX(algs_lock);
35 static unsigned int active_devs;
36 
37 struct qat_alg_buf {
38 	u32 len;
39 	u32 resrvd;
40 	u64 addr;
41 } __packed;
42 
43 struct qat_alg_buf_list {
44 	u64 resrvd;
45 	u32 num_bufs;
46 	u32 num_mapped_bufs;
47 	struct qat_alg_buf bufers[];
48 } __packed __aligned(64);
49 
50 /* Common content descriptor */
51 struct qat_alg_cd {
52 	union {
53 		struct qat_enc { /* Encrypt content desc */
54 			struct icp_qat_hw_cipher_algo_blk cipher;
55 			struct icp_qat_hw_auth_algo_blk hash;
56 		} qat_enc_cd;
57 		struct qat_dec { /* Decrypt content desc */
58 			struct icp_qat_hw_auth_algo_blk hash;
59 			struct icp_qat_hw_cipher_algo_blk cipher;
60 		} qat_dec_cd;
61 	};
62 } __aligned(64);
63 
64 struct qat_alg_aead_ctx {
65 	struct qat_alg_cd *enc_cd;
66 	struct qat_alg_cd *dec_cd;
67 	dma_addr_t enc_cd_paddr;
68 	dma_addr_t dec_cd_paddr;
69 	struct icp_qat_fw_la_bulk_req enc_fw_req;
70 	struct icp_qat_fw_la_bulk_req dec_fw_req;
71 	struct crypto_shash *hash_tfm;
72 	enum icp_qat_hw_auth_algo qat_hash_alg;
73 	struct qat_crypto_instance *inst;
74 	union {
75 		struct sha1_state sha1;
76 		struct sha256_state sha256;
77 		struct sha512_state sha512;
78 	};
79 	char ipad[SHA512_BLOCK_SIZE]; /* sufficient for SHA-1/SHA-256 as well */
80 	char opad[SHA512_BLOCK_SIZE];
81 };
82 
83 struct qat_alg_skcipher_ctx {
84 	struct icp_qat_hw_cipher_algo_blk *enc_cd;
85 	struct icp_qat_hw_cipher_algo_blk *dec_cd;
86 	dma_addr_t enc_cd_paddr;
87 	dma_addr_t dec_cd_paddr;
88 	struct icp_qat_fw_la_bulk_req enc_fw_req;
89 	struct icp_qat_fw_la_bulk_req dec_fw_req;
90 	struct qat_crypto_instance *inst;
91 	struct crypto_skcipher *ftfm;
92 	bool fallback;
93 };
94 
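/* Size of the hardware inner hash state (state1) for the given algorithm */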
95 static int qat_get_inter_state_size(enum icp_qat_hw_auth_algo qat_hash_alg)
96 {
97 	switch (qat_hash_alg) {
98 	case ICP_QAT_HW_AUTH_ALGO_SHA1:
99 		return ICP_QAT_HW_SHA1_STATE1_SZ;
100 	case ICP_QAT_HW_AUTH_ALGO_SHA256:
101 		return ICP_QAT_HW_SHA256_STATE1_SZ;
102 	case ICP_QAT_HW_AUTH_ALGO_SHA512:
103 		return ICP_QAT_HW_SHA512_STATE1_SZ;
104 	default:
105 		return -EFAULT;
106 	}
107 	return -EFAULT;
108 }
109 
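/*
 * Precompute the HMAC inner and outer hash states from the authentication
 * key in software and store them, big endian, in the content descriptor's
 * state1 region for the firmware to use.
 */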
110 static int qat_alg_do_precomputes(struct icp_qat_hw_auth_algo_blk *hash,
111 				  struct qat_alg_aead_ctx *ctx,
112 				  const u8 *auth_key,
113 				  unsigned int auth_keylen)
114 {
115 	SHASH_DESC_ON_STACK(shash, ctx->hash_tfm);
116 	int block_size = crypto_shash_blocksize(ctx->hash_tfm);
117 	int digest_size = crypto_shash_digestsize(ctx->hash_tfm);
118 	__be32 *hash_state_out;
119 	__be64 *hash512_state_out;
120 	int i, offset;
121 
122 	memset(ctx->ipad, 0, block_size);
123 	memset(ctx->opad, 0, block_size);
124 	shash->tfm = ctx->hash_tfm;
125 
126 	if (auth_keylen > block_size) {
127 		int ret = crypto_shash_digest(shash, auth_key,
128 					      auth_keylen, ctx->ipad);
129 		if (ret)
130 			return ret;
131 
132 		memcpy(ctx->opad, ctx->ipad, digest_size);
133 	} else {
134 		memcpy(ctx->ipad, auth_key, auth_keylen);
135 		memcpy(ctx->opad, auth_key, auth_keylen);
136 	}
137 
138 	for (i = 0; i < block_size; i++) {
139 		char *ipad_ptr = ctx->ipad + i;
140 		char *opad_ptr = ctx->opad + i;
141 		*ipad_ptr ^= HMAC_IPAD_VALUE;
142 		*opad_ptr ^= HMAC_OPAD_VALUE;
143 	}
144 
145 	if (crypto_shash_init(shash))
146 		return -EFAULT;
147 
148 	if (crypto_shash_update(shash, ctx->ipad, block_size))
149 		return -EFAULT;
150 
151 	hash_state_out = (__be32 *)hash->sha.state1;
152 	hash512_state_out = (__be64 *)hash_state_out;
153 
154 	switch (ctx->qat_hash_alg) {
155 	case ICP_QAT_HW_AUTH_ALGO_SHA1:
156 		if (crypto_shash_export(shash, &ctx->sha1))
157 			return -EFAULT;
158 		for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
159 			*hash_state_out = cpu_to_be32(ctx->sha1.state[i]);
160 		break;
161 	case ICP_QAT_HW_AUTH_ALGO_SHA256:
162 		if (crypto_shash_export(shash, &ctx->sha256))
163 			return -EFAULT;
164 		for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
165 			*hash_state_out = cpu_to_be32(ctx->sha256.state[i]);
166 		break;
167 	case ICP_QAT_HW_AUTH_ALGO_SHA512:
168 		if (crypto_shash_export(shash, &ctx->sha512))
169 			return -EFAULT;
170 		for (i = 0; i < digest_size >> 3; i++, hash512_state_out++)
171 			*hash512_state_out = cpu_to_be64(ctx->sha512.state[i]);
172 		break;
173 	default:
174 		return -EFAULT;
175 	}
176 
177 	if (crypto_shash_init(shash))
178 		return -EFAULT;
179 
180 	if (crypto_shash_update(shash, ctx->opad, block_size))
181 		return -EFAULT;
182 
183 	offset = round_up(qat_get_inter_state_size(ctx->qat_hash_alg), 8);
184 	if (offset < 0)
185 		return -EFAULT;
186 
187 	hash_state_out = (__be32 *)(hash->sha.state1 + offset);
188 	hash512_state_out = (__be64 *)hash_state_out;
189 
190 	switch (ctx->qat_hash_alg) {
191 	case ICP_QAT_HW_AUTH_ALGO_SHA1:
192 		if (crypto_shash_export(shash, &ctx->sha1))
193 			return -EFAULT;
194 		for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
195 			*hash_state_out = cpu_to_be32(ctx->sha1.state[i]);
196 		break;
197 	case ICP_QAT_HW_AUTH_ALGO_SHA256:
198 		if (crypto_shash_export(shash, &ctx->sha256))
199 			return -EFAULT;
200 		for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
201 			*hash_state_out = cpu_to_be32(ctx->sha256.state[i]);
202 		break;
203 	case ICP_QAT_HW_AUTH_ALGO_SHA512:
204 		if (crypto_shash_export(shash, &ctx->sha512))
205 			return -EFAULT;
206 		for (i = 0; i < digest_size >> 3; i++, hash512_state_out++)
207 			*hash512_state_out = cpu_to_be64(ctx->sha512.state[i]);
208 		break;
209 	default:
210 		return -EFAULT;
211 	}
212 	memzero_explicit(ctx->ipad, block_size);
213 	memzero_explicit(ctx->opad, block_size);
214 	return 0;
215 }
216 
217 static void qat_alg_init_hdr_iv_updt(struct icp_qat_fw_comn_req_hdr *header)
218 {
219 	ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(header->serv_specif_flags,
220 					   ICP_QAT_FW_CIPH_IV_64BIT_PTR);
221 	ICP_QAT_FW_LA_UPDATE_STATE_SET(header->serv_specif_flags,
222 				       ICP_QAT_FW_LA_UPDATE_STATE);
223 }
224 
225 static void qat_alg_init_hdr_no_iv_updt(struct icp_qat_fw_comn_req_hdr *header)
226 {
227 	ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(header->serv_specif_flags,
228 					   ICP_QAT_FW_CIPH_IV_16BYTE_DATA);
229 	ICP_QAT_FW_LA_UPDATE_STATE_SET(header->serv_specif_flags,
230 				       ICP_QAT_FW_LA_NO_UPDATE_STATE);
231 }
232 
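/*
 * Fill the request header fields shared by all sessions. AEAD requests
 * carry the IV inline (16 byte data) with no state update; skcipher
 * requests use a 64 bit IV pointer and ask the firmware to update it.
 */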
233 static void qat_alg_init_common_hdr(struct icp_qat_fw_comn_req_hdr *header,
234 				    int aead)
235 {
236 	header->hdr_flags =
237 		ICP_QAT_FW_COMN_HDR_FLAGS_BUILD(ICP_QAT_FW_COMN_REQ_FLAG_SET);
238 	header->service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_LA;
239 	header->comn_req_flags =
240 		ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_CD_FLD_TYPE_64BIT_ADR,
241 					    QAT_COMN_PTR_TYPE_SGL);
242 	ICP_QAT_FW_LA_PARTIAL_SET(header->serv_specif_flags,
243 				  ICP_QAT_FW_LA_PARTIAL_NONE);
244 	if (aead)
245 		qat_alg_init_hdr_no_iv_updt(header);
246 	else
247 		qat_alg_init_hdr_iv_updt(header);
248 	ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
249 				ICP_QAT_FW_LA_NO_PROTO);
250 }
251 
252 static int qat_alg_aead_init_enc_session(struct crypto_aead *aead_tfm,
253 					 int alg,
254 					 struct crypto_authenc_keys *keys,
255 					 int mode)
256 {
257 	struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(aead_tfm);
258 	unsigned int digestsize = crypto_aead_authsize(aead_tfm);
259 	struct qat_enc *enc_ctx = &ctx->enc_cd->qat_enc_cd;
260 	struct icp_qat_hw_cipher_algo_blk *cipher = &enc_ctx->cipher;
261 	struct icp_qat_hw_auth_algo_blk *hash =
262 		(struct icp_qat_hw_auth_algo_blk *)((char *)enc_ctx +
263 		sizeof(struct icp_qat_hw_auth_setup) + keys->enckeylen);
264 	struct icp_qat_fw_la_bulk_req *req_tmpl = &ctx->enc_fw_req;
265 	struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
266 	struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
267 	void *ptr = &req_tmpl->cd_ctrl;
268 	struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl = ptr;
269 	struct icp_qat_fw_auth_cd_ctrl_hdr *hash_cd_ctrl = ptr;
270 
271 	/* CD setup */
272 	cipher->aes.cipher_config.val = QAT_AES_HW_CONFIG_ENC(alg, mode);
273 	memcpy(cipher->aes.key, keys->enckey, keys->enckeylen);
274 	hash->sha.inner_setup.auth_config.config =
275 		ICP_QAT_HW_AUTH_CONFIG_BUILD(ICP_QAT_HW_AUTH_MODE1,
276 					     ctx->qat_hash_alg, digestsize);
277 	hash->sha.inner_setup.auth_counter.counter =
278 		cpu_to_be32(crypto_shash_blocksize(ctx->hash_tfm));
279 
280 	if (qat_alg_do_precomputes(hash, ctx, keys->authkey, keys->authkeylen))
281 		return -EFAULT;
282 
283 	/* Request setup */
284 	qat_alg_init_common_hdr(header, 1);
285 	header->service_cmd_id = ICP_QAT_FW_LA_CMD_CIPHER_HASH;
286 	ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(header->serv_specif_flags,
287 					   ICP_QAT_FW_LA_DIGEST_IN_BUFFER);
288 	ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
289 				   ICP_QAT_FW_LA_RET_AUTH_RES);
290 	ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
291 				   ICP_QAT_FW_LA_NO_CMP_AUTH_RES);
292 	cd_pars->u.s.content_desc_addr = ctx->enc_cd_paddr;
293 	cd_pars->u.s.content_desc_params_sz = sizeof(struct qat_alg_cd) >> 3;
294 
295 	/* Cipher CD config setup */
296 	cipher_cd_ctrl->cipher_key_sz = keys->enckeylen >> 3;
297 	cipher_cd_ctrl->cipher_state_sz = AES_BLOCK_SIZE >> 3;
298 	cipher_cd_ctrl->cipher_cfg_offset = 0;
299 	ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
300 	ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_AUTH);
301 	/* Auth CD config setup */
302 	hash_cd_ctrl->hash_cfg_offset = ((char *)hash - (char *)cipher) >> 3;
303 	hash_cd_ctrl->hash_flags = ICP_QAT_FW_AUTH_HDR_FLAG_NO_NESTED;
304 	hash_cd_ctrl->inner_res_sz = digestsize;
305 	hash_cd_ctrl->final_sz = digestsize;
306 
307 	switch (ctx->qat_hash_alg) {
308 	case ICP_QAT_HW_AUTH_ALGO_SHA1:
309 		hash_cd_ctrl->inner_state1_sz =
310 			round_up(ICP_QAT_HW_SHA1_STATE1_SZ, 8);
311 		hash_cd_ctrl->inner_state2_sz =
312 			round_up(ICP_QAT_HW_SHA1_STATE2_SZ, 8);
313 		break;
314 	case ICP_QAT_HW_AUTH_ALGO_SHA256:
315 		hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA256_STATE1_SZ;
316 		hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA256_STATE2_SZ;
317 		break;
318 	case ICP_QAT_HW_AUTH_ALGO_SHA512:
319 		hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA512_STATE1_SZ;
320 		hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA512_STATE2_SZ;
321 		break;
322 	default:
323 		break;
324 	}
325 	hash_cd_ctrl->inner_state2_offset = hash_cd_ctrl->hash_cfg_offset +
326 			((sizeof(struct icp_qat_hw_auth_setup) +
327 			 round_up(hash_cd_ctrl->inner_state1_sz, 8)) >> 3);
328 	ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_AUTH);
329 	ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);
330 	return 0;
331 }
332 
333 static int qat_alg_aead_init_dec_session(struct crypto_aead *aead_tfm,
334 					 int alg,
335 					 struct crypto_authenc_keys *keys,
336 					 int mode)
337 {
338 	struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(aead_tfm);
339 	unsigned int digestsize = crypto_aead_authsize(aead_tfm);
340 	struct qat_dec *dec_ctx = &ctx->dec_cd->qat_dec_cd;
341 	struct icp_qat_hw_auth_algo_blk *hash = &dec_ctx->hash;
342 	struct icp_qat_hw_cipher_algo_blk *cipher =
343 		(struct icp_qat_hw_cipher_algo_blk *)((char *)dec_ctx +
344 		sizeof(struct icp_qat_hw_auth_setup) +
345 		roundup(crypto_shash_digestsize(ctx->hash_tfm), 8) * 2);
346 	struct icp_qat_fw_la_bulk_req *req_tmpl = &ctx->dec_fw_req;
347 	struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
348 	struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
349 	void *ptr = &req_tmpl->cd_ctrl;
350 	struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl = ptr;
351 	struct icp_qat_fw_auth_cd_ctrl_hdr *hash_cd_ctrl = ptr;
352 	struct icp_qat_fw_la_auth_req_params *auth_param =
353 		(struct icp_qat_fw_la_auth_req_params *)
354 		((char *)&req_tmpl->serv_specif_rqpars +
355 		sizeof(struct icp_qat_fw_la_cipher_req_params));
356 
357 	/* CD setup */
358 	cipher->aes.cipher_config.val = QAT_AES_HW_CONFIG_DEC(alg, mode);
359 	memcpy(cipher->aes.key, keys->enckey, keys->enckeylen);
360 	hash->sha.inner_setup.auth_config.config =
361 		ICP_QAT_HW_AUTH_CONFIG_BUILD(ICP_QAT_HW_AUTH_MODE1,
362 					     ctx->qat_hash_alg,
363 					     digestsize);
364 	hash->sha.inner_setup.auth_counter.counter =
365 		cpu_to_be32(crypto_shash_blocksize(ctx->hash_tfm));
366 
367 	if (qat_alg_do_precomputes(hash, ctx, keys->authkey, keys->authkeylen))
368 		return -EFAULT;
369 
370 	/* Request setup */
371 	qat_alg_init_common_hdr(header, 1);
372 	header->service_cmd_id = ICP_QAT_FW_LA_CMD_HASH_CIPHER;
373 	ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(header->serv_specif_flags,
374 					   ICP_QAT_FW_LA_DIGEST_IN_BUFFER);
375 	ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
376 				   ICP_QAT_FW_LA_NO_RET_AUTH_RES);
377 	ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
378 				   ICP_QAT_FW_LA_CMP_AUTH_RES);
379 	cd_pars->u.s.content_desc_addr = ctx->dec_cd_paddr;
380 	cd_pars->u.s.content_desc_params_sz = sizeof(struct qat_alg_cd) >> 3;
381 
382 	/* Cipher CD config setup */
383 	cipher_cd_ctrl->cipher_key_sz = keys->enckeylen >> 3;
384 	cipher_cd_ctrl->cipher_state_sz = AES_BLOCK_SIZE >> 3;
385 	cipher_cd_ctrl->cipher_cfg_offset =
386 		(sizeof(struct icp_qat_hw_auth_setup) +
387 		 roundup(crypto_shash_digestsize(ctx->hash_tfm), 8) * 2) >> 3;
388 	ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
389 	ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);
390 
391 	/* Auth CD config setup */
392 	hash_cd_ctrl->hash_cfg_offset = 0;
393 	hash_cd_ctrl->hash_flags = ICP_QAT_FW_AUTH_HDR_FLAG_NO_NESTED;
394 	hash_cd_ctrl->inner_res_sz = digestsize;
395 	hash_cd_ctrl->final_sz = digestsize;
396 
397 	switch (ctx->qat_hash_alg) {
398 	case ICP_QAT_HW_AUTH_ALGO_SHA1:
399 		hash_cd_ctrl->inner_state1_sz =
400 			round_up(ICP_QAT_HW_SHA1_STATE1_SZ, 8);
401 		hash_cd_ctrl->inner_state2_sz =
402 			round_up(ICP_QAT_HW_SHA1_STATE2_SZ, 8);
403 		break;
404 	case ICP_QAT_HW_AUTH_ALGO_SHA256:
405 		hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA256_STATE1_SZ;
406 		hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA256_STATE2_SZ;
407 		break;
408 	case ICP_QAT_HW_AUTH_ALGO_SHA512:
409 		hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA512_STATE1_SZ;
410 		hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA512_STATE2_SZ;
411 		break;
412 	default:
413 		break;
414 	}
415 
416 	hash_cd_ctrl->inner_state2_offset = hash_cd_ctrl->hash_cfg_offset +
417 			((sizeof(struct icp_qat_hw_auth_setup) +
418 			 round_up(hash_cd_ctrl->inner_state1_sz, 8)) >> 3);
419 	auth_param->auth_res_sz = digestsize;
420 	ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_AUTH);
421 	ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
422 	return 0;
423 }
424 
425 static void qat_alg_skcipher_init_com(struct qat_alg_skcipher_ctx *ctx,
426 				      struct icp_qat_fw_la_bulk_req *req,
427 				      struct icp_qat_hw_cipher_algo_blk *cd,
428 				      const u8 *key, unsigned int keylen)
429 {
430 	struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req->cd_pars;
431 	struct icp_qat_fw_comn_req_hdr *header = &req->comn_hdr;
432 	struct icp_qat_fw_cipher_cd_ctrl_hdr *cd_ctrl = (void *)&req->cd_ctrl;
433 
434 	memcpy(cd->aes.key, key, keylen);
435 	qat_alg_init_common_hdr(header, 0);
436 	header->service_cmd_id = ICP_QAT_FW_LA_CMD_CIPHER;
437 	cd_pars->u.s.content_desc_params_sz =
438 				sizeof(struct icp_qat_hw_cipher_algo_blk) >> 3;
439 	/* Cipher CD config setup */
440 	cd_ctrl->cipher_key_sz = keylen >> 3;
441 	cd_ctrl->cipher_state_sz = AES_BLOCK_SIZE >> 3;
442 	cd_ctrl->cipher_cfg_offset = 0;
443 	ICP_QAT_FW_COMN_CURR_ID_SET(cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
444 	ICP_QAT_FW_COMN_NEXT_ID_SET(cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);
445 }
446 
447 static void qat_alg_skcipher_init_enc(struct qat_alg_skcipher_ctx *ctx,
448 				      int alg, const u8 *key,
449 				      unsigned int keylen, int mode)
450 {
451 	struct icp_qat_hw_cipher_algo_blk *enc_cd = ctx->enc_cd;
452 	struct icp_qat_fw_la_bulk_req *req = &ctx->enc_fw_req;
453 	struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req->cd_pars;
454 
455 	qat_alg_skcipher_init_com(ctx, req, enc_cd, key, keylen);
456 	cd_pars->u.s.content_desc_addr = ctx->enc_cd_paddr;
457 	enc_cd->aes.cipher_config.val = QAT_AES_HW_CONFIG_ENC(alg, mode);
458 }
459 
460 static void qat_alg_skcipher_init_dec(struct qat_alg_skcipher_ctx *ctx,
461 				      int alg, const u8 *key,
462 				      unsigned int keylen, int mode)
463 {
464 	struct icp_qat_hw_cipher_algo_blk *dec_cd = ctx->dec_cd;
465 	struct icp_qat_fw_la_bulk_req *req = &ctx->dec_fw_req;
466 	struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req->cd_pars;
467 
468 	qat_alg_skcipher_init_com(ctx, req, dec_cd, key, keylen);
469 	cd_pars->u.s.content_desc_addr = ctx->dec_cd_paddr;
470 
471 	if (mode != ICP_QAT_HW_CIPHER_CTR_MODE)
472 		dec_cd->aes.cipher_config.val =
473 					QAT_AES_HW_CONFIG_DEC(alg, mode);
474 	else
475 		dec_cd->aes.cipher_config.val =
476 					QAT_AES_HW_CONFIG_ENC(alg, mode);
477 }
478 
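/* Map a key length to the hardware AES algorithm id; XTS keys are double sized */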
479 static int qat_alg_validate_key(int key_len, int *alg, int mode)
480 {
481 	if (mode != ICP_QAT_HW_CIPHER_XTS_MODE) {
482 		switch (key_len) {
483 		case AES_KEYSIZE_128:
484 			*alg = ICP_QAT_HW_CIPHER_ALGO_AES128;
485 			break;
486 		case AES_KEYSIZE_192:
487 			*alg = ICP_QAT_HW_CIPHER_ALGO_AES192;
488 			break;
489 		case AES_KEYSIZE_256:
490 			*alg = ICP_QAT_HW_CIPHER_ALGO_AES256;
491 			break;
492 		default:
493 			return -EINVAL;
494 		}
495 	} else {
496 		switch (key_len) {
497 		case AES_KEYSIZE_128 << 1:
498 			*alg = ICP_QAT_HW_CIPHER_ALGO_AES128;
499 			break;
500 		case AES_KEYSIZE_256 << 1:
501 			*alg = ICP_QAT_HW_CIPHER_ALGO_AES256;
502 			break;
503 		default:
504 			return -EINVAL;
505 		}
506 	}
507 	return 0;
508 }
509 
510 static int qat_alg_aead_init_sessions(struct crypto_aead *tfm, const u8 *key,
511 				      unsigned int keylen,  int mode)
512 {
513 	struct crypto_authenc_keys keys;
514 	int alg;
515 
516 	if (crypto_authenc_extractkeys(&keys, key, keylen))
517 		goto bad_key;
518 
519 	if (qat_alg_validate_key(keys.enckeylen, &alg, mode))
520 		goto bad_key;
521 
522 	if (qat_alg_aead_init_enc_session(tfm, alg, &keys, mode))
523 		goto error;
524 
525 	if (qat_alg_aead_init_dec_session(tfm, alg, &keys, mode))
526 		goto error;
527 
528 	memzero_explicit(&keys, sizeof(keys));
529 	return 0;
530 bad_key:
531 	memzero_explicit(&keys, sizeof(keys));
532 	return -EINVAL;
533 error:
534 	memzero_explicit(&keys, sizeof(keys));
535 	return -EFAULT;
536 }
537 
538 static int qat_alg_skcipher_init_sessions(struct qat_alg_skcipher_ctx *ctx,
539 					  const u8 *key,
540 					  unsigned int keylen,
541 					  int mode)
542 {
543 	int alg;
544 
545 	if (qat_alg_validate_key(keylen, &alg, mode))
546 		return -EINVAL;
547 
548 	qat_alg_skcipher_init_enc(ctx, alg, key, keylen, mode);
549 	qat_alg_skcipher_init_dec(ctx, alg, key, keylen, mode);
550 	return 0;
551 }
552 
553 static int qat_alg_aead_rekey(struct crypto_aead *tfm, const u8 *key,
554 			      unsigned int keylen)
555 {
556 	struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm);
557 
558 	memset(ctx->enc_cd, 0, sizeof(*ctx->enc_cd));
559 	memset(ctx->dec_cd, 0, sizeof(*ctx->dec_cd));
560 	memset(&ctx->enc_fw_req, 0, sizeof(ctx->enc_fw_req));
561 	memset(&ctx->dec_fw_req, 0, sizeof(ctx->dec_fw_req));
562 
563 	return qat_alg_aead_init_sessions(tfm, key, keylen,
564 					  ICP_QAT_HW_CIPHER_CBC_MODE);
565 }
566 
567 static int qat_alg_aead_newkey(struct crypto_aead *tfm, const u8 *key,
568 			       unsigned int keylen)
569 {
570 	struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm);
571 	struct qat_crypto_instance *inst = NULL;
572 	int node = get_current_node();
573 	struct device *dev;
574 	int ret;
575 
576 	inst = qat_crypto_get_instance_node(node);
577 	if (!inst)
578 		return -EINVAL;
579 	dev = &GET_DEV(inst->accel_dev);
580 	ctx->inst = inst;
581 	ctx->enc_cd = dma_alloc_coherent(dev, sizeof(*ctx->enc_cd),
582 					 &ctx->enc_cd_paddr,
583 					 GFP_ATOMIC);
584 	if (!ctx->enc_cd) {
585 		ret = -ENOMEM;
586 		goto out_free_inst;
587 	}
588 	ctx->dec_cd = dma_alloc_coherent(dev, sizeof(*ctx->dec_cd),
589 					 &ctx->dec_cd_paddr,
590 					 GFP_ATOMIC);
591 	if (!ctx->dec_cd) {
592 		ret = -ENOMEM;
593 		goto out_free_enc;
594 	}
595 
596 	ret = qat_alg_aead_init_sessions(tfm, key, keylen,
597 					 ICP_QAT_HW_CIPHER_CBC_MODE);
598 	if (ret)
599 		goto out_free_all;
600 
601 	return 0;
602 
603 out_free_all:
604 	memset(ctx->dec_cd, 0, sizeof(struct qat_alg_cd));
605 	dma_free_coherent(dev, sizeof(struct qat_alg_cd),
606 			  ctx->dec_cd, ctx->dec_cd_paddr);
607 	ctx->dec_cd = NULL;
608 out_free_enc:
609 	memset(ctx->enc_cd, 0, sizeof(struct qat_alg_cd));
610 	dma_free_coherent(dev, sizeof(struct qat_alg_cd),
611 			  ctx->enc_cd, ctx->enc_cd_paddr);
612 	ctx->enc_cd = NULL;
613 out_free_inst:
614 	ctx->inst = NULL;
615 	qat_crypto_put_instance(inst);
616 	return ret;
617 }
618 
619 static int qat_alg_aead_setkey(struct crypto_aead *tfm, const u8 *key,
620 			       unsigned int keylen)
621 {
622 	struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm);
623 
624 	if (ctx->enc_cd)
625 		return qat_alg_aead_rekey(tfm, key, keylen);
626 	else
627 		return qat_alg_aead_newkey(tfm, key, keylen);
628 }
629 
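/* Undo the DMA mappings and free the buffer lists built by qat_alg_sgl_to_bufl() */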
630 static void qat_alg_free_bufl(struct qat_crypto_instance *inst,
631 			      struct qat_crypto_request *qat_req)
632 {
633 	struct device *dev = &GET_DEV(inst->accel_dev);
634 	struct qat_alg_buf_list *bl = qat_req->buf.bl;
635 	struct qat_alg_buf_list *blout = qat_req->buf.blout;
636 	dma_addr_t blp = qat_req->buf.blp;
637 	dma_addr_t blpout = qat_req->buf.bloutp;
638 	size_t sz = qat_req->buf.sz;
639 	size_t sz_out = qat_req->buf.sz_out;
640 	int i;
641 
642 	for (i = 0; i < bl->num_bufs; i++)
643 		dma_unmap_single(dev, bl->bufers[i].addr,
644 				 bl->bufers[i].len, DMA_BIDIRECTIONAL);
645 
646 	dma_unmap_single(dev, blp, sz, DMA_TO_DEVICE);
647 	kfree(bl);
648 	if (blp != blpout) {
649 		/* Out of place operation: DMA unmap only the data buffers */
650 		int bufless = blout->num_bufs - blout->num_mapped_bufs;
651 
652 		for (i = bufless; i < blout->num_bufs; i++) {
653 			dma_unmap_single(dev, blout->bufers[i].addr,
654 					 blout->bufers[i].len,
655 					 DMA_BIDIRECTIONAL);
656 		}
657 		dma_unmap_single(dev, blpout, sz_out, DMA_TO_DEVICE);
658 		kfree(blout);
659 	}
660 }
661 
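/*
 * Translate the source and destination scatterlists into the flat buffer
 * list format expected by the firmware and DMA map every segment. A
 * separate output list is only built for out of place requests.
 */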
662 static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
663 			       struct scatterlist *sgl,
664 			       struct scatterlist *sglout,
665 			       struct qat_crypto_request *qat_req)
666 {
667 	struct device *dev = &GET_DEV(inst->accel_dev);
668 	int i, sg_nctr = 0;
669 	int n = sg_nents(sgl);
670 	struct qat_alg_buf_list *bufl;
671 	struct qat_alg_buf_list *buflout = NULL;
672 	dma_addr_t blp;
673 	dma_addr_t bloutp;
674 	struct scatterlist *sg;
675 	size_t sz_out, sz = struct_size(bufl, bufers, n + 1);
676 
677 	if (unlikely(!n))
678 		return -EINVAL;
679 
680 	bufl = kzalloc_node(sz, GFP_ATOMIC,
681 			    dev_to_node(&GET_DEV(inst->accel_dev)));
682 	if (unlikely(!bufl))
683 		return -ENOMEM;
684 
685 	for_each_sg(sgl, sg, n, i)
686 		bufl->bufers[i].addr = DMA_MAPPING_ERROR;
687 
688 	blp = dma_map_single(dev, bufl, sz, DMA_TO_DEVICE);
689 	if (unlikely(dma_mapping_error(dev, blp)))
690 		goto err_in;
691 
692 	for_each_sg(sgl, sg, n, i) {
693 		int y = sg_nctr;
694 
695 		if (!sg->length)
696 			continue;
697 
698 		bufl->bufers[y].addr = dma_map_single(dev, sg_virt(sg),
699 						      sg->length,
700 						      DMA_BIDIRECTIONAL);
701 		bufl->bufers[y].len = sg->length;
702 		if (unlikely(dma_mapping_error(dev, bufl->bufers[y].addr)))
703 			goto err_in;
704 		sg_nctr++;
705 	}
706 	bufl->num_bufs = sg_nctr;
707 	qat_req->buf.bl = bufl;
708 	qat_req->buf.blp = blp;
709 	qat_req->buf.sz = sz;
710 	/* Handle out of place operation */
711 	if (sgl != sglout) {
712 		struct qat_alg_buf *bufers;
713 
714 		n = sg_nents(sglout);
715 		sz_out = struct_size(buflout, bufers, n + 1);
716 		sg_nctr = 0;
717 		buflout = kzalloc_node(sz_out, GFP_ATOMIC,
718 				       dev_to_node(&GET_DEV(inst->accel_dev)));
719 		if (unlikely(!buflout))
720 			goto err_in;
721 
722 		bufers = buflout->bufers;
723 		for_each_sg(sglout, sg, n, i)
724 			bufers[i].addr = DMA_MAPPING_ERROR;
725 
726 		bloutp = dma_map_single(dev, buflout, sz_out, DMA_TO_DEVICE);
727 		if (unlikely(dma_mapping_error(dev, bloutp)))
728 			goto err_out;
729 		for_each_sg(sglout, sg, n, i) {
730 			int y = sg_nctr;
731 
732 			if (!sg->length)
733 				continue;
734 
735 			bufers[y].addr = dma_map_single(dev, sg_virt(sg),
736 							sg->length,
737 							DMA_BIDIRECTIONAL);
738 			if (unlikely(dma_mapping_error(dev, bufers[y].addr)))
739 				goto err_out;
740 			bufers[y].len = sg->length;
741 			sg_nctr++;
742 		}
743 		buflout->num_bufs = sg_nctr;
744 		buflout->num_mapped_bufs = sg_nctr;
745 		qat_req->buf.blout = buflout;
746 		qat_req->buf.bloutp = bloutp;
747 		qat_req->buf.sz_out = sz_out;
748 	} else {
749 		/* Otherwise set the src and dst to the same address */
750 		qat_req->buf.bloutp = qat_req->buf.blp;
751 		qat_req->buf.sz_out = 0;
752 	}
753 	return 0;
754 
755 err_out:
756 	n = sg_nents(sglout);
757 	for (i = 0; i < n; i++)
758 		if (!dma_mapping_error(dev, buflout->bufers[i].addr))
759 			dma_unmap_single(dev, buflout->bufers[i].addr,
760 					 buflout->bufers[i].len,
761 					 DMA_BIDIRECTIONAL);
762 	if (!dma_mapping_error(dev, bloutp))
763 		dma_unmap_single(dev, bloutp, sz_out, DMA_TO_DEVICE);
764 	kfree(buflout);
765 
766 err_in:
767 	n = sg_nents(sgl);
768 	for (i = 0; i < n; i++)
769 		if (!dma_mapping_error(dev, bufl->bufers[i].addr))
770 			dma_unmap_single(dev, bufl->bufers[i].addr,
771 					 bufl->bufers[i].len,
772 					 DMA_BIDIRECTIONAL);
773 
774 	if (!dma_mapping_error(dev, blp))
775 		dma_unmap_single(dev, blp, sz, DMA_TO_DEVICE);
776 	kfree(bufl);
777 
778 	dev_err(dev, "Failed to map buf for dma\n");
779 	return -ENOMEM;
780 }
781 
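/* AEAD completion: release DMA mappings and flag firmware auth failures as -EBADMSG */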
782 static void qat_aead_alg_callback(struct icp_qat_fw_la_resp *qat_resp,
783 				  struct qat_crypto_request *qat_req)
784 {
785 	struct qat_alg_aead_ctx *ctx = qat_req->aead_ctx;
786 	struct qat_crypto_instance *inst = ctx->inst;
787 	struct aead_request *areq = qat_req->aead_req;
788 	u8 stat_field = qat_resp->comn_resp.comn_status;
789 	int res = 0, qat_res = ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(stat_field);
790 
791 	qat_alg_free_bufl(inst, qat_req);
792 	if (unlikely(qat_res != ICP_QAT_FW_COMN_STATUS_FLAG_OK))
793 		res = -EBADMSG;
794 	areq->base.complete(&areq->base, res);
795 }
796 
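/* Skcipher completion: release DMA mappings, copy back the IV and free its buffer */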
797 static void qat_skcipher_alg_callback(struct icp_qat_fw_la_resp *qat_resp,
798 				      struct qat_crypto_request *qat_req)
799 {
800 	struct qat_alg_skcipher_ctx *ctx = qat_req->skcipher_ctx;
801 	struct qat_crypto_instance *inst = ctx->inst;
802 	struct skcipher_request *sreq = qat_req->skcipher_req;
803 	u8 stat_field = qat_resp->comn_resp.comn_status;
804 	struct device *dev = &GET_DEV(ctx->inst->accel_dev);
805 	int res = 0, qat_res = ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(stat_field);
806 
807 	qat_alg_free_bufl(inst, qat_req);
808 	if (unlikely(qat_res != ICP_QAT_FW_COMN_STATUS_FLAG_OK))
809 		res = -EINVAL;
810 
811 	memcpy(sreq->iv, qat_req->iv, AES_BLOCK_SIZE);
812 	dma_free_coherent(dev, AES_BLOCK_SIZE, qat_req->iv,
813 			  qat_req->iv_paddr);
814 
815 	sreq->base.complete(&sreq->base, res);
816 }
817 
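/* Ring callback: recover the request from the opaque data and run its handler */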
818 void qat_alg_callback(void *resp)
819 {
820 	struct icp_qat_fw_la_resp *qat_resp = resp;
821 	struct qat_crypto_request *qat_req =
822 				(void *)(__force long)qat_resp->opaque_data;
823 
824 	qat_req->cb(qat_resp, qat_req);
825 }
826 
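/* Decrypt and verify: the digest trails the ciphertext, so it is excluded from the cipher length */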
827 static int qat_alg_aead_dec(struct aead_request *areq)
828 {
829 	struct crypto_aead *aead_tfm = crypto_aead_reqtfm(areq);
830 	struct crypto_tfm *tfm = crypto_aead_tfm(aead_tfm);
831 	struct qat_alg_aead_ctx *ctx = crypto_tfm_ctx(tfm);
832 	struct qat_crypto_request *qat_req = aead_request_ctx(areq);
833 	struct icp_qat_fw_la_cipher_req_params *cipher_param;
834 	struct icp_qat_fw_la_auth_req_params *auth_param;
835 	struct icp_qat_fw_la_bulk_req *msg;
836 	int digest_size = crypto_aead_authsize(aead_tfm);
837 	int ret, ctr = 0;
838 	u32 cipher_len;
839 
840 	cipher_len = areq->cryptlen - digest_size;
841 	if (cipher_len % AES_BLOCK_SIZE != 0)
842 		return -EINVAL;
843 
844 	ret = qat_alg_sgl_to_bufl(ctx->inst, areq->src, areq->dst, qat_req);
845 	if (unlikely(ret))
846 		return ret;
847 
848 	msg = &qat_req->req;
849 	*msg = ctx->dec_fw_req;
850 	qat_req->aead_ctx = ctx;
851 	qat_req->aead_req = areq;
852 	qat_req->cb = qat_aead_alg_callback;
853 	qat_req->req.comn_mid.opaque_data = (u64)(__force long)qat_req;
854 	qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
855 	qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
856 	cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
857 	cipher_param->cipher_length = cipher_len;
858 	cipher_param->cipher_offset = areq->assoclen;
859 	memcpy(cipher_param->u.cipher_IV_array, areq->iv, AES_BLOCK_SIZE);
860 	auth_param = (void *)((u8 *)cipher_param + sizeof(*cipher_param));
861 	auth_param->auth_off = 0;
862 	auth_param->auth_len = areq->assoclen + cipher_param->cipher_length;
863 	do {
864 		ret = adf_send_message(ctx->inst->sym_tx, (u32 *)msg);
865 	} while (ret == -EAGAIN && ctr++ < 10);
866 
867 	if (ret == -EAGAIN) {
868 		qat_alg_free_bufl(ctx->inst, qat_req);
869 		return -EBUSY;
870 	}
871 	return -EINPROGRESS;
872 }
873 
874 static int qat_alg_aead_enc(struct aead_request *areq)
875 {
876 	struct crypto_aead *aead_tfm = crypto_aead_reqtfm(areq);
877 	struct crypto_tfm *tfm = crypto_aead_tfm(aead_tfm);
878 	struct qat_alg_aead_ctx *ctx = crypto_tfm_ctx(tfm);
879 	struct qat_crypto_request *qat_req = aead_request_ctx(areq);
880 	struct icp_qat_fw_la_cipher_req_params *cipher_param;
881 	struct icp_qat_fw_la_auth_req_params *auth_param;
882 	struct icp_qat_fw_la_bulk_req *msg;
883 	u8 *iv = areq->iv;
884 	int ret, ctr = 0;
885 
886 	if (areq->cryptlen % AES_BLOCK_SIZE != 0)
887 		return -EINVAL;
888 
889 	ret = qat_alg_sgl_to_bufl(ctx->inst, areq->src, areq->dst, qat_req);
890 	if (unlikely(ret))
891 		return ret;
892 
893 	msg = &qat_req->req;
894 	*msg = ctx->enc_fw_req;
895 	qat_req->aead_ctx = ctx;
896 	qat_req->aead_req = areq;
897 	qat_req->cb = qat_aead_alg_callback;
898 	qat_req->req.comn_mid.opaque_data = (u64)(__force long)qat_req;
899 	qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
900 	qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
901 	cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
902 	auth_param = (void *)((u8 *)cipher_param + sizeof(*cipher_param));
903 
904 	memcpy(cipher_param->u.cipher_IV_array, iv, AES_BLOCK_SIZE);
905 	cipher_param->cipher_length = areq->cryptlen;
906 	cipher_param->cipher_offset = areq->assoclen;
907 
908 	auth_param->auth_off = 0;
909 	auth_param->auth_len = areq->assoclen + areq->cryptlen;
910 
911 	do {
912 		ret = adf_send_message(ctx->inst->sym_tx, (u32 *)msg);
913 	} while (ret == -EAGAIN && ctr++ < 10);
914 
915 	if (ret == -EAGAIN) {
916 		qat_alg_free_bufl(ctx->inst, qat_req);
917 		return -EBUSY;
918 	}
919 	return -EINPROGRESS;
920 }
921 
922 static int qat_alg_skcipher_rekey(struct qat_alg_skcipher_ctx *ctx,
923 				  const u8 *key, unsigned int keylen,
924 				  int mode)
925 {
926 	memset(ctx->enc_cd, 0, sizeof(*ctx->enc_cd));
927 	memset(ctx->dec_cd, 0, sizeof(*ctx->dec_cd));
928 	memset(&ctx->enc_fw_req, 0, sizeof(ctx->enc_fw_req));
929 	memset(&ctx->dec_fw_req, 0, sizeof(ctx->dec_fw_req));
930 
931 	return qat_alg_skcipher_init_sessions(ctx, key, keylen, mode);
932 }
933 
934 static int qat_alg_skcipher_newkey(struct qat_alg_skcipher_ctx *ctx,
935 				   const u8 *key, unsigned int keylen,
936 				   int mode)
937 {
938 	struct qat_crypto_instance *inst = NULL;
939 	struct device *dev;
940 	int node = get_current_node();
941 	int ret;
942 
943 	inst = qat_crypto_get_instance_node(node);
944 	if (!inst)
945 		return -EINVAL;
946 	dev = &GET_DEV(inst->accel_dev);
947 	ctx->inst = inst;
948 	ctx->enc_cd = dma_alloc_coherent(dev, sizeof(*ctx->enc_cd),
949 					 &ctx->enc_cd_paddr,
950 					 GFP_ATOMIC);
951 	if (!ctx->enc_cd) {
952 		ret = -ENOMEM;
953 		goto out_free_instance;
954 	}
955 	ctx->dec_cd = dma_alloc_coherent(dev, sizeof(*ctx->dec_cd),
956 					 &ctx->dec_cd_paddr,
957 					 GFP_ATOMIC);
958 	if (!ctx->dec_cd) {
959 		ret = -ENOMEM;
960 		goto out_free_enc;
961 	}
962 
963 	ret = qat_alg_skcipher_init_sessions(ctx, key, keylen, mode);
964 	if (ret)
965 		goto out_free_all;
966 
967 	return 0;
968 
969 out_free_all:
970 	memset(ctx->dec_cd, 0, sizeof(*ctx->dec_cd));
971 	dma_free_coherent(dev, sizeof(*ctx->dec_cd),
972 			  ctx->dec_cd, ctx->dec_cd_paddr);
973 	ctx->dec_cd = NULL;
974 out_free_enc:
975 	memset(ctx->enc_cd, 0, sizeof(*ctx->enc_cd));
976 	dma_free_coherent(dev, sizeof(*ctx->enc_cd),
977 			  ctx->enc_cd, ctx->enc_cd_paddr);
978 	ctx->enc_cd = NULL;
979 out_free_instance:
980 	ctx->inst = NULL;
981 	qat_crypto_put_instance(inst);
982 	return ret;
983 }
984 
985 static int qat_alg_skcipher_setkey(struct crypto_skcipher *tfm,
986 				   const u8 *key, unsigned int keylen,
987 				   int mode)
988 {
989 	struct qat_alg_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
990 
991 	if (ctx->enc_cd)
992 		return qat_alg_skcipher_rekey(ctx, key, keylen, mode);
993 	else
994 		return qat_alg_skcipher_newkey(ctx, key, keylen, mode);
995 }
996 
997 static int qat_alg_skcipher_cbc_setkey(struct crypto_skcipher *tfm,
998 				       const u8 *key, unsigned int keylen)
999 {
1000 	return qat_alg_skcipher_setkey(tfm, key, keylen,
1001 				       ICP_QAT_HW_CIPHER_CBC_MODE);
1002 }
1003 
1004 static int qat_alg_skcipher_ctr_setkey(struct crypto_skcipher *tfm,
1005 				       const u8 *key, unsigned int keylen)
1006 {
1007 	return qat_alg_skcipher_setkey(tfm, key, keylen,
1008 				       ICP_QAT_HW_CIPHER_CTR_MODE);
1009 }
1010 
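/* The accelerator has no AES-192-XTS support, so such keys go to the software fallback */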
1011 static int qat_alg_skcipher_xts_setkey(struct crypto_skcipher *tfm,
1012 				       const u8 *key, unsigned int keylen)
1013 {
1014 	struct qat_alg_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
1015 	int ret;
1016 
1017 	ret = xts_verify_key(tfm, key, keylen);
1018 	if (ret)
1019 		return ret;
1020 
1021 	if (keylen >> 1 == AES_KEYSIZE_192) {
1022 		ret = crypto_skcipher_setkey(ctx->ftfm, key, keylen);
1023 		if (ret)
1024 			return ret;
1025 
1026 		ctx->fallback = true;
1027 
1028 		return 0;
1029 	}
1030 
1031 	ctx->fallback = false;
1032 
1033 	return qat_alg_skcipher_setkey(tfm, key, keylen,
1034 				       ICP_QAT_HW_CIPHER_XTS_MODE);
1035 }
1036 
1037 static int qat_alg_skcipher_encrypt(struct skcipher_request *req)
1038 {
1039 	struct crypto_skcipher *stfm = crypto_skcipher_reqtfm(req);
1040 	struct crypto_tfm *tfm = crypto_skcipher_tfm(stfm);
1041 	struct qat_alg_skcipher_ctx *ctx = crypto_tfm_ctx(tfm);
1042 	struct qat_crypto_request *qat_req = skcipher_request_ctx(req);
1043 	struct icp_qat_fw_la_cipher_req_params *cipher_param;
1044 	struct icp_qat_fw_la_bulk_req *msg;
1045 	struct device *dev = &GET_DEV(ctx->inst->accel_dev);
1046 	int ret, ctr = 0;
1047 
1048 	if (req->cryptlen == 0)
1049 		return 0;
1050 
1051 	qat_req->iv = dma_alloc_coherent(dev, AES_BLOCK_SIZE,
1052 					 &qat_req->iv_paddr, GFP_ATOMIC);
1053 	if (!qat_req->iv)
1054 		return -ENOMEM;
1055 
1056 	ret = qat_alg_sgl_to_bufl(ctx->inst, req->src, req->dst, qat_req);
1057 	if (unlikely(ret)) {
1058 		dma_free_coherent(dev, AES_BLOCK_SIZE, qat_req->iv,
1059 				  qat_req->iv_paddr);
1060 		return ret;
1061 	}
1062 
1063 	msg = &qat_req->req;
1064 	*msg = ctx->enc_fw_req;
1065 	qat_req->skcipher_ctx = ctx;
1066 	qat_req->skcipher_req = req;
1067 	qat_req->cb = qat_skcipher_alg_callback;
1068 	qat_req->req.comn_mid.opaque_data = (u64)(__force long)qat_req;
1069 	qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
1070 	qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
1071 	cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
1072 	cipher_param->cipher_length = req->cryptlen;
1073 	cipher_param->cipher_offset = 0;
1074 	cipher_param->u.s.cipher_IV_ptr = qat_req->iv_paddr;
1075 	memcpy(qat_req->iv, req->iv, AES_BLOCK_SIZE);
1076 	do {
1077 		ret = adf_send_message(ctx->inst->sym_tx, (u32 *)msg);
1078 	} while (ret == -EAGAIN && ctr++ < 10);
1079 
1080 	if (ret == -EAGAIN) {
1081 		qat_alg_free_bufl(ctx->inst, qat_req);
1082 		dma_free_coherent(dev, AES_BLOCK_SIZE, qat_req->iv,
1083 				  qat_req->iv_paddr);
1084 		return -EBUSY;
1085 	}
1086 	return -EINPROGRESS;
1087 }
1088 
1089 static int qat_alg_skcipher_blk_encrypt(struct skcipher_request *req)
1090 {
1091 	if (req->cryptlen % AES_BLOCK_SIZE != 0)
1092 		return -EINVAL;
1093 
1094 	return qat_alg_skcipher_encrypt(req);
1095 }
1096 
1097 static int qat_alg_skcipher_xts_encrypt(struct skcipher_request *req)
1098 {
1099 	struct crypto_skcipher *stfm = crypto_skcipher_reqtfm(req);
1100 	struct qat_alg_skcipher_ctx *ctx = crypto_skcipher_ctx(stfm);
1101 	struct skcipher_request *nreq = skcipher_request_ctx(req);
1102 
1103 	if (req->cryptlen < XTS_BLOCK_SIZE)
1104 		return -EINVAL;
1105 
1106 	if (ctx->fallback) {
1107 		memcpy(nreq, req, sizeof(*req));
1108 		skcipher_request_set_tfm(nreq, ctx->ftfm);
1109 		return crypto_skcipher_encrypt(nreq);
1110 	}
1111 
1112 	return qat_alg_skcipher_encrypt(req);
1113 }
1114 
1115 static int qat_alg_skcipher_decrypt(struct skcipher_request *req)
1116 {
1117 	struct crypto_skcipher *stfm = crypto_skcipher_reqtfm(req);
1118 	struct crypto_tfm *tfm = crypto_skcipher_tfm(stfm);
1119 	struct qat_alg_skcipher_ctx *ctx = crypto_tfm_ctx(tfm);
1120 	struct qat_crypto_request *qat_req = skcipher_request_ctx(req);
1121 	struct icp_qat_fw_la_cipher_req_params *cipher_param;
1122 	struct icp_qat_fw_la_bulk_req *msg;
1123 	struct device *dev = &GET_DEV(ctx->inst->accel_dev);
1124 	int ret, ctr = 0;
1125 
1126 	if (req->cryptlen == 0)
1127 		return 0;
1128 
1129 	qat_req->iv = dma_alloc_coherent(dev, AES_BLOCK_SIZE,
1130 					 &qat_req->iv_paddr, GFP_ATOMIC);
1131 	if (!qat_req->iv)
1132 		return -ENOMEM;
1133 
1134 	ret = qat_alg_sgl_to_bufl(ctx->inst, req->src, req->dst, qat_req);
1135 	if (unlikely(ret)) {
1136 		dma_free_coherent(dev, AES_BLOCK_SIZE, qat_req->iv,
1137 				  qat_req->iv_paddr);
1138 		return ret;
1139 	}
1140 
1141 	msg = &qat_req->req;
1142 	*msg = ctx->dec_fw_req;
1143 	qat_req->skcipher_ctx = ctx;
1144 	qat_req->skcipher_req = req;
1145 	qat_req->cb = qat_skcipher_alg_callback;
1146 	qat_req->req.comn_mid.opaque_data = (u64)(__force long)qat_req;
1147 	qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
1148 	qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
1149 	cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
1150 	cipher_param->cipher_length = req->cryptlen;
1151 	cipher_param->cipher_offset = 0;
1152 	cipher_param->u.s.cipher_IV_ptr = qat_req->iv_paddr;
1153 	memcpy(qat_req->iv, req->iv, AES_BLOCK_SIZE);
1154 	do {
1155 		ret = adf_send_message(ctx->inst->sym_tx, (u32 *)msg);
1156 	} while (ret == -EAGAIN && ctr++ < 10);
1157 
1158 	if (ret == -EAGAIN) {
1159 		qat_alg_free_bufl(ctx->inst, qat_req);
1160 		dma_free_coherent(dev, AES_BLOCK_SIZE, qat_req->iv,
1161 				  qat_req->iv_paddr);
1162 		return -EBUSY;
1163 	}
1164 	return -EINPROGRESS;
1165 }
1166 
1167 static int qat_alg_skcipher_blk_decrypt(struct skcipher_request *req)
1168 {
1169 	if (req->cryptlen % AES_BLOCK_SIZE != 0)
1170 		return -EINVAL;
1171 
1172 	return qat_alg_skcipher_decrypt(req);
1173 }
1174 
1175 static int qat_alg_skcipher_xts_decrypt(struct skcipher_request *req)
1176 {
1177 	struct crypto_skcipher *stfm = crypto_skcipher_reqtfm(req);
1178 	struct qat_alg_skcipher_ctx *ctx = crypto_skcipher_ctx(stfm);
1179 	struct skcipher_request *nreq = skcipher_request_ctx(req);
1180 
1181 	if (req->cryptlen < XTS_BLOCK_SIZE)
1182 		return -EINVAL;
1183 
1184 	if (ctx->fallback) {
1185 		memcpy(nreq, req, sizeof(*req));
1186 		skcipher_request_set_tfm(nreq, ctx->ftfm);
1187 		return crypto_skcipher_decrypt(nreq);
1188 	}
1189 
1190 	return qat_alg_skcipher_decrypt(req);
1191 }
1192 
1193 static int qat_alg_aead_init(struct crypto_aead *tfm,
1194 			     enum icp_qat_hw_auth_algo hash,
1195 			     const char *hash_name)
1196 {
1197 	struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm);
1198 
1199 	ctx->hash_tfm = crypto_alloc_shash(hash_name, 0, 0);
1200 	if (IS_ERR(ctx->hash_tfm))
1201 		return PTR_ERR(ctx->hash_tfm);
1202 	ctx->qat_hash_alg = hash;
1203 	crypto_aead_set_reqsize(tfm, sizeof(struct qat_crypto_request));
1204 	return 0;
1205 }
1206 
1207 static int qat_alg_aead_sha1_init(struct crypto_aead *tfm)
1208 {
1209 	return qat_alg_aead_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA1, "sha1");
1210 }
1211 
1212 static int qat_alg_aead_sha256_init(struct crypto_aead *tfm)
1213 {
1214 	return qat_alg_aead_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA256, "sha256");
1215 }
1216 
1217 static int qat_alg_aead_sha512_init(struct crypto_aead *tfm)
1218 {
1219 	return qat_alg_aead_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA512, "sha512");
1220 }
1221 
1222 static void qat_alg_aead_exit(struct crypto_aead *tfm)
1223 {
1224 	struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm);
1225 	struct qat_crypto_instance *inst = ctx->inst;
1226 	struct device *dev;
1227 
1228 	crypto_free_shash(ctx->hash_tfm);
1229 
1230 	if (!inst)
1231 		return;
1232 
1233 	dev = &GET_DEV(inst->accel_dev);
1234 	if (ctx->enc_cd) {
1235 		memset(ctx->enc_cd, 0, sizeof(struct qat_alg_cd));
1236 		dma_free_coherent(dev, sizeof(struct qat_alg_cd),
1237 				  ctx->enc_cd, ctx->enc_cd_paddr);
1238 	}
1239 	if (ctx->dec_cd) {
1240 		memset(ctx->dec_cd, 0, sizeof(struct qat_alg_cd));
1241 		dma_free_coherent(dev, sizeof(struct qat_alg_cd),
1242 				  ctx->dec_cd, ctx->dec_cd_paddr);
1243 	}
1244 	qat_crypto_put_instance(inst);
1245 }
1246 
1247 static int qat_alg_skcipher_init_tfm(struct crypto_skcipher *tfm)
1248 {
1249 	crypto_skcipher_set_reqsize(tfm, sizeof(struct qat_crypto_request));
1250 	return 0;
1251 }
1252 
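/* Allocate the xts(aes) software fallback and size the request ctx to fit either path */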
1253 static int qat_alg_skcipher_init_xts_tfm(struct crypto_skcipher *tfm)
1254 {
1255 	struct qat_alg_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
1256 	int reqsize;
1257 
1258 	ctx->ftfm = crypto_alloc_skcipher("xts(aes)", 0,
1259 					  CRYPTO_ALG_NEED_FALLBACK);
1260 	if (IS_ERR(ctx->ftfm))
1261 		return PTR_ERR(ctx->ftfm);
1262 
1263 	reqsize = max(sizeof(struct qat_crypto_request),
1264 		      sizeof(struct skcipher_request) +
1265 		      crypto_skcipher_reqsize(ctx->ftfm));
1266 	crypto_skcipher_set_reqsize(tfm, reqsize);
1267 
1268 	return 0;
1269 }
1270 
1271 static void qat_alg_skcipher_exit_tfm(struct crypto_skcipher *tfm)
1272 {
1273 	struct qat_alg_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
1274 	struct qat_crypto_instance *inst = ctx->inst;
1275 	struct device *dev;
1276 
1277 	if (!inst)
1278 		return;
1279 
1280 	dev = &GET_DEV(inst->accel_dev);
1281 	if (ctx->enc_cd) {
1282 		memset(ctx->enc_cd, 0,
1283 		       sizeof(struct icp_qat_hw_cipher_algo_blk));
1284 		dma_free_coherent(dev,
1285 				  sizeof(struct icp_qat_hw_cipher_algo_blk),
1286 				  ctx->enc_cd, ctx->enc_cd_paddr);
1287 	}
1288 	if (ctx->dec_cd) {
1289 		memset(ctx->dec_cd, 0,
1290 		       sizeof(struct icp_qat_hw_cipher_algo_blk));
1291 		dma_free_coherent(dev,
1292 				  sizeof(struct icp_qat_hw_cipher_algo_blk),
1293 				  ctx->dec_cd, ctx->dec_cd_paddr);
1294 	}
1295 	qat_crypto_put_instance(inst);
1296 }
1297 
1298 static void qat_alg_skcipher_exit_xts_tfm(struct crypto_skcipher *tfm)
1299 {
1300 	struct qat_alg_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
1301 
1302 	if (ctx->ftfm)
1303 		crypto_free_skcipher(ctx->ftfm);
1304 
1305 	qat_alg_skcipher_exit_tfm(tfm);
1306 }
1307 
1308 static struct aead_alg qat_aeads[] = { {
1309 	.base = {
1310 		.cra_name = "authenc(hmac(sha1),cbc(aes))",
1311 		.cra_driver_name = "qat_aes_cbc_hmac_sha1",
1312 		.cra_priority = 4001,
1313 		.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
1314 		.cra_blocksize = AES_BLOCK_SIZE,
1315 		.cra_ctxsize = sizeof(struct qat_alg_aead_ctx),
1316 		.cra_module = THIS_MODULE,
1317 	},
1318 	.init = qat_alg_aead_sha1_init,
1319 	.exit = qat_alg_aead_exit,
1320 	.setkey = qat_alg_aead_setkey,
1321 	.decrypt = qat_alg_aead_dec,
1322 	.encrypt = qat_alg_aead_enc,
1323 	.ivsize = AES_BLOCK_SIZE,
1324 	.maxauthsize = SHA1_DIGEST_SIZE,
1325 }, {
1326 	.base = {
1327 		.cra_name = "authenc(hmac(sha256),cbc(aes))",
1328 		.cra_driver_name = "qat_aes_cbc_hmac_sha256",
1329 		.cra_priority = 4001,
1330 		.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
1331 		.cra_blocksize = AES_BLOCK_SIZE,
1332 		.cra_ctxsize = sizeof(struct qat_alg_aead_ctx),
1333 		.cra_module = THIS_MODULE,
1334 	},
1335 	.init = qat_alg_aead_sha256_init,
1336 	.exit = qat_alg_aead_exit,
1337 	.setkey = qat_alg_aead_setkey,
1338 	.decrypt = qat_alg_aead_dec,
1339 	.encrypt = qat_alg_aead_enc,
1340 	.ivsize = AES_BLOCK_SIZE,
1341 	.maxauthsize = SHA256_DIGEST_SIZE,
1342 }, {
1343 	.base = {
1344 		.cra_name = "authenc(hmac(sha512),cbc(aes))",
1345 		.cra_driver_name = "qat_aes_cbc_hmac_sha512",
1346 		.cra_priority = 4001,
1347 		.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
1348 		.cra_blocksize = AES_BLOCK_SIZE,
1349 		.cra_ctxsize = sizeof(struct qat_alg_aead_ctx),
1350 		.cra_module = THIS_MODULE,
1351 	},
1352 	.init = qat_alg_aead_sha512_init,
1353 	.exit = qat_alg_aead_exit,
1354 	.setkey = qat_alg_aead_setkey,
1355 	.decrypt = qat_alg_aead_dec,
1356 	.encrypt = qat_alg_aead_enc,
1357 	.ivsize = AES_BLOCK_SIZE,
1358 	.maxauthsize = SHA512_DIGEST_SIZE,
1359 } };
1360 
1361 static struct skcipher_alg qat_skciphers[] = { {
1362 	.base.cra_name = "cbc(aes)",
1363 	.base.cra_driver_name = "qat_aes_cbc",
1364 	.base.cra_priority = 4001,
1365 	.base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
1366 	.base.cra_blocksize = AES_BLOCK_SIZE,
1367 	.base.cra_ctxsize = sizeof(struct qat_alg_skcipher_ctx),
1368 	.base.cra_alignmask = 0,
1369 	.base.cra_module = THIS_MODULE,
1370 
1371 	.init = qat_alg_skcipher_init_tfm,
1372 	.exit = qat_alg_skcipher_exit_tfm,
1373 	.setkey = qat_alg_skcipher_cbc_setkey,
1374 	.decrypt = qat_alg_skcipher_blk_decrypt,
1375 	.encrypt = qat_alg_skcipher_blk_encrypt,
1376 	.min_keysize = AES_MIN_KEY_SIZE,
1377 	.max_keysize = AES_MAX_KEY_SIZE,
1378 	.ivsize = AES_BLOCK_SIZE,
1379 }, {
1380 	.base.cra_name = "ctr(aes)",
1381 	.base.cra_driver_name = "qat_aes_ctr",
1382 	.base.cra_priority = 4001,
1383 	.base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
1384 	.base.cra_blocksize = 1,
1385 	.base.cra_ctxsize = sizeof(struct qat_alg_skcipher_ctx),
1386 	.base.cra_alignmask = 0,
1387 	.base.cra_module = THIS_MODULE,
1388 
1389 	.init = qat_alg_skcipher_init_tfm,
1390 	.exit = qat_alg_skcipher_exit_tfm,
1391 	.setkey = qat_alg_skcipher_ctr_setkey,
1392 	.decrypt = qat_alg_skcipher_decrypt,
1393 	.encrypt = qat_alg_skcipher_encrypt,
1394 	.min_keysize = AES_MIN_KEY_SIZE,
1395 	.max_keysize = AES_MAX_KEY_SIZE,
1396 	.ivsize = AES_BLOCK_SIZE,
1397 }, {
1398 	.base.cra_name = "xts(aes)",
1399 	.base.cra_driver_name = "qat_aes_xts",
1400 	.base.cra_priority = 4001,
1401 	.base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK |
1402 			  CRYPTO_ALG_ALLOCATES_MEMORY,
1403 	.base.cra_blocksize = AES_BLOCK_SIZE,
1404 	.base.cra_ctxsize = sizeof(struct qat_alg_skcipher_ctx),
1405 	.base.cra_alignmask = 0,
1406 	.base.cra_module = THIS_MODULE,
1407 
1408 	.init = qat_alg_skcipher_init_xts_tfm,
1409 	.exit = qat_alg_skcipher_exit_xts_tfm,
1410 	.setkey = qat_alg_skcipher_xts_setkey,
1411 	.decrypt = qat_alg_skcipher_xts_decrypt,
1412 	.encrypt = qat_alg_skcipher_xts_encrypt,
1413 	.min_keysize = 2 * AES_MIN_KEY_SIZE,
1414 	.max_keysize = 2 * AES_MAX_KEY_SIZE,
1415 	.ivsize = AES_BLOCK_SIZE,
1416 } };
1417 
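/* Register the algorithms only once, when the first accelerator device comes up */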
1418 int qat_algs_register(void)
1419 {
1420 	int ret = 0;
1421 
1422 	mutex_lock(&algs_lock);
1423 	if (++active_devs != 1)
1424 		goto unlock;
1425 
1426 	ret = crypto_register_skciphers(qat_skciphers,
1427 					ARRAY_SIZE(qat_skciphers));
1428 	if (ret)
1429 		goto unlock;
1430 
1431 	ret = crypto_register_aeads(qat_aeads, ARRAY_SIZE(qat_aeads));
1432 	if (ret)
1433 		goto unreg_algs;
1434 
1435 unlock:
1436 	mutex_unlock(&algs_lock);
1437 	return ret;
1438 
1439 unreg_algs:
1440 	crypto_unregister_skciphers(qat_skciphers, ARRAY_SIZE(qat_skciphers));
1441 	goto unlock;
1442 }
1443 
1444 void qat_algs_unregister(void)
1445 {
1446 	mutex_lock(&algs_lock);
1447 	if (--active_devs != 0)
1448 		goto unlock;
1449 
1450 	crypto_unregister_aeads(qat_aeads, ARRAY_SIZE(qat_aeads));
1451 	crypto_unregister_skciphers(qat_skciphers, ARRAY_SIZE(qat_skciphers));
1452 
1453 unlock:
1454 	mutex_unlock(&algs_lock);
1455 }
1456