1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * K3 SA2UL crypto accelerator driver
4 *
5 * Copyright (C) 2018-2020 Texas Instruments Incorporated - http://www.ti.com
6 *
7 * Authors: Keerthy
8 * Vitaly Andrianov
9 * Tero Kristo
10 */
11 #include <linux/clk.h>
12 #include <linux/dmaengine.h>
13 #include <linux/dmapool.h>
14 #include <linux/module.h>
15 #include <linux/of_device.h>
16 #include <linux/platform_device.h>
17 #include <linux/pm_runtime.h>
18
19 #include <crypto/aes.h>
20 #include <crypto/authenc.h>
21 #include <crypto/des.h>
22 #include <crypto/internal/aead.h>
23 #include <crypto/internal/hash.h>
24 #include <crypto/internal/skcipher.h>
25 #include <crypto/scatterwalk.h>
26 #include <crypto/sha.h>
27
28 #include "sa2ul.h"
29
30 /* Byte offset for key in encryption security context */
31 #define SC_ENC_KEY_OFFSET (1 + 27 + 4)
32 /* Byte offset for Aux-1 in encryption security context */
33 #define SC_ENC_AUX1_OFFSET (1 + 27 + 4 + 32)
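/*
 * Note: these offsets assume the encryption SC layout built by
 * sa_set_sc_enc() below: 1 mode-selector byte, MODE_CONTROL_BYTES (27)
 * of mode control instructions and 4 further bytes ahead of the key,
 * so the key starts at byte 32 and Aux-1 follows the 32-byte key area.
 */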
34
35 #define SA_CMDL_UPD_ENC 0x0001
36 #define SA_CMDL_UPD_AUTH 0x0002
37 #define SA_CMDL_UPD_ENC_IV 0x0004
38 #define SA_CMDL_UPD_AUTH_IV 0x0008
39 #define SA_CMDL_UPD_AUX_KEY 0x0010
40
41 #define SA_AUTH_SUBKEY_LEN 16
42 #define SA_CMDL_PAYLOAD_LENGTH_MASK 0xFFFF
43 #define SA_CMDL_SOP_BYPASS_LEN_MASK 0xFF000000
44
45 #define MODE_CONTROL_BYTES 27
46 #define SA_HASH_PROCESSING 0
47 #define SA_CRYPTO_PROCESSING 0
48 #define SA_UPLOAD_HASH_TO_TLR BIT(6)
49
50 #define SA_SW0_FLAGS_MASK 0xF0000
51 #define SA_SW0_CMDL_INFO_MASK 0x1F00000
52 #define SA_SW0_CMDL_PRESENT BIT(4)
53 #define SA_SW0_ENG_ID_MASK 0x3E000000
54 #define SA_SW0_DEST_INFO_PRESENT BIT(30)
55 #define SA_SW2_EGRESS_LENGTH 0xFF000000
56 #define SA_BASIC_HASH 0x10
57
58 #define SHA256_DIGEST_WORDS 8
59 /* Make 32-bit word from 4 bytes */
60 #define SA_MK_U32(b0, b1, b2, b3) (((b0) << 24) | ((b1) << 16) | \
61 ((b2) << 8) | (b3))
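/* e.g. SA_MK_U32(0x01, 0x02, 0x03, 0x04) yields 0x01020304 */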
62
63 /* size of SCCTL structure in bytes */
64 #define SA_SCCTL_SZ 16
65
66 /* Max Authentication tag size */
67 #define SA_MAX_AUTH_TAG_SZ 64
68
69 #define PRIV_ID 0x1
70 #define PRIV 0x1
71
72 static struct device *sa_k3_dev;
73
74 /**
75 * struct sa_cmdl_cfg - Command label configuration descriptor
76 * @aalg: authentication algorithm ID
77 * @enc_eng_id: Encryption Engine ID supported by the SA hardware
78 * @auth_eng_id: Authentication Engine ID
79 * @iv_size: Initialization Vector size
80 * @akey: Authentication key
81 * @akey_len: Authentication key length
82 * @enc: True, if this is an encode request
83 */
84 struct sa_cmdl_cfg {
85 int aalg;
86 u8 enc_eng_id;
87 u8 auth_eng_id;
88 u8 iv_size;
89 const u8 *akey;
90 u16 akey_len;
91 bool enc;
92 };
93
94 /**
95 * struct algo_data - Crypto algorithm specific data
96 * @enc_eng: Encryption engine info structure
97 * @auth_eng: Authentication engine info structure
98 * @auth_ctrl: Authentication control word
99 * @hash_size: Size of digest
100 * @iv_idx: iv index in psdata
101 * @iv_out_size: iv out size
102 * @ealg_id: Encryption Algorithm ID
103 * @aalg_id: Authentication algorithm ID
104 * @mci_enc: Mode Control Instruction for Encryption algorithm
105 * @mci_dec: Mode Control Instruction for Decryption
106 * @inv_key: Whether the encryption algorithm demands key inversion
107 * @ctx: Pointer to the algorithm context
108 * @keyed_mac: Whether the authentication algorithm has key
109 * @prep_iopad: Function pointer to generate intermediate ipad/opad
110 */
111 struct algo_data {
112 struct sa_eng_info enc_eng;
113 struct sa_eng_info auth_eng;
114 u8 auth_ctrl;
115 u8 hash_size;
116 u8 iv_idx;
117 u8 iv_out_size;
118 u8 ealg_id;
119 u8 aalg_id;
120 u8 *mci_enc;
121 u8 *mci_dec;
122 bool inv_key;
123 struct sa_tfm_ctx *ctx;
124 bool keyed_mac;
125 void (*prep_iopad)(struct algo_data *algo, const u8 *key,
126 u16 key_sz, __be32 *ipad, __be32 *opad);
127 };
128
129 /**
130 * struct sa_alg_tmpl: A generic template encompassing crypto/aead algorithms
131 * @type: Type of the crypto algorithm.
132 * @alg: Union of crypto algorithm definitions.
133 * @registered: Flag indicating if the crypto algorithm is already registered
134 */
135 struct sa_alg_tmpl {
136 u32 type; /* CRYPTO_ALG_TYPE from <linux/crypto.h> */
137 union {
138 struct skcipher_alg skcipher;
139 struct ahash_alg ahash;
140 struct aead_alg aead;
141 } alg;
142 bool registered;
143 };
144
145 /**
146 * struct sa_mapped_sg: scatterlist information for tx and rx
147 * @mapped: Set to true if the @sgt is mapped
148 * @dir: mapping direction used for @sgt
149 * @split_sg: Set if the sg is split and needs to be freed up
150 * @static_sg: Static scatterlist entry for overriding data
151 * @sgt: scatterlist table for DMA API use
152 */
153 struct sa_mapped_sg {
154 bool mapped;
155 enum dma_data_direction dir;
156 struct scatterlist static_sg;
157 struct scatterlist *split_sg;
158 struct sg_table sgt;
159 };
160 /**
161 * struct sa_rx_data: RX Packet miscellaneous data place holder
162 * @req: crypto request data pointer
163 * @ddev: pointer to the DMA device
164 * @tx_in: dma_async_tx_descriptor pointer for rx channel
165 * @mapped_sg: Information on tx (0) and rx (1) scatterlist DMA mapping
166 * @enc: Flag indicating either encryption or decryption
167 * @enc_iv_size: Initialisation vector size
168 * @iv_idx: Initialisation vector index
169 */
170 struct sa_rx_data {
171 void *req;
172 struct device *ddev;
173 struct dma_async_tx_descriptor *tx_in;
174 struct sa_mapped_sg mapped_sg[2];
175 u8 enc;
176 u8 enc_iv_size;
177 u8 iv_idx;
178 };
179
180 /**
181 * struct sa_req: SA request definition
182 * @dev: device for the request
183  * @size: total data to be transmitted via DMA
184 * @enc_offset: offset of cipher data
185 * @enc_size: data to be passed to cipher engine
186 * @enc_iv: cipher IV
187 * @auth_offset: offset of the authentication data
188 * @auth_size: size of the authentication data
189 * @auth_iv: authentication IV
190 * @type: algorithm type for the request
191 * @cmdl: command label pointer
192 * @base: pointer to the base request
193 * @ctx: pointer to the algorithm context data
194 * @enc: true if this is an encode request
195 * @src: source data
196 * @dst: destination data
197 * @callback: DMA callback for the request
198 * @mdata_size: metadata size passed to DMA
199 */
200 struct sa_req {
201 struct device *dev;
202 u16 size;
203 u8 enc_offset;
204 u16 enc_size;
205 u8 *enc_iv;
206 u8 auth_offset;
207 u16 auth_size;
208 u8 *auth_iv;
209 u32 type;
210 u32 *cmdl;
211 struct crypto_async_request *base;
212 struct sa_tfm_ctx *ctx;
213 bool enc;
214 struct scatterlist *src;
215 struct scatterlist *dst;
216 dma_async_tx_callback callback;
217 u16 mdata_size;
218 };
219
220 /*
221 * Mode Control Instructions for various Key lengths 128, 192, 256
222 * For CBC (Cipher Block Chaining) mode for encryption
223 */
224 static u8 mci_cbc_enc_array[3][MODE_CONTROL_BYTES] = {
225 { 0x61, 0x00, 0x00, 0x18, 0x88, 0x0a, 0xaa, 0x4b, 0x7e, 0x00,
226 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
227 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
228 { 0x61, 0x00, 0x00, 0x18, 0x88, 0x4a, 0xaa, 0x4b, 0x7e, 0x00,
229 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
230 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
231 { 0x61, 0x00, 0x00, 0x18, 0x88, 0x8a, 0xaa, 0x4b, 0x7e, 0x00,
232 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
233 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
234 };
235
236 /*
237 * Mode Control Instructions for various Key lengths 128, 192, 256
238 * For CBC (Cipher Block Chaining) mode for decryption
239 */
240 static u8 mci_cbc_dec_array[3][MODE_CONTROL_BYTES] = {
241 { 0x71, 0x00, 0x00, 0x80, 0x8a, 0xca, 0x98, 0xf4, 0x40, 0xc0,
242 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
243 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
244 { 0x71, 0x00, 0x00, 0x84, 0x8a, 0xca, 0x98, 0xf4, 0x40, 0xc0,
245 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
246 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
247 { 0x71, 0x00, 0x00, 0x88, 0x8a, 0xca, 0x98, 0xf4, 0x40, 0xc0,
248 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
249 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
250 };
251
252 /*
253 * Mode Control Instructions for various Key lengths 128, 192, 256
254 * For CBC (Cipher Block Chaining) mode for encryption
255 */
256 static u8 mci_cbc_enc_no_iv_array[3][MODE_CONTROL_BYTES] = {
257 { 0x21, 0x00, 0x00, 0x18, 0x88, 0x0a, 0xaa, 0x4b, 0x7e, 0x00,
258 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
259 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
260 { 0x21, 0x00, 0x00, 0x18, 0x88, 0x4a, 0xaa, 0x4b, 0x7e, 0x00,
261 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
262 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
263 { 0x21, 0x00, 0x00, 0x18, 0x88, 0x8a, 0xaa, 0x4b, 0x7e, 0x00,
264 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
265 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
266 };
267
268 /*
269 * Mode Control Instructions for various Key lengths 128, 192, 256
270 * For CBC (Cipher Block Chaining) mode for decryption
271 */
272 static u8 mci_cbc_dec_no_iv_array[3][MODE_CONTROL_BYTES] = {
273 { 0x31, 0x00, 0x00, 0x80, 0x8a, 0xca, 0x98, 0xf4, 0x40, 0xc0,
274 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
275 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
276 { 0x31, 0x00, 0x00, 0x84, 0x8a, 0xca, 0x98, 0xf4, 0x40, 0xc0,
277 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
278 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
279 { 0x31, 0x00, 0x00, 0x88, 0x8a, 0xca, 0x98, 0xf4, 0x40, 0xc0,
280 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
281 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
282 };
283
284 /*
285 * Mode Control Instructions for various Key lengths 128, 192, 256
286 * For ECB (Electronic Code Book) mode for encryption
287 */
288 static u8 mci_ecb_enc_array[3][27] = {
289 { 0x21, 0x00, 0x00, 0x80, 0x8a, 0x04, 0xb7, 0x90, 0x00, 0x00,
290 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
291 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
292 { 0x21, 0x00, 0x00, 0x84, 0x8a, 0x04, 0xb7, 0x90, 0x00, 0x00,
293 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
294 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
295 { 0x21, 0x00, 0x00, 0x88, 0x8a, 0x04, 0xb7, 0x90, 0x00, 0x00,
296 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
297 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
298 };
299
300 /*
301 * Mode Control Instructions for various Key lengths 128, 192, 256
302 * For ECB (Electronic Code Book) mode for decryption
303 */
304 static u8 mci_ecb_dec_array[3][27] = {
305 { 0x31, 0x00, 0x00, 0x80, 0x8a, 0x04, 0xb7, 0x90, 0x00, 0x00,
306 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
307 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
308 { 0x31, 0x00, 0x00, 0x84, 0x8a, 0x04, 0xb7, 0x90, 0x00, 0x00,
309 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
310 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
311 { 0x31, 0x00, 0x00, 0x88, 0x8a, 0x04, 0xb7, 0x90, 0x00, 0x00,
312 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
313 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
314 };
315
316 /*
317  * Mode Control Instructions for the 3DES algorithm,
318  * for CBC (Cipher Block Chaining) and ECB modes,
319  * encryption and decryption respectively
320 */
321 static u8 mci_cbc_3des_enc_array[MODE_CONTROL_BYTES] = {
322 0x60, 0x00, 0x00, 0x18, 0x88, 0x52, 0xaa, 0x4b, 0x7e, 0x00, 0x00, 0x00,
323 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
324 0x00, 0x00, 0x00,
325 };
326
327 static u8 mci_cbc_3des_dec_array[MODE_CONTROL_BYTES] = {
328 0x70, 0x00, 0x00, 0x85, 0x0a, 0xca, 0x98, 0xf4, 0x40, 0xc0, 0x00, 0x00,
329 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
330 0x00, 0x00, 0x00,
331 };
332
333 static u8 mci_ecb_3des_enc_array[MODE_CONTROL_BYTES] = {
334 0x20, 0x00, 0x00, 0x85, 0x0a, 0x04, 0xb7, 0x90, 0x00, 0x00, 0x00, 0x00,
335 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
336 0x00, 0x00, 0x00,
337 };
338
339 static u8 mci_ecb_3des_dec_array[MODE_CONTROL_BYTES] = {
340 0x30, 0x00, 0x00, 0x85, 0x0a, 0x04, 0xb7, 0x90, 0x00, 0x00, 0x00, 0x00,
341 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
342 0x00, 0x00, 0x00,
343 };
344
345 /*
346  * Perform 16 byte or 128 bit swizzling.
347  * The SA2UL expects the security context to be in little-endian
348  * format and the bus width is 128 bits or 16 bytes,
349  * hence swap 16 bytes at a time from higher to lower address.
350 */
351 static void sa_swiz_128(u8 *in, u16 len)
352 {
353 u8 data[16];
354 int i, j;
355
356 for (i = 0; i < len; i += 16) {
357 memcpy(data, &in[i], 16);
358 for (j = 0; j < 16; j++)
359 in[i + j] = data[15 - j];
360 }
361 }
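/*
 * Worked example of the swizzle above: a 16-byte group stored as
 * 00 01 02 ... 0e 0f in memory is rewritten as 0f 0e ... 01 00.
 */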
362
363 /* Prepare the ipad and opad from the key as per SHA algorithm step 1 */
364 static void prepare_kiopad(u8 *k_ipad, u8 *k_opad, const u8 *key, u16 key_sz)
365 {
366 int i;
367
368 for (i = 0; i < key_sz; i++) {
369 k_ipad[i] = key[i] ^ 0x36;
370 k_opad[i] = key[i] ^ 0x5c;
371 }
372
373 /* Instead of XOR with 0 */
374 for (; i < SHA1_BLOCK_SIZE; i++) {
375 k_ipad[i] = 0x36;
376 k_opad[i] = 0x5c;
377 }
378 }
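/*
 * This mirrors HMAC (RFC 2104): k_ipad = key XOR 0x36.., k_opad = key
 * XOR 0x5c.., each padded to the block size. sa_prepare_iopads() below
 * hashes one block of each and exports the intermediate digest state,
 * which is what the engine consumes as precomputed inner/outer pads.
 */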
379
380 static void sa_export_shash(struct shash_desc *hash, int block_size,
381 int digest_size, __be32 *out)
382 {
383 union {
384 struct sha1_state sha1;
385 struct sha256_state sha256;
386 struct sha512_state sha512;
387 } sha;
388 void *state;
389 u32 *result;
390 int i;
391
392 switch (digest_size) {
393 case SHA1_DIGEST_SIZE:
394 state = &sha.sha1;
395 result = sha.sha1.state;
396 break;
397 case SHA256_DIGEST_SIZE:
398 state = &sha.sha256;
399 result = sha.sha256.state;
400 break;
401 default:
402 dev_err(sa_k3_dev, "%s: bad digest_size=%d\n", __func__,
403 digest_size);
404 return;
405 }
406
407 crypto_shash_export(hash, state);
408
409 for (i = 0; i < digest_size >> 2; i++)
410 out[i] = cpu_to_be32(result[i]);
411 }
412
413 static void sa_prepare_iopads(struct algo_data *data, const u8 *key,
414 u16 key_sz, __be32 *ipad, __be32 *opad)
415 {
416 SHASH_DESC_ON_STACK(shash, data->ctx->shash);
417 int block_size = crypto_shash_blocksize(data->ctx->shash);
418 int digest_size = crypto_shash_digestsize(data->ctx->shash);
419 u8 k_ipad[SHA1_BLOCK_SIZE];
420 u8 k_opad[SHA1_BLOCK_SIZE];
421
422 shash->tfm = data->ctx->shash;
423
424 prepare_kiopad(k_ipad, k_opad, key, key_sz);
425
426 memzero_explicit(ipad, block_size);
427 memzero_explicit(opad, block_size);
428
429 crypto_shash_init(shash);
430 crypto_shash_update(shash, k_ipad, block_size);
431 sa_export_shash(shash, block_size, digest_size, ipad);
432
433 crypto_shash_init(shash);
434 crypto_shash_update(shash, k_opad, block_size);
435
436 sa_export_shash(shash, block_size, digest_size, opad);
437 }
438
439 /* Derive the inverse key used in AES-CBC decryption operation */
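/*
 * The "inverse key" appears to be the tail of the expanded encryption
 * key schedule (the final round key material), which the decryption
 * hardware consumes directly instead of the raw cipher key.
 */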
440 static inline int sa_aes_inv_key(u8 *inv_key, const u8 *key, u16 key_sz)
441 {
442 struct crypto_aes_ctx ctx;
443 int key_pos;
444
445 if (aes_expandkey(&ctx, key, key_sz)) {
446 dev_err(sa_k3_dev, "%s: bad key len(%d)\n", __func__, key_sz);
447 return -EINVAL;
448 }
449
450 	/* Workaround to get the right inverse for AES_KEYSIZE_192 size keys */
451 if (key_sz == AES_KEYSIZE_192) {
452 ctx.key_enc[52] = ctx.key_enc[51] ^ ctx.key_enc[46];
453 ctx.key_enc[53] = ctx.key_enc[52] ^ ctx.key_enc[47];
454 }
455
456 	/* Based on the crypto_aes_expand_key logic */
457 switch (key_sz) {
458 case AES_KEYSIZE_128:
459 case AES_KEYSIZE_192:
460 key_pos = key_sz + 24;
461 break;
462
463 case AES_KEYSIZE_256:
464 key_pos = key_sz + 24 - 4;
465 break;
466
467 default:
468 dev_err(sa_k3_dev, "%s: bad key len(%d)\n", __func__, key_sz);
469 return -EINVAL;
470 }
471
472 memcpy(inv_key, &ctx.key_enc[key_pos], key_sz);
473 return 0;
474 }
475
476 /* Set Security context for the encryption engine */
477 static int sa_set_sc_enc(struct algo_data *ad, const u8 *key, u16 key_sz,
478 u8 enc, u8 *sc_buf)
479 {
480 const u8 *mci = NULL;
481
482 /* Set Encryption mode selector to crypto processing */
483 sc_buf[0] = SA_CRYPTO_PROCESSING;
484
485 if (enc)
486 mci = ad->mci_enc;
487 else
488 mci = ad->mci_dec;
489 /* Set the mode control instructions in security context */
490 if (mci)
491 memcpy(&sc_buf[1], mci, MODE_CONTROL_BYTES);
492
493 /* For AES-CBC decryption get the inverse key */
494 if (ad->inv_key && !enc) {
495 if (sa_aes_inv_key(&sc_buf[SC_ENC_KEY_OFFSET], key, key_sz))
496 return -EINVAL;
497 	/* For all other cases the key is used as-is */
498 } else {
499 memcpy(&sc_buf[SC_ENC_KEY_OFFSET], key, key_sz);
500 }
501
502 return 0;
503 }
504
505 /* Set Security context for the authentication engine */
506 static void sa_set_sc_auth(struct algo_data *ad, const u8 *key, u16 key_sz,
507 u8 *sc_buf)
508 {
509 __be32 ipad[64], opad[64];
510
511 /* Set Authentication mode selector to hash processing */
512 sc_buf[0] = SA_HASH_PROCESSING;
513 /* Auth SW ctrl word: bit[6]=1 (upload computed hash to TLR section) */
514 sc_buf[1] = SA_UPLOAD_HASH_TO_TLR;
515 sc_buf[1] |= ad->auth_ctrl;
516
517 /* Copy the keys or ipad/opad */
518 if (ad->keyed_mac) {
519 ad->prep_iopad(ad, key, key_sz, ipad, opad);
520
521 /* Copy ipad to AuthKey */
522 memcpy(&sc_buf[32], ipad, ad->hash_size);
523 /* Copy opad to Aux-1 */
524 memcpy(&sc_buf[64], opad, ad->hash_size);
525 } else {
526 /* basic hash */
527 sc_buf[1] |= SA_BASIC_HASH;
528 }
529 }
530
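/* Copy an 8 or 16 byte IV into the command label as big-endian 32-bit words */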
531 static inline void sa_copy_iv(__be32 *out, const u8 *iv, bool size16)
532 {
533 int j;
534
535 for (j = 0; j < ((size16) ? 4 : 2); j++) {
536 *out = cpu_to_be32(*((u32 *)iv));
537 iv += 4;
538 out++;
539 }
540 }
541
542 /* Format general command label */
543 static int sa_format_cmdl_gen(struct sa_cmdl_cfg *cfg, u8 *cmdl,
544 struct sa_cmdl_upd_info *upd_info)
545 {
546 u8 enc_offset = 0, auth_offset = 0, total = 0;
547 u8 enc_next_eng = SA_ENG_ID_OUTPORT2;
548 u8 auth_next_eng = SA_ENG_ID_OUTPORT2;
549 u32 *word_ptr = (u32 *)cmdl;
550 int i;
551
552 /* Clear the command label */
553 memzero_explicit(cmdl, (SA_MAX_CMDL_WORDS * sizeof(u32)));
554
555 	/* Initialize the command update structure */
556 memzero_explicit(upd_info, sizeof(*upd_info));
557
558 if (cfg->enc_eng_id && cfg->auth_eng_id) {
559 if (cfg->enc) {
560 auth_offset = SA_CMDL_HEADER_SIZE_BYTES;
561 enc_next_eng = cfg->auth_eng_id;
562
563 if (cfg->iv_size)
564 auth_offset += cfg->iv_size;
565 } else {
566 enc_offset = SA_CMDL_HEADER_SIZE_BYTES;
567 auth_next_eng = cfg->enc_eng_id;
568 }
569 }
570
571 if (cfg->enc_eng_id) {
572 upd_info->flags |= SA_CMDL_UPD_ENC;
573 upd_info->enc_size.index = enc_offset >> 2;
574 upd_info->enc_offset.index = upd_info->enc_size.index + 1;
575 /* Encryption command label */
576 cmdl[enc_offset + SA_CMDL_OFFSET_NESC] = enc_next_eng;
577
578 /* Encryption modes requiring IV */
579 if (cfg->iv_size) {
580 upd_info->flags |= SA_CMDL_UPD_ENC_IV;
581 upd_info->enc_iv.index =
582 (enc_offset + SA_CMDL_HEADER_SIZE_BYTES) >> 2;
583 upd_info->enc_iv.size = cfg->iv_size;
584
585 cmdl[enc_offset + SA_CMDL_OFFSET_LABEL_LEN] =
586 SA_CMDL_HEADER_SIZE_BYTES + cfg->iv_size;
587
588 cmdl[enc_offset + SA_CMDL_OFFSET_OPTION_CTRL1] =
589 (SA_CTX_ENC_AUX2_OFFSET | (cfg->iv_size >> 3));
590 total += SA_CMDL_HEADER_SIZE_BYTES + cfg->iv_size;
591 } else {
592 cmdl[enc_offset + SA_CMDL_OFFSET_LABEL_LEN] =
593 SA_CMDL_HEADER_SIZE_BYTES;
594 total += SA_CMDL_HEADER_SIZE_BYTES;
595 }
596 }
597
598 if (cfg->auth_eng_id) {
599 upd_info->flags |= SA_CMDL_UPD_AUTH;
600 upd_info->auth_size.index = auth_offset >> 2;
601 upd_info->auth_offset.index = upd_info->auth_size.index + 1;
602 cmdl[auth_offset + SA_CMDL_OFFSET_NESC] = auth_next_eng;
603 cmdl[auth_offset + SA_CMDL_OFFSET_LABEL_LEN] =
604 SA_CMDL_HEADER_SIZE_BYTES;
605 total += SA_CMDL_HEADER_SIZE_BYTES;
606 }
607
608 total = roundup(total, 8);
609
610 for (i = 0; i < total / 4; i++)
611 word_ptr[i] = swab32(word_ptr[i]);
612
613 return total;
614 }
615
616 /* Update Command label */
617 static inline void sa_update_cmdl(struct sa_req *req, u32 *cmdl,
618 struct sa_cmdl_upd_info *upd_info)
619 {
620 int i = 0, j;
621
622 if (likely(upd_info->flags & SA_CMDL_UPD_ENC)) {
623 cmdl[upd_info->enc_size.index] &= ~SA_CMDL_PAYLOAD_LENGTH_MASK;
624 cmdl[upd_info->enc_size.index] |= req->enc_size;
625 cmdl[upd_info->enc_offset.index] &=
626 ~SA_CMDL_SOP_BYPASS_LEN_MASK;
627 cmdl[upd_info->enc_offset.index] |=
628 ((u32)req->enc_offset <<
629 __ffs(SA_CMDL_SOP_BYPASS_LEN_MASK));
630
631 if (likely(upd_info->flags & SA_CMDL_UPD_ENC_IV)) {
632 __be32 *data = (__be32 *)&cmdl[upd_info->enc_iv.index];
633 u32 *enc_iv = (u32 *)req->enc_iv;
634
635 for (j = 0; i < upd_info->enc_iv.size; i += 4, j++) {
636 data[j] = cpu_to_be32(*enc_iv);
637 enc_iv++;
638 }
639 }
640 }
641
642 if (likely(upd_info->flags & SA_CMDL_UPD_AUTH)) {
643 cmdl[upd_info->auth_size.index] &= ~SA_CMDL_PAYLOAD_LENGTH_MASK;
644 cmdl[upd_info->auth_size.index] |= req->auth_size;
645 cmdl[upd_info->auth_offset.index] &=
646 ~SA_CMDL_SOP_BYPASS_LEN_MASK;
647 cmdl[upd_info->auth_offset.index] |=
648 ((u32)req->auth_offset <<
649 __ffs(SA_CMDL_SOP_BYPASS_LEN_MASK));
650 if (upd_info->flags & SA_CMDL_UPD_AUTH_IV) {
651 sa_copy_iv((void *)&cmdl[upd_info->auth_iv.index],
652 req->auth_iv,
653 (upd_info->auth_iv.size > 8));
654 }
655 if (upd_info->flags & SA_CMDL_UPD_AUX_KEY) {
656 int offset = (req->auth_size & 0xF) ? 4 : 0;
657
658 memcpy(&cmdl[upd_info->aux_key_info.index],
659 &upd_info->aux_key[offset], 16);
660 }
661 }
662 }
663
664 /* Format SWINFO words to be sent to SA */
665 static
666 void sa_set_swinfo(u8 eng_id, u16 sc_id, dma_addr_t sc_phys,
667 u8 cmdl_present, u8 cmdl_offset, u8 flags,
668 u8 hash_size, u32 *swinfo)
669 {
670 swinfo[0] = sc_id;
671 swinfo[0] |= (flags << __ffs(SA_SW0_FLAGS_MASK));
672 if (likely(cmdl_present))
673 swinfo[0] |= ((cmdl_offset | SA_SW0_CMDL_PRESENT) <<
674 __ffs(SA_SW0_CMDL_INFO_MASK));
675 swinfo[0] |= (eng_id << __ffs(SA_SW0_ENG_ID_MASK));
676
677 swinfo[0] |= SA_SW0_DEST_INFO_PRESENT;
678 swinfo[1] = (u32)(sc_phys & 0xFFFFFFFFULL);
679 swinfo[2] = (u32)((sc_phys & 0xFFFFFFFF00000000ULL) >> 32);
680 swinfo[2] |= (hash_size << __ffs(SA_SW2_EGRESS_LENGTH));
681 }
682
683 /* Dump the security context */
684 static void sa_dump_sc(u8 *buf, dma_addr_t dma_addr)
685 {
686 #ifdef DEBUG
687 dev_info(sa_k3_dev, "Security context dump:: 0x%pad\n", &dma_addr);
688 print_hex_dump(KERN_CONT, "", DUMP_PREFIX_OFFSET,
689 16, 1, buf, SA_CTX_MAX_SZ, false);
690 #endif
691 }
692
693 static
694 int sa_init_sc(struct sa_ctx_info *ctx, const u8 *enc_key,
695 u16 enc_key_sz, const u8 *auth_key, u16 auth_key_sz,
696 struct algo_data *ad, u8 enc, u32 *swinfo)
697 {
698 int enc_sc_offset = 0;
699 int auth_sc_offset = 0;
700 u8 *sc_buf = ctx->sc;
701 u16 sc_id = ctx->sc_id;
702 u8 first_engine = 0;
703
704 memzero_explicit(sc_buf, SA_CTX_MAX_SZ);
705
706 if (ad->auth_eng.eng_id) {
707 if (enc)
708 first_engine = ad->enc_eng.eng_id;
709 else
710 first_engine = ad->auth_eng.eng_id;
711
712 enc_sc_offset = SA_CTX_PHP_PE_CTX_SZ;
713 auth_sc_offset = enc_sc_offset + ad->enc_eng.sc_size;
714 sc_buf[1] = SA_SCCTL_FE_AUTH_ENC;
715 if (!ad->hash_size)
716 return -EINVAL;
717 ad->hash_size = roundup(ad->hash_size, 8);
718
719 } else if (ad->enc_eng.eng_id && !ad->auth_eng.eng_id) {
720 enc_sc_offset = SA_CTX_PHP_PE_CTX_SZ;
721 first_engine = ad->enc_eng.eng_id;
722 sc_buf[1] = SA_SCCTL_FE_ENC;
723 ad->hash_size = ad->iv_out_size;
724 }
725
726 /* SCCTL Owner info: 0=host, 1=CP_ACE */
727 sc_buf[SA_CTX_SCCTL_OWNER_OFFSET] = 0;
728 memcpy(&sc_buf[2], &sc_id, 2);
729 sc_buf[4] = 0x0;
730 sc_buf[5] = PRIV_ID;
731 sc_buf[6] = PRIV;
732 sc_buf[7] = 0x0;
733
734 /* Prepare context for encryption engine */
735 if (ad->enc_eng.sc_size) {
736 if (sa_set_sc_enc(ad, enc_key, enc_key_sz, enc,
737 &sc_buf[enc_sc_offset]))
738 return -EINVAL;
739 }
740
741 /* Prepare context for authentication engine */
742 if (ad->auth_eng.sc_size)
743 sa_set_sc_auth(ad, auth_key, auth_key_sz,
744 &sc_buf[auth_sc_offset]);
745
746 /* Set the ownership of context to CP_ACE */
747 sc_buf[SA_CTX_SCCTL_OWNER_OFFSET] = 0x80;
748
749 /* swizzle the security context */
750 sa_swiz_128(sc_buf, SA_CTX_MAX_SZ);
751
752 sa_set_swinfo(first_engine, ctx->sc_id, ctx->sc_phys, 1, 0,
753 SA_SW_INFO_FLAG_EVICT, ad->hash_size, swinfo);
754
755 sa_dump_sc(sc_buf, ctx->sc_phys);
756
757 return 0;
758 }
759
760 /* Free the per direction context memory */
761 static void sa_free_ctx_info(struct sa_ctx_info *ctx,
762 struct sa_crypto_data *data)
763 {
764 unsigned long bn;
765
766 bn = ctx->sc_id - data->sc_id_start;
767 spin_lock(&data->scid_lock);
768 __clear_bit(bn, data->ctx_bm);
769 data->sc_id--;
770 spin_unlock(&data->scid_lock);
771
772 if (ctx->sc) {
773 dma_pool_free(data->sc_pool, ctx->sc, ctx->sc_phys);
774 ctx->sc = NULL;
775 }
776 }
777
778 static int sa_init_ctx_info(struct sa_ctx_info *ctx,
779 struct sa_crypto_data *data)
780 {
781 unsigned long bn;
782 int err;
783
784 spin_lock(&data->scid_lock);
785 bn = find_first_zero_bit(data->ctx_bm, SA_MAX_NUM_CTX);
786 __set_bit(bn, data->ctx_bm);
787 data->sc_id++;
788 spin_unlock(&data->scid_lock);
789
790 ctx->sc_id = (u16)(data->sc_id_start + bn);
791
792 ctx->sc = dma_pool_alloc(data->sc_pool, GFP_KERNEL, &ctx->sc_phys);
793 if (!ctx->sc) {
794 dev_err(&data->pdev->dev, "Failed to allocate SC memory\n");
795 err = -ENOMEM;
796 goto scid_rollback;
797 }
798
799 return 0;
800
801 scid_rollback:
802 spin_lock(&data->scid_lock);
803 __clear_bit(bn, data->ctx_bm);
804 data->sc_id--;
805 spin_unlock(&data->scid_lock);
806
807 return err;
808 }
809
810 static void sa_cipher_cra_exit(struct crypto_skcipher *tfm)
811 {
812 struct sa_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
813 struct sa_crypto_data *data = dev_get_drvdata(sa_k3_dev);
814
815 dev_dbg(sa_k3_dev, "%s(0x%p) sc-ids(0x%x(0x%pad), 0x%x(0x%pad))\n",
816 __func__, tfm, ctx->enc.sc_id, &ctx->enc.sc_phys,
817 ctx->dec.sc_id, &ctx->dec.sc_phys);
818
819 sa_free_ctx_info(&ctx->enc, data);
820 sa_free_ctx_info(&ctx->dec, data);
821
822 crypto_free_sync_skcipher(ctx->fallback.skcipher);
823 }
824
825 static int sa_cipher_cra_init(struct crypto_skcipher *tfm)
826 {
827 struct sa_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
828 struct sa_crypto_data *data = dev_get_drvdata(sa_k3_dev);
829 const char *name = crypto_tfm_alg_name(&tfm->base);
830 int ret;
831
832 memzero_explicit(ctx, sizeof(*ctx));
833 ctx->dev_data = data;
834
835 ret = sa_init_ctx_info(&ctx->enc, data);
836 if (ret)
837 return ret;
838 ret = sa_init_ctx_info(&ctx->dec, data);
839 if (ret) {
840 sa_free_ctx_info(&ctx->enc, data);
841 return ret;
842 }
843
844 ctx->fallback.skcipher =
845 crypto_alloc_sync_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK);
846
847 if (IS_ERR(ctx->fallback.skcipher)) {
848 dev_err(sa_k3_dev, "Error allocating fallback algo %s\n", name);
849 return PTR_ERR(ctx->fallback.skcipher);
850 }
851
852 dev_dbg(sa_k3_dev, "%s(0x%p) sc-ids(0x%x(0x%pad), 0x%x(0x%pad))\n",
853 __func__, tfm, ctx->enc.sc_id, &ctx->enc.sc_phys,
854 ctx->dec.sc_id, &ctx->dec.sc_phys);
855 return 0;
856 }
857
858 static int sa_cipher_setkey(struct crypto_skcipher *tfm, const u8 *key,
859 unsigned int keylen, struct algo_data *ad)
860 {
861 struct sa_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
862 int cmdl_len;
863 struct sa_cmdl_cfg cfg;
864 int ret;
865
866 if (keylen != AES_KEYSIZE_128 && keylen != AES_KEYSIZE_192 &&
867 keylen != AES_KEYSIZE_256)
868 return -EINVAL;
869
870 ad->enc_eng.eng_id = SA_ENG_ID_EM1;
871 ad->enc_eng.sc_size = SA_CTX_ENC_TYPE1_SZ;
872
873 memzero_explicit(&cfg, sizeof(cfg));
874 cfg.enc_eng_id = ad->enc_eng.eng_id;
875 cfg.iv_size = crypto_skcipher_ivsize(tfm);
876
877 crypto_sync_skcipher_clear_flags(ctx->fallback.skcipher,
878 CRYPTO_TFM_REQ_MASK);
879 crypto_sync_skcipher_set_flags(ctx->fallback.skcipher,
880 tfm->base.crt_flags &
881 CRYPTO_TFM_REQ_MASK);
882 ret = crypto_sync_skcipher_setkey(ctx->fallback.skcipher, key, keylen);
883 if (ret)
884 return ret;
885
886 /* Setup Encryption Security Context & Command label template */
887 if (sa_init_sc(&ctx->enc, key, keylen, NULL, 0, ad, 1,
888 &ctx->enc.epib[1]))
889 goto badkey;
890
891 cmdl_len = sa_format_cmdl_gen(&cfg,
892 (u8 *)ctx->enc.cmdl,
893 &ctx->enc.cmdl_upd_info);
894 if (cmdl_len <= 0 || (cmdl_len > SA_MAX_CMDL_WORDS * sizeof(u32)))
895 goto badkey;
896
897 ctx->enc.cmdl_size = cmdl_len;
898
899 /* Setup Decryption Security Context & Command label template */
900 if (sa_init_sc(&ctx->dec, key, keylen, NULL, 0, ad, 0,
901 &ctx->dec.epib[1]))
902 goto badkey;
903
904 cfg.enc_eng_id = ad->enc_eng.eng_id;
905 cmdl_len = sa_format_cmdl_gen(&cfg, (u8 *)ctx->dec.cmdl,
906 &ctx->dec.cmdl_upd_info);
907
908 if (cmdl_len <= 0 || (cmdl_len > SA_MAX_CMDL_WORDS * sizeof(u32)))
909 goto badkey;
910
911 ctx->dec.cmdl_size = cmdl_len;
912 ctx->iv_idx = ad->iv_idx;
913
914 return 0;
915
916 badkey:
917 dev_err(sa_k3_dev, "%s: badkey\n", __func__);
918 return -EINVAL;
919 }
920
921 static int sa_aes_cbc_setkey(struct crypto_skcipher *tfm, const u8 *key,
922 unsigned int keylen)
923 {
924 struct algo_data ad = { 0 };
925 /* Convert the key size (16/24/32) to the key size index (0/1/2) */
926 int key_idx = (keylen >> 3) - 2;
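	/* e.g. keylen 16 -> (16 >> 3) - 2 = 0, 24 -> 1, 32 -> 2 */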
927
928 if (key_idx >= 3)
929 return -EINVAL;
930
931 ad.mci_enc = mci_cbc_enc_array[key_idx];
932 ad.mci_dec = mci_cbc_dec_array[key_idx];
933 ad.inv_key = true;
934 ad.ealg_id = SA_EALG_ID_AES_CBC;
935 ad.iv_idx = 4;
936 ad.iv_out_size = 16;
937
938 return sa_cipher_setkey(tfm, key, keylen, &ad);
939 }
940
941 static int sa_aes_ecb_setkey(struct crypto_skcipher *tfm, const u8 *key,
942 unsigned int keylen)
943 {
944 struct algo_data ad = { 0 };
945 /* Convert the key size (16/24/32) to the key size index (0/1/2) */
946 int key_idx = (keylen >> 3) - 2;
947
948 if (key_idx >= 3)
949 return -EINVAL;
950
951 ad.mci_enc = mci_ecb_enc_array[key_idx];
952 ad.mci_dec = mci_ecb_dec_array[key_idx];
953 ad.inv_key = true;
954 ad.ealg_id = SA_EALG_ID_AES_ECB;
955
956 return sa_cipher_setkey(tfm, key, keylen, &ad);
957 }
958
959 static int sa_3des_cbc_setkey(struct crypto_skcipher *tfm, const u8 *key,
960 unsigned int keylen)
961 {
962 struct algo_data ad = { 0 };
963
964 ad.mci_enc = mci_cbc_3des_enc_array;
965 ad.mci_dec = mci_cbc_3des_dec_array;
966 ad.ealg_id = SA_EALG_ID_3DES_CBC;
967 ad.iv_idx = 6;
968 ad.iv_out_size = 8;
969
970 return sa_cipher_setkey(tfm, key, keylen, &ad);
971 }
972
973 static int sa_3des_ecb_setkey(struct crypto_skcipher *tfm, const u8 *key,
974 unsigned int keylen)
975 {
976 struct algo_data ad = { 0 };
977
978 ad.mci_enc = mci_ecb_3des_enc_array;
979 ad.mci_dec = mci_ecb_3des_dec_array;
980
981 return sa_cipher_setkey(tfm, key, keylen, &ad);
982 }
983
984 static void sa_sync_from_device(struct sa_rx_data *rxd)
985 {
986 struct sg_table *sgt;
987
988 if (rxd->mapped_sg[0].dir == DMA_BIDIRECTIONAL)
989 sgt = &rxd->mapped_sg[0].sgt;
990 else
991 sgt = &rxd->mapped_sg[1].sgt;
992
993 dma_sync_sgtable_for_cpu(rxd->ddev, sgt, DMA_FROM_DEVICE);
994 }
995
996 static void sa_free_sa_rx_data(struct sa_rx_data *rxd)
997 {
998 int i;
999
1000 for (i = 0; i < ARRAY_SIZE(rxd->mapped_sg); i++) {
1001 struct sa_mapped_sg *mapped_sg = &rxd->mapped_sg[i];
1002
1003 if (mapped_sg->mapped) {
1004 dma_unmap_sgtable(rxd->ddev, &mapped_sg->sgt,
1005 mapped_sg->dir, 0);
1006 kfree(mapped_sg->split_sg);
1007 }
1008 }
1009
1010 kfree(rxd);
1011 }
1012
1013 static void sa_aes_dma_in_callback(void *data)
1014 {
1015 struct sa_rx_data *rxd = (struct sa_rx_data *)data;
1016 struct skcipher_request *req;
1017 u32 *result;
1018 __be32 *mdptr;
1019 size_t ml, pl;
1020 int i;
1021
1022 sa_sync_from_device(rxd);
1023 req = container_of(rxd->req, struct skcipher_request, base);
1024
1025 if (req->iv) {
1026 mdptr = (__be32 *)dmaengine_desc_get_metadata_ptr(rxd->tx_in, &pl,
1027 &ml);
1028 result = (u32 *)req->iv;
1029
1030 for (i = 0; i < (rxd->enc_iv_size / 4); i++)
1031 result[i] = be32_to_cpu(mdptr[i + rxd->iv_idx]);
1032 }
1033
1034 sa_free_sa_rx_data(rxd);
1035
1036 skcipher_request_complete(req, 0);
1037 }
1038
1039 static void
1040 sa_prepare_tx_desc(u32 *mdptr, u32 pslen, u32 *psdata, u32 epiblen, u32 *epib)
1041 {
1042 u32 *out, *in;
1043 int i;
1044
1045 for (out = mdptr, in = epib, i = 0; i < epiblen / sizeof(u32); i++)
1046 *out++ = *in++;
1047
1048 mdptr[4] = (0xFFFF << 16);
1049 for (out = &mdptr[5], in = psdata, i = 0;
1050 i < pslen / sizeof(u32); i++)
1051 *out++ = *in++;
1052 }
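/*
 * Metadata layout produced above: EPIB words first, a flags word at
 * index 4 (0xFFFF << 16), then the PS data (command label) from
 * index 5 onwards.
 */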
1053
1054 static int sa_run(struct sa_req *req)
1055 {
1056 struct sa_rx_data *rxd;
1057 gfp_t gfp_flags;
1058 u32 cmdl[SA_MAX_CMDL_WORDS];
1059 struct sa_crypto_data *pdata = dev_get_drvdata(sa_k3_dev);
1060 struct device *ddev;
1061 struct dma_chan *dma_rx;
1062 int sg_nents, src_nents, dst_nents;
1063 struct scatterlist *src, *dst;
1064 size_t pl, ml, split_size;
1065 struct sa_ctx_info *sa_ctx = req->enc ? &req->ctx->enc : &req->ctx->dec;
1066 int ret;
1067 struct dma_async_tx_descriptor *tx_out;
1068 u32 *mdptr;
1069 bool diff_dst;
1070 enum dma_data_direction dir_src;
1071 struct sa_mapped_sg *mapped_sg;
1072
1073 gfp_flags = req->base->flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
1074 GFP_KERNEL : GFP_ATOMIC;
1075
1076 rxd = kzalloc(sizeof(*rxd), gfp_flags);
1077 if (!rxd)
1078 return -ENOMEM;
1079
1080 if (req->src != req->dst) {
1081 diff_dst = true;
1082 dir_src = DMA_TO_DEVICE;
1083 } else {
1084 diff_dst = false;
1085 dir_src = DMA_BIDIRECTIONAL;
1086 }
1087
1088 /*
1089 * SA2UL has an interesting feature where the receive DMA channel
1090 * is selected based on the data passed to the engine. Within the
1091 * transition range, there is also a space where it is impossible
1092 * to determine where the data will end up, and this should be
1093 * avoided. This will be handled by the SW fallback mechanism by
1094 * the individual algorithm implementations.
1095 */
1096 if (req->size >= 256)
1097 dma_rx = pdata->dma_rx2;
1098 else
1099 dma_rx = pdata->dma_rx1;
1100
1101 ddev = dma_rx->device->dev;
1102 rxd->ddev = ddev;
1103
1104 memcpy(cmdl, sa_ctx->cmdl, sa_ctx->cmdl_size);
1105
1106 sa_update_cmdl(req, cmdl, &sa_ctx->cmdl_upd_info);
1107
1108 if (req->type != CRYPTO_ALG_TYPE_AHASH) {
1109 if (req->enc)
1110 req->type |=
1111 (SA_REQ_SUBTYPE_ENC << SA_REQ_SUBTYPE_SHIFT);
1112 else
1113 req->type |=
1114 (SA_REQ_SUBTYPE_DEC << SA_REQ_SUBTYPE_SHIFT);
1115 }
1116
1117 cmdl[sa_ctx->cmdl_size / sizeof(u32)] = req->type;
1118
1119 /*
1120 * Map the packets, first we check if the data fits into a single
1121 * sg entry and use that if possible. If it does not fit, we check
1122 * if we need to do sg_split to align the scatterlist data on the
1123 * actual data size being processed by the crypto engine.
1124 */
1125 src = req->src;
1126 sg_nents = sg_nents_for_len(src, req->size);
1127
1128 split_size = req->size;
1129
1130 mapped_sg = &rxd->mapped_sg[0];
1131 if (sg_nents == 1 && split_size <= req->src->length) {
1132 src = &mapped_sg->static_sg;
1133 src_nents = 1;
1134 sg_init_table(src, 1);
1135 sg_set_page(src, sg_page(req->src), split_size,
1136 req->src->offset);
1137
1138 mapped_sg->sgt.sgl = src;
1139 mapped_sg->sgt.orig_nents = src_nents;
1140 ret = dma_map_sgtable(ddev, &mapped_sg->sgt, dir_src, 0);
1141 if (ret) {
1142 kfree(rxd);
1143 return ret;
1144 }
1145
1146 mapped_sg->dir = dir_src;
1147 mapped_sg->mapped = true;
1148 } else {
1149 mapped_sg->sgt.sgl = req->src;
1150 mapped_sg->sgt.orig_nents = sg_nents;
1151 ret = dma_map_sgtable(ddev, &mapped_sg->sgt, dir_src, 0);
1152 if (ret) {
1153 kfree(rxd);
1154 return ret;
1155 }
1156
1157 mapped_sg->dir = dir_src;
1158 mapped_sg->mapped = true;
1159
1160 ret = sg_split(mapped_sg->sgt.sgl, mapped_sg->sgt.nents, 0, 1,
1161 &split_size, &src, &src_nents, gfp_flags);
1162 if (ret) {
1163 src_nents = mapped_sg->sgt.nents;
1164 src = mapped_sg->sgt.sgl;
1165 } else {
1166 mapped_sg->split_sg = src;
1167 }
1168 }
1169
1170 dma_sync_sgtable_for_device(ddev, &mapped_sg->sgt, DMA_TO_DEVICE);
1171
1172 if (!diff_dst) {
1173 dst_nents = src_nents;
1174 dst = src;
1175 } else {
1176 dst_nents = sg_nents_for_len(req->dst, req->size);
1177 mapped_sg = &rxd->mapped_sg[1];
1178
1179 if (dst_nents == 1 && split_size <= req->dst->length) {
1180 dst = &mapped_sg->static_sg;
1181 dst_nents = 1;
1182 sg_init_table(dst, 1);
1183 sg_set_page(dst, sg_page(req->dst), split_size,
1184 req->dst->offset);
1185
1186 mapped_sg->sgt.sgl = dst;
1187 mapped_sg->sgt.orig_nents = dst_nents;
1188 ret = dma_map_sgtable(ddev, &mapped_sg->sgt,
1189 DMA_FROM_DEVICE, 0);
1190 if (ret)
1191 goto err_cleanup;
1192
1193 mapped_sg->dir = DMA_FROM_DEVICE;
1194 mapped_sg->mapped = true;
1195 } else {
1196 mapped_sg->sgt.sgl = req->dst;
1197 mapped_sg->sgt.orig_nents = dst_nents;
1198 ret = dma_map_sgtable(ddev, &mapped_sg->sgt,
1199 DMA_FROM_DEVICE, 0);
1200 if (ret)
1201 goto err_cleanup;
1202
1203 mapped_sg->dir = DMA_FROM_DEVICE;
1204 mapped_sg->mapped = true;
1205
1206 ret = sg_split(mapped_sg->sgt.sgl, mapped_sg->sgt.nents,
1207 0, 1, &split_size, &dst, &dst_nents,
1208 gfp_flags);
1209 if (ret) {
1210 dst_nents = mapped_sg->sgt.nents;
1211 dst = mapped_sg->sgt.sgl;
1212 } else {
1213 mapped_sg->split_sg = dst;
1214 }
1215 }
1216 }
1217
1218 rxd->tx_in = dmaengine_prep_slave_sg(dma_rx, dst, dst_nents,
1219 DMA_DEV_TO_MEM,
1220 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
1221 if (!rxd->tx_in) {
1222 dev_err(pdata->dev, "IN prep_slave_sg() failed\n");
1223 ret = -EINVAL;
1224 goto err_cleanup;
1225 }
1226
1227 rxd->req = (void *)req->base;
1228 rxd->enc = req->enc;
1229 rxd->iv_idx = req->ctx->iv_idx;
1230 rxd->enc_iv_size = sa_ctx->cmdl_upd_info.enc_iv.size;
1231 rxd->tx_in->callback = req->callback;
1232 rxd->tx_in->callback_param = rxd;
1233
1234 tx_out = dmaengine_prep_slave_sg(pdata->dma_tx, src,
1235 src_nents, DMA_MEM_TO_DEV,
1236 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
1237
1238 if (!tx_out) {
1239 dev_err(pdata->dev, "OUT prep_slave_sg() failed\n");
1240 ret = -EINVAL;
1241 goto err_cleanup;
1242 }
1243
1244 /*
1245 * Prepare metadata for DMA engine. This essentially describes the
1246 * crypto algorithm to be used, data sizes, different keys etc.
1247 */
1248 mdptr = (u32 *)dmaengine_desc_get_metadata_ptr(tx_out, &pl, &ml);
1249
1250 sa_prepare_tx_desc(mdptr, (sa_ctx->cmdl_size + (SA_PSDATA_CTX_WORDS *
1251 sizeof(u32))), cmdl, sizeof(sa_ctx->epib),
1252 sa_ctx->epib);
1253
1254 ml = sa_ctx->cmdl_size + (SA_PSDATA_CTX_WORDS * sizeof(u32));
1255 dmaengine_desc_set_metadata_len(tx_out, req->mdata_size);
1256
1257 dmaengine_submit(tx_out);
1258 dmaengine_submit(rxd->tx_in);
1259
1260 dma_async_issue_pending(dma_rx);
1261 dma_async_issue_pending(pdata->dma_tx);
1262
1263 return -EINPROGRESS;
1264
1265 err_cleanup:
1266 sa_free_sa_rx_data(rxd);
1267
1268 return ret;
1269 }
1270
1271 static int sa_cipher_run(struct skcipher_request *req, u8 *iv, int enc)
1272 {
1273 struct sa_tfm_ctx *ctx =
1274 crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
1275 struct crypto_alg *alg = req->base.tfm->__crt_alg;
1276 struct sa_req sa_req = { 0 };
1277 int ret;
1278
1279 if (!req->cryptlen)
1280 return 0;
1281
1282 if (req->cryptlen % alg->cra_blocksize)
1283 return -EINVAL;
1284
1285 /* Use SW fallback if the data size is not supported */
1286 if (req->cryptlen > SA_MAX_DATA_SZ ||
1287 (req->cryptlen >= SA_UNSAFE_DATA_SZ_MIN &&
1288 req->cryptlen <= SA_UNSAFE_DATA_SZ_MAX)) {
1289 SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback.skcipher);
1290
1291 skcipher_request_set_sync_tfm(subreq, ctx->fallback.skcipher);
1292 skcipher_request_set_callback(subreq, req->base.flags,
1293 NULL, NULL);
1294 skcipher_request_set_crypt(subreq, req->src, req->dst,
1295 req->cryptlen, req->iv);
1296 if (enc)
1297 ret = crypto_skcipher_encrypt(subreq);
1298 else
1299 ret = crypto_skcipher_decrypt(subreq);
1300
1301 skcipher_request_zero(subreq);
1302 return ret;
1303 }
1304
1305 sa_req.size = req->cryptlen;
1306 sa_req.enc_size = req->cryptlen;
1307 sa_req.src = req->src;
1308 sa_req.dst = req->dst;
1309 sa_req.enc_iv = iv;
1310 sa_req.type = CRYPTO_ALG_TYPE_SKCIPHER;
1311 sa_req.enc = enc;
1312 sa_req.callback = sa_aes_dma_in_callback;
1313 sa_req.mdata_size = 44;
1314 sa_req.base = &req->base;
1315 sa_req.ctx = ctx;
1316
1317 return sa_run(&sa_req);
1318 }
1319
1320 static int sa_encrypt(struct skcipher_request *req)
1321 {
1322 return sa_cipher_run(req, req->iv, 1);
1323 }
1324
1325 static int sa_decrypt(struct skcipher_request *req)
1326 {
1327 return sa_cipher_run(req, req->iv, 0);
1328 }
1329
1330 static void sa_sha_dma_in_callback(void *data)
1331 {
1332 struct sa_rx_data *rxd = (struct sa_rx_data *)data;
1333 struct ahash_request *req;
1334 struct crypto_ahash *tfm;
1335 unsigned int authsize;
1336 int i;
1337 size_t ml, pl;
1338 u32 *result;
1339 __be32 *mdptr;
1340
1341 sa_sync_from_device(rxd);
1342 req = container_of(rxd->req, struct ahash_request, base);
1343 tfm = crypto_ahash_reqtfm(req);
1344 authsize = crypto_ahash_digestsize(tfm);
1345
1346 mdptr = (__be32 *)dmaengine_desc_get_metadata_ptr(rxd->tx_in, &pl, &ml);
1347 result = (u32 *)req->result;
1348
1349 for (i = 0; i < (authsize / 4); i++)
1350 result[i] = be32_to_cpu(mdptr[i + 4]);
1351
1352 sa_free_sa_rx_data(rxd);
1353
1354 ahash_request_complete(req, 0);
1355 }
1356
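/*
 * The engine is not used for zero-length messages; the precomputed
 * digest of the empty message is returned directly instead.
 */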
1357 static int zero_message_process(struct ahash_request *req)
1358 {
1359 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1360 int sa_digest_size = crypto_ahash_digestsize(tfm);
1361
1362 switch (sa_digest_size) {
1363 case SHA1_DIGEST_SIZE:
1364 memcpy(req->result, sha1_zero_message_hash, sa_digest_size);
1365 break;
1366 case SHA256_DIGEST_SIZE:
1367 memcpy(req->result, sha256_zero_message_hash, sa_digest_size);
1368 break;
1369 case SHA512_DIGEST_SIZE:
1370 memcpy(req->result, sha512_zero_message_hash, sa_digest_size);
1371 break;
1372 default:
1373 return -EINVAL;
1374 }
1375
1376 return 0;
1377 }
1378
1379 static int sa_sha_run(struct ahash_request *req)
1380 {
1381 struct sa_tfm_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
1382 struct sa_sha_req_ctx *rctx = ahash_request_ctx(req);
1383 struct sa_req sa_req = { 0 };
1384 size_t auth_len;
1385
1386 auth_len = req->nbytes;
1387
1388 if (!auth_len)
1389 return zero_message_process(req);
1390
1391 if (auth_len > SA_MAX_DATA_SZ ||
1392 (auth_len >= SA_UNSAFE_DATA_SZ_MIN &&
1393 auth_len <= SA_UNSAFE_DATA_SZ_MAX)) {
1394 struct ahash_request *subreq = &rctx->fallback_req;
1395 int ret = 0;
1396
1397 ahash_request_set_tfm(subreq, ctx->fallback.ahash);
1398 subreq->base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
1399
1400 crypto_ahash_init(subreq);
1401
1402 subreq->nbytes = auth_len;
1403 subreq->src = req->src;
1404 subreq->result = req->result;
1405
1406 ret |= crypto_ahash_update(subreq);
1407
1408 subreq->nbytes = 0;
1409
1410 ret |= crypto_ahash_final(subreq);
1411
1412 return ret;
1413 }
1414
1415 sa_req.size = auth_len;
1416 sa_req.auth_size = auth_len;
1417 sa_req.src = req->src;
1418 sa_req.dst = req->src;
1419 sa_req.enc = true;
1420 sa_req.type = CRYPTO_ALG_TYPE_AHASH;
1421 sa_req.callback = sa_sha_dma_in_callback;
1422 sa_req.mdata_size = 28;
1423 sa_req.ctx = ctx;
1424 sa_req.base = &req->base;
1425
1426 return sa_run(&sa_req);
1427 }
1428
1429 static int sa_sha_setup(struct sa_tfm_ctx *ctx, struct algo_data *ad)
1430 {
1431 int bs = crypto_shash_blocksize(ctx->shash);
1432 int cmdl_len;
1433 struct sa_cmdl_cfg cfg;
1434
1435 ad->enc_eng.sc_size = SA_CTX_ENC_TYPE1_SZ;
1436 ad->auth_eng.eng_id = SA_ENG_ID_AM1;
1437 ad->auth_eng.sc_size = SA_CTX_AUTH_TYPE2_SZ;
1438
1439 memset(ctx->authkey, 0, bs);
1440 memset(&cfg, 0, sizeof(cfg));
1441 cfg.aalg = ad->aalg_id;
1442 cfg.enc_eng_id = ad->enc_eng.eng_id;
1443 cfg.auth_eng_id = ad->auth_eng.eng_id;
1444 cfg.iv_size = 0;
1445 cfg.akey = NULL;
1446 cfg.akey_len = 0;
1447
1448 /* Setup Encryption Security Context & Command label template */
1449 if (sa_init_sc(&ctx->enc, NULL, 0, NULL, 0, ad, 0,
1450 &ctx->enc.epib[1]))
1451 goto badkey;
1452
1453 cmdl_len = sa_format_cmdl_gen(&cfg,
1454 (u8 *)ctx->enc.cmdl,
1455 &ctx->enc.cmdl_upd_info);
1456 if (cmdl_len <= 0 || (cmdl_len > SA_MAX_CMDL_WORDS * sizeof(u32)))
1457 goto badkey;
1458
1459 ctx->enc.cmdl_size = cmdl_len;
1460
1461 return 0;
1462
1463 badkey:
1464 dev_err(sa_k3_dev, "%s: badkey\n", __func__);
1465 return -EINVAL;
1466 }
1467
1468 static int sa_sha_cra_init_alg(struct crypto_tfm *tfm, const char *alg_base)
1469 {
1470 struct sa_tfm_ctx *ctx = crypto_tfm_ctx(tfm);
1471 struct sa_crypto_data *data = dev_get_drvdata(sa_k3_dev);
1472 int ret;
1473
1474 memset(ctx, 0, sizeof(*ctx));
1475 ctx->dev_data = data;
1476 ret = sa_init_ctx_info(&ctx->enc, data);
1477 if (ret)
1478 return ret;
1479
1480 if (alg_base) {
1481 ctx->shash = crypto_alloc_shash(alg_base, 0,
1482 CRYPTO_ALG_NEED_FALLBACK);
1483 if (IS_ERR(ctx->shash)) {
1484 dev_err(sa_k3_dev, "base driver %s couldn't be loaded\n",
1485 alg_base);
1486 return PTR_ERR(ctx->shash);
1487 }
1488 /* for fallback */
1489 ctx->fallback.ahash =
1490 crypto_alloc_ahash(alg_base, 0,
1491 CRYPTO_ALG_NEED_FALLBACK);
1492 if (IS_ERR(ctx->fallback.ahash)) {
1493 dev_err(ctx->dev_data->dev,
1494 "Could not load fallback driver\n");
1495 return PTR_ERR(ctx->fallback.ahash);
1496 }
1497 }
1498
1499 dev_dbg(sa_k3_dev, "%s(0x%p) sc-ids(0x%x(0x%pad), 0x%x(0x%pad))\n",
1500 __func__, tfm, ctx->enc.sc_id, &ctx->enc.sc_phys,
1501 ctx->dec.sc_id, &ctx->dec.sc_phys);
1502
1503 crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
1504 sizeof(struct sa_sha_req_ctx) +
1505 crypto_ahash_reqsize(ctx->fallback.ahash));
1506
1507 return 0;
1508 }
1509
1510 static int sa_sha_digest(struct ahash_request *req)
1511 {
1512 return sa_sha_run(req);
1513 }
1514
1515 static int sa_sha_init(struct ahash_request *req)
1516 {
1517 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1518 struct sa_sha_req_ctx *rctx = ahash_request_ctx(req);
1519 struct sa_tfm_ctx *ctx = crypto_ahash_ctx(tfm);
1520
1521 dev_dbg(sa_k3_dev, "init: digest size: %u, rctx=%p\n",
1522 crypto_ahash_digestsize(tfm), rctx);
1523
1524 ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback.ahash);
1525 rctx->fallback_req.base.flags =
1526 req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
1527
1528 return crypto_ahash_init(&rctx->fallback_req);
1529 }
1530
1531 static int sa_sha_update(struct ahash_request *req)
1532 {
1533 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1534 struct sa_sha_req_ctx *rctx = ahash_request_ctx(req);
1535 struct sa_tfm_ctx *ctx = crypto_ahash_ctx(tfm);
1536
1537 ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback.ahash);
1538 rctx->fallback_req.base.flags =
1539 req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
1540 rctx->fallback_req.nbytes = req->nbytes;
1541 rctx->fallback_req.src = req->src;
1542
1543 return crypto_ahash_update(&rctx->fallback_req);
1544 }
1545
1546 static int sa_sha_final(struct ahash_request *req)
1547 {
1548 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1549 struct sa_sha_req_ctx *rctx = ahash_request_ctx(req);
1550 struct sa_tfm_ctx *ctx = crypto_ahash_ctx(tfm);
1551
1552 ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback.ahash);
1553 rctx->fallback_req.base.flags =
1554 req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
1555 rctx->fallback_req.result = req->result;
1556
1557 return crypto_ahash_final(&rctx->fallback_req);
1558 }
1559
1560 static int sa_sha_finup(struct ahash_request *req)
1561 {
1562 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1563 struct sa_sha_req_ctx *rctx = ahash_request_ctx(req);
1564 struct sa_tfm_ctx *ctx = crypto_ahash_ctx(tfm);
1565
1566 ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback.ahash);
1567 rctx->fallback_req.base.flags =
1568 req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
1569
1570 rctx->fallback_req.nbytes = req->nbytes;
1571 rctx->fallback_req.src = req->src;
1572 rctx->fallback_req.result = req->result;
1573
1574 return crypto_ahash_finup(&rctx->fallback_req);
1575 }
1576
1577 static int sa_sha_import(struct ahash_request *req, const void *in)
1578 {
1579 struct sa_sha_req_ctx *rctx = ahash_request_ctx(req);
1580 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1581 struct sa_tfm_ctx *ctx = crypto_ahash_ctx(tfm);
1582
1583 ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback.ahash);
1584 rctx->fallback_req.base.flags = req->base.flags &
1585 CRYPTO_TFM_REQ_MAY_SLEEP;
1586
1587 return crypto_ahash_import(&rctx->fallback_req, in);
1588 }
1589
1590 static int sa_sha_export(struct ahash_request *req, void *out)
1591 {
1592 struct sa_sha_req_ctx *rctx = ahash_request_ctx(req);
1593 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1594 struct sa_tfm_ctx *ctx = crypto_ahash_ctx(tfm);
1595 struct ahash_request *subreq = &rctx->fallback_req;
1596
1597 ahash_request_set_tfm(subreq, ctx->fallback.ahash);
1598 subreq->base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
1599
1600 return crypto_ahash_export(subreq, out);
1601 }
1602
1603 static int sa_sha1_cra_init(struct crypto_tfm *tfm)
1604 {
1605 struct algo_data ad = { 0 };
1606 struct sa_tfm_ctx *ctx = crypto_tfm_ctx(tfm);
1607
1608 sa_sha_cra_init_alg(tfm, "sha1");
1609
1610 ad.aalg_id = SA_AALG_ID_SHA1;
1611 ad.hash_size = SHA1_DIGEST_SIZE;
1612 ad.auth_ctrl = SA_AUTH_SW_CTRL_SHA1;
1613
1614 sa_sha_setup(ctx, &ad);
1615
1616 return 0;
1617 }
1618
1619 static int sa_sha256_cra_init(struct crypto_tfm *tfm)
1620 {
1621 struct algo_data ad = { 0 };
1622 struct sa_tfm_ctx *ctx = crypto_tfm_ctx(tfm);
1623
1624 sa_sha_cra_init_alg(tfm, "sha256");
1625
1626 ad.aalg_id = SA_AALG_ID_SHA2_256;
1627 ad.hash_size = SHA256_DIGEST_SIZE;
1628 ad.auth_ctrl = SA_AUTH_SW_CTRL_SHA256;
1629
1630 sa_sha_setup(ctx, &ad);
1631
1632 return 0;
1633 }
1634
1635 static int sa_sha512_cra_init(struct crypto_tfm *tfm)
1636 {
1637 struct algo_data ad = { 0 };
1638 struct sa_tfm_ctx *ctx = crypto_tfm_ctx(tfm);
1639
1640 sa_sha_cra_init_alg(tfm, "sha512");
1641
1642 ad.aalg_id = SA_AALG_ID_SHA2_512;
1643 ad.hash_size = SHA512_DIGEST_SIZE;
1644 ad.auth_ctrl = SA_AUTH_SW_CTRL_SHA512;
1645
1646 sa_sha_setup(ctx, &ad);
1647
1648 return 0;
1649 }
1650
1651 static void sa_sha_cra_exit(struct crypto_tfm *tfm)
1652 {
1653 struct sa_tfm_ctx *ctx = crypto_tfm_ctx(tfm);
1654 struct sa_crypto_data *data = dev_get_drvdata(sa_k3_dev);
1655
1656 dev_dbg(sa_k3_dev, "%s(0x%p) sc-ids(0x%x(0x%pad), 0x%x(0x%pad))\n",
1657 __func__, tfm, ctx->enc.sc_id, &ctx->enc.sc_phys,
1658 ctx->dec.sc_id, &ctx->dec.sc_phys);
1659
1660 if (crypto_tfm_alg_type(tfm) == CRYPTO_ALG_TYPE_AHASH)
1661 sa_free_ctx_info(&ctx->enc, data);
1662
1663 crypto_free_shash(ctx->shash);
1664 crypto_free_ahash(ctx->fallback.ahash);
1665 }
1666
1667 static void sa_aead_dma_in_callback(void *data)
1668 {
1669 struct sa_rx_data *rxd = (struct sa_rx_data *)data;
1670 struct aead_request *req;
1671 struct crypto_aead *tfm;
1672 unsigned int start;
1673 unsigned int authsize;
1674 u8 auth_tag[SA_MAX_AUTH_TAG_SZ];
1675 size_t pl, ml;
1676 int i;
1677 int err = 0;
1678 u16 auth_len;
1679 u32 *mdptr;
1680
1681 sa_sync_from_device(rxd);
1682 req = container_of(rxd->req, struct aead_request, base);
1683 tfm = crypto_aead_reqtfm(req);
1684 start = req->assoclen + req->cryptlen;
1685 authsize = crypto_aead_authsize(tfm);
1686
1687 mdptr = (u32 *)dmaengine_desc_get_metadata_ptr(rxd->tx_in, &pl, &ml);
1688 for (i = 0; i < (authsize / 4); i++)
1689 mdptr[i + 4] = swab32(mdptr[i + 4]);
1690
1691 auth_len = req->assoclen + req->cryptlen;
1692
1693 if (rxd->enc) {
1694 scatterwalk_map_and_copy(&mdptr[4], req->dst, start, authsize,
1695 1);
1696 } else {
1697 auth_len -= authsize;
1698 start -= authsize;
1699 scatterwalk_map_and_copy(auth_tag, req->src, start, authsize,
1700 0);
1701
1702 err = memcmp(&mdptr[4], auth_tag, authsize) ? -EBADMSG : 0;
1703 }
1704
1705 sa_free_sa_rx_data(rxd);
1706
1707 aead_request_complete(req, err);
1708 }
1709
1710 static int sa_cra_init_aead(struct crypto_aead *tfm, const char *hash,
1711 const char *fallback)
1712 {
1713 struct sa_tfm_ctx *ctx = crypto_aead_ctx(tfm);
1714 struct sa_crypto_data *data = dev_get_drvdata(sa_k3_dev);
1715 int ret;
1716
1717 memzero_explicit(ctx, sizeof(*ctx));
1718
1719 ctx->shash = crypto_alloc_shash(hash, 0, CRYPTO_ALG_NEED_FALLBACK);
1720 if (IS_ERR(ctx->shash)) {
1721 dev_err(sa_k3_dev, "base driver %s couldn't be loaded\n", hash);
1722 return PTR_ERR(ctx->shash);
1723 }
1724
1725 ctx->fallback.aead = crypto_alloc_aead(fallback, 0,
1726 CRYPTO_ALG_NEED_FALLBACK);
1727
1728 if (IS_ERR(ctx->fallback.aead)) {
1729 dev_err(sa_k3_dev, "fallback driver %s couldn't be loaded\n",
1730 fallback);
1731 return PTR_ERR(ctx->fallback.aead);
1732 }
1733
1734 crypto_aead_set_reqsize(tfm, sizeof(struct aead_request) +
1735 crypto_aead_reqsize(ctx->fallback.aead));
1736
1737 ret = sa_init_ctx_info(&ctx->enc, data);
1738 if (ret)
1739 return ret;
1740
1741 ret = sa_init_ctx_info(&ctx->dec, data);
1742 if (ret) {
1743 sa_free_ctx_info(&ctx->enc, data);
1744 return ret;
1745 }
1746
1747 dev_dbg(sa_k3_dev, "%s(0x%p) sc-ids(0x%x(0x%pad), 0x%x(0x%pad))\n",
1748 __func__, tfm, ctx->enc.sc_id, &ctx->enc.sc_phys,
1749 ctx->dec.sc_id, &ctx->dec.sc_phys);
1750
1751 return ret;
1752 }
1753
1754 static int sa_cra_init_aead_sha1(struct crypto_aead *tfm)
1755 {
1756 return sa_cra_init_aead(tfm, "sha1",
1757 "authenc(hmac(sha1-ce),cbc(aes-ce))");
1758 }
1759
1760 static int sa_cra_init_aead_sha256(struct crypto_aead *tfm)
1761 {
1762 return sa_cra_init_aead(tfm, "sha256",
1763 "authenc(hmac(sha256-ce),cbc(aes-ce))");
1764 }
1765
1766 static void sa_exit_tfm_aead(struct crypto_aead *tfm)
1767 {
1768 struct sa_tfm_ctx *ctx = crypto_aead_ctx(tfm);
1769 struct sa_crypto_data *data = dev_get_drvdata(sa_k3_dev);
1770
1771 crypto_free_shash(ctx->shash);
1772 crypto_free_aead(ctx->fallback.aead);
1773
1774 sa_free_ctx_info(&ctx->enc, data);
1775 sa_free_ctx_info(&ctx->dec, data);
1776 }
1777
1778 /* AEAD algorithm configuration interface function */
1779 static int sa_aead_setkey(struct crypto_aead *authenc,
1780 const u8 *key, unsigned int keylen,
1781 struct algo_data *ad)
1782 {
1783 struct sa_tfm_ctx *ctx = crypto_aead_ctx(authenc);
1784 struct crypto_authenc_keys keys;
1785 int cmdl_len;
1786 struct sa_cmdl_cfg cfg;
1787 int key_idx;
1788
1789 if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
1790 return -EINVAL;
1791
1792 /* Convert the key size (16/24/32) to the key size index (0/1/2) */
1793 key_idx = (keys.enckeylen >> 3) - 2;
1794 if (key_idx >= 3)
1795 return -EINVAL;
1796
1797 ad->ctx = ctx;
1798 ad->enc_eng.eng_id = SA_ENG_ID_EM1;
1799 ad->enc_eng.sc_size = SA_CTX_ENC_TYPE1_SZ;
1800 ad->auth_eng.eng_id = SA_ENG_ID_AM1;
1801 ad->auth_eng.sc_size = SA_CTX_AUTH_TYPE2_SZ;
1802 ad->mci_enc = mci_cbc_enc_no_iv_array[key_idx];
1803 ad->mci_dec = mci_cbc_dec_no_iv_array[key_idx];
1804 ad->inv_key = true;
1805 ad->keyed_mac = true;
1806 ad->ealg_id = SA_EALG_ID_AES_CBC;
1807 ad->prep_iopad = sa_prepare_iopads;
1808
1809 memset(&cfg, 0, sizeof(cfg));
1810 cfg.enc = true;
1811 cfg.aalg = ad->aalg_id;
1812 cfg.enc_eng_id = ad->enc_eng.eng_id;
1813 cfg.auth_eng_id = ad->auth_eng.eng_id;
1814 cfg.iv_size = crypto_aead_ivsize(authenc);
1815 cfg.akey = keys.authkey;
1816 cfg.akey_len = keys.authkeylen;
1817
1818 /* Setup Encryption Security Context & Command label template */
1819 if (sa_init_sc(&ctx->enc, keys.enckey, keys.enckeylen,
1820 keys.authkey, keys.authkeylen,
1821 ad, 1, &ctx->enc.epib[1]))
1822 return -EINVAL;
1823
1824 cmdl_len = sa_format_cmdl_gen(&cfg,
1825 (u8 *)ctx->enc.cmdl,
1826 &ctx->enc.cmdl_upd_info);
1827 if (cmdl_len <= 0 || (cmdl_len > SA_MAX_CMDL_WORDS * sizeof(u32)))
1828 return -EINVAL;
1829
1830 ctx->enc.cmdl_size = cmdl_len;
1831
1832 /* Setup Decryption Security Context & Command label template */
1833 if (sa_init_sc(&ctx->dec, keys.enckey, keys.enckeylen,
1834 keys.authkey, keys.authkeylen,
1835 ad, 0, &ctx->dec.epib[1]))
1836 return -EINVAL;
1837
1838 cfg.enc = false;
1839 cmdl_len = sa_format_cmdl_gen(&cfg, (u8 *)ctx->dec.cmdl,
1840 &ctx->dec.cmdl_upd_info);
1841
1842 if (cmdl_len <= 0 || (cmdl_len > SA_MAX_CMDL_WORDS * sizeof(u32)))
1843 return -EINVAL;
1844
1845 ctx->dec.cmdl_size = cmdl_len;
1846
1847 crypto_aead_clear_flags(ctx->fallback.aead, CRYPTO_TFM_REQ_MASK);
1848 crypto_aead_set_flags(ctx->fallback.aead,
1849 crypto_aead_get_flags(authenc) &
1850 CRYPTO_TFM_REQ_MASK);
1851 crypto_aead_setkey(ctx->fallback.aead, key, keylen);
1852
1853 return 0;
1854 }
1855
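/* Propagate the requested authentication tag size to the fallback AEAD */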
1856 static int sa_aead_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
1857 {
1858 struct sa_tfm_ctx *ctx = crypto_tfm_ctx(crypto_aead_tfm(tfm));
1859
1860 return crypto_aead_setauthsize(ctx->fallback.aead, authsize);
1861 }
1862
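/* setkey for authenc(hmac(sha1),cbc(aes)) */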
1863 static int sa_aead_cbc_sha1_setkey(struct crypto_aead *authenc,
1864 const u8 *key, unsigned int keylen)
1865 {
1866 struct algo_data ad = { 0 };
1867
1868 ad.ealg_id = SA_EALG_ID_AES_CBC;
1869 ad.aalg_id = SA_AALG_ID_HMAC_SHA1;
1870 ad.hash_size = SHA1_DIGEST_SIZE;
1871 ad.auth_ctrl = SA_AUTH_SW_CTRL_SHA1;
1872
1873 return sa_aead_setkey(authenc, key, keylen, &ad);
1874 }
1875
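/* setkey for authenc(hmac(sha256),cbc(aes)) */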
1876 static int sa_aead_cbc_sha256_setkey(struct crypto_aead *authenc,
1877 const u8 *key, unsigned int keylen)
1878 {
1879 struct algo_data ad = { 0 };
1880
1881 ad.ealg_id = SA_EALG_ID_AES_CBC;
1882 ad.aalg_id = SA_AALG_ID_HMAC_SHA2_256;
1883 ad.hash_size = SHA256_DIGEST_SIZE;
1884 ad.auth_ctrl = SA_AUTH_SW_CTRL_SHA256;
1885
1886 return sa_aead_setkey(authenc, key, keylen, &ad);
1887 }
1888
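/* Build an SA request from the AEAD request and hand it to the hardware */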
1889 static int sa_aead_run(struct aead_request *req, u8 *iv, int enc)
1890 {
1891 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1892 struct sa_tfm_ctx *ctx = crypto_aead_ctx(tfm);
1893 struct sa_req sa_req = { 0 };
1894 size_t auth_size, enc_size;
1895
1896 enc_size = req->cryptlen;
1897 auth_size = req->assoclen + req->cryptlen;
1898
1899 if (!enc) {
1900 enc_size -= crypto_aead_authsize(tfm);
1901 auth_size -= crypto_aead_authsize(tfm);
1902 }
1903
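	/*
	 * Sizes the hardware cannot process (too large, or within the
	 * known-unsafe range) are handed to the software fallback AEAD.
	 */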
1904 if (auth_size > SA_MAX_DATA_SZ ||
1905 (auth_size >= SA_UNSAFE_DATA_SZ_MIN &&
1906 auth_size <= SA_UNSAFE_DATA_SZ_MAX)) {
1907 struct aead_request *subreq = aead_request_ctx(req);
1908 int ret;
1909
1910 aead_request_set_tfm(subreq, ctx->fallback.aead);
1911 aead_request_set_callback(subreq, req->base.flags,
1912 req->base.complete, req->base.data);
1913 aead_request_set_crypt(subreq, req->src, req->dst,
1914 req->cryptlen, req->iv);
1915 aead_request_set_ad(subreq, req->assoclen);
1916
1917 ret = enc ? crypto_aead_encrypt(subreq) :
1918 crypto_aead_decrypt(subreq);
1919 return ret;
1920 }
1921
1922 sa_req.enc_offset = req->assoclen;
1923 sa_req.enc_size = enc_size;
1924 sa_req.auth_size = auth_size;
1925 sa_req.size = auth_size;
1926 sa_req.enc_iv = iv;
1927 sa_req.type = CRYPTO_ALG_TYPE_AEAD;
1928 sa_req.enc = enc;
1929 sa_req.callback = sa_aead_dma_in_callback;
1930 sa_req.mdata_size = 52;
1931 sa_req.base = &req->base;
1932 sa_req.ctx = ctx;
1933 sa_req.src = req->src;
1934 sa_req.dst = req->dst;
1935
1936 return sa_run(&sa_req);
1937 }
1938
1939 /* AEAD algorithm encrypt interface function */
1940 static int sa_aead_encrypt(struct aead_request *req)
1941 {
1942 return sa_aead_run(req, req->iv, 1);
1943 }
1944
1945 /* AEAD algorithm decrypt interface function */
1946 static int sa_aead_decrypt(struct aead_request *req)
1947 {
1948 return sa_aead_run(req, req->iv, 0);
1949 }
1950
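/* Algorithms handled by this driver and registered with the crypto API */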
1951 static struct sa_alg_tmpl sa_algs[] = {
1952 {
1953 .type = CRYPTO_ALG_TYPE_SKCIPHER,
1954 .alg.skcipher = {
1955 .base.cra_name = "cbc(aes)",
1956 .base.cra_driver_name = "cbc-aes-sa2ul",
1957 .base.cra_priority = 30000,
1958 .base.cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
1959 CRYPTO_ALG_KERN_DRIVER_ONLY |
1960 CRYPTO_ALG_ASYNC |
1961 CRYPTO_ALG_NEED_FALLBACK,
1962 .base.cra_blocksize = AES_BLOCK_SIZE,
1963 .base.cra_ctxsize = sizeof(struct sa_tfm_ctx),
1964 .base.cra_module = THIS_MODULE,
1965 .init = sa_cipher_cra_init,
1966 .exit = sa_cipher_cra_exit,
1967 .min_keysize = AES_MIN_KEY_SIZE,
1968 .max_keysize = AES_MAX_KEY_SIZE,
1969 .ivsize = AES_BLOCK_SIZE,
1970 .setkey = sa_aes_cbc_setkey,
1971 .encrypt = sa_encrypt,
1972 .decrypt = sa_decrypt,
1973 }
1974 },
1975 {
1976 .type = CRYPTO_ALG_TYPE_SKCIPHER,
1977 .alg.skcipher = {
1978 .base.cra_name = "ecb(aes)",
1979 .base.cra_driver_name = "ecb-aes-sa2ul",
1980 .base.cra_priority = 30000,
1981 .base.cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
1982 CRYPTO_ALG_KERN_DRIVER_ONLY |
1983 CRYPTO_ALG_ASYNC |
1984 CRYPTO_ALG_NEED_FALLBACK,
1985 .base.cra_blocksize = AES_BLOCK_SIZE,
1986 .base.cra_ctxsize = sizeof(struct sa_tfm_ctx),
1987 .base.cra_module = THIS_MODULE,
1988 .init = sa_cipher_cra_init,
1989 .exit = sa_cipher_cra_exit,
1990 .min_keysize = AES_MIN_KEY_SIZE,
1991 .max_keysize = AES_MAX_KEY_SIZE,
1992 .setkey = sa_aes_ecb_setkey,
1993 .encrypt = sa_encrypt,
1994 .decrypt = sa_decrypt,
1995 }
1996 },
1997 {
1998 .type = CRYPTO_ALG_TYPE_SKCIPHER,
1999 .alg.skcipher = {
2000 .base.cra_name = "cbc(des3_ede)",
2001 .base.cra_driver_name = "cbc-des3-sa2ul",
2002 .base.cra_priority = 30000,
2003 .base.cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
2004 CRYPTO_ALG_KERN_DRIVER_ONLY |
2005 CRYPTO_ALG_ASYNC |
2006 CRYPTO_ALG_NEED_FALLBACK,
2007 .base.cra_blocksize = DES_BLOCK_SIZE,
2008 .base.cra_ctxsize = sizeof(struct sa_tfm_ctx),
2009 .base.cra_module = THIS_MODULE,
2010 .init = sa_cipher_cra_init,
2011 .exit = sa_cipher_cra_exit,
2012 .min_keysize = 3 * DES_KEY_SIZE,
2013 .max_keysize = 3 * DES_KEY_SIZE,
2014 .ivsize = DES_BLOCK_SIZE,
2015 .setkey = sa_3des_cbc_setkey,
2016 .encrypt = sa_encrypt,
2017 .decrypt = sa_decrypt,
2018 }
2019 },
2020 {
2021 .type = CRYPTO_ALG_TYPE_SKCIPHER,
2022 .alg.skcipher = {
2023 .base.cra_name = "ecb(des3_ede)",
2024 .base.cra_driver_name = "ecb-des3-sa2ul",
2025 .base.cra_priority = 30000,
2026 .base.cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
2027 CRYPTO_ALG_KERN_DRIVER_ONLY |
2028 CRYPTO_ALG_ASYNC |
2029 CRYPTO_ALG_NEED_FALLBACK,
2030 .base.cra_blocksize = DES_BLOCK_SIZE,
2031 .base.cra_ctxsize = sizeof(struct sa_tfm_ctx),
2032 .base.cra_module = THIS_MODULE,
2033 .init = sa_cipher_cra_init,
2034 .exit = sa_cipher_cra_exit,
2035 .min_keysize = 3 * DES_KEY_SIZE,
2036 .max_keysize = 3 * DES_KEY_SIZE,
2037 .setkey = sa_3des_ecb_setkey,
2038 .encrypt = sa_encrypt,
2039 .decrypt = sa_decrypt,
2040 }
2041 },
2042 {
2043 .type = CRYPTO_ALG_TYPE_AHASH,
2044 .alg.ahash = {
2045 .halg.base = {
2046 .cra_name = "sha1",
2047 .cra_driver_name = "sha1-sa2ul",
2048 .cra_priority = 400,
2049 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
2050 CRYPTO_ALG_ASYNC |
2051 CRYPTO_ALG_KERN_DRIVER_ONLY |
2052 CRYPTO_ALG_NEED_FALLBACK,
2053 .cra_blocksize = SHA1_BLOCK_SIZE,
2054 .cra_ctxsize = sizeof(struct sa_tfm_ctx),
2055 .cra_module = THIS_MODULE,
2056 .cra_init = sa_sha1_cra_init,
2057 .cra_exit = sa_sha_cra_exit,
2058 },
2059 .halg.digestsize = SHA1_DIGEST_SIZE,
2060 .halg.statesize = sizeof(struct sa_sha_req_ctx) +
2061 sizeof(struct sha1_state),
2062 .init = sa_sha_init,
2063 .update = sa_sha_update,
2064 .final = sa_sha_final,
2065 .finup = sa_sha_finup,
2066 .digest = sa_sha_digest,
2067 .export = sa_sha_export,
2068 .import = sa_sha_import,
2069 },
2070 },
2071 {
2072 .type = CRYPTO_ALG_TYPE_AHASH,
2073 .alg.ahash = {
2074 .halg.base = {
2075 .cra_name = "sha256",
2076 .cra_driver_name = "sha256-sa2ul",
2077 .cra_priority = 400,
2078 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
2079 CRYPTO_ALG_ASYNC |
2080 CRYPTO_ALG_KERN_DRIVER_ONLY |
2081 CRYPTO_ALG_NEED_FALLBACK,
2082 .cra_blocksize = SHA256_BLOCK_SIZE,
2083 .cra_ctxsize = sizeof(struct sa_tfm_ctx),
2084 .cra_module = THIS_MODULE,
2085 .cra_init = sa_sha256_cra_init,
2086 .cra_exit = sa_sha_cra_exit,
2087 },
2088 .halg.digestsize = SHA256_DIGEST_SIZE,
2089 .halg.statesize = sizeof(struct sa_sha_req_ctx) +
2090 sizeof(struct sha256_state),
2091 .init = sa_sha_init,
2092 .update = sa_sha_update,
2093 .final = sa_sha_final,
2094 .finup = sa_sha_finup,
2095 .digest = sa_sha_digest,
2096 .export = sa_sha_export,
2097 .import = sa_sha_import,
2098 },
2099 },
2100 {
2101 .type = CRYPTO_ALG_TYPE_AHASH,
2102 .alg.ahash = {
2103 .halg.base = {
2104 .cra_name = "sha512",
2105 .cra_driver_name = "sha512-sa2ul",
2106 .cra_priority = 400,
2107 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
2108 CRYPTO_ALG_ASYNC |
2109 CRYPTO_ALG_KERN_DRIVER_ONLY |
2110 CRYPTO_ALG_NEED_FALLBACK,
2111 .cra_blocksize = SHA512_BLOCK_SIZE,
2112 .cra_ctxsize = sizeof(struct sa_tfm_ctx),
2113 .cra_module = THIS_MODULE,
2114 .cra_init = sa_sha512_cra_init,
2115 .cra_exit = sa_sha_cra_exit,
2116 },
2117 .halg.digestsize = SHA512_DIGEST_SIZE,
2118 .halg.statesize = sizeof(struct sa_sha_req_ctx) +
2119 sizeof(struct sha512_state),
2120 .init = sa_sha_init,
2121 .update = sa_sha_update,
2122 .final = sa_sha_final,
2123 .finup = sa_sha_finup,
2124 .digest = sa_sha_digest,
2125 .export = sa_sha_export,
2126 .import = sa_sha_import,
2127 },
2128 },
2129 {
2130 .type = CRYPTO_ALG_TYPE_AEAD,
2131 .alg.aead = {
2132 .base = {
2133 .cra_name = "authenc(hmac(sha1),cbc(aes))",
2134 .cra_driver_name =
2135 "authenc(hmac(sha1),cbc(aes))-sa2ul",
2136 .cra_blocksize = AES_BLOCK_SIZE,
2137 .cra_flags = CRYPTO_ALG_TYPE_AEAD |
2138 CRYPTO_ALG_KERN_DRIVER_ONLY |
2139 CRYPTO_ALG_ASYNC |
2140 CRYPTO_ALG_NEED_FALLBACK,
2141 .cra_ctxsize = sizeof(struct sa_tfm_ctx),
2142 .cra_module = THIS_MODULE,
2143 .cra_priority = 3000,
2144 },
2145 .ivsize = AES_BLOCK_SIZE,
2146 .maxauthsize = SHA1_DIGEST_SIZE,
2147
2148 .init = sa_cra_init_aead_sha1,
2149 .exit = sa_exit_tfm_aead,
2150 .setkey = sa_aead_cbc_sha1_setkey,
2151 .setauthsize = sa_aead_setauthsize,
2152 .encrypt = sa_aead_encrypt,
2153 .decrypt = sa_aead_decrypt,
2154 },
2155 },
2156 {
2157 .type = CRYPTO_ALG_TYPE_AEAD,
2158 .alg.aead = {
2159 .base = {
2160 .cra_name = "authenc(hmac(sha256),cbc(aes))",
2161 .cra_driver_name =
2162 "authenc(hmac(sha256),cbc(aes))-sa2ul",
2163 .cra_blocksize = AES_BLOCK_SIZE,
2164 .cra_flags = CRYPTO_ALG_TYPE_AEAD |
2165 CRYPTO_ALG_KERN_DRIVER_ONLY |
2166 CRYPTO_ALG_ASYNC |
2167 CRYPTO_ALG_NEED_FALLBACK,
2168 .cra_ctxsize = sizeof(struct sa_tfm_ctx),
2169 .cra_module = THIS_MODULE,
2170 .cra_alignmask = 0,
2171 .cra_priority = 3000,
2172 },
2173 .ivsize = AES_BLOCK_SIZE,
2174 .maxauthsize = SHA256_DIGEST_SIZE,
2175
2176 .init = sa_cra_init_aead_sha256,
2177 .exit = sa_exit_tfm_aead,
2178 .setkey = sa_aead_cbc_sha256_setkey,
2179 .setauthsize = sa_aead_setauthsize,
2180 .encrypt = sa_aead_encrypt,
2181 .decrypt = sa_aead_decrypt,
2182 },
2183 },
2184 };
2185
2186 /* Register the algorithms in crypto framework */
2187 static void sa_register_algos(const struct device *dev)
2188 {
2189 char *alg_name;
2190 u32 type;
2191 int i, err;
2192
2193 for (i = 0; i < ARRAY_SIZE(sa_algs); i++) {
2194 type = sa_algs[i].type;
2195 if (type == CRYPTO_ALG_TYPE_SKCIPHER) {
2196 alg_name = sa_algs[i].alg.skcipher.base.cra_name;
2197 err = crypto_register_skcipher(&sa_algs[i].alg.skcipher);
2198 } else if (type == CRYPTO_ALG_TYPE_AHASH) {
2199 alg_name = sa_algs[i].alg.ahash.halg.base.cra_name;
2200 err = crypto_register_ahash(&sa_algs[i].alg.ahash);
2201 } else if (type == CRYPTO_ALG_TYPE_AEAD) {
2202 alg_name = sa_algs[i].alg.aead.base.cra_name;
2203 err = crypto_register_aead(&sa_algs[i].alg.aead);
2204 } else {
2205 dev_err(dev,
2206 				"unsupported crypto algorithm (%d)\n",
2207 sa_algs[i].type);
2208 continue;
2209 }
2210
2211 if (err)
2212 dev_err(dev, "Failed to register '%s'\n", alg_name);
2213 else
2214 sa_algs[i].registered = true;
2215 }
2216 }
2217
2218 /* Unregister the algorithms in crypto framework */
2219 static void sa_unregister_algos(const struct device *dev)
2220 {
2221 u32 type;
2222 int i;
2223
2224 for (i = 0; i < ARRAY_SIZE(sa_algs); i++) {
2225 type = sa_algs[i].type;
2226 if (!sa_algs[i].registered)
2227 continue;
2228 if (type == CRYPTO_ALG_TYPE_SKCIPHER)
2229 crypto_unregister_skcipher(&sa_algs[i].alg.skcipher);
2230 else if (type == CRYPTO_ALG_TYPE_AHASH)
2231 crypto_unregister_ahash(&sa_algs[i].alg.ahash);
2232 else if (type == CRYPTO_ALG_TYPE_AEAD)
2233 crypto_unregister_aead(&sa_algs[i].alg.aead);
2234
2235 sa_algs[i].registered = false;
2236 }
2237 }
2238
2239 static int sa_init_mem(struct sa_crypto_data *dev_data)
2240 {
2241 struct device *dev = &dev_data->pdev->dev;
2242 /* Setup dma pool for security context buffers */
2243 dev_data->sc_pool = dma_pool_create("keystone-sc", dev,
2244 SA_CTX_MAX_SZ, 64, 0);
2245 if (!dev_data->sc_pool) {
2246 		dev_err(dev, "Failed to create dma pool\n");
2247 return -ENOMEM;
2248 }
2249
2250 return 0;
2251 }
2252
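/*
 * Request the rx1, rx2 and tx DMA channels and apply a common slave
 * configuration to each of them.
 */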
2253 static int sa_dma_init(struct sa_crypto_data *dd)
2254 {
2255 int ret;
2256 struct dma_slave_config cfg;
2257
2258 dd->dma_rx1 = NULL;
2259 dd->dma_tx = NULL;
2260 dd->dma_rx2 = NULL;
2261
2262 ret = dma_coerce_mask_and_coherent(dd->dev, DMA_BIT_MASK(48));
2263 if (ret)
2264 return ret;
2265
2266 dd->dma_rx1 = dma_request_chan(dd->dev, "rx1");
2267 if (IS_ERR(dd->dma_rx1))
2268 return dev_err_probe(dd->dev, PTR_ERR(dd->dma_rx1),
2269 "Unable to request rx1 DMA channel\n");
2270
2271 dd->dma_rx2 = dma_request_chan(dd->dev, "rx2");
2272 if (IS_ERR(dd->dma_rx2)) {
2273 ret = dev_err_probe(dd->dev, PTR_ERR(dd->dma_rx2),
2274 "Unable to request rx2 DMA channel\n");
2275 goto err_dma_rx2;
2276 }
2277
2278 dd->dma_tx = dma_request_chan(dd->dev, "tx");
2279 if (IS_ERR(dd->dma_tx)) {
2280 ret = dev_err_probe(dd->dev, PTR_ERR(dd->dma_tx),
2281 "Unable to request tx DMA channel\n");
2282 goto err_dma_tx;
2283 }
2284
2285 memzero_explicit(&cfg, sizeof(cfg));
2286
2287 cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
2288 cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
2289 cfg.src_maxburst = 4;
2290 cfg.dst_maxburst = 4;
2291
2292 ret = dmaengine_slave_config(dd->dma_rx1, &cfg);
2293 if (ret) {
2294 dev_err(dd->dev, "can't configure IN dmaengine slave: %d\n",
2295 ret);
2296 goto err_dma_config;
2297 }
2298
2299 ret = dmaengine_slave_config(dd->dma_rx2, &cfg);
2300 if (ret) {
2301 dev_err(dd->dev, "can't configure IN dmaengine slave: %d\n",
2302 ret);
2303 goto err_dma_config;
2304 }
2305
2306 ret = dmaengine_slave_config(dd->dma_tx, &cfg);
2307 if (ret) {
2308 dev_err(dd->dev, "can't configure OUT dmaengine slave: %d\n",
2309 ret);
2310 goto err_dma_config;
2311 }
2312
2313 return 0;
2314
2315 err_dma_config:
2316 dma_release_channel(dd->dma_tx);
2317 err_dma_tx:
2318 dma_release_channel(dd->dma_rx2);
2319 err_dma_rx2:
2320 dma_release_channel(dd->dma_rx1);
2321
2322 return ret;
2323 }
2324
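/* Create a device link making each populated child device a consumer of the SA2UL */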
2325 static int sa_link_child(struct device *dev, void *data)
2326 {
2327 struct device *parent = data;
2328
2329 device_link_add(dev, parent, DL_FLAG_AUTOPROBE_CONSUMER);
2330
2331 return 0;
2332 }
2333
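/*
 * Probe: enable runtime PM, set up the security context DMA pool and the
 * DMA channels, enable the SA2UL engines and register the algorithms.
 */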
2334 static int sa_ul_probe(struct platform_device *pdev)
2335 {
2336 struct device *dev = &pdev->dev;
2337 struct device_node *node = dev->of_node;
2338 struct resource *res;
2339 static void __iomem *saul_base;
2340 struct sa_crypto_data *dev_data;
2341 u32 val;
2342 int ret;
2343
2344 dev_data = devm_kzalloc(dev, sizeof(*dev_data), GFP_KERNEL);
2345 if (!dev_data)
2346 return -ENOMEM;
2347
2348 sa_k3_dev = dev;
2349 dev_data->dev = dev;
2350 dev_data->pdev = pdev;
2351 platform_set_drvdata(pdev, dev_data);
2352 dev_set_drvdata(sa_k3_dev, dev_data);
2353
2354 pm_runtime_enable(dev);
2355 ret = pm_runtime_resume_and_get(dev);
2356 if (ret < 0) {
2357 dev_err(&pdev->dev, "%s: failed to get sync: %d\n", __func__,
2358 ret);
2359 pm_runtime_disable(dev);
2360 return ret;
2361 }
2362
2363 sa_init_mem(dev_data);
2364 ret = sa_dma_init(dev_data);
2365 if (ret)
2366 goto destroy_dma_pool;
2367
2368 spin_lock_init(&dev_data->scid_lock);
2369 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2370 saul_base = devm_ioremap_resource(dev, res);
2371
2372 dev_data->base = saul_base;
2373 val = SA_EEC_ENCSS_EN | SA_EEC_AUTHSS_EN | SA_EEC_CTXCACH_EN |
2374 SA_EEC_CPPI_PORT_IN_EN | SA_EEC_CPPI_PORT_OUT_EN |
2375 SA_EEC_TRNG_EN;
2376
2377 writel_relaxed(val, saul_base + SA_ENGINE_ENABLE_CONTROL);
2378
2379 sa_register_algos(dev);
2380
2381 ret = of_platform_populate(node, NULL, NULL, &pdev->dev);
2382 if (ret)
2383 goto release_dma;
2384
2385 device_for_each_child(&pdev->dev, &pdev->dev, sa_link_child);
2386
2387 return 0;
2388
2389 release_dma:
2390 sa_unregister_algos(&pdev->dev);
2391
2392 dma_release_channel(dev_data->dma_rx2);
2393 dma_release_channel(dev_data->dma_rx1);
2394 dma_release_channel(dev_data->dma_tx);
2395
2396 destroy_dma_pool:
2397 dma_pool_destroy(dev_data->sc_pool);
2398
2399 pm_runtime_put_sync(&pdev->dev);
2400 pm_runtime_disable(&pdev->dev);
2401
2402 return ret;
2403 }
2404
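/* Remove: tear down resources in the reverse order of probe */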
2405 static int sa_ul_remove(struct platform_device *pdev)
2406 {
2407 struct sa_crypto_data *dev_data = platform_get_drvdata(pdev);
2408
2409 sa_unregister_algos(&pdev->dev);
2410
2411 dma_release_channel(dev_data->dma_rx2);
2412 dma_release_channel(dev_data->dma_rx1);
2413 dma_release_channel(dev_data->dma_tx);
2414
2415 dma_pool_destroy(dev_data->sc_pool);
2416
2417 platform_set_drvdata(pdev, NULL);
2418
2419 pm_runtime_put_sync(&pdev->dev);
2420 pm_runtime_disable(&pdev->dev);
2421
2422 return 0;
2423 }
2424
2425 static const struct of_device_id of_match[] = {
2426 {.compatible = "ti,j721e-sa2ul",},
2427 {.compatible = "ti,am654-sa2ul",},
2428 {},
2429 };
2430 MODULE_DEVICE_TABLE(of, of_match);
2431
2432 static struct platform_driver sa_ul_driver = {
2433 .probe = sa_ul_probe,
2434 .remove = sa_ul_remove,
2435 .driver = {
2436 .name = "saul-crypto",
2437 .of_match_table = of_match,
2438 },
2439 };
2440 module_platform_driver(sa_ul_driver);
2441 MODULE_LICENSE("GPL v2");
2442