1commit e456233df67552708dc684d24404cbfa6ca75451
2Author: zhaoxc0502 <zhaoxc0502@thundersoft.com>
3Date:   Sat Jul 16 10:34:11 2022 +0800
4
5    0016_linux_drivers_crypto
6
7    Change-Id: I7e862e8b6ebf135f53df0e7dd7d7e0dd299c448e
8
9diff --git a/drivers/crypto/caam/Kconfig b/drivers/crypto/caam/Kconfig
10index 84ea7cba5..283afcd42 100644
11--- a/drivers/crypto/caam/Kconfig
12+++ b/drivers/crypto/caam/Kconfig
13@@ -8,6 +8,17 @@ config CRYPTO_DEV_FSL_CAAM_CRYPTO_API_DESC
14 config CRYPTO_DEV_FSL_CAAM_AHASH_API_DESC
15 	tristate
16
17+config CRYPTO_DEV_FSL_CAAM_KEYBLOB_API_DESC
18+	tristate
19+
20+config CRYPTO_DEV_FSL_CAAM_SECVIO
21+	tristate "CAAM/SNVS Security Violation Handler"
22+	depends on ARCH_MXC
23+	help
24+	  Enables installation of an interrupt handler with registrable
25+	  handler functions which can be specified to act on the consequences
26+	  of a security violation.
27+
28 config CRYPTO_DEV_FSL_CAAM
29 	tristate "Freescale CAAM-Multicore platform driver backend"
30 	depends on FSL_SOC || ARCH_MXC || ARCH_LAYERSCAPE
31@@ -109,7 +120,7 @@ config CRYPTO_DEV_FSL_CAAM_CRYPTO_API
32
33 config CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI
34 	bool "Queue Interface as Crypto API backend"
35-	depends on FSL_DPAA && NET
36+	depends on FSL_SDK_DPA && NET
37 	default y
38 	select CRYPTO_DEV_FSL_CAAM_CRYPTO_API_DESC
39 	select CRYPTO_AUTHENC
40@@ -151,6 +162,71 @@ config CRYPTO_DEV_FSL_CAAM_RNG_API
41 	  Selecting this will register the SEC4 hardware rng to
42 	  the hw_random API for supplying the kernel entropy pool.
43
44+config CRYPTO_DEV_FSL_CAAM_TK_API
45+	bool "Register tagged key cryptography implementations with Crypto API"
46+	default y
47+	select CRYPTO_DEV_FSL_CAAM_CRYPTO_API
48+	select CRYPTO_DEV_FSL_CAAM_KEYBLOB_API_DESC
49+	help
50+	  Selecting this will register algorithms supporting tagged keys,
51+	  generate black keys and encapsulate them into black blobs.
52+
53+	  Tagged keys are black keys that contain metadata indicating what
54+	  they are and how to handle them.
55+	  CAAM protects data in a data structure called a Blob, which provides
56+	  both confidentiality and integrity protection.
57+
58+config CRYPTO_DEV_FSL_CAAM_RNG_TEST
59+	bool "Test caam rng"
60+	depends on CRYPTO_DEV_FSL_CAAM_RNG_API
61+	help
62+	  Selecting this will enable a self-test to run for the
63+	  caam RNG. This test is several minutes long and executes
64+	  just before the RNG is registered with the hw_random API.
65+
66+config CRYPTO_DEV_FSL_CAAM_SM
67+	bool "CAAM Secure Memory / Keystore API"
68+	default y
69+	help
70+	  Enables use of a prototype kernel-level Keystore API with CAAM
71+	  Secure Memory for insertion/extraction of bus-protected secrets.
72+
73+config CRYPTO_DEV_FSL_CAAM_SM_SLOTSIZE
74+	int "Size of each keystore slot in Secure Memory"
75+	depends on CRYPTO_DEV_FSL_CAAM_SM
76+	range 5 9
77+	default 7
78+	help
79+	  Select size of allocation units to divide Secure Memory pages into
80+	  (the size of a "slot" as referenced inside the API code).
81+	  Established as powers of two.
82+	  Examples:
83+		5 => 32 bytes
84+		6 => 64 bytes
85+		7 => 128 bytes
86+		8 => 256 bytes
87+		9 => 512 bytes
88+
89+config CRYPTO_DEV_FSL_CAAM_SM_TEST
90+	tristate "CAAM Secure Memory - Keystore Test/Example"
91+	depends on CRYPTO_DEV_FSL_CAAM_SM
92+	depends on m
93+	help
94+	  Example thread to exercise the Keystore API and to verify that
95+	  stored and recovered secrets can be used for general purpose
96+	  encryption/decryption.
97+
98+config CRYPTO_DEV_FSL_CAAM_JR_UIO
99+	tristate "Freescale Job Ring UIO support"
100+	depends on UIO
101+	default m
102+	help
103+	  Selecting this will allow job ring UIO support for
104+	  userspace drivers.
105+
106+	  To compile this as a module, choose M here: the module
107+	  will be called fsl_jr_uio.
108+
109 endif # CRYPTO_DEV_FSL_CAAM_JR
110
111 endif # CRYPTO_DEV_FSL_CAAM
112diff --git a/drivers/crypto/caam/Makefile b/drivers/crypto/caam/Makefile
113index 3570286eb..1f7670419 100644
114--- a/drivers/crypto/caam/Makefile
115+++ b/drivers/crypto/caam/Makefile
116@@ -13,14 +13,20 @@ obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM) += caam.o
117 obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_JR) += caam_jr.o
118 obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API_DESC) += caamalg_desc.o
119 obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_AHASH_API_DESC) += caamhash_desc.o
120+obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_KEYBLOB_API_DESC) += caamkeyblob_desc.o
121+obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_JR_UIO) += fsl_jr_uio.o
122
123 caam-y := ctrl.o
124 caam_jr-y := jr.o key_gen.o
125+caam_jr-$(CONFIG_CRYPTO_DEV_FSL_CAAM_TK_API) += tag_object.o caamkeyblob.o caamkeygen.o
126 caam_jr-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API) += caamalg.o
127 caam_jr-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI) += caamalg_qi.o
128 caam_jr-$(CONFIG_CRYPTO_DEV_FSL_CAAM_AHASH_API) += caamhash.o
129 caam_jr-$(CONFIG_CRYPTO_DEV_FSL_CAAM_RNG_API) += caamrng.o
130 caam_jr-$(CONFIG_CRYPTO_DEV_FSL_CAAM_PKC_API) += caampkc.o pkc_desc.o
131+caam_jr-$(CONFIG_CRYPTO_DEV_FSL_CAAM_SM) += sm_store.o
132+obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_SM_TEST) += sm_test.o
133+obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_SECVIO) += secvio.o
134
135 caam-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI) += qi.o
136 ifneq ($(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI),)
137diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c
138index 8697ae53b..46dcf0728 100644
139--- a/drivers/crypto/caam/caamalg.c
140+++ b/drivers/crypto/caam/caamalg.c
141@@ -3,7 +3,7 @@
142  * caam - Freescale FSL CAAM support for crypto API
143  *
144  * Copyright 2008-2011 Freescale Semiconductor, Inc.
145- * Copyright 2016-2019 NXP
146+ * Copyright 2016-2020 NXP
147  *
148  * Based on talitos crypto API driver.
149  *
150@@ -60,6 +60,10 @@
151 #include <crypto/xts.h>
152 #include <asm/unaligned.h>
153
154+#ifdef CONFIG_CRYPTO_DEV_FSL_CAAM_TK_API
155+#include "tag_object.h"
156+#endif /* CONFIG_CRYPTO_DEV_FSL_CAAM_TK_API */
157+
158 /*
159  * crypto alg
160  */
161@@ -86,6 +90,7 @@ struct caam_alg_entry {
162 	bool rfc3686;
163 	bool geniv;
164 	bool nodkp;
165+	bool support_tagged_key;
166 };
167
168 struct caam_aead_alg {
169@@ -738,12 +743,19 @@ static int skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
170 	u32 *desc;
171 	const bool is_rfc3686 = alg->caam.rfc3686;
172
173-	print_hex_dump_debug("key in @"__stringify(__LINE__)": ",
174-			     DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
175+	/*
176+	 * If the algorithm supports tagged keys, the key details were
177+	 * already set in tk_skcipher_setkey().
178+	 * Otherwise, set them here.
179+	 */
180+	if (!alg->caam.support_tagged_key) {
181+		ctx->cdata.keylen = keylen;
182+		ctx->cdata.key_virt = key;
183+		ctx->cdata.key_inline = true;
184+	}
185
186-	ctx->cdata.keylen = keylen;
187-	ctx->cdata.key_virt = key;
188-	ctx->cdata.key_inline = true;
189+	print_hex_dump_debug("key in @" __stringify(__LINE__) ": ",
190+			     DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
191
192 	/* skcipher_encrypt shared descriptor */
193 	desc = ctx->sh_desc_enc;
194@@ -815,6 +827,63 @@ static int ctr_skcipher_setkey(struct crypto_skcipher *skcipher,
195 	return skcipher_setkey(skcipher, key, keylen, ctx1_iv_off);
196 }
197
198+
199+#ifdef CONFIG_CRYPTO_DEV_FSL_CAAM_TK_API
200+static int tk_skcipher_setkey(struct crypto_skcipher *skcipher,
201+			      const u8 *key, unsigned int keylen)
202+{
203+	struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
204+	struct device *jrdev = ctx->jrdev;
205+	struct header_conf *header;
206+	struct tagged_object *tag_obj;
207+	int ret;
208+
209+	ctx->cdata.key_inline = true;
210+
211+	/* Check that the key is large enough to hold a tag object header */
212+	if (keylen <= TAG_OVERHEAD_SIZE)
213+		return -EINVAL;
214+
215+	/* Retrieve the tag object */
216+	tag_obj = (struct tagged_object *)key;
217+
218+	/*
219+	 * Validate the tag object header configuration and,
220+	 * if valid, take a pointer to it
221+	 */
222+	if (is_valid_header_conf(&tag_obj->header)) {
223+		header = &tag_obj->header;
224+	} else {
225+		dev_err(jrdev,
226+			"unable to get tag object header configuration\n");
227+		return -EINVAL;
228+	}
229+
230+	/* Check if the tag object header is a black key */
231+	if (!is_black_key(header)) {
232+		dev_err(jrdev,
233+			"tagged key provided is not a black key\n");
234+		return -EINVAL;
235+	}
236+
237+	/* Retrieve the black key configuration */
238+	get_key_conf(header,
239+		     &ctx->cdata.key_real_len,
240+		     &ctx->cdata.keylen,
241+		     &ctx->cdata.key_cmd_opt);
242+
243+	/* Retrieve the address of the tagged object's payload */
244+	ctx->cdata.key_virt = &tag_obj->object;
245+
246+	/* Validate key length for AES algorithms */
247+	ret = aes_check_keylen(ctx->cdata.key_real_len);
248+	if (ret)
249+		return ret;
250+
251+	return skcipher_setkey(skcipher, NULL, 0, 0);
252+}
253+#endif /* CONFIG_CRYPTO_DEV_FSL_CAAM_TK_API */
254+
255 static int des_skcipher_setkey(struct crypto_skcipher *skcipher,
256 			       const u8 *key, unsigned int keylen)
257 {
258@@ -1874,6 +1943,25 @@ static struct caam_skcipher_alg driver_algs[] = {
259 		},
260 		.caam.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
261 	},
262+#ifdef CONFIG_CRYPTO_DEV_FSL_CAAM_TK_API
263+	{
264+		.skcipher = {
265+			.base = {
266+				.cra_name = "tk(cbc(aes))",
267+				.cra_driver_name = "tk-cbc-aes-caam",
268+				.cra_blocksize = AES_BLOCK_SIZE,
269+			},
270+			.setkey = tk_skcipher_setkey,
271+			.encrypt = skcipher_encrypt,
272+			.decrypt = skcipher_decrypt,
273+			.min_keysize = TAG_MIN_SIZE,
274+			.max_keysize = CAAM_MAX_KEY_SIZE,
275+			.ivsize = AES_BLOCK_SIZE,
276+		},
277+		.caam.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
278+		.caam.support_tagged_key = true,
279+	},
280+#endif /* CONFIG_CRYPTO_DEV_FSL_CAAM_TK_API */
281 	{
282 		.skcipher = {
283 			.base = {
284@@ -1994,6 +2082,24 @@ static struct caam_skcipher_alg driver_algs[] = {
285 		},
286 		.caam.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_ECB,
287 	},
288+#ifdef CONFIG_CRYPTO_DEV_FSL_CAAM_TK_API
289+	{
290+		.skcipher = {
291+			.base = {
292+				.cra_name = "tk(ecb(aes))",
293+				.cra_driver_name = "tk-ecb-aes-caam",
294+				.cra_blocksize = AES_BLOCK_SIZE,
295+			},
296+			.setkey = tk_skcipher_setkey,
297+			.encrypt = skcipher_encrypt,
298+			.decrypt = skcipher_decrypt,
299+			.min_keysize = TAG_MIN_SIZE,
300+			.max_keysize = CAAM_MAX_KEY_SIZE,
301+		},
302+		.caam.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_ECB,
303+		.caam.support_tagged_key = true,
304+	},
305+#endif /* CONFIG_CRYPTO_DEV_FSL_CAAM_TK_API */
306 	{
307 		.skcipher = {
308 			.base = {
309@@ -3520,13 +3626,14 @@ int caam_algapi_init(struct device *ctrldev)
310 	 * First, detect presence and attributes of DES, AES, and MD blocks.
311 	 */
312 	if (priv->era < 10) {
313+		struct caam_perfmon __iomem *perfmon = &priv->jr[0]->perfmon;
314 		u32 cha_vid, cha_inst, aes_rn;
315
316-		cha_vid = rd_reg32(&priv->ctrl->perfmon.cha_id_ls);
317+		cha_vid = rd_reg32(&perfmon->cha_id_ls);
318 		aes_vid = cha_vid & CHA_ID_LS_AES_MASK;
319 		md_vid = (cha_vid & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;
320
321-		cha_inst = rd_reg32(&priv->ctrl->perfmon.cha_num_ls);
322+		cha_inst = rd_reg32(&perfmon->cha_num_ls);
323 		des_inst = (cha_inst & CHA_ID_LS_DES_MASK) >>
324 			   CHA_ID_LS_DES_SHIFT;
325 		aes_inst = cha_inst & CHA_ID_LS_AES_MASK;
326@@ -3534,23 +3641,23 @@ int caam_algapi_init(struct device *ctrldev)
327 		ccha_inst = 0;
328 		ptha_inst = 0;
329
330-		aes_rn = rd_reg32(&priv->ctrl->perfmon.cha_rev_ls) &
331-			 CHA_ID_LS_AES_MASK;
332+		aes_rn = rd_reg32(&perfmon->cha_rev_ls) & CHA_ID_LS_AES_MASK;
333 		gcm_support = !(aes_vid == CHA_VER_VID_AES_LP && aes_rn < 8);
334 	} else {
335+		struct version_regs __iomem *vreg = &priv->jr[0]->vreg;
336 		u32 aesa, mdha;
337
338-		aesa = rd_reg32(&priv->ctrl->vreg.aesa);
339-		mdha = rd_reg32(&priv->ctrl->vreg.mdha);
340+		aesa = rd_reg32(&vreg->aesa);
341+		mdha = rd_reg32(&vreg->mdha);
342
343 		aes_vid = (aesa & CHA_VER_VID_MASK) >> CHA_VER_VID_SHIFT;
344 		md_vid = (mdha & CHA_VER_VID_MASK) >> CHA_VER_VID_SHIFT;
345
346-		des_inst = rd_reg32(&priv->ctrl->vreg.desa) & CHA_VER_NUM_MASK;
347+		des_inst = rd_reg32(&vreg->desa) & CHA_VER_NUM_MASK;
348 		aes_inst = aesa & CHA_VER_NUM_MASK;
349 		md_inst = mdha & CHA_VER_NUM_MASK;
350-		ccha_inst = rd_reg32(&priv->ctrl->vreg.ccha) & CHA_VER_NUM_MASK;
351-		ptha_inst = rd_reg32(&priv->ctrl->vreg.ptha) & CHA_VER_NUM_MASK;
352+		ccha_inst = rd_reg32(&vreg->ccha) & CHA_VER_NUM_MASK;
353+		ptha_inst = rd_reg32(&vreg->ptha) & CHA_VER_NUM_MASK;
354
355 		gcm_support = aesa & CHA_VER_MISC_AES_GCM;
356 	}
357diff --git a/drivers/crypto/caam/caamalg_desc.c b/drivers/crypto/caam/caamalg_desc.c
358index 7571e1ac9..fa1309e90 100644
359--- a/drivers/crypto/caam/caamalg_desc.c
360+++ b/drivers/crypto/caam/caamalg_desc.c
361@@ -622,6 +622,420 @@ void cnstr_shdsc_aead_givencap(u32 * const desc, struct alginfo *cdata,
362 }
363 EXPORT_SYMBOL(cnstr_shdsc_aead_givencap);
364
365+/**
366+ * cnstr_shdsc_tls_encap - tls encapsulation shared descriptor
367+ * @desc: pointer to buffer used for descriptor construction
368+ * @cdata: pointer to block cipher transform definitions
369+ *         Valid algorithm values - one of OP_ALG_ALGSEL_AES ORed
370+ *         with OP_ALG_AAI_CBC
371+ * @adata: pointer to authentication transform definitions.
372+ *         A split key is required for SEC Era < 6; the size of the split key
373+ *         is specified in this case. Valid algorithm values OP_ALG_ALGSEL_SHA1
374+ *         ORed with OP_ALG_AAI_HMAC_PRECOMP.
375+ * @assoclen: associated data length
376+ * @ivsize: initialization vector size
377+ * @authsize: authentication data size
378+ * @blocksize: block cipher size
379+ * @era: SEC Era
380+ */
381+void cnstr_shdsc_tls_encap(u32 * const desc, struct alginfo *cdata,
382+			   struct alginfo *adata, unsigned int assoclen,
383+			   unsigned int ivsize, unsigned int authsize,
384+			   unsigned int blocksize, int era)
385+{
386+	u32 *key_jump_cmd, *zero_payload_jump_cmd;
387+	u32 genpad, idx_ld_datasz, idx_ld_pad, stidx;
388+
389+	/*
390+	 * Compute the index (in bytes) for the LOAD with destination of
391+	 * Class 1 Data Size Register and for the LOAD that generates padding
392+	 */
393+	if (adata->key_inline) {
394+		idx_ld_datasz = DESC_TLS10_ENC_LEN + adata->keylen_pad +
395+				cdata->keylen - 4 * CAAM_CMD_SZ;
396+		idx_ld_pad = DESC_TLS10_ENC_LEN + adata->keylen_pad +
397+			     cdata->keylen - 2 * CAAM_CMD_SZ;
398+	} else {
399+		idx_ld_datasz = DESC_TLS10_ENC_LEN + 2 * CAAM_PTR_SZ -
400+				4 * CAAM_CMD_SZ;
401+		idx_ld_pad = DESC_TLS10_ENC_LEN + 2 * CAAM_PTR_SZ -
402+			     2 * CAAM_CMD_SZ;
403+	}
404+
405+	stidx = 1 << HDR_START_IDX_SHIFT;
406+	init_sh_desc(desc, HDR_SHARE_SERIAL | stidx);
407+
408+	/* skip key loading if they are loaded due to sharing */
409+	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
410+				   JUMP_COND_SHRD);
411+
412+	if (era < 6) {
413+		if (adata->key_inline)
414+			append_key_as_imm(desc, adata->key_virt,
415+					  adata->keylen_pad, adata->keylen,
416+					  CLASS_2 | KEY_DEST_MDHA_SPLIT |
417+					  KEY_ENC);
418+		else
419+			append_key(desc, adata->key_dma, adata->keylen,
420+				   CLASS_2 | KEY_DEST_MDHA_SPLIT | KEY_ENC);
421+	} else {
422+		append_proto_dkp(desc, adata);
423+	}
424+
425+	if (cdata->key_inline)
426+		append_key_as_imm(desc, cdata->key_virt, cdata->keylen,
427+				  cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG);
428+	else
429+		append_key(desc, cdata->key_dma, cdata->keylen, CLASS_1 |
430+			   KEY_DEST_CLASS_REG);
431+
432+	set_jump_tgt_here(desc, key_jump_cmd);
433+
434+	/* class 2 operation */
435+	append_operation(desc, adata->algtype | OP_ALG_AS_INITFINAL |
436+			 OP_ALG_ENCRYPT);
437+	/* class 1 operation */
438+	append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
439+			 OP_ALG_ENCRYPT);
440+
441+	/* payloadlen = input data length - (assoclen + ivlen) */
442+	append_math_sub_imm_u32(desc, REG0, SEQINLEN, IMM, assoclen + ivsize);
443+
444+	/* math1 = payloadlen + icvlen */
445+	append_math_add_imm_u32(desc, REG1, REG0, IMM, authsize);
446+
447+	/* padlen = block_size - math1 % block_size */
448+	append_math_and_imm_u32(desc, REG3, REG1, IMM, blocksize - 1);
449+	append_math_sub_imm_u32(desc, REG2, IMM, REG3, blocksize);
450+
451+	/* cryptlen = payloadlen + icvlen + padlen */
452+	append_math_add(desc, VARSEQOUTLEN, REG1, REG2, 4);
453+
454+	/*
455+	 * update immediate data with the padding length value
456+	 * for the LOAD in the class 1 data size register.
457+	 */
458+	append_move(desc, MOVE_SRC_DESCBUF | MOVE_DEST_MATH2 |
459+			(idx_ld_datasz << MOVE_OFFSET_SHIFT) | 7);
460+	append_move(desc, MOVE_WAITCOMP | MOVE_SRC_MATH2 | MOVE_DEST_DESCBUF |
461+			(idx_ld_datasz << MOVE_OFFSET_SHIFT) | 8);
462+
463+	/* overwrite PL field for the padding info FIFO entry */
464+	append_move(desc, MOVE_SRC_DESCBUF | MOVE_DEST_MATH2 |
465+			(idx_ld_pad << MOVE_OFFSET_SHIFT) | 7);
466+	append_move(desc, MOVE_WAITCOMP | MOVE_SRC_MATH2 | MOVE_DEST_DESCBUF |
467+			(idx_ld_pad << MOVE_OFFSET_SHIFT) | 8);
468+
469+	/* store encrypted payload, icv and padding */
470+	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | LDST_VLF);
471+
472+	/* if payload length is zero, jump to zero-payload commands */
473+	append_math_add(desc, VARSEQINLEN, ZERO, REG0, 4);
474+	zero_payload_jump_cmd = append_jump(desc, JUMP_TEST_ALL |
475+					    JUMP_COND_MATH_Z);
476+
477+	/* load iv in context1 */
478+	append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_WORD_CLASS_CTX |
479+		   LDST_CLASS_1_CCB | ivsize);
480+
481+	/* read assoc for authentication */
482+	append_seq_fifo_load(desc, assoclen, FIFOLD_CLASS_CLASS2 |
483+			     FIFOLD_TYPE_MSG);
484+	/* insnooping payload */
485+	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH | FIFOLD_TYPE_MSG |
486+			     FIFOLD_TYPE_LAST2 | FIFOLDST_VLF);
487+
488+	/* jump over the zero-payload commands */
489+	append_jump(desc, JUMP_TEST_ALL | 3);
490+
491+	/* zero-payload commands */
492+	set_jump_tgt_here(desc, zero_payload_jump_cmd);
493+
494+	/* load iv in context1 */
495+	append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_WORD_CLASS_CTX |
496+		   LDST_CLASS_1_CCB | ivsize);
497+
498+	/* assoc data is the only data for authentication */
499+	append_seq_fifo_load(desc, assoclen, FIFOLD_CLASS_CLASS2 |
500+			     FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST2);
501+
502+	/* send icv to encryption */
503+	append_move(desc, MOVE_SRC_CLASS2CTX | MOVE_DEST_CLASS1INFIFO |
504+		    authsize);
505+
506+	/* update class 1 data size register with padding length */
507+	append_load_imm_u32(desc, 0, LDST_CLASS_1_CCB |
508+			    LDST_SRCDST_WORD_DATASZ_REG | LDST_IMM);
509+
510+	/* generate padding and send it to encryption */
511+	genpad = NFIFOENTRY_DEST_CLASS1 | NFIFOENTRY_LC1 | NFIFOENTRY_FC1 |
512+	      NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DTYPE_MSG | NFIFOENTRY_PTYPE_N;
513+	append_load_imm_u32(desc, genpad, LDST_CLASS_IND_CCB |
514+			    LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
515+
516+#ifdef DEBUG
517+	print_hex_dump(KERN_ERR, "tls enc shdesc@" __stringify(__LINE__) ": ",
518+		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
519+		       desc_bytes(desc), 1);
520+#endif
521+}
522+EXPORT_SYMBOL(cnstr_shdsc_tls_encap);
523+
524+/**
525+ * cnstr_shdsc_tls_decap - tls decapsulation shared descriptor
526+ * @desc: pointer to buffer used for descriptor construction
527+ * @cdata: pointer to block cipher transform definitions
528+ *         Valid algorithm values - one of OP_ALG_ALGSEL_AES ORed
529+ *         with OP_ALG_AAI_CBC
530+ * @adata: pointer to authentication transform definitions.
531+ *         A split key is required for SEC Era < 6; the size of the split key
532+ *         is specified in this case. Valid algorithm values OP_ALG_ALGSEL_SHA1
533+ *         ORed with OP_ALG_AAI_HMAC_PRECOMP.
534+ * @assoclen: associated data length
535+ * @ivsize: initialization vector size
536+ * @authsize: authentication data size
537+ * @blocksize: block cipher size
538+ * @era: SEC Era
539+ */
540+void cnstr_shdsc_tls_decap(u32 * const desc, struct alginfo *cdata,
541+			   struct alginfo *adata, unsigned int assoclen,
542+			   unsigned int ivsize, unsigned int authsize,
543+			   unsigned int blocksize, int era)
544+{
545+	u32 stidx, jumpback;
546+	u32 *key_jump_cmd, *zero_payload_jump_cmd, *skip_zero_jump_cmd;
547+	/*
548+	 * Pointer Size bool determines the size of address pointers.
549+	 * false - Pointers fit in one 32-bit word.
550+	 * true - Pointers fit in two 32-bit words.
551+	 */
552+	bool ps = (CAAM_PTR_SZ != CAAM_CMD_SZ);
553+
554+	stidx = 1 << HDR_START_IDX_SHIFT;
555+	init_sh_desc(desc, HDR_SHARE_SERIAL | stidx);
556+
557+	/* skip key loading if they are loaded due to sharing */
558+	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
559+				   JUMP_COND_SHRD);
560+
561+	if (era < 6)
562+		append_key(desc, adata->key_dma, adata->keylen, CLASS_2 |
563+			   KEY_DEST_MDHA_SPLIT | KEY_ENC);
564+	else
565+		append_proto_dkp(desc, adata);
566+
567+	append_key(desc, cdata->key_dma, cdata->keylen, CLASS_1 |
568+		   KEY_DEST_CLASS_REG);
569+
570+	set_jump_tgt_here(desc, key_jump_cmd);
571+
572+	/* class 2 operation */
573+	append_operation(desc, adata->algtype | OP_ALG_AS_INITFINAL |
574+			 OP_ALG_DECRYPT | OP_ALG_ICV_ON);
575+	/* class 1 operation */
576+	append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
577+			 OP_ALG_DECRYPT);
578+
579+	/* VSIL = input data length - 2 * block_size */
580+	append_math_sub_imm_u32(desc, VARSEQINLEN, SEQINLEN, IMM, 2 *
581+				blocksize);
582+
583+	/*
584+	 * payloadlen + icvlen + padlen = input data length - (assoclen +
585+	 * ivsize)
586+	 */
587+	append_math_sub_imm_u32(desc, REG3, SEQINLEN, IMM, assoclen + ivsize);
588+
589+	/* skip data to the last but one cipher block */
590+	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_SKIP | LDST_VLF);
591+
592+	/* load iv for the last cipher block */
593+	append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_WORD_CLASS_CTX |
594+		   LDST_CLASS_1_CCB | ivsize);
595+
596+	/* read last cipher block */
597+	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_MSG |
598+			     FIFOLD_TYPE_LAST1 | blocksize);
599+
600+	/* move decrypted block into math0 and math1 */
601+	append_move(desc, MOVE_WAITCOMP | MOVE_SRC_OUTFIFO | MOVE_DEST_MATH0 |
602+		    blocksize);
603+
604+	/* reset AES CHA */
605+	append_load_imm_u32(desc, CCTRL_RESET_CHA_AESA, LDST_CLASS_IND_CCB |
606+			    LDST_SRCDST_WORD_CHACTRL | LDST_IMM);
607+
608+	/* rewind input sequence */
609+	append_seq_in_ptr_intlen(desc, 0, 65535, SQIN_RTO);
610+
611+	/* key1 is in decryption form */
612+	append_operation(desc, cdata->algtype | OP_ALG_AAI_DK |
613+			 OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT);
614+
615+	/* load iv in context1 */
616+	append_cmd(desc, CMD_SEQ_LOAD | LDST_CLASS_1_CCB |
617+		   LDST_SRCDST_WORD_CLASS_CTX | ivsize);
618+
619+	/* read sequence number */
620+	append_seq_fifo_load(desc, 8, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG);
621+	/* load Type, Version and Len fields in math0 */
622+	append_cmd(desc, CMD_SEQ_LOAD | LDST_CLASS_DECO |
623+		   LDST_SRCDST_WORD_DECO_MATH0 | (3 << LDST_OFFSET_SHIFT) | 5);
624+
625+	/* compute (padlen - 1) */
626+	append_math_and_imm_u64(desc, REG1, REG1, IMM, 255);
627+
628+	/* math2 = icvlen + (padlen - 1) + 1 */
629+	append_math_add_imm_u32(desc, REG2, REG1, IMM, authsize + 1);
630+
631+	append_jump(desc, JUMP_TEST_ALL | JUMP_COND_CALM | 1);
632+
633+	/* VSOL = payloadlen + icvlen + padlen */
634+	append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, 4);
635+
636+	if (caam_little_end)
637+		append_moveb(desc, MOVE_WAITCOMP |
638+			     MOVE_SRC_MATH0 | MOVE_DEST_MATH0 | 8);
639+
640+	/* update Len field */
641+	append_math_sub(desc, REG0, REG0, REG2, 8);
642+
643+	/* store decrypted payload, icv and padding */
644+	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | LDST_VLF);
645+
646+	/* VSIL = (payloadlen + icvlen + padlen) - (icvlen + padlen) */
647+	append_math_sub(desc, VARSEQINLEN, REG3, REG2, 4);
648+
649+	zero_payload_jump_cmd = append_jump(desc, JUMP_TEST_ALL |
650+					    JUMP_COND_MATH_Z);
651+
652+	/* send Type, Version and Len(pre ICV) fields to authentication */
653+	append_move(desc, MOVE_WAITCOMP |
654+		    MOVE_SRC_MATH0 | MOVE_DEST_CLASS2INFIFO |
655+		    (3 << MOVE_OFFSET_SHIFT) | 5);
656+
657+	/* outsnooping payload */
658+	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH |
659+			     FIFOLD_TYPE_MSG1OUT2 | FIFOLD_TYPE_LAST2 |
660+			     FIFOLDST_VLF);
661+	skip_zero_jump_cmd = append_jump(desc, JUMP_TEST_ALL | 2);
662+
663+	set_jump_tgt_here(desc, zero_payload_jump_cmd);
664+	/* send Type, Version and Len(pre ICV) fields to authentication */
665+	append_move(desc, MOVE_WAITCOMP | MOVE_AUX_LS |
666+		    MOVE_SRC_MATH0 | MOVE_DEST_CLASS2INFIFO |
667+		    (3 << MOVE_OFFSET_SHIFT) | 5);
668+
669+	set_jump_tgt_here(desc, skip_zero_jump_cmd);
670+	append_math_add(desc, VARSEQINLEN, ZERO, REG2, 4);
671+
672+	/* load icvlen and padlen */
673+	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_MSG |
674+			     FIFOLD_TYPE_LAST1 | FIFOLDST_VLF);
675+
676+	/* VSIL = (payloadlen + icvlen + padlen) - (icvlen + padlen) */
677+	append_math_sub(desc, VARSEQINLEN, REG3, REG2, 4);
678+
679+	/*
680+	 * Start a new input sequence using the SEQ OUT PTR command options,
681+	 * pointer and length used when the current output sequence was defined.
682+	 */
683+	if (ps) {
684+		/*
685+		 * Move the lower 32 bits of Shared Descriptor address, the
686+		 * SEQ OUT PTR command, Output Pointer (2 words) and
687+		 * Output Length into math registers.
688+		 */
689+		if (caam_little_end)
690+			append_move(desc, MOVE_WAITCOMP | MOVE_SRC_DESCBUF |
691+				    MOVE_DEST_MATH0 |
692+				    (55 * 4 << MOVE_OFFSET_SHIFT) | 20);
693+		else
694+			append_move(desc, MOVE_WAITCOMP | MOVE_SRC_DESCBUF |
695+				    MOVE_DEST_MATH0 |
696+				    (54 * 4 << MOVE_OFFSET_SHIFT) | 20);
697+
698+		/* Transform SEQ OUT PTR command in SEQ IN PTR command */
699+		append_math_and_imm_u32(desc, REG0, REG0, IMM,
700+					~(CMD_SEQ_IN_PTR ^ CMD_SEQ_OUT_PTR));
701+		/* Append a JUMP command after the copied fields */
702+		jumpback = CMD_JUMP | (char)-9;
703+		append_load_imm_u32(desc, jumpback, LDST_CLASS_DECO | LDST_IMM |
704+				    LDST_SRCDST_WORD_DECO_MATH2 |
705+				    (4 << LDST_OFFSET_SHIFT));
706+		append_jump(desc, JUMP_TEST_ALL | JUMP_COND_CALM | 1);
707+		/* Move the updated fields back to the Job Descriptor */
708+		if (caam_little_end)
709+			append_move(desc, MOVE_WAITCOMP | MOVE_SRC_MATH0 |
710+				    MOVE_DEST_DESCBUF |
711+				    (55 * 4 << MOVE_OFFSET_SHIFT) | 24);
712+		else
713+			append_move(desc, MOVE_WAITCOMP | MOVE_SRC_MATH0 |
714+				    MOVE_DEST_DESCBUF |
715+				    (54 * 4 << MOVE_OFFSET_SHIFT) | 24);
716+
717+		/*
718+		 * Read the new SEQ IN PTR command, Input Pointer, Input Length
719+		 * and then jump back to the next command from the
720+		 * Shared Descriptor.
721+		 */
722+		append_jump(desc, JUMP_TEST_ALL | JUMP_COND_CALM | 6);
723+	} else {
724+		/*
725+		 * Move the SEQ OUT PTR command, Output Pointer (1 word) and
726+		 * Output Length into math registers.
727+		 */
728+		if (caam_little_end)
729+			append_move(desc, MOVE_WAITCOMP | MOVE_SRC_DESCBUF |
730+				    MOVE_DEST_MATH0 |
731+				    (54 * 4 << MOVE_OFFSET_SHIFT) | 12);
732+		else
733+			append_move(desc, MOVE_WAITCOMP | MOVE_SRC_DESCBUF |
734+				    MOVE_DEST_MATH0 |
735+				    (53 * 4 << MOVE_OFFSET_SHIFT) | 12);
736+
737+		/* Transform SEQ OUT PTR command in SEQ IN PTR command */
738+		append_math_and_imm_u64(desc, REG0, REG0, IMM,
739+					~(((u64)(CMD_SEQ_IN_PTR ^
740+						 CMD_SEQ_OUT_PTR)) << 32));
741+		/* Append a JUMP command after the copied fields */
742+		jumpback = CMD_JUMP | (char)-7;
743+		append_load_imm_u32(desc, jumpback, LDST_CLASS_DECO | LDST_IMM |
744+				    LDST_SRCDST_WORD_DECO_MATH1 |
745+				    (4 << LDST_OFFSET_SHIFT));
746+		append_jump(desc, JUMP_TEST_ALL | JUMP_COND_CALM | 1);
747+		/* Move the updated fields back to the Job Descriptor */
748+		if (caam_little_end)
749+			append_move(desc, MOVE_WAITCOMP | MOVE_SRC_MATH0 |
750+				    MOVE_DEST_DESCBUF |
751+				    (54 * 4 << MOVE_OFFSET_SHIFT) | 16);
752+		else
753+			append_move(desc, MOVE_WAITCOMP | MOVE_SRC_MATH0 |
754+				    MOVE_DEST_DESCBUF |
755+				    (53 * 4 << MOVE_OFFSET_SHIFT) | 16);
756+
757+		/*
758+		 * Read the new SEQ IN PTR command, Input Pointer, Input Length
759+		 * and then jump back to the next command from the
760+		 * Shared Descriptor.
761+		 */
762+		append_jump(desc, JUMP_TEST_ALL | JUMP_COND_CALM | 5);
763+	}
764+
765+	/* skip payload */
766+	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_SKIP | FIFOLDST_VLF);
767+	/* check icv */
768+	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_ICV |
769+			     FIFOLD_TYPE_LAST2 | authsize);
770+
771+#ifdef DEBUG
772+	print_hex_dump(KERN_ERR, "tls dec shdesc@" __stringify(__LINE__) ": ",
773+		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
774+		       desc_bytes(desc), 1);
775+#endif
776+}
777+EXPORT_SYMBOL(cnstr_shdsc_tls_decap);
778+
779 /**
780  * cnstr_shdsc_gcm_encap - gcm encapsulation shared descriptor
781  * @desc: pointer to buffer used for descriptor construction
782@@ -1390,8 +1804,18 @@ void cnstr_shdsc_skcipher_encap(u32 * const desc, struct alginfo *cdata,
783 				   JUMP_COND_SHRD);
784
785 	/* Load class1 key only */
786-	append_key_as_imm(desc, cdata->key_virt, cdata->keylen,
787-			  cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG);
788+	if (IS_ENABLED(CONFIG_CRYPTO_DEV_FSL_CAAM_TK_API) &&
789+	    cdata->key_cmd_opt)
790+		/*
791+		 * Black keys can be loaded using only a KEY command
792+		 * with ENC=1 and the proper setting of the EKT bit.
793+		 */
794+		append_key_as_imm(desc, cdata->key_virt, cdata->keylen,
795+				  cdata->key_real_len, CLASS_1 |
796+				  KEY_DEST_CLASS_REG | cdata->key_cmd_opt);
797+	else
798+		append_key_as_imm(desc, cdata->key_virt, cdata->keylen,
799+				  cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG);
800
801 	/* Load nonce into CONTEXT1 reg */
802 	if (is_rfc3686) {
803@@ -1465,8 +1889,18 @@ void cnstr_shdsc_skcipher_decap(u32 * const desc, struct alginfo *cdata,
804 				   JUMP_COND_SHRD);
805
806 	/* Load class1 key only */
807-	append_key_as_imm(desc, cdata->key_virt, cdata->keylen,
808-			  cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG);
809+	if (IS_ENABLED(CONFIG_CRYPTO_DEV_FSL_CAAM_TK_API) &&
810+	    cdata->key_cmd_opt)
811+		/*
812+		 * Black keys can be loaded using only a KEY command
813+		 * with ENC=1 and the proper setting of the EKT bit.
814+		 */
815+		append_key_as_imm(desc, cdata->key_virt, cdata->keylen,
816+				  cdata->key_real_len, CLASS_1 |
817+				  KEY_DEST_CLASS_REG | cdata->key_cmd_opt);
818+	else
819+		append_key_as_imm(desc, cdata->key_virt, cdata->keylen,
820+				  cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG);
821
822 	/* Load nonce into CONTEXT1 reg */
823 	if (is_rfc3686) {
824diff --git a/drivers/crypto/caam/caamalg_desc.h b/drivers/crypto/caam/caamalg_desc.h
825index f2893393b..99f0d1471 100644
826--- a/drivers/crypto/caam/caamalg_desc.h
827+++ b/drivers/crypto/caam/caamalg_desc.h
828@@ -17,6 +17,9 @@
829 #define DESC_QI_AEAD_DEC_LEN		(DESC_AEAD_DEC_LEN + 3 * CAAM_CMD_SZ)
830 #define DESC_QI_AEAD_GIVENC_LEN		(DESC_AEAD_GIVENC_LEN + 3 * CAAM_CMD_SZ)
831
832+#define DESC_TLS_BASE			(4 * CAAM_CMD_SZ)
833+#define DESC_TLS10_ENC_LEN		(DESC_TLS_BASE + 29 * CAAM_CMD_SZ)
834+
835 /* Note: Nonce is counted in cdata.keylen */
836 #define DESC_AEAD_CTR_RFC3686_LEN	(4 * CAAM_CMD_SZ)
837
838@@ -72,6 +75,16 @@ void cnstr_shdsc_aead_givencap(u32 * const desc, struct alginfo *cdata,
839 			       u32 *nonce, const u32 ctx1_iv_off,
840 			       const bool is_qi, int era);
841
842+void cnstr_shdsc_tls_encap(u32 *const desc, struct alginfo *cdata,
843+			   struct alginfo *adata, unsigned int assoclen,
844+			   unsigned int ivsize, unsigned int authsize,
845+			   unsigned int blocksize, int era);
846+
847+void cnstr_shdsc_tls_decap(u32 *const desc, struct alginfo *cdata,
848+			   struct alginfo *adata, unsigned int assoclen,
849+			   unsigned int ivsize, unsigned int authsize,
850+			   unsigned int blocksize, int era);
851+
852 void cnstr_shdsc_gcm_encap(u32 * const desc, struct alginfo *cdata,
853 			   unsigned int ivsize, unsigned int icvsize,
854 			   const bool is_qi);
855diff --git a/drivers/crypto/caam/caamalg_qi.c b/drivers/crypto/caam/caamalg_qi.c
856index a24ae966d..81a8ee300 100644
857--- a/drivers/crypto/caam/caamalg_qi.c
858+++ b/drivers/crypto/caam/caamalg_qi.c
859@@ -297,6 +297,166 @@ static int des3_aead_setkey(struct crypto_aead *aead, const u8 *key,
860 	return err;
861 }
862
863+static int tls_set_sh_desc(struct crypto_aead *tls)
864+{
865+	struct caam_ctx *ctx = crypto_aead_ctx(tls);
866+	unsigned int ivsize = crypto_aead_ivsize(tls);
867+	unsigned int blocksize = crypto_aead_blocksize(tls);
868+	unsigned int assoclen = 13; /* always 13 bytes for TLS */
869+	unsigned int data_len[2];
870+	u32 inl_mask;
871+	struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctx->jrdev->parent);
872+
873+	if (!ctx->cdata.keylen || !ctx->authsize)
874+		return 0;
875+
876+	/*
877+	 * TLS 1.0 encrypt shared descriptor
878+	 * Job Descriptor and Shared Descriptor
879+	 * must fit into the 64-word Descriptor h/w Buffer
880+	 */
881+	data_len[0] = ctx->adata.keylen_pad;
882+	data_len[1] = ctx->cdata.keylen;
883+
884+	if (desc_inline_query(DESC_TLS10_ENC_LEN, DESC_JOB_IO_LEN, data_len,
885+			      &inl_mask, ARRAY_SIZE(data_len)) < 0)
886+		return -EINVAL;
887+
888+	if (inl_mask & 1)
889+		ctx->adata.key_virt = ctx->key;
890+	else
891+		ctx->adata.key_dma = ctx->key_dma;
892+
893+	if (inl_mask & 2)
894+		ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
895+	else
896+		ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
897+
898+	ctx->adata.key_inline = !!(inl_mask & 1);
899+	ctx->cdata.key_inline = !!(inl_mask & 2);
900+
901+	cnstr_shdsc_tls_encap(ctx->sh_desc_enc, &ctx->cdata, &ctx->adata,
902+			      assoclen, ivsize, ctx->authsize, blocksize,
903+			      ctrlpriv->era);
904+
905+	/*
906+	 * TLS 1.0 decrypt shared descriptor
907+	 * Keys do not fit inline, regardless of algorithms used
908+	 */
909+	ctx->adata.key_inline = false;
910+	ctx->adata.key_dma = ctx->key_dma;
911+	ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
912+
913+	cnstr_shdsc_tls_decap(ctx->sh_desc_dec, &ctx->cdata, &ctx->adata,
914+			      assoclen, ivsize, ctx->authsize, blocksize,
915+			      ctrlpriv->era);
916+
917+	return 0;
918+}
919+
920+static int tls_setauthsize(struct crypto_aead *tls, unsigned int authsize)
921+{
922+	struct caam_ctx *ctx = crypto_aead_ctx(tls);
923+
924+	ctx->authsize = authsize;
925+	tls_set_sh_desc(tls);
926+
927+	return 0;
928+}
929+
930+static int tls_setkey(struct crypto_aead *tls, const u8 *key,
931+		      unsigned int keylen)
932+{
933+	struct caam_ctx *ctx = crypto_aead_ctx(tls);
934+	struct device *jrdev = ctx->jrdev;
935+	struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
936+	struct crypto_authenc_keys keys;
937+	int ret = 0;
938+
939+	if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
940+		goto badkey;
941+
942+#ifdef DEBUG
943+	dev_err(jrdev, "keylen %d enckeylen %d authkeylen %d\n",
944+		keys.authkeylen + keys.enckeylen, keys.enckeylen,
945+		keys.authkeylen);
946+	print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
947+		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
948+#endif
949+
950+	/*
951+	 * If DKP is supported, use it in the shared descriptor to generate
952+	 * the split key.
953+	 */
954+	if (ctrlpriv->era >= 6) {
955+		ctx->adata.keylen = keys.authkeylen;
956+		ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
957+						      OP_ALG_ALGSEL_MASK);
958+
959+		if (ctx->adata.keylen_pad + keys.enckeylen > CAAM_MAX_KEY_SIZE)
960+			goto badkey;
961+
962+		memcpy(ctx->key, keys.authkey, keys.authkeylen);
963+		memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey,
964+		       keys.enckeylen);
965+		dma_sync_single_for_device(jrdev, ctx->key_dma,
966+					   ctx->adata.keylen_pad +
967+					   keys.enckeylen, ctx->dir);
968+		goto skip_split_key;
969+	}
970+
971+	ret = gen_split_key(jrdev, ctx->key, &ctx->adata, keys.authkey,
972+			    keys.authkeylen, CAAM_MAX_KEY_SIZE -
973+			    keys.enckeylen);
974+	if (ret)
975+		goto badkey;
976+
977+	/* append encryption key to auth split key */
978+	memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen);
979+	dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->adata.keylen_pad +
980+				   keys.enckeylen, ctx->dir);
981+
982+#ifdef DEBUG
983+	dev_err(jrdev, "split keylen %d split keylen padded %d\n",
984+		ctx->adata.keylen, ctx->adata.keylen_pad);
985+	print_hex_dump(KERN_ERR, "ctx.key@" __stringify(__LINE__)": ",
986+		       DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
987+		       ctx->adata.keylen_pad + keys.enckeylen, 1);
988+#endif
989+
990+skip_split_key:
991+	ctx->cdata.keylen = keys.enckeylen;
992+
993+	ret = tls_set_sh_desc(tls);
994+	if (ret)
995+		goto badkey;
996+
997+	/* Now update the driver contexts with the new shared descriptor */
998+	if (ctx->drv_ctx[ENCRYPT]) {
999+		ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT],
1000+					  ctx->sh_desc_enc);
1001+		if (ret) {
1002+			dev_err(jrdev, "driver enc context update failed\n");
1003+			goto badkey;
1004+		}
1005+	}
1006+
1007+	if (ctx->drv_ctx[DECRYPT]) {
1008+		ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT],
1009+					  ctx->sh_desc_dec);
1010+		if (ret) {
1011+			dev_err(jrdev, "driver dec context update failed\n");
1012+			goto badkey;
1013+		}
1014+	}
1015+
1016+	memzero_explicit(&keys, sizeof(keys));
1017+	return ret;
1018+badkey:
1019+	memzero_explicit(&keys, sizeof(keys));
1020+	return -EINVAL;
1021+}
1022+
1023 static int gcm_set_sh_desc(struct crypto_aead *aead)
1024 {
1025 	struct caam_ctx *ctx = crypto_aead_ctx(aead);
1026@@ -806,6 +966,29 @@ struct aead_edesc {
1027 	struct qm_sg_entry sgt[];
1028 };
1029
1030+/*
1031+ * tls_edesc - s/w-extended tls descriptor
1032+ * @src_nents: number of segments in input scatterlist
1033+ * @dst_nents: number of segments in output scatterlist
1034+ * @iv_dma: dma address of iv for checking continuity and link table
1035+ * @qm_sg_bytes: length of dma mapped h/w link table
1036+ * @tmp: array of scatterlists used by 'scatterwalk_ffwd'
1037+ * @qm_sg_dma: bus physical mapped address of h/w link table
1038+ * @drv_req: driver-specific request structure
1039+ * @sgt: the h/w link table, followed by IV
1040+ */
1041+struct tls_edesc {
1042+	int src_nents;
1043+	int dst_nents;
1044+	dma_addr_t iv_dma;
1045+	int qm_sg_bytes;
1046+	dma_addr_t qm_sg_dma;
1047+	struct scatterlist tmp[2];
1048+	struct scatterlist *dst;
1049+	struct caam_drv_req drv_req;
1050+	struct qm_sg_entry sgt[];
1051+};
1052+
1053 /*
1054  * skcipher_edesc - s/w-extended skcipher descriptor
1055  * @src_nents: number of segments in input scatterlist
1056@@ -898,6 +1081,18 @@ static void aead_unmap(struct device *dev,
1057 	dma_unmap_single(dev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
1058 }
1059
1060+static void tls_unmap(struct device *dev,
1061+		      struct tls_edesc *edesc,
1062+		      struct aead_request *req)
1063+{
1064+	struct crypto_aead *aead = crypto_aead_reqtfm(req);
1065+	int ivsize = crypto_aead_ivsize(aead);
1066+
1067+	caam_unmap(dev, req->src, edesc->dst, edesc->src_nents,
1068+		   edesc->dst_nents, edesc->iv_dma, ivsize, DMA_TO_DEVICE,
1069+		   edesc->qm_sg_dma, edesc->qm_sg_bytes);
1070+}
1071+
1072 static void skcipher_unmap(struct device *dev, struct skcipher_edesc *edesc,
1073 			   struct skcipher_request *req)
1074 {
1075@@ -1190,6 +1385,238 @@ static int aead_decrypt(struct aead_request *req)
1076 	return aead_crypt(req, false);
1077 }
1078
1079+static void tls_done(struct caam_drv_req *drv_req, u32 status)
1080+{
1081+	struct device *qidev;
1082+	struct tls_edesc *edesc;
1083+	struct aead_request *aead_req = drv_req->app_ctx;
1084+	struct crypto_aead *aead = crypto_aead_reqtfm(aead_req);
1085+	struct caam_ctx *caam_ctx = crypto_aead_ctx(aead);
1086+	int ecode = 0;
1087+
1088+	qidev = caam_ctx->qidev;
1089+
1090+	if (unlikely(status))
1091+		ecode = caam_jr_strstatus(qidev, status);
1092+
1093+	edesc = container_of(drv_req, typeof(*edesc), drv_req);
1094+	tls_unmap(qidev, edesc, aead_req);
1095+
1096+	aead_request_complete(aead_req, ecode);
1097+	qi_cache_free(edesc);
1098+}
1099+
1100+/*
1101+ * allocate and map the tls extended descriptor
1102+ */
1103+static struct tls_edesc *tls_edesc_alloc(struct aead_request *req, bool encrypt)
1104+{
1105+	struct crypto_aead *aead = crypto_aead_reqtfm(req);
1106+	struct caam_ctx *ctx = crypto_aead_ctx(aead);
1107+	unsigned int blocksize = crypto_aead_blocksize(aead);
1108+	unsigned int padsize, authsize;
1109+	struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
1110+						 typeof(*alg), aead);
1111+	struct device *qidev = ctx->qidev;
1112+	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
1113+		      GFP_KERNEL : GFP_ATOMIC;
1114+	int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
1115+	struct tls_edesc *edesc;
1116+	dma_addr_t qm_sg_dma, iv_dma = 0;
1117+	int ivsize = crypto_aead_ivsize(aead);
1118+	u8 *iv;
1119+	int qm_sg_index, qm_sg_ents = 0, qm_sg_bytes;
1120+	int src_len, dst_len, data_len;
1121+	struct qm_sg_entry *sg_table, *fd_sgt;
1122+	struct caam_drv_ctx *drv_ctx;
1123+	struct scatterlist *dst;
1124+
1125+	if (encrypt) {
1126+		padsize = blocksize - ((req->cryptlen + ctx->authsize) %
1127+					blocksize);
1128+		authsize = ctx->authsize + padsize;
1129+	} else {
1130+		authsize = ctx->authsize;
1131+	}
1132+
1133+	drv_ctx = get_drv_ctx(ctx, encrypt ? ENCRYPT : DECRYPT);
1134+	if (unlikely(IS_ERR_OR_NULL(drv_ctx)))
1135+		return (struct tls_edesc *)drv_ctx;
1136+
1137+	/* allocate space for base edesc, link tables and IV */
1138+	edesc = qi_cache_alloc(GFP_DMA | flags);
1139+	if (unlikely(!edesc)) {
1140+		dev_err(qidev, "could not allocate extended descriptor\n");
1141+		return ERR_PTR(-ENOMEM);
1142+	}
1143+
1144+	data_len = req->assoclen + req->cryptlen;
1145+	dst_len = req->cryptlen + (encrypt ? authsize : 0);
1146+
1147+	if (likely(req->src == req->dst)) {
1148+		src_len = req->assoclen + dst_len;
1149+
1150+		src_nents = sg_nents_for_len(req->src, src_len);
1151+		if (unlikely(src_nents < 0)) {
1152+			dev_err(qidev, "Insufficient bytes (%d) in src S/G\n",
1153+				src_len);
1154+			qi_cache_free(edesc);
1155+			return ERR_PTR(src_nents);
1156+		}
1157+
1158+		mapped_src_nents = dma_map_sg(qidev, req->src, src_nents,
1159+					      DMA_BIDIRECTIONAL);
1160+		if (unlikely(!mapped_src_nents)) {
1161+			dev_err(qidev, "unable to map source\n");
1162+			qi_cache_free(edesc);
1163+			return ERR_PTR(-ENOMEM);
1164+		}
1165+		dst = req->dst;
1166+	} else {
1167+		src_len = data_len;
1168+
1169+		src_nents = sg_nents_for_len(req->src, src_len);
1170+		if (unlikely(src_nents < 0)) {
1171+			dev_err(qidev, "Insufficient bytes (%d) in src S/G\n",
1172+				src_len);
1173+			qi_cache_free(edesc);
1174+			return ERR_PTR(src_nents);
1175+		}
1176+
1177+		dst = scatterwalk_ffwd(edesc->tmp, req->dst, req->assoclen);
1178+
1179+		dst_nents = sg_nents_for_len(dst, dst_len);
1180+		if (unlikely(dst_nents < 0)) {
1181+			dev_err(qidev, "Insufficient bytes (%d) in dst S/G\n",
1182+				dst_len);
1183+			qi_cache_free(edesc);
1184+			return ERR_PTR(dst_nents);
1185+		}
1186+
1187+		if (src_nents) {
1188+			mapped_src_nents = dma_map_sg(qidev, req->src,
1189+						      src_nents, DMA_TO_DEVICE);
1190+			if (unlikely(!mapped_src_nents)) {
1191+				dev_err(qidev, "unable to map source\n");
1192+				qi_cache_free(edesc);
1193+				return ERR_PTR(-ENOMEM);
1194+			}
1195+		} else {
1196+			mapped_src_nents = 0;
1197+		}
1198+
1199+		mapped_dst_nents = dma_map_sg(qidev, dst, dst_nents,
1200+					      DMA_FROM_DEVICE);
1201+		if (unlikely(!mapped_dst_nents)) {
1202+			dev_err(qidev, "unable to map destination\n");
1203+			dma_unmap_sg(qidev, req->src, src_nents, DMA_TO_DEVICE);
1204+			qi_cache_free(edesc);
1205+			return ERR_PTR(-ENOMEM);
1206+		}
1207+	}
1208+
1209+	/*
1210+	 * Create S/G table: IV, src, dst.
1211+	 * Input is not contiguous.
1212+	 */
1213+	qm_sg_ents = 1 + mapped_src_nents +
1214+		     (mapped_dst_nents > 1 ? mapped_dst_nents : 0);
1215+	sg_table = &edesc->sgt[0];
1216+	qm_sg_bytes = qm_sg_ents * sizeof(*sg_table);
1217+
1218+	iv = (u8 *)(sg_table + qm_sg_ents);
1219+	/* Make sure IV is located in a DMAable area */
1220+	memcpy(iv, req->iv, ivsize);
1221+	iv_dma = dma_map_single(qidev, iv, ivsize, DMA_TO_DEVICE);
1222+	if (dma_mapping_error(qidev, iv_dma)) {
1223+		dev_err(qidev, "unable to map IV\n");
1224+		caam_unmap(qidev, req->src, dst, src_nents, dst_nents, 0, 0,
1225+			   DMA_NONE, 0, 0);
1226+		qi_cache_free(edesc);
1227+		return ERR_PTR(-ENOMEM);
1228+	}
1229+
1230+	edesc->src_nents = src_nents;
1231+	edesc->dst_nents = dst_nents;
1232+	edesc->dst = dst;
1233+	edesc->iv_dma = iv_dma;
1234+	edesc->drv_req.app_ctx = req;
1235+	edesc->drv_req.cbk = tls_done;
1236+	edesc->drv_req.drv_ctx = drv_ctx;
1237+
1238+	dma_to_qm_sg_one(sg_table, iv_dma, ivsize, 0);
1239+	qm_sg_index = 1;
1240+
1241+	sg_to_qm_sg_last(req->src, src_len, sg_table + qm_sg_index, 0);
1242+	qm_sg_index += mapped_src_nents;
1243+
1244+	if (mapped_dst_nents > 1)
1245+		sg_to_qm_sg_last(dst, dst_len, sg_table + qm_sg_index, 0);
1246+
1247+	qm_sg_dma = dma_map_single(qidev, sg_table, qm_sg_bytes, DMA_TO_DEVICE);
1248+	if (dma_mapping_error(qidev, qm_sg_dma)) {
1249+		dev_err(qidev, "unable to map S/G table\n");
1250+		caam_unmap(qidev, req->src, dst, src_nents, dst_nents, iv_dma,
1251+			   ivsize, DMA_TO_DEVICE, 0, 0);
1252+		qi_cache_free(edesc);
1253+		return ERR_PTR(-ENOMEM);
1254+	}
1255+
1256+	edesc->qm_sg_dma = qm_sg_dma;
1257+	edesc->qm_sg_bytes = qm_sg_bytes;
1258+
1259+	fd_sgt = &edesc->drv_req.fd_sgt[0];
1260+
1261+	dma_to_qm_sg_one_last_ext(&fd_sgt[1], qm_sg_dma, ivsize + data_len, 0);
1262+
1263+	if (req->dst == req->src)
1264+		dma_to_qm_sg_one_ext(&fd_sgt[0], qm_sg_dma +
1265+				    (sg_nents_for_len(req->src, req->assoclen) +
1266+				     1) * sizeof(*sg_table), dst_len, 0);
1267+	else if (mapped_dst_nents == 1)
1268+		dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(dst), dst_len, 0);
1269+	else
1270+		dma_to_qm_sg_one_ext(&fd_sgt[0], qm_sg_dma + sizeof(*sg_table) *
1271+				     qm_sg_index, dst_len, 0);
1272+
1273+	return edesc;
1274+}
1275+
1276+static int tls_crypt(struct aead_request *req, bool encrypt)
1277+{
1278+	struct tls_edesc *edesc;
1279+	struct crypto_aead *aead = crypto_aead_reqtfm(req);
1280+	struct caam_ctx *ctx = crypto_aead_ctx(aead);
1281+	int ret;
1282+
1283+	if (unlikely(caam_congested))
1284+		return -EAGAIN;
1285+
1286+	edesc = tls_edesc_alloc(req, encrypt);
1287+	if (IS_ERR_OR_NULL(edesc))
1288+		return PTR_ERR(edesc);
1289+
1290+	ret = caam_qi_enqueue(ctx->qidev, &edesc->drv_req);
1291+	if (!ret) {
1292+		ret = -EINPROGRESS;
1293+	} else {
1294+		tls_unmap(ctx->qidev, edesc, req);
1295+		qi_cache_free(edesc);
1296+	}
1297+
1298+	return ret;
1299+}
1300+
1301+static int tls_encrypt(struct aead_request *req)
1302+{
1303+	return tls_crypt(req, true);
1304+}
1305+
1306+static int tls_decrypt(struct aead_request *req)
1307+{
1308+	return tls_crypt(req, false);
1309+}
1310+
1311 static int ipsec_gcm_encrypt(struct aead_request *req)
1312 {
1313 	return crypto_ipsec_check_assoclen(req->assoclen) ? : aead_crypt(req,
1314@@ -2440,6 +2867,26 @@ static struct caam_aead_alg driver_aeads[] = {
1315 			.geniv = true,
1316 		}
1317 	},
1318+	{
1319+		.aead = {
1320+			.base = {
1321+				.cra_name = "tls10(hmac(sha1),cbc(aes))",
1322+				.cra_driver_name = "tls10-hmac-sha1-cbc-aes-caam-qi",
1323+				.cra_blocksize = AES_BLOCK_SIZE,
1324+			},
1325+			.setkey = tls_setkey,
1326+			.setauthsize = tls_setauthsize,
1327+			.encrypt = tls_encrypt,
1328+			.decrypt = tls_decrypt,
1329+			.ivsize = AES_BLOCK_SIZE,
1330+			.maxauthsize = SHA1_DIGEST_SIZE,
1331+		},
1332+		.caam = {
1333+			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1334+			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
1335+					   OP_ALG_AAI_HMAC_PRECOMP,
1336+		}
1337+	},
1338 };
1339
1340 static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam,
1341@@ -2447,6 +2894,16 @@ static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam,
1342 {
1343 	struct caam_drv_private *priv;
1344 	struct device *dev;
1345+	/* Digest sizes for MD5, SHA-1, SHA-224, SHA-256, SHA-384, SHA-512 */
1346+	static const u8 digest_size[] = {
1347+		MD5_DIGEST_SIZE,
1348+		SHA1_DIGEST_SIZE,
1349+		SHA224_DIGEST_SIZE,
1350+		SHA256_DIGEST_SIZE,
1351+		SHA384_DIGEST_SIZE,
1352+		SHA512_DIGEST_SIZE
1353+	};
1354+	u8 op_id;
1355
1356 	/*
1357 	 * distribute tfms across job rings to ensure in-order
1358@@ -2478,6 +2935,21 @@ static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam,
1359 	ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam->class2_alg_type;
1360
1361 	ctx->qidev = dev;
1362+	if (ctx->adata.algtype) {
1363+		op_id = (ctx->adata.algtype & OP_ALG_ALGSEL_SUBMASK)
1364+				>> OP_ALG_ALGSEL_SHIFT;
1365+		if (op_id < ARRAY_SIZE(digest_size)) {
1366+			ctx->authsize = digest_size[op_id];
1367+		} else {
1368+			dev_err(ctx->jrdev,
1369+				"incorrect op_id %d; must be less than %zu\n",
1370+				op_id, ARRAY_SIZE(digest_size));
1371+			caam_jr_free(ctx->jrdev);
1372+			return -EINVAL;
1373+		}
1374+	} else {
1375+		ctx->authsize = 0;
1376+	}
1377
1378 	spin_lock_init(&ctx->lock);
1379 	ctx->drv_ctx[ENCRYPT] = NULL;
1380diff --git a/drivers/crypto/caam/caamalg_qi2.c b/drivers/crypto/caam/caamalg_qi2.c
1381index a780e6278..4592b634a 100644
1382--- a/drivers/crypto/caam/caamalg_qi2.c
1383+++ b/drivers/crypto/caam/caamalg_qi2.c
1384@@ -582,6 +582,254 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
1385 	return edesc;
1386 }
1387
1388+static struct tls_edesc *tls_edesc_alloc(struct aead_request *req,
1389+					 bool encrypt)
1390+{
1391+	struct crypto_aead *tls = crypto_aead_reqtfm(req);
1392+	unsigned int blocksize = crypto_aead_blocksize(tls);
1393+	unsigned int padsize, authsize;
1394+	struct caam_request *req_ctx = aead_request_ctx(req);
1395+	struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
1396+	struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
1397+	struct caam_ctx *ctx = crypto_aead_ctx(tls);
1398+	struct caam_aead_alg *alg = container_of(crypto_aead_alg(tls),
1399+						 typeof(*alg), aead);
1400+	struct device *dev = ctx->dev;
1401+	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
1402+		      GFP_KERNEL : GFP_ATOMIC;
1403+	int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
1404+	struct tls_edesc *edesc;
1405+	dma_addr_t qm_sg_dma, iv_dma = 0;
1406+	int ivsize = crypto_aead_ivsize(tls);
1407+	u8 *iv;
1408+	int qm_sg_index, qm_sg_ents = 0, qm_sg_bytes;
1409+	int src_len, dst_len, data_len;
1410+	struct dpaa2_sg_entry *sg_table;
1411+	struct scatterlist *dst;
1412+
1413+	if (encrypt) {
1414+		padsize = blocksize - ((req->cryptlen + ctx->authsize) %
1415+					blocksize);
1416+		authsize = ctx->authsize + padsize;
1417+	} else {
1418+		authsize = ctx->authsize;
1419+	}
1420+
1421+	/* allocate space for base edesc, link tables and IV */
1422+	edesc = qi_cache_zalloc(GFP_DMA | flags);
1423+	if (unlikely(!edesc)) {
1424+		dev_err(dev, "could not allocate extended descriptor\n");
1425+		return ERR_PTR(-ENOMEM);
1426+	}
1427+
1428+	data_len = req->assoclen + req->cryptlen;
1429+	dst_len = req->cryptlen + (encrypt ? authsize : 0);
1430+
1431+	if (likely(req->src == req->dst)) {
1432+		src_len = req->assoclen + dst_len;
1433+
1434+		src_nents = sg_nents_for_len(req->src, src_len);
1435+		if (unlikely(src_nents < 0)) {
1436+			dev_err(dev, "Insufficient bytes (%d) in src S/G\n",
1437+				src_len);
1438+			qi_cache_free(edesc);
1439+			return ERR_PTR(src_nents);
1440+		}
1441+
1442+		mapped_src_nents = dma_map_sg(dev, req->src, src_nents,
1443+					      DMA_BIDIRECTIONAL);
1444+		if (unlikely(!mapped_src_nents)) {
1445+			dev_err(dev, "unable to map source\n");
1446+			qi_cache_free(edesc);
1447+			return ERR_PTR(-ENOMEM);
1448+		}
1449+		dst = req->dst;
1450+	} else {
1451+		src_len = data_len;
1452+
1453+		src_nents = sg_nents_for_len(req->src, src_len);
1454+		if (unlikely(src_nents < 0)) {
1455+			dev_err(dev, "Insufficient bytes (%d) in src S/G\n",
1456+				src_len);
1457+			qi_cache_free(edesc);
1458+			return ERR_PTR(src_nents);
1459+		}
1460+
1461+		dst = scatterwalk_ffwd(edesc->tmp, req->dst, req->assoclen);
1462+
1463+		dst_nents = sg_nents_for_len(dst, dst_len);
1464+		if (unlikely(dst_nents < 0)) {
1465+			dev_err(dev, "Insufficient bytes (%d) in dst S/G\n",
1466+				dst_len);
1467+			qi_cache_free(edesc);
1468+			return ERR_PTR(dst_nents);
1469+		}
1470+
1471+		if (src_nents) {
1472+			mapped_src_nents = dma_map_sg(dev, req->src,
1473+						      src_nents, DMA_TO_DEVICE);
1474+			if (unlikely(!mapped_src_nents)) {
1475+				dev_err(dev, "unable to map source\n");
1476+				qi_cache_free(edesc);
1477+				return ERR_PTR(-ENOMEM);
1478+			}
1479+		} else {
1480+			mapped_src_nents = 0;
1481+		}
1482+
1483+		mapped_dst_nents = dma_map_sg(dev, dst, dst_nents,
1484+					      DMA_FROM_DEVICE);
1485+		if (unlikely(!mapped_dst_nents)) {
1486+			dev_err(dev, "unable to map destination\n");
1487+			dma_unmap_sg(dev, req->src, src_nents, DMA_TO_DEVICE);
1488+			qi_cache_free(edesc);
1489+			return ERR_PTR(-ENOMEM);
1490+		}
1491+	}
1492+
1493+	/*
1494+	 * Create S/G table: IV, src, dst.
1495+	 * Input is not contiguous.
1496+	 */
1497+	qm_sg_ents = 1 + mapped_src_nents +
1498+		     (mapped_dst_nents > 1 ? mapped_dst_nents : 0);
1499+	sg_table = &edesc->sgt[0];
1500+	qm_sg_bytes = qm_sg_ents * sizeof(*sg_table);
1501+
1502+	iv = (u8 *)(sg_table + qm_sg_ents);
1503+	/* Make sure IV is located in a DMAable area */
1504+	memcpy(iv, req->iv, ivsize);
1505+	iv_dma = dma_map_single(dev, iv, ivsize, DMA_TO_DEVICE);
1506+	if (dma_mapping_error(dev, iv_dma)) {
1507+		dev_err(dev, "unable to map IV\n");
1508+		caam_unmap(dev, req->src, dst, src_nents, dst_nents, 0, 0,
1509+			   DMA_NONE, 0, 0);
1510+		qi_cache_free(edesc);
1511+		return ERR_PTR(-ENOMEM);
1512+	}
1513+
1514+	edesc->src_nents = src_nents;
1515+	edesc->dst_nents = dst_nents;
1516+	edesc->dst = dst;
1517+	edesc->iv_dma = iv_dma;
1518+
1519+	dma_to_qm_sg_one(sg_table, iv_dma, ivsize, 0);
1520+	qm_sg_index = 1;
1521+
1522+	sg_to_qm_sg_last(req->src, src_len, sg_table + qm_sg_index, 0);
1523+	qm_sg_index += mapped_src_nents;
1524+
1525+	if (mapped_dst_nents > 1)
1526+		sg_to_qm_sg_last(dst, dst_len, sg_table + qm_sg_index, 0);
1527+
1528+	qm_sg_dma = dma_map_single(dev, sg_table, qm_sg_bytes, DMA_TO_DEVICE);
1529+	if (dma_mapping_error(dev, qm_sg_dma)) {
1530+		dev_err(dev, "unable to map S/G table\n");
1531+		caam_unmap(dev, req->src, dst, src_nents, dst_nents, iv_dma,
1532+			   ivsize, DMA_TO_DEVICE, 0, 0);
1533+		qi_cache_free(edesc);
1534+		return ERR_PTR(-ENOMEM);
1535+	}
1536+
1537+	edesc->qm_sg_dma = qm_sg_dma;
1538+	edesc->qm_sg_bytes = qm_sg_bytes;
1539+
1540+	memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
1541+	dpaa2_fl_set_final(in_fle, true);
1542+	dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
1543+	dpaa2_fl_set_addr(in_fle, qm_sg_dma);
1544+	dpaa2_fl_set_len(in_fle, ivsize + data_len);
1545+
1546+	if (req->dst == req->src) {
1547+		dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);
1548+		dpaa2_fl_set_addr(out_fle, qm_sg_dma +
1549+				  (sg_nents_for_len(req->src, req->assoclen) +
1550+				   1) * sizeof(*sg_table));
1551+	} else if (mapped_dst_nents == 1) {
1552+		dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
1553+		dpaa2_fl_set_addr(out_fle, sg_dma_address(dst));
1554+	} else {
1555+		dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);
1556+		dpaa2_fl_set_addr(out_fle, qm_sg_dma + qm_sg_index *
1557+				  sizeof(*sg_table));
1558+	}
1559+
1560+	dpaa2_fl_set_len(out_fle, dst_len);
1561+
1562+	return edesc;
1563+}
1564+
1565+static int tls_set_sh_desc(struct crypto_aead *tls)
1566+{
1567+	struct caam_ctx *ctx = crypto_aead_ctx(tls);
1568+	unsigned int ivsize = crypto_aead_ivsize(tls);
1569+	unsigned int blocksize = crypto_aead_blocksize(tls);
1570+	struct device *dev = ctx->dev;
1571+	struct dpaa2_caam_priv *priv = dev_get_drvdata(dev);
1572+	struct caam_flc *flc;
1573+	u32 *desc;
1574+	unsigned int assoclen = 13; /* always 13 bytes for TLS */
1575+	unsigned int data_len[2];
1576+	u32 inl_mask;
1577+
1578+	if (!ctx->cdata.keylen || !ctx->authsize)
1579+		return 0;
1580+
1581+	/*
1582+	 * TLS 1.0 encrypt shared descriptor
1583+	 * Job Descriptor and Shared Descriptor
1584+	 * must fit into the 64-word Descriptor h/w Buffer
1585+	 */
1586+	data_len[0] = ctx->adata.keylen_pad;
1587+	data_len[1] = ctx->cdata.keylen;
1588+
1589+	if (desc_inline_query(DESC_TLS10_ENC_LEN, DESC_JOB_IO_LEN, data_len,
1590+			      &inl_mask, ARRAY_SIZE(data_len)) < 0)
1591+		return -EINVAL;
1592+
1593+	if (inl_mask & 1)
1594+		ctx->adata.key_virt = ctx->key;
1595+	else
1596+		ctx->adata.key_dma = ctx->key_dma;
1597+
1598+	if (inl_mask & 2)
1599+		ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
1600+	else
1601+		ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
1602+
1603+	ctx->adata.key_inline = !!(inl_mask & 1);
1604+	ctx->cdata.key_inline = !!(inl_mask & 2);
1605+
1606+	flc = &ctx->flc[ENCRYPT];
1607+	desc = flc->sh_desc;
1608+	cnstr_shdsc_tls_encap(desc, &ctx->cdata, &ctx->adata,
1609+			      assoclen, ivsize, ctx->authsize, blocksize,
1610+			      priv->sec_attr.era);
1611+	flc->flc[1] = cpu_to_caam32(desc_len(desc));
1612+	dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
1613+				   sizeof(flc->flc) + desc_bytes(desc),
1614+				   ctx->dir);
1615+
1616+	/*
1617+	 * TLS 1.0 decrypt shared descriptor
1618+	 * Keys do not fit inline, regardless of algorithms used
1619+	 */
1620+	ctx->adata.key_inline = false;
1621+	ctx->adata.key_dma = ctx->key_dma;
1622+	ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
1623+
1624+	flc = &ctx->flc[DECRYPT];
1625+	desc = flc->sh_desc;
1626+	cnstr_shdsc_tls_decap(desc, &ctx->cdata, &ctx->adata, assoclen, ivsize,
1627+			      ctx->authsize, blocksize, priv->sec_attr.era);
1628+	flc->flc[1] = cpu_to_caam32(desc_len(desc));
1629+	dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
1630+				   sizeof(flc->flc) + desc_bytes(desc),
1631+				   ctx->dir);
1632+
1633+	return 0;
1634+}
1635+
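The inl_mask returned by desc_inline_query() above is consumed bit by bit: bit 0
selects whether the split authentication key is inlined into the descriptor or
referenced by DMA address, and bit 1 does the same for the cipher key. A minimal
sketch of that dispatch as a stand-alone helper (the helper itself is
hypothetical; struct alginfo and its fields are assumed to match the CAAM
descriptor-construction headers):

    /* Hypothetical helper mirroring the inl_mask handling in tls_set_sh_desc() */
    static void set_key_refs(struct alginfo *adata, struct alginfo *cdata,
                             u8 *key, dma_addr_t key_dma, u32 inl_mask)
    {
            adata->key_inline = !!(inl_mask & 1);
            cdata->key_inline = !!(inl_mask & 2);

            if (adata->key_inline)
                    adata->key_virt = key;          /* key bytes embedded in descriptor */
            else
                    adata->key_dma = key_dma;       /* descriptor references key by address */

            if (cdata->key_inline)
                    cdata->key_virt = key + adata->keylen_pad;
            else
                    cdata->key_dma = key_dma + adata->keylen_pad;
    }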
1636 static int chachapoly_set_sh_desc(struct crypto_aead *aead)
1637 {
1638 	struct caam_ctx *ctx = crypto_aead_ctx(aead);
1639@@ -626,6 +874,60 @@ static int chachapoly_setauthsize(struct crypto_aead *aead,
1640 	return chachapoly_set_sh_desc(aead);
1641 }
1642
1643+static int tls_setkey(struct crypto_aead *tls, const u8 *key,
1644+		      unsigned int keylen)
1645+{
1646+	struct caam_ctx *ctx = crypto_aead_ctx(tls);
1647+	struct device *dev = ctx->dev;
1648+	struct crypto_authenc_keys keys;
1649+
1650+	if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
1651+		goto badkey;
1652+
1653+#ifdef DEBUG
1654+	dev_err(dev, "keylen %d enckeylen %d authkeylen %d\n",
1655+		keys.authkeylen + keys.enckeylen, keys.enckeylen,
1656+		keys.authkeylen);
1657+	print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
1658+		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
1659+#endif
1660+
1661+	ctx->adata.keylen = keys.authkeylen;
1662+	ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
1663+					      OP_ALG_ALGSEL_MASK);
1664+
1665+	if (ctx->adata.keylen_pad + keys.enckeylen > CAAM_MAX_KEY_SIZE)
1666+		goto badkey;
1667+
1668+	memcpy(ctx->key, keys.authkey, keys.authkeylen);
1669+	memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen);
1670+	dma_sync_single_for_device(dev, ctx->key_dma, ctx->adata.keylen_pad +
1671+				   keys.enckeylen, ctx->dir);
1672+#ifdef DEBUG
1673+	print_hex_dump(KERN_ERR, "ctx.key@" __stringify(__LINE__)": ",
1674+		       DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
1675+		       ctx->adata.keylen_pad + keys.enckeylen, 1);
1676+#endif
1677+
1678+	ctx->cdata.keylen = keys.enckeylen;
1679+
1680+	memzero_explicit(&keys, sizeof(keys));
1681+	return tls_set_sh_desc(tls);
1682+badkey:
1683+	memzero_explicit(&keys, sizeof(keys));
1684+	return -EINVAL;
1685+}
1686+
1687+static int tls_setauthsize(struct crypto_aead *tls, unsigned int authsize)
1688+{
1689+	struct caam_ctx *ctx = crypto_aead_ctx(tls);
1690+
1691+	ctx->authsize = authsize;
1692+	tls_set_sh_desc(tls);
1693+
1694+	return 0;
1695+}
1696+
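tls_setkey() consumes the same packed authenc key format as the other
authenc-style AEADs: an rtattr header carrying the encryption key length,
followed by the authentication key and then the encryption key, which
crypto_authenc_extractkeys() splits apart. A comment-only sketch of the
resulting ctx->key layout (offsets illustrative):

    /*
     * ctx->key after tls_setkey(), for illustration:
     *
     *   [0 .. adata.keylen_pad - 1]          HMAC(SHA-1) key, padded out to
     *                                        the split-key length
     *   [adata.keylen_pad ..
     *    adata.keylen_pad + enckeylen - 1]   AES-CBC encryption key
     *
     * Both halves are pushed to ctx->key_dma in a single
     * dma_sync_single_for_device() call.
     */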
1697 static int chachapoly_setkey(struct crypto_aead *aead, const u8 *key,
1698 			     unsigned int keylen)
1699 {
1700@@ -1264,6 +1566,17 @@ static void aead_unmap(struct device *dev, struct aead_edesc *edesc,
1701 	dma_unmap_single(dev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
1702 }
1703
1704+static void tls_unmap(struct device *dev, struct tls_edesc *edesc,
1705+		      struct aead_request *req)
1706+{
1707+	struct crypto_aead *tls = crypto_aead_reqtfm(req);
1708+	int ivsize = crypto_aead_ivsize(tls);
1709+
1710+	caam_unmap(dev, req->src, edesc->dst, edesc->src_nents,
1711+		   edesc->dst_nents, edesc->iv_dma, ivsize, DMA_TO_DEVICE,
1712+		   edesc->qm_sg_dma, edesc->qm_sg_bytes);
1713+}
1714+
1715 static void skcipher_unmap(struct device *dev, struct skcipher_edesc *edesc,
1716 			   struct skcipher_request *req)
1717 {
1718@@ -1275,7 +1588,7 @@ static void skcipher_unmap(struct device *dev, struct skcipher_edesc *edesc,
1719 		   edesc->qm_sg_bytes);
1720 }
1721
1722-static void aead_encrypt_done(void *cbk_ctx, u32 status)
1723+static void aead_crypt_done(void *cbk_ctx, u32 status)
1724 {
1725 	struct crypto_async_request *areq = cbk_ctx;
1726 	struct aead_request *req = container_of(areq, struct aead_request,
1727@@ -1296,28 +1609,7 @@ static void aead_encrypt_done(void *cbk_ctx, u32 status)
1728 	aead_request_complete(req, ecode);
1729 }
1730
1731-static void aead_decrypt_done(void *cbk_ctx, u32 status)
1732-{
1733-	struct crypto_async_request *areq = cbk_ctx;
1734-	struct aead_request *req = container_of(areq, struct aead_request,
1735-						base);
1736-	struct caam_request *req_ctx = to_caam_req(areq);
1737-	struct aead_edesc *edesc = req_ctx->edesc;
1738-	struct crypto_aead *aead = crypto_aead_reqtfm(req);
1739-	struct caam_ctx *ctx = crypto_aead_ctx(aead);
1740-	int ecode = 0;
1741-
1742-	dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
1743-
1744-	if (unlikely(status))
1745-		ecode = caam_qi2_strstatus(ctx->dev, status);
1746-
1747-	aead_unmap(ctx->dev, edesc, req);
1748-	qi_cache_free(edesc);
1749-	aead_request_complete(req, ecode);
1750-}
1751-
1752-static int aead_encrypt(struct aead_request *req)
1753+static int aead_crypt(struct aead_request *req, enum optype op)
1754 {
1755 	struct aead_edesc *edesc;
1756 	struct crypto_aead *aead = crypto_aead_reqtfm(req);
1757@@ -1326,13 +1618,13 @@ static int aead_encrypt(struct aead_request *req)
1758 	int ret;
1759
1760 	/* allocate extended descriptor */
1761-	edesc = aead_edesc_alloc(req, true);
1762+	edesc = aead_edesc_alloc(req, op == ENCRYPT);
1763 	if (IS_ERR(edesc))
1764 		return PTR_ERR(edesc);
1765
1766-	caam_req->flc = &ctx->flc[ENCRYPT];
1767-	caam_req->flc_dma = ctx->flc_dma[ENCRYPT];
1768-	caam_req->cbk = aead_encrypt_done;
1769+	caam_req->flc = &ctx->flc[op];
1770+	caam_req->flc_dma = ctx->flc_dma[op];
1771+	caam_req->cbk = aead_crypt_done;
1772 	caam_req->ctx = &req->base;
1773 	caam_req->edesc = edesc;
1774 	ret = dpaa2_caam_enqueue(ctx->dev, caam_req);
1775@@ -1345,83 +1637,88 @@ static int aead_encrypt(struct aead_request *req)
1776 	return ret;
1777 }
1778
1779+static int aead_encrypt(struct aead_request *req)
1780+{
1781+	return aead_crypt(req, ENCRYPT);
1782+}
1783+
1784 static int aead_decrypt(struct aead_request *req)
1785 {
1786-	struct aead_edesc *edesc;
1787-	struct crypto_aead *aead = crypto_aead_reqtfm(req);
1788-	struct caam_ctx *ctx = crypto_aead_ctx(aead);
1789+	return aead_crypt(req, DECRYPT);
1790+}
1791+
1792+static void tls_crypt_done(void *cbk_ctx, u32 status)
1793+{
1794+	struct crypto_async_request *areq = cbk_ctx;
1795+	struct aead_request *req = container_of(areq, struct aead_request,
1796+						base);
1797+	struct caam_request *req_ctx = to_caam_req(areq);
1798+	struct tls_edesc *edesc = req_ctx->edesc;
1799+	struct crypto_aead *tls = crypto_aead_reqtfm(req);
1800+	struct caam_ctx *ctx = crypto_aead_ctx(tls);
1801+	int ecode = 0;
1802+
1803+#ifdef DEBUG
1804+	dev_err(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
1805+#endif
1806+
1807+	if (unlikely(status))
1808+		ecode = caam_qi2_strstatus(ctx->dev, status);
1809+
1810+	tls_unmap(ctx->dev, edesc, req);
1811+	qi_cache_free(edesc);
1812+	aead_request_complete(req, ecode);
1813+}
1814+
1815+static int tls_crypt(struct aead_request *req, enum optype op)
1816+{
1817+	struct tls_edesc *edesc;
1818+	struct crypto_aead *tls = crypto_aead_reqtfm(req);
1819+	struct caam_ctx *ctx = crypto_aead_ctx(tls);
1820 	struct caam_request *caam_req = aead_request_ctx(req);
1821 	int ret;
1822
1823 	/* allocate extended descriptor */
1824-	edesc = aead_edesc_alloc(req, false);
1825+	edesc = tls_edesc_alloc(req, op == ENCRYPT);
1826 	if (IS_ERR(edesc))
1827 		return PTR_ERR(edesc);
1828
1829-	caam_req->flc = &ctx->flc[DECRYPT];
1830-	caam_req->flc_dma = ctx->flc_dma[DECRYPT];
1831-	caam_req->cbk = aead_decrypt_done;
1832+	caam_req->flc = &ctx->flc[op];
1833+	caam_req->flc_dma = ctx->flc_dma[op];
1834+	caam_req->cbk = tls_crypt_done;
1835 	caam_req->ctx = &req->base;
1836 	caam_req->edesc = edesc;
1837 	ret = dpaa2_caam_enqueue(ctx->dev, caam_req);
1838 	if (ret != -EINPROGRESS &&
1839 	    !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
1840-		aead_unmap(ctx->dev, edesc, req);
1841+		tls_unmap(ctx->dev, edesc, req);
1842 		qi_cache_free(edesc);
1843 	}
1844
1845 	return ret;
1846 }
1847
1848-static int ipsec_gcm_encrypt(struct aead_request *req)
1849+static int tls_encrypt(struct aead_request *req)
1850 {
1851-	return crypto_ipsec_check_assoclen(req->assoclen) ? : aead_encrypt(req);
1852+	return tls_crypt(req, ENCRYPT);
1853 }
1854
1855-static int ipsec_gcm_decrypt(struct aead_request *req)
1856+static int tls_decrypt(struct aead_request *req)
1857 {
1858-	return crypto_ipsec_check_assoclen(req->assoclen) ? : aead_decrypt(req);
1859+	return tls_crypt(req, DECRYPT);
1860 }
1861
1862-static void skcipher_encrypt_done(void *cbk_ctx, u32 status)
1863+static int ipsec_gcm_encrypt(struct aead_request *req)
1864 {
1865-	struct crypto_async_request *areq = cbk_ctx;
1866-	struct skcipher_request *req = skcipher_request_cast(areq);
1867-	struct caam_request *req_ctx = to_caam_req(areq);
1868-	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
1869-	struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
1870-	struct skcipher_edesc *edesc = req_ctx->edesc;
1871-	int ecode = 0;
1872-	int ivsize = crypto_skcipher_ivsize(skcipher);
1873-
1874-	dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
1875-
1876-	if (unlikely(status))
1877-		ecode = caam_qi2_strstatus(ctx->dev, status);
1878-
1879-	print_hex_dump_debug("dstiv  @" __stringify(__LINE__)": ",
1880-			     DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
1881-			     edesc->src_nents > 1 ? 100 : ivsize, 1);
1882-	caam_dump_sg("dst    @" __stringify(__LINE__)": ",
1883-		     DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
1884-		     edesc->dst_nents > 1 ? 100 : req->cryptlen, 1);
1885-
1886-	skcipher_unmap(ctx->dev, edesc, req);
1887-
1888-	/*
1889-	 * The crypto API expects us to set the IV (req->iv) to the last
1890-	 * ciphertext block (CBC mode) or last counter (CTR mode).
1891-	 * This is used e.g. by the CTS mode.
1892-	 */
1893-	if (!ecode)
1894-		memcpy(req->iv, (u8 *)&edesc->sgt[0] + edesc->qm_sg_bytes,
1895-		       ivsize);
1896+	return crypto_ipsec_check_assoclen(req->assoclen) ? : aead_encrypt(req);
1897+}
1898
1899-	qi_cache_free(edesc);
1900-	skcipher_request_complete(req, ecode);
1901+static int ipsec_gcm_decrypt(struct aead_request *req)
1902+{
1903+	return crypto_ipsec_check_assoclen(req->assoclen) ? : aead_decrypt(req);
1904 }
1905
1906-static void skcipher_decrypt_done(void *cbk_ctx, u32 status)
1907+static void skcipher_crypt_done(void *cbk_ctx, u32 status)
1908 {
1909 	struct crypto_async_request *areq = cbk_ctx;
1910 	struct skcipher_request *req = skcipher_request_cast(areq);
1911@@ -1467,7 +1764,7 @@ static inline bool xts_skcipher_ivsize(struct skcipher_request *req)
1912 	return !!get_unaligned((u64 *)(req->iv + (ivsize / 2)));
1913 }
1914
1915-static int skcipher_encrypt(struct skcipher_request *req)
1916+static int skcipher_crypt(struct skcipher_request *req, enum optype op)
1917 {
1918 	struct skcipher_edesc *edesc;
1919 	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
1920@@ -1494,7 +1791,9 @@ static int skcipher_encrypt(struct skcipher_request *req)
1921 		skcipher_request_set_crypt(&caam_req->fallback_req, req->src,
1922 					   req->dst, req->cryptlen, req->iv);
1923
1924-		return crypto_skcipher_encrypt(&caam_req->fallback_req);
1925+		return (op == ENCRYPT) ?
1926+			crypto_skcipher_encrypt(&caam_req->fallback_req) :
1927+			crypto_skcipher_decrypt(&caam_req->fallback_req);
1928 	}
1929
1930 	/* allocate extended descriptor */
1931@@ -1502,9 +1801,9 @@ static int skcipher_encrypt(struct skcipher_request *req)
1932 	if (IS_ERR(edesc))
1933 		return PTR_ERR(edesc);
1934
1935-	caam_req->flc = &ctx->flc[ENCRYPT];
1936-	caam_req->flc_dma = ctx->flc_dma[ENCRYPT];
1937-	caam_req->cbk = skcipher_encrypt_done;
1938+	caam_req->flc = &ctx->flc[op];
1939+	caam_req->flc_dma = ctx->flc_dma[op];
1940+	caam_req->cbk = skcipher_crypt_done;
1941 	caam_req->ctx = &req->base;
1942 	caam_req->edesc = edesc;
1943 	ret = dpaa2_caam_enqueue(ctx->dev, caam_req);
1944@@ -1517,54 +1816,14 @@ static int skcipher_encrypt(struct skcipher_request *req)
1945 	return ret;
1946 }
1947
1948-static int skcipher_decrypt(struct skcipher_request *req)
1949+static int skcipher_encrypt(struct skcipher_request *req)
1950 {
1951-	struct skcipher_edesc *edesc;
1952-	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
1953-	struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
1954-	struct caam_request *caam_req = skcipher_request_ctx(req);
1955-	struct dpaa2_caam_priv *priv = dev_get_drvdata(ctx->dev);
1956-	int ret;
1957-
1958-	/*
1959-	 * XTS is expected to return an error even for input length = 0
1960-	 * Note that the case input length < block size will be caught during
1961-	 * HW offloading and return an error.
1962-	 */
1963-	if (!req->cryptlen && !ctx->fallback)
1964-		return 0;
1965-
1966-	if (ctx->fallback && ((priv->sec_attr.era <= 8 && xts_skcipher_ivsize(req)) ||
1967-			      ctx->xts_key_fallback)) {
1968-		skcipher_request_set_tfm(&caam_req->fallback_req, ctx->fallback);
1969-		skcipher_request_set_callback(&caam_req->fallback_req,
1970-					      req->base.flags,
1971-					      req->base.complete,
1972-					      req->base.data);
1973-		skcipher_request_set_crypt(&caam_req->fallback_req, req->src,
1974-					   req->dst, req->cryptlen, req->iv);
1975-
1976-		return crypto_skcipher_decrypt(&caam_req->fallback_req);
1977-	}
1978-
1979-	/* allocate extended descriptor */
1980-	edesc = skcipher_edesc_alloc(req);
1981-	if (IS_ERR(edesc))
1982-		return PTR_ERR(edesc);
1983-
1984-	caam_req->flc = &ctx->flc[DECRYPT];
1985-	caam_req->flc_dma = ctx->flc_dma[DECRYPT];
1986-	caam_req->cbk = skcipher_decrypt_done;
1987-	caam_req->ctx = &req->base;
1988-	caam_req->edesc = edesc;
1989-	ret = dpaa2_caam_enqueue(ctx->dev, caam_req);
1990-	if (ret != -EINPROGRESS &&
1991-	    !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
1992-		skcipher_unmap(ctx->dev, edesc, req);
1993-		qi_cache_free(edesc);
1994-	}
1995+	return skcipher_crypt(req, ENCRYPT);
1996+}
1997
1998-	return ret;
1999+static int skcipher_decrypt(struct skcipher_request *req)
2000+{
2001+	return skcipher_crypt(req, DECRYPT);
2002 }
2003
2004 static int caam_cra_init(struct caam_ctx *ctx, struct caam_alg_entry *caam,
2005@@ -2997,6 +3256,26 @@ static struct caam_aead_alg driver_aeads[] = {
2006 			.geniv = true,
2007 		},
2008 	},
2009+	{
2010+		.aead = {
2011+			.base = {
2012+				.cra_name = "tls10(hmac(sha1),cbc(aes))",
2013+				.cra_driver_name = "tls10-hmac-sha1-cbc-aes-caam-qi2",
2014+				.cra_blocksize = AES_BLOCK_SIZE,
2015+			},
2016+			.setkey = tls_setkey,
2017+			.setauthsize = tls_setauthsize,
2018+			.encrypt = tls_encrypt,
2019+			.decrypt = tls_decrypt,
2020+			.ivsize = AES_BLOCK_SIZE,
2021+			.maxauthsize = SHA1_DIGEST_SIZE,
2022+		},
2023+		.caam = {
2024+			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
2025+			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
2026+					   OP_ALG_AAI_HMAC_PRECOMP,
2027+		},
2028+	},
2029 };
2030
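Once this entry is registered, the transform is reachable through the generic
AEAD API like any other implementation. A minimal, hedged usage sketch (error
handling trimmed; the packed authenc key is assumed to be prepared as described
for tls_setkey() above):

    #include <crypto/aead.h>

    static int try_tls10(void)
    {
            struct crypto_aead *tfm;

            /* Resolves to "tls10-hmac-sha1-cbc-aes-caam-qi2" when this driver wins */
            tfm = crypto_alloc_aead("tls10(hmac(sha1),cbc(aes))", 0, 0);
            if (IS_ERR(tfm))
                    return PTR_ERR(tfm);

            /* setkey/setauthsize/encrypt then follow the usual AEAD flow */
            crypto_free_aead(tfm);
            return 0;
    }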
2031 static void caam_skcipher_alg_init(struct caam_skcipher_alg *t_alg)
2032diff --git a/drivers/crypto/caam/caamalg_qi2.h b/drivers/crypto/caam/caamalg_qi2.h
2033index d35253407..0b610c106 100644
2034--- a/drivers/crypto/caam/caamalg_qi2.h
2035+++ b/drivers/crypto/caam/caamalg_qi2.h
2036@@ -118,6 +118,28 @@ struct aead_edesc {
2037 	struct dpaa2_sg_entry sgt[];
2038 };
2039
2040+/*
2041+ * tls_edesc - s/w-extended tls descriptor
2042+ * @src_nents: number of segments in input scatterlist
2043+ * @dst_nents: number of segments in output scatterlist
2044+ * @iv_dma: dma address of iv for checking continuity and link table
2045+ * @qm_sg_bytes: length of dma mapped h/w link table
2046+ * @qm_sg_dma: bus physical mapped address of h/w link table
2047+ * @tmp: array of scatterlists used by 'scatterwalk_ffwd'
2048+ * @dst: pointer to output scatterlist, useful for unmapping
2049+ * @sgt: the h/w link table, followed by IV
2050+ */
2051+struct tls_edesc {
2052+	int src_nents;
2053+	int dst_nents;
2054+	dma_addr_t iv_dma;
2055+	int qm_sg_bytes;
2056+	dma_addr_t qm_sg_dma;
2057+	struct scatterlist tmp[2];
2058+	struct scatterlist *dst;
2059+	struct dpaa2_sg_entry sgt[];
2060+};
2061+
2062 /*
2063  * skcipher_edesc - s/w-extended skcipher descriptor
2064  * @src_nents: number of segments in input scatterlist
2065diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c
2066index e8a6d8bc4..ae2ae28e3 100644
2067--- a/drivers/crypto/caam/caamhash.c
2068+++ b/drivers/crypto/caam/caamhash.c
2069@@ -1946,12 +1946,14 @@ int caam_algapi_hash_init(struct device *ctrldev)
2070 	 * presence and attributes of MD block.
2071 	 */
2072 	if (priv->era < 10) {
2073-		md_vid = (rd_reg32(&priv->ctrl->perfmon.cha_id_ls) &
2074+		struct caam_perfmon __iomem *perfmon = &priv->jr[0]->perfmon;
2075+
2076+		md_vid = (rd_reg32(&perfmon->cha_id_ls) &
2077 			  CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;
2078-		md_inst = (rd_reg32(&priv->ctrl->perfmon.cha_num_ls) &
2079+		md_inst = (rd_reg32(&perfmon->cha_num_ls) &
2080 			   CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;
2081 	} else {
2082-		u32 mdha = rd_reg32(&priv->ctrl->vreg.mdha);
2083+		u32 mdha = rd_reg32(&priv->jr[0]->vreg.mdha);
2084
2085 		md_vid = (mdha & CHA_VER_VID_MASK) >> CHA_VER_VID_SHIFT;
2086 		md_inst = mdha & CHA_VER_NUM_MASK;
2087diff --git a/drivers/crypto/caam/caamkeyblob.c b/drivers/crypto/caam/caamkeyblob.c
2088new file mode 100644
2089index 000000000..03821d437
2090--- /dev/null
2091+++ b/drivers/crypto/caam/caamkeyblob.c
2092@@ -0,0 +1,670 @@
2093+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
2094+/*
2095+ * Black key generation and blob encapsulation/decapsulation for CAAM
2096+ *
2097+ * Copyright 2018-2020 NXP
2098+ */
2099+#include "caamkeyblob.h"
2100+#include "error.h"
2101+
2102+/* Black key generation and blob encap/decap job completion handler */
2103+static void caam_key_blob_done(struct device *dev, u32 *desc, u32 err,
2104+			       void *context)
2105+{
2106+	struct jr_job_result *res = context;
2107+	int ecode = 0;
2108+
2109+	dev_dbg(dev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
2110+
2111+	if (err)
2112+		ecode = caam_jr_strstatus(dev, err);
2113+
2114+	/* Save the error for post-processing */
2115+	res->error = ecode;
2116+	/* Mark job as complete */
2117+	complete(&res->completion);
2118+}
2119+
2120+/**
2121+ * map_write_data   - Prepare data to be written to CAAM
2122+ *
2123+ * @dev             : struct device of the job ring to be used
2124+ * @data            : The data to be prepared
2125+ * @size            : The size of data to be prepared
2126+ * @dma_addr        : The retrieved DMA address of the input data
2127+ * @allocated_data  : Pointer to a DMA-able address where the input
2128+ *                    data is copied and synchronized
2129+ *
2130+ * Return           : '0' on success, error code otherwise
2131+ */
2132+static int map_write_data(struct device *dev, const u8 *data, size_t size,
2133+			  dma_addr_t *dma_addr, u8 **allocated_data)
2134+{
2135+	int ret = 0;
2136+
2137+	/* Allocate memory for data and copy it to DMA zone */
2138+	*allocated_data = kmemdup(data, size, GFP_KERNEL | GFP_DMA);
2139+	if (!*allocated_data) {
2140+		ret = -ENOMEM;
2141+		goto exit;
2142+	}
2143+
2144+	*dma_addr = dma_map_single(dev, *allocated_data, size, DMA_TO_DEVICE);
2145+	if (dma_mapping_error(dev, *dma_addr)) {
2146+		dev_err(dev, "Unable to map write data\n");
2147+		ret = -ENOMEM;
2148+		goto free_alloc;
2149+	}
2150+
2151+	goto exit;
2152+
2153+free_alloc:
2154+	kfree(*allocated_data);
2155+
2156+exit:
2157+	return ret;
2158+}
2159+
2160+/**
2161+ * map_read_data   - Prepare data to be read from CAAM
2162+ *
2163+ * @dev             : struct device of the job ring to be used
2164+ * @size            : The size of data to be prepared
2165+ * @dma_addr        : The retrieved DMA address of the data to be read
2166+ * @allocated_data  : Pointer to a DMA-able address where the data
2167+ *                    to be read will be copied and synchronized
2168+ *
2169+ * Return           : '0' on success, error code otherwise
2170+ */
2171+static int map_read_data(struct device *dev, size_t size, dma_addr_t *dma_addr,
2172+			 u8 **allocated_data)
2173+{
2174+	int ret = 0;
2175+
2176+	/* Allocate memory for data compatible with DMA */
2177+	*allocated_data = kmalloc(size, GFP_KERNEL | GFP_DMA);
2178+	if (!*allocated_data) {
2179+		ret = -ENOMEM;
2180+		goto exit;
2181+	}
2182+
2183+	*dma_addr = dma_map_single(dev, *allocated_data, size, DMA_FROM_DEVICE);
2184+	if (dma_mapping_error(dev, *dma_addr)) {
2185+		dev_err(dev, "Unable to map read data\n");
2186+		ret = -ENOMEM;
2187+		goto free_alloc;
2188+	}
2189+
2190+	goto exit;
2191+
2192+free_alloc:
2193+	kfree(*allocated_data);
2194+
2195+exit:
2196+	return ret;
2197+}
2198+
2199+/**
2200+ * read_map_data   - Read the data from CAAM
2201+ *
2202+ * @dev             : struct device of the job ring to be used
2203+ * @data            : The read data from CAAM will be copied here
2204+ * @dma_addr        : The DMA address of the data to be read
2205+ * @allocated_data  : Pointer to a DMA-able address where the data
2206+ *                    to be read is
2207+ * @size            : The size of data to be read
2208+ */
2209+static void read_map_data(struct device *dev, u8 *data, dma_addr_t dma_addr,
2210+			  u8 *allocated_data, size_t size)
2211+{
2212+	/* Synchronize the DMA and copy the data */
2213+	dma_sync_single_for_cpu(dev, dma_addr, size, DMA_FROM_DEVICE);
2214+	memcpy(data, allocated_data, size);
2215+}
2216+
2217+/**
2218+ * unmap_read_write_data - Unmap the data needed for or from CAAM
2219+ *
2220+ * @dev             : struct device of the job ring to be used
2221+ * @dma_addr        : The DMA address of the data used for DMA transfer
2222+ * @allocated_data  : The data used for DMA transfer
2223+ * @size            : The size of data
2224+ * @dir             : The DMA_API direction
2225+ */
2226+static void unmap_read_write_data(struct device *dev, dma_addr_t dma_addr,
2227+				  u8 *allocated_data, size_t size,
2228+				  enum dma_data_direction dir)
2229+{
2230+	/* Free the resources and clear the data */
2231+	dma_unmap_single(dev, dma_addr, size, dir);
2232+	kfree_sensitive(allocated_data);
2233+}
2234+
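Together, these helpers implement one DMA round trip against CAAM: map the input
for DMA_TO_DEVICE, map the output for DMA_FROM_DEVICE, run the job, then copy
the result back and tear both mappings down. A hedged sketch of the intended
calling pattern (descriptor construction and job execution elided; the function
is illustrative only):

    static int example_round_trip(struct device *dev, const u8 *in,
                                  size_t in_len, u8 *out, size_t out_len)
    {
            dma_addr_t in_dma, out_dma;
            u8 *in_buf, *out_buf;
            int ret;

            ret = map_write_data(dev, in, in_len, &in_dma, &in_buf);
            if (ret)
                    return ret;

            ret = map_read_data(dev, out_len, &out_dma, &out_buf);
            if (ret)
                    goto unmap_in;

            /* ... build a descriptor using in_dma/out_dma and run it here ... */

            read_map_data(dev, out, out_dma, out_buf, out_len);
            unmap_read_write_data(dev, out_dma, out_buf, out_len, DMA_FROM_DEVICE);
    unmap_in:
            unmap_read_write_data(dev, in_dma, in_buf, in_len, DMA_TO_DEVICE);
            return ret;
    }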
2235+/**
2236+ * get_caam_dma_addr - Get the CAAM DMA address of a physical address.
2237+ *
2238+ * @phy_address     : The physical address
2239+ *
2240+ * Return           : The CAAM DMA address
2241+ */
2242+static dma_addr_t get_caam_dma_addr(const void *phy_address)
2243+{
2244+	uintptr_t ptr_conv;
2245+	dma_addr_t caam_dma_address = 0;
2246+
2247+	/* Check if conversion is possible */
2248+	if (sizeof(caam_dma_address) < sizeof(phy_address)) {
2249+		/*
2250+		 * Check that all bits sets in the phy_address
2251+		 * can be stored in caam_dma_address
2252+		 */
2253+
2254+		/* Generate a mask of the representable bits */
2255+		u64 mask = GENMASK_ULL(sizeof(caam_dma_address) * 8 - 1, 0);
2256+
2257+		/*
2258+		 * Check that the bits not representable of
2259+		 * the physical address are not set
2260+		 */
2261+		if ((uintptr_t)phy_address & ~mask)
2262+			goto exit;
2263+	}
2264+
2265+	/* Convert address to caam_dma_address */
2266+	ptr_conv = (uintptr_t)phy_address;
2267+	caam_dma_address = (dma_addr_t)ptr_conv;
2268+
2269+exit:
2270+	return caam_dma_address;
2271+}
2272+
2273+/**
2274+ * generate_black_key - Generate a black key either from a plaintext or from
2275+ *                      random data, based on the given input: a size for a
2276+ *                      random black key, or a plaintext (input key).
2277+ *
2278+ * If the memory type is Secure Memory, the key to cover is read
2279+ * directly by CAAM from Secure Memory without intermediate copy.
2280+ * The value of the input key (plaintext) must be a physical address
2281+ * in Secure Memory.
2282+ *
2283+ * Notes:
2284+ * Limited to Class 1 keys, at the present time.
2285+ * The input and output data are copied to temporary arrays
2286+ * except for the input key if the memory type is Secure Memory.
2287+ * For now, we support black keys stored in General Memory.
2288+ *
2289+ * @dev             : struct device of the job ring to be used
2290+ * @info            : keyblob_info structure, will be updated with
2291+ *                    the black key data from CAAM.
2292+ *                    It also contains all the data necessary to generate
2293+ *                    a black key from plaintext or random data: key encryption
2294+ *                    key, memory type, input key, etc.
2295+ *
2296+ * Return           : '0' on success, error code otherwise
2297+ */
2298+int generate_black_key(struct device *dev, struct keyblob_info *info)
2299+{
2300+	int ret = 0;
2301+	bool not_random = false;
2302+	u8 trusted_key, key_enc;
2303+	u32 *desc = NULL;
2304+	size_t black_key_length_req = 0;
2305+	dma_addr_t black_key_dma;
2306+	u8 *tmp_black_key = NULL;
2307+
2308+	/* Validate device */
2309+	if (!dev)
2310+		return -EINVAL;
2311+
2312+	/*
2313+	 * If an input key (plaintext) is given,
2314+	 * generate a black key from it, not from random
2315+	 */
2316+	if (info->key)
2317+		not_random = true;
2318+
2319+	/* Get trusted key and key encryption type from type */
2320+	trusted_key = (info->type >> TAG_OBJ_TK_OFFSET) & 0x1;
2321+	key_enc = (info->type >> TAG_OBJ_EKT_OFFSET) & 0x1;
2322+
2323+	dev_dbg(dev, "%s input: [key: (%zu) black_key: %p(%zu), key_enc: %x]\n",
2324+		__func__, info->key_len, info->black_key, info->black_key_len,
2325+		key_enc);
2326+	if (not_random)
2327+		print_hex_dump_debug("input key @" __stringify(__LINE__) ": ",
2328+				     DUMP_PREFIX_ADDRESS, 16, 4, info->key,
2329+				     info->key_len, 1);
2330+
2331+	/* Validate key type - only JDKEK keys are supported */
2332+	if (!is_key_type(info->type) || is_trusted_type(info->type))
2333+		return -EINVAL;
2334+
2335+	/*
2336+	 * Validate key size, expected values are
2337+	 * between 16 and 64 bytes.
2338+	 * See TODO from cnstr_desc_black_key().
2339+	 */
2340+	if (info->key_len < MIN_KEY_SIZE || info->key_len > MAX_KEY_SIZE)
2341+		return -EINVAL;
2342+
2343+	/*
2344+	 * Based on key encryption type (ecb or ccm),
2345+	 * compute the black key size
2346+	 */
2347+	if (key_enc == KEY_COVER_ECB)
2348+		/*
2349+		 * ECB-Black Key will be padded with zeros to make it a
2350+		 * multiple of 16 bytes long before it is encrypted,
2351+		 * and the resulting Black Key will be this length.
2352+		 */
2353+		black_key_length_req = ECB_BLACK_KEY_SIZE(info->key_len);
2354+	else if (key_enc == KEY_COVER_CCM)
2355+		/*
2356+		 * CCM-Black Key will always be at least 12 bytes longer,
2357+		 * since the encapsulation uses a 6-byte nonce and adds
2358+		 * a 6-byte ICV. But first, the key is padded as necessary so
2359+		 * that CCM-Black Key is a multiple of 8 bytes long.
2360+		 */
2361+		black_key_length_req = CCM_BLACK_KEY_SIZE(info->key_len);
2362+
2363+	/* Check if there is enough space for black key */
2364+	if (info->black_key_len < black_key_length_req) {
2365+		info->black_key_len = black_key_length_req;
2366+		return -EINVAL;
2367+	}
2368+
2369+	/* Black key will have at least the same length as the input key */
2370+	info->black_key_len = info->key_len;
2371+
2372+	dev_dbg(dev, "%s processing: [key: (%zu) black_key: %p(%zu)",
2373+		__func__, info->key_len, info->black_key, info->black_key_len);
2374+	dev_dbg(dev, "req:%zu, key_enc: 0x%x]\n", black_key_length_req, key_enc);
2375+
2376+	/* Map black key, this will be read from CAAM */
2377+	if (map_read_data(dev, black_key_length_req,
2378+			  &black_key_dma, &tmp_black_key)) {
2379+		dev_err(dev, "Unable to map black key\n");
2380+		ret = -ENOMEM;
2381+		goto exit;
2382+	}
2383+
2384+	/* Construct descriptor for black key */
2385+	if (not_random)
2386+		ret = cnstr_desc_black_key(&desc, info->key, info->key_len,
2387+					   black_key_dma, info->black_key_len,
2388+					   key_enc, trusted_key);
2389+	else
2390+		ret = cnstr_desc_random_black_key(&desc, info->key_len,
2391+						  black_key_dma,
2392+						  info->black_key_len,
2393+						  key_enc, trusted_key);
2394+
2395+	if (ret) {
2396+		dev_err(dev,
2397+			"Failed to construct the descriptor for black key\n");
2398+		goto unmap_black_key;
2399+	}
2400+
2401+	/* Execute descriptor and wait for its completion */
2402+	ret = caam_jr_run_and_wait_for_completion(dev, desc,
2403+						  caam_key_blob_done);
2404+	if (ret) {
2405+		dev_err(dev, "Failed to execute black key descriptor\n");
2406+		goto free_desc;
2407+	}
2408+
2409+	/* Read black key from CAAM */
2410+	read_map_data(dev, info->black_key, black_key_dma,
2411+		      tmp_black_key, black_key_length_req);
2412+
2413+	/* Update black key length with the correct size */
2414+	info->black_key_len = black_key_length_req;
2415+
2416+free_desc:
2417+	kfree(desc);
2418+
2419+unmap_black_key:
2420+	unmap_read_write_data(dev, black_key_dma, tmp_black_key,
2421+			      black_key_length_req, DMA_FROM_DEVICE);
2422+
2423+exit:
2424+	return ret;
2425+}
2426+EXPORT_SYMBOL(generate_black_key);
2427+
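The size rules spelled out in the comments above can be made concrete. Assuming
ECB_BLACK_KEY_SIZE() rounds the key length up to a multiple of 16 and
CCM_BLACK_KEY_SIZE() rounds it up to a multiple of 8 before adding the 12-byte
nonce-plus-ICV overhead, exactly as the in-line comments describe, a 20-byte
input key works out as follows (illustrative arithmetic only):

    /*
     * key_len = 20 bytes:
     *
     *   ECB: 20 padded up to 32                  => 32-byte black key
     *   CCM: 20 padded up to 24, + 6-byte nonce
     *        + 6-byte ICV                        => 36-byte black key
     */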
2428+/**
2429+ * caam_blob_encap - Encapsulate a black key into a blob
2430+ *
2431+ * If the memory type is Secure Memory, the key to encapsulate is read
2432+ * directly by CAAM from Secure Memory without intermediate copy.
2433+ * The value of the key (black key) must be a physical address
2434+ * in Secure Memory.
2435+ *
2436+ * Notes:
2437+ * For now, we support black keys stored in General Memory,
2438+ * encapsulated into black blobs.
2439+ *
2440+ * @dev             : struct device of the job ring to be used
2441+ * @info            : keyblob_info structure, will be updated with
2442+ *                    the blob data from CAAM.
2443+ *                    It also contains all the data necessary to
2444+ *                    encapsulate a black key into a blob: key encryption
2445+ *                    key, memory type, color, etc.
2446+ *
2447+ * Return           : '0' on success, error code otherwise
2448+ */
2449+int caam_blob_encap(struct device *dev, struct keyblob_info *info)
2450+{
2451+	int ret = 0;
2452+	u32 *desc = NULL;
2453+	size_t black_key_real_len = 0;
2454+	size_t blob_req_len = 0;
2455+	u8 mem_type, color, key_enc, trusted_key;
2456+	dma_addr_t black_key_dma, blob_dma;
2457+	unsigned char *blob = info->blob;
2458+	u8 *tmp_black_key = NULL, *tmp_blob = NULL;
2459+
2460+	/* Validate device */
2461+	if (!dev)
2462+		return -EINVAL;
2463+
2464+	/*
2465+	 * Get memory type, trusted key, key encryption
2466+	 * type and color from type
2467+	 */
2468+	mem_type = (info->type >> TAG_OBJ_MEM_OFFSET) & 0x1;
2469+	color = (info->type >> TAG_OBJ_COLOR_OFFSET) & 0x1;
2470+	key_enc = (info->type >> TAG_OBJ_EKT_OFFSET) & 0x1;
2471+	trusted_key = (info->type >> TAG_OBJ_TK_OFFSET) & 0x1;
2472+
2473+	/* Validate input data */
2474+	if (!info->key_mod || !blob)
2475+		return -EINVAL;
2476+
2477+	/* Validate object type - only JDKEK keys are supported */
2478+	if (is_trusted_type(info->type))
2479+		return -EINVAL;
2480+
2481+	dev_dbg(dev, "%s input:[black_key: %p (%zu) color: %x, key_enc: %x",
2482+		__func__, info->black_key, info->black_key_len, color, key_enc);
2483+	dev_dbg(dev, ", key_mod: %p (%zu)", info->key_mod, info->key_mod_len);
2484+	dev_dbg(dev, "blob: %p (%zu)]\n", blob, info->blob_len);
2485+
2486+	/*
2487+	 * Based on memory type, the key modifier length
2488+	 * can be 8-byte or 16-byte.
2489+	 */
2490+	if (mem_type == DATA_SECMEM)
2491+		info->key_mod_len = KEYMOD_SIZE_SM;
2492+	else
2493+		info->key_mod_len = KEYMOD_SIZE_GM;
2494+
2495+	/* Adapt the size of the black key */
2496+	black_key_real_len = info->black_key_len;
2497+
2498+	blob_req_len = CCM_BLACK_KEY_SIZE(info->key_len);
2499+
2500+	/* Check if the blob can be stored */
2501+	if (info->blob_len < (blob_req_len + BLOB_OVERHEAD))
2502+		return -EINVAL;
2503+
2504+	/* Update the blob length */
2505+	info->blob_len = blob_req_len + BLOB_OVERHEAD;
2506+
2507+	dev_dbg(dev, "%s processing: [black_key: %p (%zu) cnstr: %zu",
2508+		__func__, info->black_key, info->black_key_len,
2509+		black_key_real_len);
2510+	dev_dbg(dev, " color: %x key_enc: %x, mem_type: %x,",
2511+		color, key_enc, mem_type);
2512+	dev_dbg(dev, ", key_mod: %p (%zu) ", info->key_mod, info->key_mod_len);
2513+	dev_dbg(dev, "blob: %p (%zu)]\n", blob, info->blob_len);
2514+
2515+	/* Map black key, this will be transferred to CAAM */
2516+	if (mem_type == DATA_GENMEM) {
2517+		if (map_write_data(dev, info->black_key, info->black_key_len,
2518+				   &black_key_dma, &tmp_black_key)) {
2519+			dev_err(dev, "Unable to map black key for blob\n");
2520+			ret = -ENOMEM;
2521+			goto exit;
2522+		}
2523+	} else {
2524+		black_key_dma = get_caam_dma_addr(info->black_key);
2525+		if (!black_key_dma)
2526+			return -ENOMEM;
2527+	}
2528+
2529+	/* Map blob, this will be read to CAAM */
2530+	if (mem_type == DATA_GENMEM) {
2531+		if (map_read_data(dev, info->blob_len, &blob_dma, &tmp_blob)) {
2532+			dev_err(dev, "Unable to map blob\n");
2533+			ret = -ENOMEM;
2534+			goto unmap_black_key;
2535+		}
2536+	} else {
2537+		blob_dma = get_caam_dma_addr(info->blob);
2538+		if (!blob_dma)
2539+			return -ENOMEM;
2540+	}
2541+
2542+	/* Construct descriptor for blob encapsulation */
2543+	ret = cnstr_desc_blob_encap(&desc, black_key_dma, info->key_len,
2544+				    color, key_enc, trusted_key, mem_type,
2545+				    info->key_mod, info->key_mod_len,
2546+				    blob_dma, info->blob_len);
2547+	if (ret) {
2548+		dev_err(dev,
2549+			"Failed to construct the descriptor for blob encap\n");
2550+		goto unmap_blob;
2551+	}
2552+
2553+	/* Execute descriptor and wait for its completion */
2554+	ret = caam_jr_run_and_wait_for_completion(dev, desc,
2555+						  caam_key_blob_done);
2556+	if (ret) {
2557+		dev_err(dev, "Failed to execute blob encap descriptor\n");
2558+		goto free_desc;
2559+	}
2560+
2561+	/* Read blob from CAAM */
2562+	if (mem_type == DATA_GENMEM)
2563+		read_map_data(dev, blob, blob_dma, tmp_blob, info->blob_len);
2564+
2565+	print_hex_dump_debug("blob @" __stringify(__LINE__) ": ",
2566+			     DUMP_PREFIX_ADDRESS, 16, 4, blob,
2567+			     info->blob_len, 1);
2568+free_desc:
2569+	kfree(desc);
2570+
2571+unmap_blob:
2572+	if (mem_type == DATA_GENMEM)
2573+		unmap_read_write_data(dev, blob_dma, tmp_blob,
2574+				      info->blob_len, DMA_FROM_DEVICE);
2575+
2576+unmap_black_key:
2577+	if (mem_type == DATA_GENMEM)
2578+		unmap_read_write_data(dev, black_key_dma, tmp_black_key,
2579+				      info->black_key_len, DMA_TO_DEVICE);
2580+
2581+exit:
2582+	return ret;
2583+}
2584+EXPORT_SYMBOL(caam_blob_encap);
2585+
2586+/**
2587+ * caam_blob_decap - Decapsulate a black key from a blob
2588+ *
2589+ * Notes:
2590+ * For now, we support black blobs stored in General Memory, which
2591+ * can be decapsulated into black keys.
2592+ *
2593+ * @dev             : struct device of the job ring to be used
2594+ * @info            : keyblob_info structure, will be updated with
2595+ *                    the black key decapsulated from the blob.
2596+ *                    It also contains all the data necessary to
2597+ *                    decapsulate a black key from a blob: key encryption
2598+ *                    key, memory type, color, etc.
2599+ *
2600+ * Return           : '0' on success, error code otherwise
2601+ */
2602+int caam_blob_decap(struct device *dev, struct keyblob_info *info)
2603+{
2604+	int ret = 0;
2605+	u32 *desc = NULL;
2606+	u8 mem_type, color, key_enc, trusted_key;
2607+	size_t black_key_real_len;
2608+	dma_addr_t black_key_dma, blob_dma;
2609+	unsigned char *blob = info->blob + TAG_OVERHEAD_SIZE;
2610+	u8 *tmp_black_key = NULL, *tmp_blob = NULL;
2611+
2612+	/* Validate device */
2613+	if (!dev)
2614+		return -EINVAL;
2615+
2616+	/*
2617+	 * Get memory type, trusted key, key encryption
2618+	 * type and color from type
2619+	 */
2620+	mem_type = (info->type >> TAG_OBJ_MEM_OFFSET) & 0x1;
2621+	color = (info->type >> TAG_OBJ_COLOR_OFFSET) & 0x1;
2622+	key_enc = (info->type >> TAG_OBJ_EKT_OFFSET) & 0x1;
2623+	trusted_key = (info->type >> TAG_OBJ_TK_OFFSET) & 0x1;
2624+
2625+	/* Validate input data */
2626+	if (!info->key_mod || !blob)
2627+		return -EINVAL;
2628+
2629+	dev_dbg(dev, "%s input: [blob: %p (%zu), mem_type: %x, color: %x",
2630+		__func__, blob, info->blob_len, mem_type, color);
2631+	dev_dbg(dev, " keymod: %p (%zu)", info->key_mod, info->key_mod_len);
2632+	dev_dbg(dev, " secret: %p (%zu) key_enc: %x]\n",
2633+		info->black_key, info->black_key_len, key_enc);
2634+
2635+	/* Validate object type - only JDKEK keys are supported */
2636+	if (is_trusted_type(info->type))
2637+		return -EINVAL;
2638+
2639+	print_hex_dump_debug("blob @" __stringify(__LINE__) ": ",
2640+			     DUMP_PREFIX_ADDRESS, 16, 4, blob,
2641+			     info->blob_len, 1);
2642+
2643+	/*
2644+	 * Based on memory type, the key modifier length
2645+	 * can be 8-byte or 16-byte.
2646+	 */
2647+	if (mem_type == DATA_SECMEM)
2648+		info->key_mod_len = KEYMOD_SIZE_SM;
2649+	else
2650+		info->key_mod_len = KEYMOD_SIZE_GM;
2651+
2652+	/* Check if the blob is valid */
2653+	if (info->blob_len <= BLOB_OVERHEAD)
2654+		return -EINVAL;
2655+
2656+	/* Initialize black key length */
2657+	black_key_real_len = info->blob_len - BLOB_OVERHEAD;
2658+
2659+	/* Check if the black key has enough space to be stored */
2660+	if (info->black_key_len < black_key_real_len)
2661+		return -EINVAL;
2662+
2663+	/*
2664+	 * Based on key encryption type (ecb or ccm),
2665+	 * compute the black key size
2666+	 */
2667+	if (key_enc == KEY_COVER_ECB)
2668+		/*
2669+		 * ECB-Black Key will be padded with zeros to make it a
2670+		 * multiple of 16 bytes long before it is encrypted,
2671+		 * and the resulting Black Key will be this length.
2672+		 */
2673+		black_key_real_len = ECB_BLACK_KEY_SIZE(info->key_len);
2674+	else if (key_enc == KEY_COVER_CCM)
2675+		/*
2676+		 * CCM-Black Key will always be at least 12 bytes longer,
2677+		 * since the encapsulation uses a 6-byte nonce and adds
2678+		 * a 6-byte ICV. But first, the key is padded as necessary so
2679+		 * that CCM-Black Key is a multiple of 8 bytes long.
2680+		 */
2681+		black_key_real_len = CCM_BLACK_KEY_SIZE(info->key_len);
2682+
2683+	/* Check if there is enough space for black key */
2684+	if (info->black_key_len < black_key_real_len)
2685+		return -EINVAL;
2686+
2687+	/* Update black key length with the one computed based on key_enc */
2688+	info->black_key_len = black_key_real_len;
2689+
2690+	dev_dbg(dev, "%s processing: [blob: %p (%zu), mem_type: %x, color: %x,",
2691+		__func__, blob, info->blob_len, mem_type, color);
2692+	dev_dbg(dev, " key_mod: %p (%zu), black_key: %p (%zu) real_len: %zu]\n",
2693+		info->key_mod, info->key_mod_len, info->black_key,
2694+		info->black_key_len, black_key_real_len);
2695+
2696+	/* Map blob, this will be transferred to CAAM */
2697+	if (mem_type == DATA_GENMEM) {
2698+		if (map_write_data(dev, blob, info->blob_len,
2699+				   &blob_dma, &tmp_blob)) {
2700+			dev_err(dev, "Unable to map blob for decap\n");
2701+			ret = -ENOMEM;
2702+			goto exit;
2703+		}
2704+	} else {
2705+		blob_dma = get_caam_dma_addr(blob);
2706+		if (!blob_dma)
2707+			return -ENOMEM;
2708+	}
2709+
2710+	/* Map black key, this will be read from CAAM */
2711+	if (mem_type == DATA_GENMEM) {
2712+		if (map_read_data(dev, info->black_key_len,
2713+				  &black_key_dma, &tmp_black_key)) {
2714+			dev_err(dev, "Unable to map black key for blob decap\n");
2715+			ret = -ENOMEM;
2716+			goto unmap_blob;
2717+		}
2718+	} else {
2719+		black_key_dma = get_caam_dma_addr(info->black_key);
2720+		if (!black_key_dma)
2721+			return -ENOMEM;
2722+	}
2723+
2724+	ret = cnstr_desc_blob_decap(&desc, blob_dma, info->blob_len,
2725+				    info->key_mod, info->key_mod_len,
2726+				    black_key_dma, info->key_len,
2727+				    color, key_enc, trusted_key, mem_type);
2728+	if (ret) {
2729+		dev_err(dev,
2730+			"Failed to construct the descriptor for blob decap\n");
2731+		goto unmap_black_key;
2732+	}
2733+
2734+	ret = caam_jr_run_and_wait_for_completion(dev, desc,
2735+						  caam_key_blob_done);
2736+	if (ret) {
2737+		dev_err(dev, "Failed to execute blob decap descriptor\n");
2738+		goto free_desc;
2739+	}
2740+
2741+	/* Read black key from CAAM */
2742+	if (mem_type == DATA_GENMEM)
2743+		read_map_data(dev, info->black_key, black_key_dma,
2744+			      tmp_black_key, info->black_key_len);
2745+
2746+free_desc:
2747+	kfree(desc);
2748+
2749+unmap_black_key:
2750+	if (mem_type == DATA_GENMEM)
2751+		unmap_read_write_data(dev, black_key_dma, tmp_black_key,
2752+				      info->black_key_len, DMA_FROM_DEVICE);
2753+
2754+unmap_blob:
2755+	if (mem_type == DATA_GENMEM)
2756+		unmap_read_write_data(dev, blob_dma, tmp_blob,
2757+				      info->blob_len, DMA_TO_DEVICE);
2758+
2759+exit:
2760+	return ret;
2761+}
2762+EXPORT_SYMBOL(caam_blob_decap);
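Taken together, the three exported entry points cover the whole black key life
cycle. A hedged sketch of a generate/encap/decap round trip driven through
struct keyblob_info (job-ring device acquisition, the tag-object bits in .type,
and tag handling around the blob are elided; all field values are illustrative):

    static int example_keyblob_round_trip(struct device *jrdev)
    {
            struct keyblob_info info = {
                    .key = NULL,            /* NULL => generate the key from RNG */
                    .key_len = 32,          /* within the 16..64 byte window */
                    .black_key_len = sizeof(info.black_key),
                    .blob_len = sizeof(info.blob),
                    .key_mod = caam_key_modifier,
                    .key_mod_len = sizeof(caam_key_modifier),
            };
            int ret;

            /* info.type must first be filled via the tag object helpers (elided) */

            ret = generate_black_key(jrdev, &info);
            if (ret)
                    return ret;

            ret = caam_blob_encap(jrdev, &info);
            if (ret)
                    return ret;

            /* ... persist info.blob; later, recover the black key from it ... */
            return caam_blob_decap(jrdev, &info);
    }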
2763diff --git a/drivers/crypto/caam/caamkeyblob.h b/drivers/crypto/caam/caamkeyblob.h
2764new file mode 100644
2765index 000000000..495b74c29
2766--- /dev/null
2767+++ b/drivers/crypto/caam/caamkeyblob.h
2768@@ -0,0 +1,82 @@
2769+/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
2770+/*
2771+ * Black key generation and blob encapsulation/decapsulation for CAAM
2772+ *
2773+ * Copyright 2018-2020 NXP
2774+ */
2775+
2776+#ifndef _CAAMKEYBLOB_H_
2777+#define _CAAMKEYBLOB_H_
2778+
2779+#include <linux/device.h>
2780+#include "caamkeyblob_desc.h"
2781+
2782+/*
2783+ * Minimum key size to be used is 16 bytes and maximum key size fixed
2784+ * is 64 bytes.
2785+ * Blob size to be kept is Maximum key size + tag object header added by CAAM.
2786+ */
2787+
2788+#define MIN_KEY_SIZE			16
2789+#define MAX_KEY_SIZE			64
2790+
2791+#define MAX_BLACK_KEY_SIZE		(MAX_KEY_SIZE + CCM_OVERHEAD +\
2792+					TAG_OVERHEAD_SIZE)
2793+
2794+/*
2795+ * For blobs a randomly-generated, 256-bit blob key is used to
2796+ * encrypt the data using the AES-CCM cryptographic algorithm.
2797+ * Therefore, blob size is max key size, CCM_OVERHEAD, blob header
2798+ * added by CAAM and the tagged object header size.
2799+ */
2800+#define MAX_BLOB_SIZE			(MAX_KEY_SIZE + CCM_OVERHEAD +\
2801+					BLOB_OVERHEAD + TAG_OVERHEAD_SIZE)
2802+
2803+/* Key modifier for CAAM blobs, used as a revision number */
2804+static const char caam_key_modifier[KEYMOD_SIZE_GM] = {
2805+		'C', 'A', 'A', 'M', '_', 'K', 'E', 'Y',
2806+		'_', 'T', 'Y', 'P', 'E', '_', 'V', '1',
2807+};
2808+
2809+/**
2810+ * struct keyblob_info - Structure that contains all the data necessary
2811+ *                       to generate a black key and encapsulate it into a blob
2812+ *
2813+ * @key                : The plaintext used as input key
2814+ *                       for black key generation
2815+ * @key_len            : Size of plaintext or size of key in case of
2816+ *                       black key generated from random
2817+ * @type               : The type of data contained (e.g. black key, blob, etc.)
2818+ * @black_key_len      : Length of the generated black key
2819+ * @black_key          : Black key data obtained from CAAM
2820+ * @blob_len           : Length of the blob that encapsulates the black key
2821+ * @blob               : Blob data obtained from CAAM
2822+ * @key_mod_len        : 8-byte or 16-byte key modifier length, based on
2823+ *                       general or secure memory blob type
2824+ * @key_mod            : can be either a secret value, or used as a revision
2825+ *                       number, revision date or nonce.
2826+ *                       In this case it is used as a revision number.
2827+ */
2828+struct keyblob_info {
2829+	char *key;
2830+	size_t key_len;
2831+
2832+	u32 type;
2833+
2834+	size_t black_key_len;
2835+	unsigned char black_key[MAX_BLACK_KEY_SIZE];
2836+
2837+	size_t blob_len;
2838+	unsigned char blob[MAX_BLOB_SIZE];
2839+
2840+	size_t key_mod_len;
2841+	const void *key_mod;
2842+};
2843+
2844+int generate_black_key(struct device *dev, struct keyblob_info *info);
2845+
2846+int caam_blob_encap(struct device *dev, struct keyblob_info *info);
2847+
2848+int caam_blob_decap(struct device *dev, struct keyblob_info *info);
2849+
2850+#endif /* _CAAMKEYBLOB_H_ */
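The limits above chain together arithmetically. A worked expansion, assuming
CCM_OVERHEAD is the 12-byte nonce-plus-ICV overhead described in the descriptor
code and BLOB_OVERHEAD is the 48-byte blob key-encryption-key/MAC overhead
quoted in the encapsulation comments (TAG_OVERHEAD_SIZE is defined by the
tag-object headers and left symbolic here):

    /*
     * MAX_BLACK_KEY_SIZE = 64 + 12 + TAG_OVERHEAD_SIZE
     * MAX_BLOB_SIZE      = 64 + 12 + 48 + TAG_OVERHEAD_SIZE
     *                    = 124 + TAG_OVERHEAD_SIZE bytes
     */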
2851diff --git a/drivers/crypto/caam/caamkeyblob_desc.c b/drivers/crypto/caam/caamkeyblob_desc.c
2852new file mode 100644
2853index 000000000..628873167
2854--- /dev/null
2855+++ b/drivers/crypto/caam/caamkeyblob_desc.c
2856@@ -0,0 +1,446 @@
2857+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
2858+/*
2859+ * Shared descriptors for CAAM black key
2860+ * and blob encapsulation/decapsulation
2861+ *
2862+ * Copyright 2018-2020 NXP
2863+ */
2864+#include "caamkeyblob_desc.h"
2865+
2866+/* Size of temporary buffer for descriptor construction */
2867+#define INITIAL_DESCSZ 64
2868+
2869+/*
2870+ * Construct a black key conversion job descriptor
2871+ *
2872+ * This function constructs a job descriptor capable of performing
2873+ * a key blackening operation on a plaintext secure memory resident object.
2874+ *
2875+ * @desc          : Pointer to a pointer to the descriptor generated by this
2876+ *                  function. Caller will be responsible to kfree() this
2877+ *                  descriptor after execution.
2878+ * @key           : Pointer to the plaintext input key. The covered
2879+ *                  (blackened) result is written by CAAM to the buffer at
2880+ *                  @black_key, which must be large enough to accommodate
2881+ *                  the blackened key
2882+ * @key_len       : Size of the plaintext
2883+ * @black_key     : DMA address of the black key obtained from hardware
2884+ * @black_key_len : Size of the black key
2885+ * @key_enc       : Encrypted Key Type (AES-ECB or AES-CCM)
2886+ * @trusted_key   : Trusted Key (use Job Descriptor Key Encryption Key (JDKEK)
2887+ *                  or Trusted Descriptor Key Encryption Key (TDKEK) to
2888+ *                  decrypt the key to be loaded into a Key Register).
2889+ *
2890+ * Return         : '0' on success, error code otherwise
2891+ */
2892+int cnstr_desc_black_key(u32 **desc, char *key, size_t key_len,
2893+			 dma_addr_t black_key, size_t black_key_len,
2894+			 u8 key_enc, u8 trusted_key)
2895+{
2896+	u32 *tdesc, tmpdesc[INITIAL_DESCSZ];
2897+	u16 dsize;
2898+	u32 bk_store;
2899+	u32 key_length_for_desc = key_len;
2900+
2901+	/* Trusted key not supported */
2902+	if (trusted_key != UNTRUSTED_KEY)
2903+		return -EOPNOTSUPP;
2904+
2905+	memset(tmpdesc, 0, sizeof(tmpdesc));
2906+
2907+	init_job_desc(tmpdesc, 0);
2908+
2909+	/*
2910+	 * The KEY command seems limited to 32 bytes, so we use the LOAD
2911+	 * command instead, which can load up to 64 bytes.
2912+	 * The key size must also be loaded.
2913+	 *
2914+	 * TODO: The KEY command documentation indicates it should be able to
2915+	 * load keys bigger than 32 bytes, but that doesn't work in practice.
2916+	 *
2917+	 * TODO: The LOAD command documentation indicates it should be able to
2918+	 * load keys of up to 96 bytes, but in practice it is limited to 64 bytes.
2919+	 */
2920+
2921+	/* Load key to class 1 key register */
2922+	append_load_as_imm(tmpdesc, (void *)key, key_length_for_desc,
2923+			   LDST_CLASS_1_CCB | LDST_SRCDST_BYTE_KEY);
2924+
2925+	/* Load the size of the key */
2926+	append_load_imm_u32(tmpdesc, key_length_for_desc, LDST_CLASS_1_CCB |
2927+			    LDST_IMM | LDST_SRCDST_WORD_KEYSZ_REG);
2928+
2929+	/* ...and write back out via FIFO store */
2930+	bk_store = CLASS_1;
2931+	if (key_enc == KEY_COVER_ECB)
2932+		bk_store |= FIFOST_TYPE_KEY_KEK;
2933+	else
2934+		bk_store |= FIFOST_TYPE_KEY_CCM_JKEK;
2935+
2936+	/* Save the key as black key in memory */
2937+	append_fifo_store(tmpdesc, black_key, black_key_len, bk_store);
2938+
2939+	dsize = desc_bytes(&tmpdesc);
2940+
2941+	/* Now allocate execution buffer and coat it with executable */
2942+	tdesc = kmemdup(tmpdesc, dsize, GFP_KERNEL | GFP_DMA);
2943+	if (!tdesc)
2944+		return -ENOMEM;
2945+
2946+	*desc = tdesc;
2947+
2948+	print_hex_dump_debug("black key desc@" __stringify(__LINE__) ":",
2949+			     DUMP_PREFIX_ADDRESS, 16, 4, *desc,
2950+			     desc_bytes(*desc), 1);
2951+
2952+	return 0;
2953+}
2954+EXPORT_SYMBOL(cnstr_desc_black_key);
2955+
2956+/*
2957+ * Construct a black key using RNG job descriptor
2958+ *
2959+ * This function constructs a job descriptor capable of performing
2960+ * a key blackening operation on RNG-generated data.
2961+ *
2962+ * @desc          : Pointer to a pointer to the descriptor generated by this
2963+ *                  function. Caller will be responsible to kfree() this
2964+ *                  descriptor after execution.
2965+ * @key_len       : Size of the random plaintext
2966+ * @black_key     : DMA address of the black key obtained from hardware
2967+ * @black_key_len : Size of the black key
2968+ * @key_enc       : Encrypted Key Type (AES-ECB or AES-CCM)
2969+ * @trusted_key   : Trusted Key (use Job Descriptor Key Encryption Key (JDKEK)
2970+ *                  or Trusted Descriptor Key Encryption Key (TDKEK) to
2971+ *                  decrypt the key to be loaded into a Key Register).
2972+ *
2973+ * Return         : '0' on success, error code otherwise
2974+ */
2975+int cnstr_desc_random_black_key(u32 **desc, size_t key_len,
2976+				dma_addr_t black_key, size_t black_key_len,
2977+				u8 key_enc, u8 trusted_key)
2978+{
2979+	u32 *tdesc, tmpdesc[INITIAL_DESCSZ];
2980+	u16 dsize;
2981+	u32 bk_store;
2982+
2983+	memset(tmpdesc, 0, sizeof(tmpdesc));
2984+
2985+	init_job_desc(tmpdesc, 0);
2986+
2987+	/* Prepare RNG */
2988+	append_operation(tmpdesc, OP_ALG_ALGSEL_RNG | OP_TYPE_CLASS1_ALG);
2989+
2990+	/* Generate random data and leave it in the output data FIFO */
2991+	append_cmd(tmpdesc, CMD_FIFO_STORE | FIFOST_TYPE_RNGFIFO | key_len);
2992+
2993+	/* Copy RNG from outfifo to class 1 Key register */
2994+	append_move(tmpdesc, MOVE_SRC_OUTFIFO | MOVE_DEST_CLASS1KEY |
2995+			MOVE_WAITCOMP | (key_len & MOVE_LEN_MASK));
2996+
2997+	/* Write the size of the key moved */
2998+	append_load_imm_u32(tmpdesc, key_len, LDST_CLASS_1_CCB |
2999+			    LDST_SRCDST_WORD_KEYSZ_REG | LDST_IMM);
3000+
3001+	bk_store = CLASS_1;
3002+	if (key_enc == KEY_COVER_ECB)
3003+		bk_store |= FIFOST_TYPE_KEY_KEK;
3004+	else
3005+		bk_store |= FIFOST_TYPE_KEY_CCM_JKEK;
3006+
3007+	/* Fifo store to save the key as black key in memory */
3008+	append_fifo_store(tmpdesc, black_key, black_key_len, bk_store);
3009+
3010+	dsize = desc_bytes(&tmpdesc);
3011+
3012+	/* Now allocate execution buffer and coat it with executable */
3013+	tdesc = kmemdup(tmpdesc, dsize, GFP_KERNEL | GFP_DMA);
3014+	if (!tdesc)
3015+		return -ENOMEM;
3016+
3017+	*desc = tdesc;
3018+
3019+	print_hex_dump_debug("black key random desc@" __stringify(__LINE__) ":",
3020+			     DUMP_PREFIX_ADDRESS, 16, 4, *desc,
3021+			     desc_bytes(*desc), 1);
3022+
3023+	return 0;
3024+}
3025+EXPORT_SYMBOL(cnstr_desc_random_black_key);
3026+
3027+/*
3028+ * Construct a blob encapsulation job descriptor
3029+ *
3030+ * This function dynamically constructs a blob encapsulation job descriptor
3031+ * from the following arguments:
3032+ *
3033+ * @desc          : Pointer to a pointer to the descriptor generated by this
3034+ *                  function. Caller will be responsible to kfree() this
3035+ *                  descriptor after execution.
3036+ * @black_key     : Physical pointer to a secret, normally a black or red key,
3037+ *                  possibly residing within an accessible secure memory page,
3038+ *                  of the secret to be encapsulated to an output blob.
3039+ * @black_key_len : Size of input secret, in bytes. This is limited to 65536
3040+ *                  less the size of blob overhead, since the length is
3041+ *                  embedded into DECO pointer in/out instructions.
3042+ * @keycolor      : Determines if the source data is covered (black key) or
3043+ *                  plaintext (red key). RED_KEY or BLACK_KEY are defined
3044+ *                  for this purpose.
3045+ * @key_enc       : If BLACK_KEY source is covered via AES-CCM, specify
3046+ *                  KEY_COVER_CCM, else uses AES-ECB (KEY_COVER_ECB).
3047+ * @trusted_key   : Trusted Key (use Job Descriptor Key Encryption Key (JDKEK)
3048+ *                  or Trusted Descriptor Key Encryption Key (TDKEK) to
3049+ *                  decrypt the key to be loaded into a Key Register).
3050+ * @mem_type      : Determine if encapsulated blob should be a secure memory
3051+ *                  blob (DATA_SECMEM), with partition data embedded with key
3052+ *                  material, or a general memory blob (DATA_GENMEM).
3053+ * @key_mod       : Pointer to a key modifier, which must reside in a
3054+ *                  contiguous piece of memory. Modifier will be assumed to be
3055+ *                  8 bytes long for a blob of type DATA_SECMEM, or 16 bytes
3056+ *                  long for a blob of type DATA_GENMEM
3057+ * @key_mod_len   : Modifier length is 8 bytes long for a blob of type
3058+ *                  DATA_SECMEM, or 16 bytes long for a blob of type DATA_GENMEM
3059+ * @blob          : Physical pointer to the destination buffer to receive the
3060+ *                  encapsulated output. This buffer will need to be 48 bytes
3061+ *                  larger than the input because of the added encapsulation
3062+ *                  data. The generated descriptor will account for the
3063+ *                  increase in size, but the caller must also account for
3064+ *                  this increase in the buffer allocator.
3065+ * @blob_len      : Size of the destination buffer to receive the
3066+ *                  encapsulated output.
3067+ * Return         : '0' on success, error code otherwise
3068+ *
3069+ * Upon completion, desc points to a buffer containing a CAAM job
3070+ * descriptor which encapsulates data into an externally-storable blob
3071+ * suitable for use across power cycles.
3072+ *
3073+ * This is an example of a black key encapsulation job into a general memory
3074+ * blob. Notice the 16-byte key modifier in the LOAD instruction. Also note
3075+ * the output 48 bytes longer than the input:
3076+ *
3077+ * [00] B0800008       jobhdr: stidx=0 len=8
3078+ * [01] 14400010           ld: ccb2-key len=16 offs=0
3079+ * [02] 08144891               ptr->@0x08144891
3080+ * [03] F800003A    seqoutptr: len=58
3081+ * [04] 01000000               out_ptr->@0x01000000
3082+ * [05] F000000A     seqinptr: len=10
3083+ * [06] 09745090               in_ptr->@0x09745090
3084+ * [07] 870D0004    operation: encap blob  reg=memory, black, format=normal
3085+ *
3086+ * This is an example of a red key encapsulation job for storing a red key
3087+ * into a secure memory blob. Note the 8 byte modifier on the 12 byte offset
3088+ * in the LOAD instruction; this accounts for blob permission storage:
3089+ *
3090+ * [00] B0800008       jobhdr: stidx=0 len=8
3091+ * [01] 14400C08           ld: ccb2-key len=8 offs=12
3092+ * [02] 087D0784               ptr->@0x087d0784
3093+ * [03] F8000050    seqoutptr: len=80
3094+ * [04] 09251BB2               out_ptr->@0x09251bb2
3095+ * [05] F0000020     seqinptr: len=32
3096+ * [06] 40000F31               in_ptr->@0x40000f31
3097+ * [07] 870D0008    operation: encap blob  reg=memory, red, sec_mem,
3098+ *                             format=normal
3099+ */
3100+int cnstr_desc_blob_encap(u32 **desc, dma_addr_t black_key,
3101+			  size_t key_len, u8 keycolor, u8 key_enc,
3102+			  u8 trusted_key, u8 mem_type, const void *key_mod,
3103+			  size_t key_mod_len, dma_addr_t blob, size_t blob_len)
3104+{
3105+	u32 *tdesc, tmpdesc[INITIAL_DESCSZ];
3106+	u16 dsize;
3107+	u32 bk_store;
3108+
3109+	/* Trusted key not supported */
3110+	if (trusted_key != UNTRUSTED_KEY)
3111+		return -EOPNOTSUPP;
3112+
3113+	memset(tmpdesc, 0, sizeof(tmpdesc));
3114+
3115+	init_job_desc(tmpdesc, 0);
3116+
3117+	/*
3118+	 * Key modifier works differently for secure/general memory blobs
3119+	 * This accounts for the permission/protection data encapsulated
3120+	 * within the blob if a secure memory blob is requested
3121+	 */
3122+	if (mem_type == DATA_SECMEM)
3123+		append_load_as_imm(tmpdesc, key_mod, key_mod_len,
3124+				   LDST_CLASS_2_CCB | LDST_SRCDST_BYTE_KEY |
3125+				   ((12 << LDST_OFFSET_SHIFT) &
3126+				    LDST_OFFSET_MASK));
3127+	else /* is general memory blob */
3128+		append_load_as_imm(tmpdesc, key_mod, key_mod_len,
3129+				   LDST_CLASS_2_CCB | LDST_SRCDST_BYTE_KEY);
3130+
3131+	/* Input data, should be somewhere in secure memory */
3132+	append_seq_in_ptr_intlen(tmpdesc, black_key, key_len, 0);
3133+
3134+	/*
3135+	 * Encapsulation output must include space for blob key encryption
3136+	 * key and MAC tag
3137+	 */
3138+	append_seq_out_ptr_intlen(tmpdesc, blob, CCM_BLACK_KEY_SIZE(key_len) +
3139+				  BLOB_OVERHEAD, 0);
3140+
3141+	bk_store = OP_PCLID_BLOB;
3142+	if (mem_type == DATA_SECMEM)
3143+		bk_store |= OP_PCL_BLOB_PTXT_SECMEM;
3144+
3145+	if (key_enc == KEY_COVER_CCM)
3146+		bk_store |= OP_PCL_BLOB_EKT;
3147+
3148+	/* An input black key cannot be stored in a red blob */
3149+	if (keycolor == BLACK_KEY)
3150+		bk_store |= OP_PCL_BLOB_BLACK;
3151+
3152+	/* Set blob encap, then color */
3153+	append_operation(tmpdesc, OP_TYPE_ENCAP_PROTOCOL | bk_store);
3154+
3155+	dsize = desc_bytes(&tmpdesc);
3156+
3157+	tdesc = kmemdup(tmpdesc, dsize, GFP_KERNEL | GFP_DMA);
3158+	if (!tdesc)
3159+		return -ENOMEM;
3160+
3161+	*desc = tdesc;
3162+
3163+	print_hex_dump_debug("blob encap desc@" __stringify(__LINE__) ":",
3164+			     DUMP_PREFIX_ADDRESS, 16, 4, *desc,
3165+			     desc_bytes(*desc), 1);
3166+	return 0;
3167+}
3168+EXPORT_SYMBOL(cnstr_desc_blob_encap);
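
The sizing contract above can be made concrete with a short caller-side sketch. This is an illustration, not part of the patch: black_key_dma, blob_dma and key_mod stand in for an already DMA-mapped covered key, a DMA-mapped destination buffer and a 16-byte modifier, and error handling is elided.

    /* Minimal sketch, assuming the definitions from caamkeyblob_desc.h */
    size_t key_len = 32;                              /* size of the secret */
    size_t covered_len = CCM_BLACK_KEY_SIZE(key_len); /* 32 + 12 = 44 */
    size_t blob_len = covered_len + BLOB_OVERHEAD;    /* 44 + 48 = 92 */
    u32 *desc;
    int err;

    err = cnstr_desc_blob_encap(&desc, black_key_dma, key_len,
                                BLACK_KEY, KEY_COVER_CCM, UNTRUSTED_KEY,
                                DATA_GENMEM, key_mod, KEYMOD_SIZE_GM,
                                blob_dma, blob_len);
    if (!err) {
            /* run the descriptor on a job ring, then release it */
            kfree(desc);
    }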
3169+
3170+/*
3171+ * Construct a blob decapsulation job descriptor
3172+ *
3173+ * This function dynamically constructs a blob decapsulation job descriptor
3174+ * from the following arguments:
3175+ *
3176+ * @desc          : Pointer to a pointer to the descriptor generated by this
3177+ *                  function. The caller is responsible for kfree()ing this
3178+ *                  descriptor after execution.
3179+ * @blob          : Physical pointer (into external memory) of the blob to
3180+ *                  be decapsulated. Blob must reside in a contiguous memory
3181+ *                  segment.
3182+ * @blob_len      : Size of the blob buffer to be decapsulated.
3183+ * @key_mod       : Pointer to a key modifier, which must reside in a
3184+ *                  contiguous piece of memory. The modifier is assumed to be
3185+ *                  8 bytes long for a blob of type DATA_SECMEM, or 16 bytes
3186+ *                  long for a blob of type DATA_GENMEM.
3187+ * @key_mod_len   : Length of the key modifier: 8 bytes for a blob of type
3188+ *                  DATA_SECMEM, or 16 bytes for a blob of type DATA_GENMEM.
3189+ * @black_key     : Physical pointer of the decapsulated output, possibly into
3190+ *                  a location within a secure memory page. Must be contiguous.
3191+ * @plaintext_len : Size of the encapsulated secret in bytes (not the size
3192+ *                  of the input blob).
3193+ * @keycolor      : Determines if the source data is covered (black key) or
3194+ *                  plaintext (red key). RED_KEY or BLACK_KEY are defined
3195+ *                  for this purpose.
3196+ * @key_enc       : If BLACK_KEY source is covered via AES-CCM, specify
3197+ *                  KEY_COVER_CCM; otherwise AES-ECB (KEY_COVER_ECB) is used.
3198+ * @trusted_key   : Trusted Key (use Job Descriptor Key Encryption Key (JDKEK)
3199+ *                  or Trusted Descriptor Key Encryption Key (TDKEK) to
3200+ *                  decrypt the key to be loaded into a Key Register).
3201+ * @mem_type      : Determines whether the blob to be decapsulated is a
3202+ *                  secure memory blob (DATA_SECMEM), with partition data
3203+ *                  embedded with the key material, or a general memory blob (DATA_GENMEM).
3204+ * Return         : '0' on success, error code otherwise
3205+ *
3206+ * Upon completion, desc points to a buffer containing a CAAM job descriptor
3207+ * that decapsulates a key blob from external memory into a black (encrypted)
3208+ * key or red (plaintext) content.
3209+ *
3210+ * This is an example of a black key decapsulation job from a general memory
3211+ * blob. Notice the 16-byte key modifier in the LOAD instruction.
3212+ *
3213+ * [00] B0800008       jobhdr: stidx=0 len=8
3214+ * [01] 14400010           ld: ccb2-key len=16 offs=0
3215+ * [02] 08A63B7F               ptr->@0x08a63b7f
3216+ * [03] F8000010    seqoutptr: len=16
3217+ * [04] 01000000               out_ptr->@0x01000000
3218+ * [05] F000003A     seqinptr: len=58
3219+ * [06] 01000010               in_ptr->@0x01000010
3220+ * [07] 860D0004    operation: decap blob  reg=memory, black, format=normal
3221+ *
3222+ * This is an example of a red key decapsulation job for restoring a red key
3223+ * from a secure memory blob. Note the 8-byte modifier at offset 12
3224+ * in the LOAD instruction:
3225+ *
3226+ * [00] B0800008       jobhdr: stidx=0 len=8
3227+ * [01] 14400C08           ld: ccb2-key len=8 offs=12
3228+ * [02] 01000000               ptr->@0x01000000
3229+ * [03] F8000020    seqoutptr: len=32
3230+ * [04] 400000E6               out_ptr->@0x400000e6
3231+ * [05] F0000050     seqinptr: len=80
3232+ * [06] 08F0C0EA               in_ptr->@0x08f0c0ea
3233+ * [07] 860D0008    operation: decap blob  reg=memory, red, sec_mem,
3234+ *                             format=normal
3235+ */
3236+int cnstr_desc_blob_decap(u32 **desc, dma_addr_t blob, size_t blob_len,
3237+			  const void *key_mod, size_t key_mod_len,
3238+			  dma_addr_t black_key, size_t plaintext_len,
3239+			  u8 keycolor, u8 key_enc, u8 trusted_key, u8 mem_type)
3240+{
3241+	u32 *tdesc, tmpdesc[INITIAL_DESCSZ];
3242+	u16 dsize;
3243+	u32 bk_store;
3244+
3245+	/* Trusted key not supported */
3246+	if (trusted_key != UNTRUSTED_KEY)
3247+		return -EOPNOTSUPP;
3248+
3249+	memset(tmpdesc, 0, sizeof(tmpdesc));
3250+
3251+	init_job_desc(tmpdesc, 0);
3252+
3253+	/* Load key modifier */
3254+	if (mem_type == DATA_SECMEM)
3255+		append_load_as_imm(tmpdesc, key_mod, key_mod_len,
3256+				   LDST_CLASS_2_CCB | LDST_SRCDST_BYTE_KEY |
3257+				   ((12 << LDST_OFFSET_SHIFT) &
3258+				    LDST_OFFSET_MASK));
3259+	else /* is general memory blob */
3260+		append_load_as_imm(tmpdesc, key_mod, key_mod_len,
3261+				   LDST_CLASS_2_CCB | LDST_SRCDST_BYTE_KEY);
3262+
3263+	/* Compensate blob header + MAC tag over size of encapsulated secret */
3264+	append_seq_in_ptr_intlen(tmpdesc, blob, plaintext_len + BLOB_OVERHEAD,
3265+				 0);
3266+
3267+	append_seq_out_ptr_intlen(tmpdesc, black_key, plaintext_len, 0);
3268+
3269+	/* Decapsulate the blob into a black key or red plaintext */
3270+	bk_store = OP_PCLID_BLOB;
3271+	if (mem_type == DATA_SECMEM)
3272+		bk_store |= OP_PCL_BLOB_PTXT_SECMEM;
3273+
3274+	if (key_enc == KEY_COVER_CCM)
3275+		bk_store |= OP_PCL_BLOB_EKT;
3276+
3277+	/* An input black key cannot be stored in a red blob */
3278+	if (keycolor == BLACK_KEY)
3279+		bk_store |= OP_PCL_BLOB_BLACK;
3280+
3281+	/* Set blob decap, then color */
3282+	append_operation(tmpdesc, OP_TYPE_DECAP_PROTOCOL | bk_store);
3283+
3284+	dsize = desc_bytes(&tmpdesc);
3285+
3286+	tdesc = kmemdup(tmpdesc, dsize, GFP_KERNEL | GFP_DMA);
3287+	if (!tdesc)
3288+		return -ENOMEM;
3289+
3290+	*desc = tdesc;
3291+
3292+	print_hex_dump_debug("blob decap desc@" __stringify(__LINE__) ":",
3293+			     DUMP_PREFIX_ADDRESS, 16, 4, *desc,
3294+			     desc_bytes(*desc), 1);
3295+
3296+	return 0;
3297+}
3298+EXPORT_SYMBOL(cnstr_desc_blob_decap);
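
Decapsulation mirrors this arithmetic from the other side: the caller passes the size of the secret stored in the blob, and the constructor adds BLOB_OVERHEAD itself when programming the input sequence. A sketch under the same assumptions as the encapsulation example above; secret_len must match the size used when the blob was created.

    u32 *desc;
    int err;

    err = cnstr_desc_blob_decap(&desc, blob_dma, blob_len,
                                key_mod, KEYMOD_SIZE_GM,
                                black_key_dma, secret_len,
                                BLACK_KEY, KEY_COVER_CCM,
                                UNTRUSTED_KEY, DATA_GENMEM);
    /* run the descriptor on a job ring, then kfree(desc) as above */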
3299+
3300+MODULE_LICENSE("Dual BSD/GPL");
3301+MODULE_DESCRIPTION("NXP CAAM Black Key and Blob descriptors");
3302+MODULE_AUTHOR("NXP Semiconductors");
3303diff --git a/drivers/crypto/caam/caamkeyblob_desc.h b/drivers/crypto/caam/caamkeyblob_desc.h
3304new file mode 100644
3305index 000000000..0affbb184
3306--- /dev/null
3307+++ b/drivers/crypto/caam/caamkeyblob_desc.h
3308@@ -0,0 +1,101 @@
3309+/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
3310+/*
3311+ * Shared descriptors for CAAM black key and blob
3312+ *
3313+ * Copyright 2018-2020 NXP
3314+ */
3315+
3316+#ifndef _CAAMKEYBLOB_DESC_H_
3317+#define _CAAMKEYBLOB_DESC_H_
3318+
3319+#include <linux/types.h>
3320+
3321+#include "jr.h"
3322+#include "regs.h"
3323+#include "desc.h"
3324+
3325+#include "compat.h"
3326+#include "tag_object.h"
3327+#include "desc_constr.h"
3328+
3329+/* Defines for secure memory and general memory blobs */
3330+#define DATA_GENMEM 0
3331+#define DATA_SECMEM 1
3332+
3333+/* Encrypted key */
3334+#define BLACK_KEY 1
3335+
3336+/* Define key encryption/covering options */
3337+#define KEY_COVER_ECB 0 /* cover the key with AES-ECB */
3338+#define KEY_COVER_CCM 1 /* cover the key with AES-CCM */
3339+
3340+/* Define the trust in the key, to select either JDKEK or TDKEK */
3341+#define UNTRUSTED_KEY 0
3342+#define TRUSTED_KEY 1
3343+
3344+/* Define space required for BKEK + MAC tag storage in any blob */
3345+#define BLOB_OVERHEAD (32 + 16)
3346+
3347+#define PAD_16_BYTE(_key_size) (roundup(_key_size, 16))
3348+#define PAD_8_BYTE(_key_size) (roundup(_key_size, 8))
3349+
3350+/*
3351+ * ECB-Black Key will be padded with zeros to make it a
3352+ * multiple of 16 bytes long before it is encrypted,
3353+ * and the resulting Black Key will be this length.
3354+ */
3355+#define ECB_BLACK_KEY_SIZE(_key_size) (PAD_16_BYTE(_key_size))
3356+
3357+/*
3358+ * A CCM-Black Key is always at least 12 bytes longer than the
3359+ * plain key, since the encapsulation adds a 6-byte nonce and
3360+ * a 6-byte ICV. The key is first padded as necessary to a
3361+ * multiple of 8 bytes before the nonce and ICV are added.
3362+ */
3363+#define NONCE_SIZE 6
3364+#define ICV_SIZE 6
3365+#define CCM_OVERHEAD (NONCE_SIZE + ICV_SIZE)
3366+#define CCM_BLACK_KEY_SIZE(_key_size) (PAD_8_BYTE(_key_size) \
3367+							+ CCM_OVERHEAD)
3368+
3369+static inline int secret_size_in_ccm_black_key(int key_size)
3370+{
3371+	return ((key_size >= CCM_OVERHEAD) ? key_size - CCM_OVERHEAD : 0);
3372+}
3373+
3374+#define SECRET_SIZE_IN_CCM_BLACK_KEY(_key_size) \
3375+	secret_size_in_ccm_black_key(_key_size)
3376+
3377+/* A red key is not encrypted so its size is the same */
3378+#define RED_KEY_SIZE(_key_size) (_key_size)
3379+
3380+/*
3381+ * Based on memory type, the key modifier length
3382+ * can be either 8-byte or 16-byte.
3383+ */
3384+#define KEYMOD_SIZE_SM 8
3385+#define KEYMOD_SIZE_GM 16
3386+
3387+/* Create job descriptor to cover key */
3388+int cnstr_desc_black_key(u32 **desc, char *key, size_t key_len,
3389+			 dma_addr_t black_key, size_t black_key_len,
3390+			 u8 key_enc, u8 trusted_key);
3391+
3392+/* Create job descriptor to generate a random key and cover it */
3393+int cnstr_desc_random_black_key(u32 **desc, size_t key_len,
3394+				dma_addr_t black_key, size_t black_key_len,
3395+				u8 key_enc, u8 trusted_key);
3396+
3397+/* Encapsulate data in a blob */
3398+int cnstr_desc_blob_encap(u32 **desc, dma_addr_t black_key,
3399+			  size_t key_len, u8 keycolor, u8 key_enc,
3400+			  u8 trusted_key, u8 mem_type, const void *key_mod,
3401+			  size_t key_mod_len, dma_addr_t blob, size_t blob_len);
3402+
3403+/* Decapsulate data from a blob */
3404+int cnstr_desc_blob_decap(u32 **desc, dma_addr_t blob, size_t blob_len,
3405+			  const void *key_mod, size_t key_mod_len,
3406+			  dma_addr_t black_key, size_t plaintext_len,
3407+			  u8 keycolor, u8 key_enc, u8 trusted_key, u8 mem_type);
3408+
3409+#endif /* _CAAMKEYBLOB_DESC_H_ */
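
As a worked example of the size macros above (illustrative, not part of the header), a 20-byte secret grows as follows; note that the CCM inverse recovers the padded secret length, not the original one:

    size_t klen = 20;
    size_t ecb  = ECB_BLACK_KEY_SIZE(klen);          /* roundup(20, 16) = 32 */
    size_t ccm  = CCM_BLACK_KEY_SIZE(klen);          /* roundup(20, 8) + 12 = 36 */
    size_t blob = ccm + BLOB_OVERHEAD;               /* 36 + 48 = 84 */
    size_t back = SECRET_SIZE_IN_CCM_BLACK_KEY(ccm); /* 36 - 12 = 24 = PAD_8_BYTE(20) */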
3410diff --git a/drivers/crypto/caam/caamkeygen.c b/drivers/crypto/caam/caamkeygen.c
3411new file mode 100644
3412index 000000000..9d4d909a6
3413--- /dev/null
3414+++ b/drivers/crypto/caam/caamkeygen.c
3415@@ -0,0 +1,630 @@
3416+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
3417+/*
3418+ * Copyright 2020 NXP
3419+ */
3420+
3421+#include <linux/types.h>
3422+#include <linux/kernel.h>
3423+#include <linux/fs.h>
3424+#include <linux/miscdevice.h>
3425+#include <linux/device.h>
3426+#include <linux/slab.h>
3427+#include <linux/uaccess.h>
3428+#include <linux/ioctl.h>
3429+
3430+#include "caamkeyblob.h"
3431+#include "intern.h"
3432+#include <linux/caam_keygen.h>
3433+
3434+#define DEVICE_NAME "caam-keygen"
3435+
3436+static long caam_keygen_ioctl(struct file *file, unsigned int cmd,
3437+			      unsigned long arg);
3438+
3439+/**
3440+ * tag_black_obj      - Tag a black object (key/blob) with a tag object header.
3441+ *
3442+ * @info              : keyblob_info structure, which contains
3443+ *                      the black key/blob, obtained from CAAM,
3444+ *                      that needs to be tagged
3445+ * @black_max_len     : The maximum size of the black object (blob/key)
3446+ * @blob              : True if the object to tag is a blob, false if it is a key
3447+ *
3448+ * Return             : '0' on success, error code otherwise
3449+ */
3450+static int tag_black_obj(struct keyblob_info *info, size_t black_max_len,
3451+			 bool blob)
3452+{
3453+	struct header_conf tag;
3454+	u32 type;
3455+	int ret;
3456+	u32 size_tagged = black_max_len;
3457+
3458+	if (!info)
3459+		return -EINVAL;
3460+
3461+	type = info->type;
3462+
3463+	/* Prepare and set the tag */
3464+	if (blob) {
3465+		init_tag_object_header(&tag, 0, type, info->key_len,
3466+				       info->blob_len);
3467+		ret = set_tag_object_header_conf(&tag, info->blob,
3468+						 info->blob_len,
3469+						 &size_tagged);
3470+	} else {
3471+		init_tag_object_header(&tag, 0, type, info->key_len,
3472+				       info->black_key_len);
3473+		ret = set_tag_object_header_conf(&tag, info->black_key,
3474+						 info->black_key_len,
3475+						 &size_tagged);
3476+	}
3477+	if (ret)
3478+		return ret;
3479+
3480+	/* Update the size of the tagged object */
3481+	if (blob)
3482+		info->blob_len = size_tagged;
3483+	else
3484+		info->black_key_len = size_tagged;
3485+
3486+	return ret;
3487+}
3488+
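The tagging step prepends a small header to the object. Its layout, inferred from the usage in this file (the definitions live in tag_object.h), is roughly:

    /*
     *   +--------------------------------+ <- start of tagged object
     *   | struct header_conf             |   TAG_OVERHEAD_SIZE bytes:
     *   |   .type, .red_key_len,         |   object type, red key size
     *   |   .obj_len                     |   and payload length
     *   +--------------------------------+
     *   | payload (black key or blob)    |   .obj_len bytes
     *   +--------------------------------+
     */
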
3489+/**
3490+ * send_err_msg      - Send the error message from kernel to user-space
3491+ *
3492+ * @msg              : The message to be sent
3493+ * @output           : The output buffer where we want to copy the error msg
3494+ * @size             : The size of output buffer
3495+ */
3496+static void send_err_msg(char *msg, void __user *output, size_t size)
3497+{
3498+	size_t min_s;
3499+	char null_ch = 0;
3500+
3501+	/* Not enough space to copy any message */
3502+	if (size <= 1)
3503+		return;
3504+
3505+	min_s = min(size - 1, strlen(msg));
3506+	/*
3507+	 * Avoid compile and checkpatch warnings, since we don't
3508+	 * care about return value from copy_to_user
3509+	 */
3510+	(void)(copy_to_user(output, msg, min_s) + 1);
3511+	/* Copy null terminator */
3512+	(void)(copy_to_user((output + min_s), &null_ch, 1) + 1);
3513+}
3514+
3515+/**
3516+ * validate_key_size - Validate the key size from user.
3517+ *                     This can be the exact size given by user when
3518+ *                     generating a black key from random (with -s),
3519+ *                     or the size of the plaintext (with -t).
3520+ *
3521+ * @key_len          : The size of the key we want to validate
3522+ * @output           : The output buffer where we want to copy the error msg
3523+ * @size             : The size of the output buffer
3524+ *
3525+ * Return             : '0' on success, error code otherwise
3526+ */
3527+static int validate_key_size(size_t key_len, void __user *output, size_t size)
3528+{
3529+	char *msg = NULL;
3530+
3531+	if (key_len < MIN_KEY_SIZE || key_len > MAX_KEY_SIZE) {
3532+		msg = "Invalid key size, expected values are between 16 and 64 bytes.\n";
3533+		send_err_msg(msg, output, size);
3534+		return -EINVAL;
3535+	}
3536+
3537+	return 0;
3538+}
3539+
3540+/**
3541+ * validate_input    - Validate the input from user and set the
3542+ *                     keyblob_info structure.
3543+ *                     This contains the input key when a black key is
3544+ *                     generated from plaintext, or the size when it is
3545+ *                     generated from random.
3546+ *
3547+ * @key_crt          : Structure with data from user
3548+ * @arg              : User-space argument from ioctl call
3549+ * @info             : keyblob_info structure, will be updated with all the
3550+ *                     data from user-space
3551+ * @create_key_op    : Used to determine if it's a create or import operation
3552+ *
3553+ * Return            : '0' on success, error code otherwise
3554+ */
3555+static int validate_input(struct caam_keygen_cmd *key_crt, unsigned long arg,
3556+			  struct keyblob_info *info, bool create_key_op)
3557+{
3558+	char *tmp, *msg;
3559+	size_t tmp_size;
3560+	bool random = false;
3561+	int ret = 0;
3562+	u32 tmp_len = 0;
3563+	char null_ch = 0;
3564+
3565+	/*
3566+	 * So far, we only support Black keys, encrypted with JDKEK,
3567+	 * kept in general memory, non-secure state.
3568+	 * Therefore, default value for type is 1.
3569+	 */
3570+	u32 type = 1;
3571+
3572+	if (copy_from_user(key_crt, (void __user *)arg,
3573+			   sizeof(struct caam_keygen_cmd)))
3574+		return -EFAULT;
3575+
3576+	/* Get blob_len from user. */
3577+	info->blob_len = key_crt->blob_len;
3578+	/* Get black_key_len from user. */
3579+	info->black_key_len = key_crt->black_key_len;
3580+
3581+	/*
3582+	 * Based on operation type validate a different set of input data.
3583+	 *
3584+	 * For key creation, validate the Encrypted Key Type,
3585+	 * the Key Mode and Key Value
3586+	 */
3587+	if (create_key_op) {
3588+		/*
3589+		 * Validate arguments received from user.
3590+		 * These must be at least 1 since
3591+		 * they have null terminator.
3592+		 */
3593+		if (key_crt->key_enc_len < 1 || key_crt->key_mode_len < 1 ||
3594+		    key_crt->key_value_len < 1) {
3595+			msg = "Invalid arguments.\n";
3596+			send_err_msg(msg, u64_to_user_ptr(key_crt->blob),
3597+				     key_crt->blob_len);
3598+			return -EFAULT;
3599+		}
3600+		/*
3601+		 * Allocate memory for temporary buffer used to
3602+		 * get the user arguments from user-space
3603+		 */
3604+		tmp_size = max_t(size_t, key_crt->key_enc_len,
3605+				 max_t(size_t, key_crt->key_mode_len,
3606+				       key_crt->key_value_len)) + 1;
3607+		tmp = kmalloc(tmp_size, GFP_KERNEL);
3608+		if (!tmp) {
3609+			msg = "Unable to allocate memory for temporary buffer.\n";
3610+			send_err_msg(msg, u64_to_user_ptr(key_crt->blob),
3611+				     key_crt->blob_len);
3612+			return -ENOMEM;
3613+		}
3614+		/* Add null terminator */
3615+		tmp[tmp_size - 1] = null_ch;
3616+		/*
3617+		 * Validate and set, in type, the Encrypted Key Type
3618+		 * given from user-space.
3619+		 * This must be ecb or ccm.
3620+		 */
3621+		if (copy_from_user(tmp, u64_to_user_ptr(key_crt->key_enc),
3622+				   key_crt->key_enc_len)) {
3623+			msg = "Unable to copy from user the Encrypted Key Type.\n";
3624+			send_err_msg(msg, u64_to_user_ptr(key_crt->blob),
3625+				     key_crt->blob_len);
3626+			ret = -EFAULT;
3627+			goto free_resource;
3628+		}
3629+		if (!strcmp(tmp, "ccm")) {
3630+			type |= BIT(TAG_OBJ_EKT_OFFSET);
3631+		} else if (strcmp(tmp, "ecb")) {
3632+			msg = "Invalid argument for Encrypted Key Type, expected ecb or ccm.\n";
3633+			send_err_msg(msg, u64_to_user_ptr(key_crt->blob),
3634+				     key_crt->blob_len);
3635+			ret = -EINVAL;
3636+			goto free_resource;
3637+		}
3638+		/*
3639+		 * Validate the Key Mode given from user-space.
3640+		 * This must be -t (text), for a black key generated
3641+		 * from a plaintext, or -s (size) for a black key
3642+		 * generated from random.
3643+		 */
3644+		if (copy_from_user(tmp, u64_to_user_ptr(key_crt->key_mode),
3645+				   key_crt->key_mode_len)) {
3646+			msg = "Unable to copy from user the Key Mode: random (-s) or plaintext (-t).\n";
3647+			send_err_msg(msg, u64_to_user_ptr(key_crt->blob),
3648+				     key_crt->blob_len);
3649+			ret = -EFAULT;
3650+			goto free_resource;
3651+		}
3652+		if (!strcmp(tmp, "-s")) {
3653+			random = true; /* black key generated from random */
3654+		} else if (strcmp(tmp, "-t")) {
3655+			msg = "Invalid argument for Key Mode, expected -s or -t.\n";
3656+			send_err_msg(msg, u64_to_user_ptr(key_crt->blob),
3657+				     key_crt->blob_len);
3658+			ret = -EINVAL;
3659+			goto free_resource;
3660+		}
3661+		/*
3662+		 * Validate and set, into keyblob_info structure,
3663+		 * the plaintext or key size, based on Key Mode.
3664+		 */
3665+		if (copy_from_user(tmp, u64_to_user_ptr(key_crt->key_value),
3666+				   key_crt->key_value_len)) {
3667+			msg = "Unable to copy from user the Key Value: size or plaintext.\n";
3668+			send_err_msg(msg, u64_to_user_ptr(key_crt->blob),
3669+				     key_crt->blob_len);
3670+			ret = -EFAULT;
3671+			goto free_resource;
3672+		}
3673+		/* Black key generated from random, get its size */
3674+		if (random) {
3675+			info->key = NULL;
3676+			ret = kstrtou32(tmp, 10, &tmp_len);
3677+			if (ret != 0) {
3678+				msg = "Invalid key size.\n";
3679+				send_err_msg(msg, u64_to_user_ptr(key_crt->blob),
3680+					     key_crt->blob_len);
3681+				goto free_resource;
3682+			}
3683+			ret = validate_key_size(tmp_len,
3684+						u64_to_user_ptr(key_crt->blob),
3685+						key_crt->blob_len);
3686+			if (ret)
3687+				goto free_resource;
3688+
3689+			info->key_len = tmp_len;
3690+		} else {
3691+			/*
3692+			 * Black key generated from plaintext,
3693+			 * get the plaintext (input key) and its size
3694+			 */
3695+			ret = validate_key_size(strlen(tmp),
3696+						u64_to_user_ptr(key_crt->blob),
3697+						key_crt->blob_len);
3698+			if (ret)
3699+				goto free_resource;
3700+
3701+			info->key = tmp;
3702+			info->key_len = strlen(tmp);
3703+		}
3704+		info->type = type;
3705+	} else {
3706+		/* For key import, get the blob from user-space */
3707+		if (info->blob_len > sizeof(info->blob) ||
3708+		    copy_from_user(info->blob, u64_to_user_ptr(key_crt->blob), info->blob_len)) {
3709+			msg = "Unable to copy from user the blob.\n";
3710+			send_err_msg(msg, u64_to_user_ptr(key_crt->black_key),
3711+				     key_crt->black_key_len);
3712+			return -EFAULT;
3713+		}
3714+	}
3715+
3716+	goto exit;
3717+
3718+free_resource:
3719+	kfree(tmp);
3720+
3721+exit:
3722+	return ret;
3723+}
3724+
3725+/**
3726+ * keygen_create_keyblob - Generate key and blob
3727+ *
3728+ * @info             : keyblob_info structure, will be updated with
3729+ *                     the black key and blob data from CAAM
3730+ *
3731+ * Return            : '0' on success, error code otherwise
3732+ */
3733+static int keygen_create_keyblob(struct keyblob_info *info)
3734+{
3735+	int ret = 0;
3736+	struct device *jrdev;
3737+
3738+	/* Allocate a CAAM job ring for the operation */
3739+	jrdev = caam_jr_alloc();
3740+	if (IS_ERR(jrdev)) {
3741+		pr_err("Job Ring Device allocation failed\n");
3742+		return PTR_ERR(jrdev);
3743+	}
3744+
3745+	/* Create a black key */
3746+	ret = generate_black_key(jrdev, info);
3747+	if (ret) {
3748+		dev_err(jrdev, "Black key generation failed: (%d)\n", ret);
3749+		goto free_jr;
3750+	}
3751+
3752+	/* Clear the input key, if one exists */
3753+	if (info->key)
3754+		memset(info->key, 0, info->key_len);
3755+
3756+	/* Set key modifier, used as revision number, for blob */
3757+	info->key_mod = caam_key_modifier;
3758+	info->key_mod_len = ARRAY_SIZE(caam_key_modifier);
3759+
3760+	/*
3761+	 * Encapsulate the key into a black blob in general memory
3762+	 * (the only memory type supported right now)
3763+	 */
3764+	ret = caam_blob_encap(jrdev, info);
3765+	if (ret) {
3766+		dev_err(jrdev, "Blob encapsulation of black key failed: %d\n",
3767+			ret);
3768+		goto free_jr;
3769+	}
3770+
3771+	/* Tag the black key so it can be passed to CAAM Crypto API */
3772+	ret = tag_black_obj(info, sizeof(info->black_key), false);
3773+	if (ret) {
3774+		dev_err(jrdev, "Black key tagging failed: %d\n", ret);
3775+		goto free_jr;
3776+	}
3777+
3778+	/* Tag the black blob so it can be passed to CAAM Crypto API */
3779+	ret = tag_black_obj(info, sizeof(info->blob), true);
3780+	if (ret) {
3781+		dev_err(jrdev, "Black blob tagging failed: %d\n", ret);
3782+		goto free_jr;
3783+	}
3784+
3785+free_jr:
3786+	caam_jr_free(jrdev);
3787+
3788+	return ret;
3789+}
3790+
3791+/**
3792+ * keygen_import_key - Import a black key from a blob
3793+ *
3794+ * @info             : keyblob_info structure, will be updated with
3795+ *                     the black key obtained after blob decapsulation by CAAM
3796+ *
3797+ * Return            : '0' on success, error code otherwise
3798+ */
3799+static int keygen_import_key(struct keyblob_info *info)
3800+{
3801+	int ret = 0;
3802+	struct device *jrdev;
3803+	struct header_conf *header;
3804+	struct tagged_object *tag_obj;
3805+
3806+	/* Allocate a CAAM job ring for the operation */
3807+	jrdev = caam_jr_alloc();
3808+	if (IS_ERR(jrdev)) {
3809+		pr_err("Job Ring Device allocation failed\n");
3810+		return PTR_ERR(jrdev);
3811+	}
3812+
3813+	/* Set key modifier, used as revision number, for blob */
3814+	info->key_mod = caam_key_modifier;
3815+	info->key_mod_len = ARRAY_SIZE(caam_key_modifier);
3816+
3817+	print_hex_dump_debug("input blob @ " __stringify(__LINE__) " : ",
3818+			     DUMP_PREFIX_ADDRESS, 16, 4, info->blob,
3819+			     info->blob_len, 1);
3820+
3821+	/* Check if one can retrieve the tag object header configuration */
3822+	if (info->blob_len <= TAG_OVERHEAD_SIZE) {
3823+		dev_err(jrdev, "Invalid blob length\n");
3824+		ret = -EINVAL;
3825+		goto free_jr;
3826+	}
3827+
3828+	/* Retrieve the tag object */
3829+	tag_obj = (struct tagged_object *)info->blob;
3830+
3831+	/*
3832+	 * Validate the tag object header and, if valid,
3833+	 * retrieve its configuration
3834+	 */
3835+	if (is_valid_header_conf(&tag_obj->header)) {
3836+		header = &tag_obj->header;
3837+	} else {
3838+		dev_err(jrdev,
3839+			"Unable to get tag object header configuration for blob\n");
3840+		ret = -EINVAL;
3841+		goto free_jr;
3842+	}
3843+
3844+	info->key_len = header->red_key_len;
3845+
3846+	/* Validate the red key size extracted from blob */
3847+	if (info->key_len < MIN_KEY_SIZE || info->key_len > MAX_KEY_SIZE) {
3848+		dev_err(jrdev,
3849+			"Invalid red key length extracted from blob, expected values are between 16 and 64 bytes\n");
3850+		ret = -EINVAL;
3851+		goto free_jr;
3852+	}
3853+
3854+	info->type = header->type;
3855+
3856+	/* Update blob length by removing the header size */
3857+	info->blob_len -= TAG_OVERHEAD_SIZE;
3858+
3859+	/*
3860+	 * Check the received, from user, blob length
3861+	 * with the one from tag header
3862+	 */
3863+	if (info->blob_len != header->obj_len) {
3864+		dev_err(jrdev, "Mismatch between received blob length and the one from tag header\n");
3865+		ret = -EINVAL;
3866+		goto free_jr;
3867+	}
3868+
3869+	/*
3870+	 * Decapsulate the blob into a black key,
3871+	 * in general memory (the only memory type supported, right now)
3872+	 */
3873+	ret = caam_blob_decap(jrdev, info);
3874+	if (ret) {
3875+		dev_err(jrdev, "Blob decapsulation failed: %d\n", ret);
3876+		goto free_jr;
3877+	}
3878+
3879+	/* Tag the black key so it can be passed to CAAM Crypto API */
3880+	ret = tag_black_obj(info, sizeof(info->black_key), false);
3881+	if (ret)
3882+		dev_err(jrdev, "Black key tagging failed: %d\n", ret);
3883+
3884+free_jr:
3885+	caam_jr_free(jrdev);
3886+
3887+	return ret;
3888+}
3889+
3890+/**
3891+ * send_output       - Send the output data (tagged key and blob)
3892+ *                     from kernel to user-space.
3893+ *
3894+ * @key_crt          : Structure used to transfer data from user-space to kernel
3895+ * @arg              : User-space address of the caam_keygen_cmd structure, updated on return
3896+ * @info             : keyblob_info structure, which contains all
3897+ *                     the data obtained from CAAM that needs to
3898+ *                     be transferred to user-space
3899+ * @create_key_op    : Used to determine if it's a create or import operation
3900+ * @err              : Error code received from previous operations
3901+ *
3902+ * Return            : '0' on success, error code otherwise
3903+ */
3904+static int send_output(struct caam_keygen_cmd *key_crt, unsigned long arg,
3905+		       struct keyblob_info *info, bool create_key_op, int err)
3906+{
3907+	int ret = 0;
3908+	char *msg;
3909+
3910+	/* Free the resource allocated in validate_input */
3911+	kfree(info->key);
3912+
3913+	if (err)
3914+		return err;
3915+
3916+	/* Check if there's enough space to copy black key to user */
3917+	if (key_crt->black_key_len < info->black_key_len) {
3918+		msg = "Not enough space for black key.\n";
3919+		send_err_msg(msg, u64_to_user_ptr(key_crt->blob),
3920+			     key_crt->blob_len);
3921+		/* Send, to user, the necessary size for key */
3922+		key_crt->black_key_len = info->black_key_len;
3923+
3924+		ret = -EINVAL;
3925+		goto exit;
3926+	}
3927+	key_crt->black_key_len = info->black_key_len;
3928+
3929+	/* Copy the black key to user; for key import this is the only output */
3930+	if (copy_to_user(u64_to_user_ptr(key_crt->black_key),
3931+			 info->black_key, info->black_key_len))
3932+		return -EFAULT;
3933+
3934+	/* For key creation, copy to user, also, the blob */
3935+	if (create_key_op) {
3936+		/* Check if there's enough space to copy the blob to user */
3937+		if (key_crt->blob_len < info->blob_len) {
3938+			msg = "Not enough space for blob key.\n";
3939+			send_err_msg(msg, u64_to_user_ptr(key_crt->blob),
3940+				     key_crt->blob_len);
3941+			/* Send, to user, the necessary size for blob */
3942+			key_crt->blob_len = info->blob_len;
3943+
3944+			ret = -EINVAL;
3945+			goto exit;
3946+		}
3947+
3948+		key_crt->blob_len = info->blob_len;
3949+
3950+		if (copy_to_user(u64_to_user_ptr(key_crt->blob), info->blob,
3951+				 info->blob_len))
3952+			return -EFAULT;
3953+	}
3954+
3955+exit:
3956+	if (copy_to_user((void __user *)arg, key_crt,
3957+			 sizeof(struct caam_keygen_cmd)))
3958+		return -EFAULT;
3959+
3960+	return ret;
3961+}
3962+
3963+static long caam_keygen_ioctl(struct file *file, unsigned int cmd,
3964+			      unsigned long arg)
3965+{
3966+	int ret = 0;
3967+	struct keyblob_info info = {.key = NULL};
3968+	struct caam_keygen_cmd key_crt;
3969+	/* Used to determine if it's a create or import operation */
3970+	bool create_key_op = false;
3971+
3972+	switch (cmd) {
3973+	case CAAM_KEYGEN_IOCTL_CREATE:
3974+	{
3975+		create_key_op = true;
3976+
3977+		/* Validate user-space input */
3978+		ret = validate_input(&key_crt, arg, &info, create_key_op);
3979+		if (ret)
3980+			break;
3981+
3982+		/* Create tagged key and blob */
3983+		ret = keygen_create_keyblob(&info);
3984+
3985+		/* Send data from kernel to user-space */
3986+		ret = send_output(&key_crt, arg, &info, create_key_op, ret);
3987+
3988+		break;
3989+	}
3990+	case CAAM_KEYGEN_IOCTL_IMPORT:
3991+	{
3992+		/* Validate user-space input */
3993+		ret = validate_input(&key_crt, arg, &info, create_key_op);
3994+		if (ret)
3995+			break;
3996+
3997+		/* Import tagged key from blob */
3998+		ret = keygen_import_key(&info);
3999+
4000+		/* Send data from kernel to user-space */
4001+		ret = send_output(&key_crt, arg, &info, create_key_op, ret);
4002+
4003+		break;
4004+	}
4005+	default:
4006+		ret = -ENOTTY;
4007+	}
4008+
4009+	return ret;
4010+}
4011+
4012+static const struct file_operations fops = {
4013+	.owner = THIS_MODULE,
4014+	.unlocked_ioctl = caam_keygen_ioctl,
4015+	.compat_ioctl = compat_ptr_ioctl,
4016+};
4017+
4018+static struct miscdevice caam_keygen_dev = {
4019+	.minor = MISC_DYNAMIC_MINOR,
4020+	.name = DEVICE_NAME,
4021+	.fops = &fops
4022+};
4023+
4024+int caam_keygen_init(void)
4025+{
4026+	int ret;
4027+
4028+	ret = misc_register(&caam_keygen_dev);
4029+	if (ret) {
4030+		pr_err("Failed to register device %s\n",
4031+		       caam_keygen_dev.name);
4032+		return ret;
4033+	}
4034+
4035+	pr_info("Device %s registered\n", caam_keygen_dev.name);
4036+
4037+	return 0;
4038+}
4039+
4040+void caam_keygen_exit(void)
4041+{
4042+	misc_deregister(&caam_keygen_dev);
4043+
4044+	pr_info("caam_keygen unregistered\n");
4045+}
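
The /dev/caam-keygen node added above can be exercised from user space roughly as follows. This is an illustrative sketch only: struct caam_keygen_cmd and the CAAM_KEYGEN_IOCTL_* numbers come from linux/caam_keygen.h (not shown in this commit), the field usage is inferred from validate_input() and send_output() above, and the buffer sizes are deliberately generous.

    #include <fcntl.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <unistd.h>
    #include <linux/caam_keygen.h>

    int main(void)
    {
            unsigned char key[256], blob[512];
            struct caam_keygen_cmd cmd;
            int fd, ret;

            fd = open("/dev/caam-keygen", O_RDWR);
            if (fd < 0)
                    return 1;

            memset(&cmd, 0, sizeof(cmd));
            /* "ccm" + "-s" + "16": random 16-byte key, CCM-covered */
            cmd.key_enc = (uintptr_t)"ccm";
            cmd.key_enc_len = sizeof("ccm");
            cmd.key_mode = (uintptr_t)"-s";
            cmd.key_mode_len = sizeof("-s");
            cmd.key_value = (uintptr_t)"16";
            cmd.key_value_len = sizeof("16");
            cmd.black_key = (uintptr_t)key;
            cmd.black_key_len = sizeof(key);
            cmd.blob = (uintptr_t)blob;
            cmd.blob_len = sizeof(blob);

            ret = ioctl(fd, CAAM_KEYGEN_IOCTL_CREATE, &cmd);
            if (ret)
                    /* on failure the driver writes an error string to blob */
                    fprintf(stderr, "create failed: %s\n", blob);
            close(fd);
            return ret ? 1 : 0;
    }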
4046diff --git a/drivers/crypto/caam/caamrng.c b/drivers/crypto/caam/caamrng.c
4047index 77d048dfe..a550e7230 100644
4048--- a/drivers/crypto/caam/caamrng.c
4049+++ b/drivers/crypto/caam/caamrng.c
4050@@ -170,6 +170,52 @@ static void caam_cleanup(struct hwrng *rng)
4051 	kfifo_free(&ctx->fifo);
4052 }
4053
4054+#ifdef CONFIG_CRYPTO_DEV_FSL_CAAM_RNG_TEST
4055+static inline void test_len(struct hwrng *rng, size_t len, bool wait)
4056+{
4057+	u8 *buf;
4058+	int real_len;
4059+	struct caam_rng_ctx *ctx = to_caam_rng_ctx(rng);
4060+	struct device *dev = ctx->ctrldev;
4061+
4062+	buf = kzalloc(len, GFP_KERNEL);
4063+	real_len = buf ? rng->read(rng, buf, len, wait) : -ENOMEM;
4064+	dev_info(dev, "wanted %zu bytes, got %d\n", len, real_len);
4065+	if (real_len < 0)
4066+		dev_err(dev, "READ FAILED\n");
4067+	else if (real_len == 0 && wait)
4068+		dev_err(dev, "WAITING FAILED\n");
4069+	if (real_len > 0)
4070+		print_hex_dump_debug("random bytes@: ", DUMP_PREFIX_ADDRESS, 16,
4071+				     4, buf, real_len, 1);
4072+	kfree(buf);
4073+}
4074+
4075+static inline void test_mode_once(struct hwrng *rng, bool wait)
4076+{
4077+	test_len(rng, 32, wait);
4078+	test_len(rng, 64, wait);
4079+	test_len(rng, 128, wait);
4080+}
4081+
4082+static inline void test_mode(struct hwrng *rng, bool wait)
4083+{
4084+#define TEST_PASS 1
4085+	int i;
4086+
4087+	for (i = 0; i < TEST_PASS; i++)
4088+		test_mode_once(rng, wait);
4089+}
4090+
4091+static void self_test(struct hwrng *rng)
4092+{
4093+	pr_info("testing without waiting\n");
4094+	test_mode(rng, false);
4095+	pr_info("testing with waiting\n");
4096+	test_mode(rng, true);
4097+}
4098+#endif
4099+
4100 static int caam_init(struct hwrng *rng)
4101 {
4102 	struct caam_rng_ctx *ctx = to_caam_rng_ctx(rng);
4103@@ -224,10 +270,10 @@ int caam_rng_init(struct device *ctrldev)
4104
4105 	/* Check for an instantiated RNG before registration */
4106 	if (priv->era < 10)
4107-		rng_inst = (rd_reg32(&priv->ctrl->perfmon.cha_num_ls) &
4108+		rng_inst = (rd_reg32(&priv->jr[0]->perfmon.cha_num_ls) &
4109 			    CHA_ID_LS_RNG_MASK) >> CHA_ID_LS_RNG_SHIFT;
4110 	else
4111-		rng_inst = rd_reg32(&priv->ctrl->vreg.rng) & CHA_VER_NUM_MASK;
4112+		rng_inst = rd_reg32(&priv->jr[0]->vreg.rng) & CHA_VER_NUM_MASK;
4113
4114 	if (!rng_inst)
4115 		return 0;
4116@@ -256,6 +302,10 @@ int caam_rng_init(struct device *ctrldev)
4117 		return ret;
4118 	}
4119
4120+#ifdef CONFIG_CRYPTO_DEV_FSL_CAAM_RNG_TEST
4121+	self_test(&ctx->rng);
4122+#endif
4123+
4124 	devres_close_group(ctrldev, caam_rng_init);
4125 	return 0;
4126 }
4127diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c
4128index ca0361b2d..1cf3de63f 100644
4129--- a/drivers/crypto/caam/ctrl.c
4130+++ b/drivers/crypto/caam/ctrl.c
4131@@ -7,6 +7,7 @@
4132  */
4133
4134 #include <linux/device.h>
4135+#include <linux/dma-map-ops.h>
4136 #include <linux/of_address.h>
4137 #include <linux/of_irq.h>
4138 #include <linux/sys_soc.h>
4139@@ -19,6 +20,7 @@
4140 #include "jr.h"
4141 #include "desc_constr.h"
4142 #include "ctrl.h"
4143+#include "sm.h"
4144
4145 bool caam_dpaa2;
4146 EXPORT_SYMBOL(caam_dpaa2);
4147@@ -79,6 +81,14 @@ static void build_deinstantiation_desc(u32 *desc, int handle)
4148 	append_jump(desc, JUMP_CLASS_CLASS1 | JUMP_TYPE_HALT);
4149 }
4150
4151+static const struct of_device_id imx8m_machine_match[] = {
4152+	{ .compatible = "fsl,imx8mm", },
4153+	{ .compatible = "fsl,imx8mn", },
4154+	{ .compatible = "fsl,imx8mp", },
4155+	{ .compatible = "fsl,imx8mq", },
4156+	{ }
4157+};
4158+
4159 /*
4160  * run_descriptor_deco0 - runs a descriptor on DECO0, under direct control of
4161  *			  the software (no JR/QI used).
4162@@ -105,10 +115,7 @@ static inline int run_descriptor_deco0(struct device *ctrldev, u32 *desc,
4163 	     * Apparently on i.MX8M{Q,M,N,P} it doesn't matter if virt_en == 1
4164 	     * and the following steps should be performed regardless
4165 	     */
4166-	    of_machine_is_compatible("fsl,imx8mq") ||
4167-	    of_machine_is_compatible("fsl,imx8mm") ||
4168-	    of_machine_is_compatible("fsl,imx8mn") ||
4169-	    of_machine_is_compatible("fsl,imx8mp")) {
4170+	    of_match_node(imx8m_machine_match, of_root)) {
4171 		clrsetbits_32(&ctrl->deco_rsr, 0, DECORSR_JR0);
4172
4173 		while (!(rd_reg32(&ctrl->deco_rsr) & DECORSR_VALID) &&
4174@@ -342,16 +349,15 @@ static int instantiate_rng(struct device *ctrldev, int state_handle_mask,
4175 /*
4176  * kick_trng - sets the various parameters for enabling the initialization
4177  *	       of the RNG4 block in CAAM
4178- * @pdev - pointer to the platform device
4179+ * @dev - pointer to the controller device
4180  * @ent_delay - Defines the length (in system clocks) of each entropy sample.
4181  */
4182-static void kick_trng(struct platform_device *pdev, int ent_delay)
4183+static void kick_trng(struct device *dev, int ent_delay)
4184 {
4185-	struct device *ctrldev = &pdev->dev;
4186-	struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctrldev);
4187+	struct caam_drv_private *ctrlpriv = dev_get_drvdata(dev);
4188 	struct caam_ctrl __iomem *ctrl;
4189 	struct rng4tst __iomem *r4tst;
4190-	u32 val;
4191+	u32 val, rtsdctl;
4192
4193 	ctrl = (struct caam_ctrl __iomem *)ctrlpriv->ctrl;
4194 	r4tst = &ctrl->r4tst[0];
4195@@ -367,26 +373,38 @@ static void kick_trng(struct platform_device *pdev, int ent_delay)
4196 	 * Performance-wise, it does not make sense to
4197 	 * set the delay to a value that is lower
4198 	 * than the last one that worked (i.e. the state handles
4199-	 * were instantiated properly. Thus, instead of wasting
4200-	 * time trying to set the values controlling the sample
4201-	 * frequency, the function simply returns.
4202+	 * were instantiated properly).
4203 	 */
4204-	val = (rd_reg32(&r4tst->rtsdctl) & RTSDCTL_ENT_DLY_MASK)
4205-	      >> RTSDCTL_ENT_DLY_SHIFT;
4206-	if (ent_delay <= val)
4207-		goto start_rng;
4208-
4209-	val = rd_reg32(&r4tst->rtsdctl);
4210-	val = (val & ~RTSDCTL_ENT_DLY_MASK) |
4211-	      (ent_delay << RTSDCTL_ENT_DLY_SHIFT);
4212-	wr_reg32(&r4tst->rtsdctl, val);
4213-	/* min. freq. count, equal to 1/4 of the entropy sample length */
4214-	wr_reg32(&r4tst->rtfrqmin, ent_delay >> 2);
4215-	/* disable maximum frequency count */
4216-	wr_reg32(&r4tst->rtfrqmax, RTFRQMAX_DISABLE);
4217-	/* read the control register */
4218-	val = rd_reg32(&r4tst->rtmctl);
4219-start_rng:
4220+	rtsdctl = rd_reg32(&r4tst->rtsdctl);
4221+	val = (rtsdctl & RTSDCTL_ENT_DLY_MASK) >> RTSDCTL_ENT_DLY_SHIFT;
4222+	if (ent_delay > val) {
4223+		val = ent_delay;
4224+		/* min. freq. count, equal to 1/4 of the entropy sample length */
4225+		wr_reg32(&r4tst->rtfrqmin, val >> 2);
4226+		/* max. freq. count, equal to 16 times the entropy sample length */
4227+		wr_reg32(&r4tst->rtfrqmax, val << 4);
4228+	}
4229+
4230+	wr_reg32(&r4tst->rtsdctl, (val << RTSDCTL_ENT_DLY_SHIFT) |
4231+		 RTSDCTL_SAMP_SIZE_VAL);
4232+
4233+	/*
4234+	 * To avoid reprogramming the self-test parameters over and over again,
4235+	 * use RTSDCTL[SAMP_SIZE] as an indicator.
4236+	 */
4237+	if ((rtsdctl & RTSDCTL_SAMP_SIZE_MASK) != RTSDCTL_SAMP_SIZE_VAL) {
4238+		wr_reg32(&r4tst->rtscmisc, (2 << 16) | 32);
4239+		wr_reg32(&r4tst->rtpkrrng, 570);
4240+		wr_reg32(&r4tst->rtpkrmax, 1600);
4241+		wr_reg32(&r4tst->rtscml, (122 << 16) | 317);
4242+		wr_reg32(&r4tst->rtscrl[0], (80 << 16) | 107);
4243+		wr_reg32(&r4tst->rtscrl[1], (57 << 16) | 62);
4244+		wr_reg32(&r4tst->rtscrl[2], (39 << 16) | 39);
4245+		wr_reg32(&r4tst->rtscrl[3], (27 << 16) | 26);
4246+		wr_reg32(&r4tst->rtscrl[4], (19 << 16) | 18);
4247+		wr_reg32(&r4tst->rtscrl[5], (18 << 16) | 17);
4248+	}
4249+
4250 	/*
4251 	 * select raw sampling in both entropy shifter
4252 	 * and statistical checker; put RNG4 into run mode
4253@@ -395,7 +413,7 @@ static void kick_trng(struct platform_device *pdev, int ent_delay)
4254 		      RTMCTL_SAMP_MODE_RAW_ES_SC);
4255 }
4256
4257-static int caam_get_era_from_hw(struct caam_ctrl __iomem *ctrl)
4258+static int caam_get_era_from_hw(struct caam_perfmon __iomem *perfmon)
4259 {
4260 	static const struct {
4261 		u16 ip_id;
4262@@ -421,12 +439,12 @@ static int caam_get_era_from_hw(struct caam_ctrl __iomem *ctrl)
4263 	u16 ip_id;
4264 	int i;
4265
4266-	ccbvid = rd_reg32(&ctrl->perfmon.ccb_id);
4267+	ccbvid = rd_reg32(&perfmon->ccb_id);
4268 	era = (ccbvid & CCBVID_ERA_MASK) >> CCBVID_ERA_SHIFT;
4269 	if (era)	/* This is '0' prior to CAAM ERA-6 */
4270 		return era;
4271
4272-	id_ms = rd_reg32(&ctrl->perfmon.caam_id_ms);
4273+	id_ms = rd_reg32(&perfmon->caam_id_ms);
4274 	ip_id = (id_ms & SECVID_MS_IPID_MASK) >> SECVID_MS_IPID_SHIFT;
4275 	maj_rev = (id_ms & SECVID_MS_MAJ_REV_MASK) >> SECVID_MS_MAJ_REV_SHIFT;
4276
4277@@ -446,7 +464,7 @@ static int caam_get_era_from_hw(struct caam_ctrl __iomem *ctrl)
4278  *
4279  * @ctrl:	controller region
4280  */
4281-static int caam_get_era(struct caam_ctrl __iomem *ctrl)
4282+static int caam_get_era(struct caam_perfmon __iomem *perfmon)
4283 {
4284 	struct device_node *caam_node;
4285 	int ret;
4286@@ -459,7 +477,7 @@ static int caam_get_era(struct caam_ctrl __iomem *ctrl)
4287 	if (!ret)
4288 		return prop;
4289 	else
4290-		return caam_get_era_from_hw(ctrl);
4291+		return caam_get_era_from_hw(perfmon);
4292 }
4293
4294 /*
4295@@ -589,6 +607,214 @@ static void caam_remove_debugfs(void *root)
4296 	debugfs_remove_recursive(root);
4297 }
4298
4299+static void caam_dma_dev_unregister(void *data)
4300+{
4301+	platform_device_unregister(data);
4302+}
4303+
4304+static int caam_ctrl_rng_init(struct device *dev)
4305+{
4306+	struct caam_drv_private *ctrlpriv = dev_get_drvdata(dev);
4307+	struct caam_ctrl __iomem *ctrl = ctrlpriv->ctrl;
4308+	int ret, gen_sk, ent_delay = RTSDCTL_ENT_DLY_MIN;
4309+	u8 rng_vid;
4310+
4311+	if (ctrlpriv->era < 10) {
4312+		struct caam_perfmon __iomem *perfmon;
4313+
4314+		perfmon = ctrlpriv->total_jobrs ?
4315+			  (struct caam_perfmon *)&ctrlpriv->jr[0]->perfmon :
4316+			  (struct caam_perfmon *)&ctrl->perfmon;
4317+
4318+		rng_vid = (rd_reg32(&perfmon->cha_id_ls) &
4319+			   CHA_ID_LS_RNG_MASK) >> CHA_ID_LS_RNG_SHIFT;
4320+	} else {
4321+		struct version_regs __iomem *vreg;
4322+
4323+		vreg = ctrlpriv->total_jobrs ?
4324+			(struct version_regs *)&ctrlpriv->jr[0]->vreg :
4325+			(struct version_regs *)&ctrl->vreg;
4326+
4327+		rng_vid = (rd_reg32(&vreg->rng) & CHA_VER_VID_MASK) >>
4328+			  CHA_VER_VID_SHIFT;
4329+	}
4330+
4331+	/*
4332+	 * If SEC has RNG version >= 4 and RNG state handle has not been
4333+	 * already instantiated, do RNG instantiation
4334+	 * In case of SoCs with Management Complex, RNG is managed by MC f/w.
4335+	 */
4336+	if (!(ctrlpriv->mc_en && ctrlpriv->pr_support) && rng_vid >= 4) {
4337+		ctrlpriv->rng4_sh_init =
4338+			rd_reg32(&ctrl->r4tst[0].rdsta);
4339+		/*
4340+		 * If the secure keys (TDKEK, JDKEK, TDSK), were already
4341+		 * generated, signal this to the function that is instantiating
4342+		 * the state handles. An error would occur if RNG4 attempts
4343+		 * to regenerate these keys before the next POR.
4344+		 */
4345+		gen_sk = ctrlpriv->rng4_sh_init & RDSTA_SKVN ? 0 : 1;
4346+		ctrlpriv->rng4_sh_init &= RDSTA_MASK;
4347+		do {
4348+			int inst_handles =
4349+				rd_reg32(&ctrl->r4tst[0].rdsta) & RDSTA_MASK;
4350+			/*
4351+			 * If either SH were instantiated by somebody else
4352+			 * (e.g. u-boot) then it is assumed that the entropy
4353+			 * parameters are properly set and thus the function
4354+			 * setting these (kick_trng(...)) is skipped.
4355+			 * Also, if a handle was instantiated, do not change
4356+			 * the TRNG parameters.
4357+			 */
4358+			if (!(ctrlpriv->rng4_sh_init || inst_handles)) {
4359+				dev_info(dev,
4360+					 "Entropy delay = %u\n",
4361+					 ent_delay);
4362+				kick_trng(dev, ent_delay);
4363+				ent_delay += 400;
4364+			}
4365+			/*
4366+			 * if instantiate_rng(...) fails, the loop will rerun
4367+			 * and the kick_trng(...) function will modify the
4368+			 * upper and lower limits of the entropy sampling
4369+			 * interval, leading to a successful initialization of
4370+			 * the RNG.
4371+			 */
4372+			ret = instantiate_rng(dev, inst_handles,
4373+					      gen_sk);
4374+			if (ret == -EAGAIN)
4375+				/*
4376+				 * if here, the loop will rerun,
4377+				 * so don't hog the CPU
4378+				 */
4379+				cpu_relax();
4380+		} while ((ret == -EAGAIN) && (ent_delay < RTSDCTL_ENT_DLY_MAX));
4381+		if (ret) {
4382+			dev_err(dev, "failed to instantiate RNG");
4383+			return ret;
4384+		}
4385+		/*
4386+		 * Set handles initialized by this module as the complement of
4387+		 * the already initialized ones
4388+		 */
4389+		ctrlpriv->rng4_sh_init = ~ctrlpriv->rng4_sh_init & RDSTA_MASK;
4390+
4391+		/* Enable RDB bit so that RNG works faster */
4392+		clrsetbits_32(&ctrl->scfgr, 0, SCFGR_RDBENABLE);
4393+	}
4394+
4395+	return 0;
4396+}
4397+
4398+#ifdef CONFIG_PM_SLEEP
4399+
4400+/* Indicate if the internal state of the CAAM is lost during PM */
4401+static int caam_off_during_pm(void)
4402+{
4403+	bool not_off_during_pm = of_machine_is_compatible("fsl,imx6q") ||
4404+				 of_machine_is_compatible("fsl,imx6qp") ||
4405+				 of_machine_is_compatible("fsl,imx6dl");
4406+
4407+	return not_off_during_pm ? 0 : 1;
4408+}
4409+
4410+static void caam_state_save(struct device *dev)
4411+{
4412+	struct caam_drv_private *ctrlpriv = dev_get_drvdata(dev);
4413+	struct caam_ctl_state *state = &ctrlpriv->state;
4414+	struct caam_ctrl __iomem *ctrl = ctrlpriv->ctrl;
4415+	u32 deco_inst, jr_inst;
4416+	int i;
4417+
4418+	state->mcr = rd_reg32(&ctrl->mcr);
4419+	state->scfgr = rd_reg32(&ctrl->scfgr);
4420+
4421+	deco_inst = (rd_reg32(&ctrl->perfmon.cha_num_ms) &
4422+		     CHA_ID_MS_DECO_MASK) >> CHA_ID_MS_DECO_SHIFT;
4423+	for (i = 0; i < deco_inst; i++) {
4424+		state->deco_mid[i].liodn_ms =
4425+			rd_reg32(&ctrl->deco_mid[i].liodn_ms);
4426+		state->deco_mid[i].liodn_ls =
4427+			rd_reg32(&ctrl->deco_mid[i].liodn_ls);
4428+	}
4429+
4430+	jr_inst = (rd_reg32(&ctrl->perfmon.cha_num_ms) &
4431+		   CHA_ID_MS_JR_MASK) >> CHA_ID_MS_JR_SHIFT;
4432+	for (i = 0; i < jr_inst; i++) {
4433+		state->jr_mid[i].liodn_ms =
4434+			rd_reg32(&ctrl->jr_mid[i].liodn_ms);
4435+		state->jr_mid[i].liodn_ls =
4436+			rd_reg32(&ctrl->jr_mid[i].liodn_ls);
4437+	}
4438+}
4439+
4440+static void caam_state_restore(const struct device *dev)
4441+{
4442+	const struct caam_drv_private *ctrlpriv = dev_get_drvdata(dev);
4443+	const struct caam_ctl_state *state = &ctrlpriv->state;
4444+	struct caam_ctrl __iomem *ctrl = ctrlpriv->ctrl;
4445+	u32 deco_inst, jr_inst;
4446+	int i;
4447+
4448+	wr_reg32(&ctrl->mcr, state->mcr);
4449+	wr_reg32(&ctrl->scfgr, state->scfgr);
4450+
4451+	deco_inst = (rd_reg32(&ctrl->perfmon.cha_num_ms) &
4452+		     CHA_ID_MS_DECO_MASK) >> CHA_ID_MS_DECO_SHIFT;
4453+	for (i = 0; i < deco_inst; i++) {
4454+		wr_reg32(&ctrl->deco_mid[i].liodn_ms,
4455+			 state->deco_mid[i].liodn_ms);
4456+		wr_reg32(&ctrl->deco_mid[i].liodn_ls,
4457+			 state->deco_mid[i].liodn_ls);
4458+	}
4459+
4460+	jr_inst = (rd_reg32(&ctrl->perfmon.cha_num_ms) &
4461+		   CHA_ID_MS_JR_MASK) >> CHA_ID_MS_JR_SHIFT;
4462+	for (i = 0; i < jr_inst; i++) {
4463+		wr_reg32(&ctrl->jr_mid[i].liodn_ms,
4464+			 state->jr_mid[i].liodn_ms);
4465+		wr_reg32(&ctrl->jr_mid[i].liodn_ls,
4466+			 state->jr_mid[i].liodn_ls);
4467+	}
4468+
4469+	if (ctrlpriv->virt_en == 1)
4470+		clrsetbits_32(&ctrl->jrstart, 0, JRSTART_JR0_START |
4471+			      JRSTART_JR1_START | JRSTART_JR2_START |
4472+			      JRSTART_JR3_START);
4473+}
4474+
4475+static int caam_ctrl_suspend(struct device *dev)
4476+{
4477+	const struct caam_drv_private *ctrlpriv = dev_get_drvdata(dev);
4478+
4479+	if (ctrlpriv->caam_off_during_pm && !ctrlpriv->scu_en &&
4480+	    !ctrlpriv->optee_en)
4481+		caam_state_save(dev);
4482+
4483+	return 0;
4484+}
4485+
4486+static int caam_ctrl_resume(struct device *dev)
4487+{
4488+	struct caam_drv_private *ctrlpriv = dev_get_drvdata(dev);
4489+	int ret = 0;
4490+
4491+	if (ctrlpriv->caam_off_during_pm && !ctrlpriv->scu_en &&
4492+	    !ctrlpriv->optee_en) {
4493+		caam_state_restore(dev);
4494+
4495+		/* HW and RNG will be reset, so the deinstantiation action can be removed */
4496+		devm_remove_action(dev, devm_deinstantiate_rng, dev);
4497+		ret = caam_ctrl_rng_init(dev);
4498+	}
4499+
4500+	return ret;
4501+}
4502+
4503+SIMPLE_DEV_PM_OPS(caam_ctrl_pm_ops, caam_ctrl_suspend, caam_ctrl_resume);
4504+
4505+#endif /* CONFIG_PM_SLEEP */
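
SIMPLE_DEV_PM_OPS expands to a const struct dev_pm_ops named caam_ctrl_pm_ops, which the driver is expected to reference from its platform_driver. The hunk doing so is not shown in this excerpt, so the usual wiring is sketched here as an assumption, not quoted from the patch:

    static struct platform_driver caam_driver = {
            .driver = {
                    .name = "caam",
    #ifdef CONFIG_PM_SLEEP
                    .pm = &caam_ctrl_pm_ops,
    #endif
                    .of_match_table = caam_match,
            },
            .probe = caam_probe,
    };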
4506+
4507 #ifdef CONFIG_FSL_MC_BUS
4508 static bool check_version(struct fsl_mc_version *mc_version, u32 major,
4509 			  u32 minor, u32 revision)
4510@@ -612,19 +838,25 @@ static bool check_version(struct fsl_mc_version *mc_version, u32 major,
4511 /* Probe routine for CAAM top (controller) level */
4512 static int caam_probe(struct platform_device *pdev)
4513 {
4514-	int ret, ring, gen_sk, ent_delay = RTSDCTL_ENT_DLY_MIN;
4515+	int ret, ring;
4516 	u64 caam_id;
4517 	const struct soc_device_attribute *imx_soc_match;
4518+	static struct platform_device_info caam_dma_pdev_info = {
4519+		.name = "caam-dma",
4520+		.id = PLATFORM_DEVID_NONE
4521+	};
4522+	static struct platform_device *caam_dma_dev;
4523 	struct device *dev;
4524 	struct device_node *nprop, *np;
4525+	struct resource res_regs;
4526 	struct caam_ctrl __iomem *ctrl;
4527 	struct caam_drv_private *ctrlpriv;
4528+	struct caam_perfmon __iomem *perfmon;
4529 	struct dentry *dfs_root;
4530 	u32 scfgr, comp_params;
4531-	u8 rng_vid;
4532 	int pg_size;
4533 	int BLOCK_OFFSET = 0;
4534-	bool pr_support = false;
4535+	bool reg_access = true;
4536
4537 	ctrlpriv = devm_kzalloc(&pdev->dev, sizeof(*ctrlpriv), GFP_KERNEL);
4538 	if (!ctrlpriv)
4539@@ -635,9 +867,44 @@ static int caam_probe(struct platform_device *pdev)
4540 	nprop = pdev->dev.of_node;
4541
4542 	imx_soc_match = soc_device_match(caam_imx_soc_table);
4543+	if (!imx_soc_match && of_match_node(imx8m_machine_match, of_root))
4544+		return -EPROBE_DEFER;
4545+
4546 	caam_imx = (bool)imx_soc_match;
4547
4548+#ifdef CONFIG_PM_SLEEP
4549+	ctrlpriv->caam_off_during_pm = caam_imx && caam_off_during_pm();
4550+#endif
4551+
4552 	if (imx_soc_match) {
4553+		np = of_find_compatible_node(NULL, NULL, "fsl,imx-scu");
4554+
4555+		if (!np)
4556+			np = of_find_compatible_node(NULL, NULL, "fsl,imx-sentinel");
4557+
4558+		ctrlpriv->scu_en = !!np;
4559+		of_node_put(np);
4560+
4561+		reg_access = !ctrlpriv->scu_en;
4562+
4563+		/*
4564+		 * CAAM clocks cannot be controlled from kernel.
4565+		 * They are automatically turned on by SCU f/w.
4566+		 */
4567+		if (ctrlpriv->scu_en)
4568+			goto iomap_ctrl;
4569+
4570+		/*
4571+		 * Until Layerscape and i.MX OP-TEE get in sync,
4572+		 * only i.MX OP-TEE use cases disallow access to
4573+		 * caam page 0 (controller) registers.
4574+		 */
4575+		np = of_find_compatible_node(NULL, NULL, "linaro,optee-tz");
4576+		ctrlpriv->optee_en = !!np;
4577+		of_node_put(np);
4578+
4579+		reg_access = reg_access && !ctrlpriv->optee_en;
4580+
4581 		if (!imx_soc_match->data) {
4582 			dev_err(dev, "No clock data provided for i.MX SoC");
4583 			return -EINVAL;
4584@@ -648,7 +915,7 @@ static int caam_probe(struct platform_device *pdev)
4585 			return ret;
4586 	}
4587
4588-
4589+iomap_ctrl:
4590 	/* Get configuration properties from device tree */
4591 	/* First, get register page */
4592 	ctrl = devm_of_iomap(dev, nprop, 0, NULL);
4593@@ -658,10 +925,38 @@ static int caam_probe(struct platform_device *pdev)
4594 		return ret;
4595 	}
4596
4597-	caam_little_end = !(bool)(rd_reg32(&ctrl->perfmon.status) &
4598+	ring = 0;
4599+	for_each_available_child_of_node(nprop, np)
4600+		if (of_device_is_compatible(np, "fsl,sec-v4.0-job-ring") ||
4601+		    of_device_is_compatible(np, "fsl,sec4.0-job-ring")) {
4602+			u32 reg;
4603+
4604+			if (of_property_read_u32_index(np, "reg", 0, &reg)) {
4605+				dev_err(dev, "%s read reg property error\n",
4606+					np->full_name);
4607+				continue;
4608+			}
4609+
4610+			ctrlpriv->jr[ring] = (struct caam_job_ring __iomem __force *)
4611+					     ((__force uint8_t *)ctrl + reg);
4612+
4613+			ctrlpriv->total_jobrs++;
4614+			ring++;
4615+		}
4616+
4617+	/*
4618+	 * Wherever possible, instead of accessing registers from the global page,
4619+	 * use the alias registers in the first job ring's page
4620+	 * (first in DT node order).
4621+	 */
4622+	perfmon = ring ? (struct caam_perfmon *)&ctrlpriv->jr[0]->perfmon :
4623+			 (struct caam_perfmon *)&ctrl->perfmon;
4624+
4625+	caam_little_end = !(bool)(rd_reg32(&perfmon->status) &
4626 				  (CSTA_PLEND | CSTA_ALT_PLEND));
4627-	comp_params = rd_reg32(&ctrl->perfmon.comp_parms_ms);
4628-	if (comp_params & CTPR_MS_PS && rd_reg32(&ctrl->mcr) & MCFGR_LONG_PTR)
4629+	comp_params = rd_reg32(&perfmon->comp_parms_ms);
4630+	if (reg_access && comp_params & CTPR_MS_PS &&
4631+	    rd_reg32(&ctrl->mcr) & MCFGR_LONG_PTR)
4632 		caam_ptr_sz = sizeof(u64);
4633 	else
4634 		caam_ptr_sz = sizeof(u32);
4635@@ -708,8 +1003,6 @@ static int caam_probe(struct platform_device *pdev)
4636 			 BLOCK_OFFSET * DECO_BLOCK_NUMBER
4637 			 );
4638
4639-	/* Get the IRQ of the controller (for security violations only) */
4640-	ctrlpriv->secvio_irq = irq_of_parse_and_map(nprop, 0);
4641 	np = of_find_compatible_node(NULL, NULL, "fsl,qoriq-mc");
4642 	ctrlpriv->mc_en = !!np;
4643 	of_node_put(np);
4644@@ -720,12 +1013,44 @@ static int caam_probe(struct platform_device *pdev)
4645
4646 		mc_version = fsl_mc_get_version();
4647 		if (mc_version)
4648-			pr_support = check_version(mc_version, 10, 20, 0);
4649+			ctrlpriv->pr_support = check_version(mc_version, 10, 20,
4650+							     0);
4651 		else
4652 			return -EPROBE_DEFER;
4653 	}
4654 #endif
4655
4656+	/* Only i.MX SoCs have Secure Memory (SM) */
4657+	if (!imx_soc_match)
4658+		goto mc_fw;
4659+
4660+	/* Get CAAM-SM node and of_iomap() and save */
4661+	np = of_find_compatible_node(NULL, NULL, "fsl,imx6q-caam-sm");
4662+	if (!np)
4663+		return -ENODEV;
4664+
4665+	/* Get CAAM SM registers base address from device tree */
4666+	ret = of_address_to_resource(np, 0, &res_regs);
4667+	if (ret) {
4668+		dev_err(dev, "failed to retrieve registers base from device tree\n");
4669+		of_node_put(np);
4670+		return -ENODEV;
4671+	}
4672+
4673+	ctrlpriv->sm_phy = res_regs.start;
4674+	ctrlpriv->sm_base = devm_ioremap_resource(dev, &res_regs);
4675+	if (IS_ERR(ctrlpriv->sm_base)) {
4676+		of_node_put(np);
4677+		return PTR_ERR(ctrlpriv->sm_base);
4678+	}
4679+
4680+	ctrlpriv->sm_present = 1;
4681+	of_node_put(np);
4682+
4683+	if (!reg_access)
4684+		goto set_dma_mask;
4685+
4686+mc_fw:
4687 	/*
4688 	 * Enable DECO watchdogs and, if this is a PHYS_ADDR_T_64BIT kernel,
4689 	 * long pointers in master configuration register.
4690@@ -765,13 +1090,14 @@ static int caam_probe(struct platform_device *pdev)
4691 			      JRSTART_JR1_START | JRSTART_JR2_START |
4692 			      JRSTART_JR3_START);
4693
4694+set_dma_mask:
4695 	ret = dma_set_mask_and_coherent(dev, caam_get_dma_mask(dev));
4696 	if (ret) {
4697 		dev_err(dev, "dma_set_mask_and_coherent failed (%d)\n", ret);
4698 		return ret;
4699 	}
4700
4701-	ctrlpriv->era = caam_get_era(ctrl);
4702+	ctrlpriv->era = caam_get_era(perfmon);
4703 	ctrlpriv->domain = iommu_get_domain_for_dev(dev);
4704
4705 	dfs_root = debugfs_create_dir(dev_name(dev), NULL);
4706@@ -782,7 +1108,7 @@ static int caam_probe(struct platform_device *pdev)
4707 			return ret;
4708 	}
4709
4710-	caam_debugfs_init(ctrlpriv, dfs_root);
4711+	caam_debugfs_init(ctrlpriv, perfmon, dfs_root);
4712
4713 	/* Check to see if (DPAA 1.x) QI present. If so, enable */
4714 	if (ctrlpriv->qi_present && !caam_dpaa2) {
4715@@ -801,101 +1127,34 @@ static int caam_probe(struct platform_device *pdev)
4716 #endif
4717 	}
4718
4719-	ring = 0;
4720-	for_each_available_child_of_node(nprop, np)
4721-		if (of_device_is_compatible(np, "fsl,sec-v4.0-job-ring") ||
4722-		    of_device_is_compatible(np, "fsl,sec4.0-job-ring")) {
4723-			ctrlpriv->jr[ring] = (struct caam_job_ring __iomem __force *)
4724-					     ((__force uint8_t *)ctrl +
4725-					     (ring + JR_BLOCK_NUMBER) *
4726-					      BLOCK_OFFSET
4727-					     );
4728-			ctrlpriv->total_jobrs++;
4729-			ring++;
4730-		}
4731-
4732 	/* If no QI and no rings specified, quit and go home */
4733 	if ((!ctrlpriv->qi_present) && (!ctrlpriv->total_jobrs)) {
4734 		dev_err(dev, "no queues configured, terminating\n");
4735 		return -ENOMEM;
4736 	}
4737
4738-	if (ctrlpriv->era < 10)
4739-		rng_vid = (rd_reg32(&ctrl->perfmon.cha_id_ls) &
4740-			   CHA_ID_LS_RNG_MASK) >> CHA_ID_LS_RNG_SHIFT;
4741-	else
4742-		rng_vid = (rd_reg32(&ctrl->vreg.rng) & CHA_VER_VID_MASK) >>
4743-			   CHA_VER_VID_SHIFT;
4744-
4745-	/*
4746-	 * If SEC has RNG version >= 4 and RNG state handle has not been
4747-	 * already instantiated, do RNG instantiation
4748-	 * In case of SoCs with Management Complex, RNG is managed by MC f/w.
4749-	 */
4750-	if (!(ctrlpriv->mc_en && pr_support) && rng_vid >= 4) {
4751-		ctrlpriv->rng4_sh_init =
4752-			rd_reg32(&ctrl->r4tst[0].rdsta);
4753-		/*
4754-		 * If the secure keys (TDKEK, JDKEK, TDSK), were already
4755-		 * generated, signal this to the function that is instantiating
4756-		 * the state handles. An error would occur if RNG4 attempts
4757-		 * to regenerate these keys before the next POR.
4758-		 */
4759-		gen_sk = ctrlpriv->rng4_sh_init & RDSTA_SKVN ? 0 : 1;
4760-		ctrlpriv->rng4_sh_init &= RDSTA_MASK;
4761-		do {
4762-			int inst_handles =
4763-				rd_reg32(&ctrl->r4tst[0].rdsta) &
4764-								RDSTA_MASK;
4765-			/*
4766-			 * If either SH were instantiated by somebody else
4767-			 * (e.g. u-boot) then it is assumed that the entropy
4768-			 * parameters are properly set and thus the function
4769-			 * setting these (kick_trng(...)) is skipped.
4770-			 * Also, if a handle was instantiated, do not change
4771-			 * the TRNG parameters.
4772-			 */
4773-			if (!(ctrlpriv->rng4_sh_init || inst_handles)) {
4774-				dev_info(dev,
4775-					 "Entropy delay = %u\n",
4776-					 ent_delay);
4777-				kick_trng(pdev, ent_delay);
4778-				ent_delay += 400;
4779-			}
4780-			/*
4781-			 * if instantiate_rng(...) fails, the loop will rerun
4782-			 * and the kick_trng(...) function will modify the
4783-			 * upper and lower limits of the entropy sampling
4784-			 * interval, leading to a successful initialization of
4785-			 * the RNG.
4786-			 */
4787-			ret = instantiate_rng(dev, inst_handles,
4788-					      gen_sk);
4789-			if (ret == -EAGAIN)
4790-				/*
4791-				 * if here, the loop will rerun,
4792-				 * so don't hog the CPU
4793-				 */
4794-				cpu_relax();
4795-		} while ((ret == -EAGAIN) && (ent_delay < RTSDCTL_ENT_DLY_MAX));
4796-		if (ret) {
4797-			dev_err(dev, "failed to instantiate RNG");
4798+	caam_dma_pdev_info.parent = dev;
4799+	caam_dma_pdev_info.dma_mask = dma_get_mask(dev);
4800+	caam_dma_dev = platform_device_register_full(&caam_dma_pdev_info);
4801+	if (IS_ERR(caam_dma_dev)) {
4802+		dev_err(dev, "Unable to create and register caam-dma dev\n");
4803+		return PTR_ERR(caam_dma_dev);
4804+	} else {
4805+		set_dma_ops(&caam_dma_dev->dev, get_dma_ops(dev));
4806+		ret = devm_add_action_or_reset(dev, caam_dma_dev_unregister,
4807+					       caam_dma_dev);
4808+		if (ret)
4809 			return ret;
4810-		}
4811-		/*
4812-		 * Set handles initialized by this module as the complement of
4813-		 * the already initialized ones
4814-		 */
4815-		ctrlpriv->rng4_sh_init = ~ctrlpriv->rng4_sh_init & RDSTA_MASK;
4816-
4817-		/* Enable RDB bit so that RNG works faster */
4818-		clrsetbits_32(&ctrl->scfgr, 0, SCFGR_RDBENABLE);
4819 	}
4820
4821-	/* NOTE: RTIC detection ought to go here, around Si time */
4822+	if (reg_access) {
4823+		ret = caam_ctrl_rng_init(dev);
4824+		if (ret)
4825+			return ret;
4826+	}
4827
4828-	caam_id = (u64)rd_reg32(&ctrl->perfmon.caam_id_ms) << 32 |
4829-		  (u64)rd_reg32(&ctrl->perfmon.caam_id_ls);
4830+	caam_id = (u64)rd_reg32(&perfmon->caam_id_ms) << 32 |
4831+		  (u64)rd_reg32(&perfmon->caam_id_ls);
4832
4833 	/* Report "alive" for developer to see */
4834 	dev_info(dev, "device ID = 0x%016llx (Era %d)\n", caam_id,
4835@@ -914,6 +1173,9 @@ static struct platform_driver caam_driver = {
4836 	.driver = {
4837 		.name = "caam",
4838 		.of_match_table = caam_match,
4839+#ifdef CONFIG_PM_SLEEP
4840+		.pm = &caam_ctrl_pm_ops,
4841+#endif
4842 	},
4843 	.probe       = caam_probe,
4844 };
4845diff --git a/drivers/crypto/caam/debugfs.c b/drivers/crypto/caam/debugfs.c
4846index 8ebf18398..a34d71ac1 100644
4847--- a/drivers/crypto/caam/debugfs.c
4848+++ b/drivers/crypto/caam/debugfs.c
4849@@ -42,16 +42,14 @@ void caam_debugfs_qi_init(struct caam_drv_private *ctrlpriv)
4850 }
4851 #endif
4852
4853-void caam_debugfs_init(struct caam_drv_private *ctrlpriv, struct dentry *root)
4854+void caam_debugfs_init(struct caam_drv_private *ctrlpriv,
4855+		       struct caam_perfmon *perfmon, struct dentry *root)
4856 {
4857-	struct caam_perfmon *perfmon;
4858-
4859 	/*
4860 	 * FIXME: needs better naming distinction, as some amalgamation of
4861 	 * "caam" and nprop->full_name. The OF name isn't distinctive,
4862 	 * but does separate instances
4863 	 */
4864-	perfmon = (struct caam_perfmon __force *)&ctrlpriv->ctrl->perfmon;
4865
4866 	ctrlpriv->ctl = debugfs_create_dir("ctl", root);
4867
4868@@ -78,6 +76,9 @@ void caam_debugfs_init(struct caam_drv_private *ctrlpriv, struct dentry *root)
4869 	debugfs_create_file("fault_status", 0444, ctrlpriv->ctl,
4870 			    &perfmon->status, &caam_fops_u32_ro);
4871
4872+	if (ctrlpriv->scu_en || ctrlpriv->optee_en)
4873+		return;
4874+
4875 	/* Internal covering keys (useful in non-secure mode only) */
4876 	ctrlpriv->ctl_kek_wrap.data = (__force void *)&ctrlpriv->ctrl->kek[0];
4877 	ctrlpriv->ctl_kek_wrap.size = KEK_KEY_SIZE * sizeof(u32);
4878diff --git a/drivers/crypto/caam/debugfs.h b/drivers/crypto/caam/debugfs.h
4879index 661d768ac..6c566a2b7 100644
4880--- a/drivers/crypto/caam/debugfs.h
4881+++ b/drivers/crypto/caam/debugfs.h
4882@@ -6,11 +6,14 @@
4883
4884 struct dentry;
4885 struct caam_drv_private;
4886+struct caam_perfmon;
4887
4888 #ifdef CONFIG_DEBUG_FS
4889-void caam_debugfs_init(struct caam_drv_private *ctrlpriv, struct dentry *root);
4890+void caam_debugfs_init(struct caam_drv_private *ctrlpriv,
4891+		       struct caam_perfmon *perfmon, struct dentry *root);
4892 #else
4893 static inline void caam_debugfs_init(struct caam_drv_private *ctrlpriv,
4894+				     struct caam_perfmon *perfmon,
4895 				     struct dentry *root)
4896 {}
4897 #endif
4898diff --git a/drivers/crypto/caam/desc.h b/drivers/crypto/caam/desc.h
4899index e13470901..c2c58f818 100644
4900--- a/drivers/crypto/caam/desc.h
4901+++ b/drivers/crypto/caam/desc.h
4902@@ -43,6 +43,7 @@
4903 #define CMD_SEQ_LOAD		(0x03 << CMD_SHIFT)
4904 #define CMD_FIFO_LOAD		(0x04 << CMD_SHIFT)
4905 #define CMD_SEQ_FIFO_LOAD	(0x05 << CMD_SHIFT)
4906+#define CMD_MOVEB		(0x07 << CMD_SHIFT)
4907 #define CMD_STORE		(0x0a << CMD_SHIFT)
4908 #define CMD_SEQ_STORE		(0x0b << CMD_SHIFT)
4909 #define CMD_FIFO_STORE		(0x0c << CMD_SHIFT)
4910@@ -152,7 +153,7 @@
4911  * with the TDKEK if TK is set
4912  */
4913 #define KEY_ENC			0x00400000
4914-
4915+#define KEY_ENC_OFFSET		22
4916 /*
4917  * No Write Back - Do not allow key to be FIFO STOREd
4918  */
4919@@ -162,11 +163,13 @@
4920  * Enhanced Encryption of Key
4921  */
4922 #define KEY_EKT			0x00100000
4923+#define KEY_EKT_OFFSET		20
4924
4925 /*
4926  * Encrypted with Trusted Key
4927  */
4928 #define KEY_TK			0x00008000
4929+#define KEY_TK_OFFSET		15
4930
4931 /*
4932  * KDEST - Key Destination: 0 - class key register,
4933@@ -363,6 +366,7 @@
4934 #define FIFOLD_TYPE_PK_N	(0x08 << FIFOLD_TYPE_SHIFT)
4935 #define FIFOLD_TYPE_PK_A	(0x0c << FIFOLD_TYPE_SHIFT)
4936 #define FIFOLD_TYPE_PK_B	(0x0d << FIFOLD_TYPE_SHIFT)
4937+#define FIFOLD_TYPE_IFIFO	(0x0f << FIFOLD_TYPE_SHIFT)
4938
4939 /* Other types. Need to OR in last/flush bits as desired */
4940 #define FIFOLD_TYPE_MSG_MASK	(0x38 << FIFOLD_TYPE_SHIFT)
4941@@ -403,6 +407,10 @@
4942 #define FIFOST_TYPE_PKHA_N	 (0x08 << FIFOST_TYPE_SHIFT)
4943 #define FIFOST_TYPE_PKHA_A	 (0x0c << FIFOST_TYPE_SHIFT)
4944 #define FIFOST_TYPE_PKHA_B	 (0x0d << FIFOST_TYPE_SHIFT)
4945+#define FIFOST_TYPE_AF_SBOX_CCM_JKEK	(0x10 << FIFOST_TYPE_SHIFT)
4946+#define FIFOST_TYPE_AF_SBOX_CCM_TKEK	(0x11 << FIFOST_TYPE_SHIFT)
4947+#define FIFOST_TYPE_KEY_CCM_JKEK	(0x14 << FIFOST_TYPE_SHIFT)
4948+#define FIFOST_TYPE_KEY_CCM_TKEK	(0x15 << FIFOST_TYPE_SHIFT)
4949 #define FIFOST_TYPE_AF_SBOX_JKEK (0x20 << FIFOST_TYPE_SHIFT)
4950 #define FIFOST_TYPE_AF_SBOX_TKEK (0x21 << FIFOST_TYPE_SHIFT)
4951 #define FIFOST_TYPE_PKHA_E_JKEK	 (0x22 << FIFOST_TYPE_SHIFT)
4952@@ -1136,6 +1144,23 @@
4953 #define OP_PCL_PKPROT_ECC			 0x0002
4954 #define OP_PCL_PKPROT_F2M			 0x0001
4955
4956+/* Blob protocol protinfo bits */
4957+#define OP_PCL_BLOB_TK			0x0200
4958+#define OP_PCL_BLOB_EKT			0x0100
4959+
4960+#define OP_PCL_BLOB_K2KR_MEM		0x0000
4961+#define OP_PCL_BLOB_K2KR_C1KR		0x0010
4962+#define OP_PCL_BLOB_K2KR_C2KR		0x0030
4963+#define OP_PCL_BLOB_K2KR_AFHAS		0x0050
4964+#define OP_PCL_BLOB_K2KR_C2KR_SPLIT	0x0070
4965+
4966+#define OP_PCL_BLOB_PTXT_SECMEM		0x0008
4967+#define OP_PCL_BLOB_BLACK		0x0004
4968+
4969+#define OP_PCL_BLOB_FMT_NORMAL		0x0000
4970+#define OP_PCL_BLOB_FMT_MSTR		0x0002
4971+#define OP_PCL_BLOB_FMT_TEST		0x0003
4972+
4973 /* For non-protocol/alg-only op commands */
4974 #define OP_ALG_TYPE_SHIFT	24
4975 #define OP_ALG_TYPE_MASK	(0x7 << OP_ALG_TYPE_SHIFT)
4976@@ -1502,6 +1527,7 @@
4977 #define MATH_SRC1_INFIFO	(0x0a << MATH_SRC1_SHIFT)
4978 #define MATH_SRC1_OUTFIFO	(0x0b << MATH_SRC1_SHIFT)
4979 #define MATH_SRC1_ONE		(0x0c << MATH_SRC1_SHIFT)
4980+#define MATH_SRC1_ZERO		(0x0f << MATH_SRC1_SHIFT)
4981
4982 /* Destination selectors */
4983 #define MATH_DEST_SHIFT		8
4984@@ -1684,4 +1710,31 @@
4985 /* Frame Descriptor Command for Replacement Job Descriptor */
4986 #define FD_CMD_REPLACE_JOB_DESC				0x20000000
4987
4988+/* CHA Control Register bits */
4989+#define CCTRL_RESET_CHA_ALL          0x1
4990+#define CCTRL_RESET_CHA_AESA         0x2
4991+#define CCTRL_RESET_CHA_DESA         0x4
4992+#define CCTRL_RESET_CHA_AFHA         0x8
4993+#define CCTRL_RESET_CHA_KFHA         0x10
4994+#define CCTRL_RESET_CHA_SF8A         0x20
4995+#define CCTRL_RESET_CHA_PKHA         0x40
4996+#define CCTRL_RESET_CHA_MDHA         0x80
4997+#define CCTRL_RESET_CHA_CRCA         0x100
4998+#define CCTRL_RESET_CHA_RNG          0x200
4999+#define CCTRL_RESET_CHA_SF9A         0x400
5000+#define CCTRL_RESET_CHA_ZUCE         0x800
5001+#define CCTRL_RESET_CHA_ZUCA         0x1000
5002+#define CCTRL_UNLOAD_PK_A0           0x10000
5003+#define CCTRL_UNLOAD_PK_A1           0x20000
5004+#define CCTRL_UNLOAD_PK_A2           0x40000
5005+#define CCTRL_UNLOAD_PK_A3           0x80000
5006+#define CCTRL_UNLOAD_PK_B0           0x100000
5007+#define CCTRL_UNLOAD_PK_B1           0x200000
5008+#define CCTRL_UNLOAD_PK_B2           0x400000
5009+#define CCTRL_UNLOAD_PK_B3           0x800000
5010+#define CCTRL_UNLOAD_PK_N            0x1000000
5011+#define CCTRL_UNLOAD_PK_A            0x4000000
5012+#define CCTRL_UNLOAD_PK_B            0x8000000
5013+#define CCTRL_UNLOAD_SBOX            0x10000000
5014+
5015 #endif /* DESC_H */
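The blob protinfo bits added above are OR'd into the PROTINFO field of a blob OPERATION command. A minimal sketch of how a black-key encapsulation descriptor might combine them — illustrative only, not part of this patch; append_operation() and the OP_TYPE_*/OP_PCLID_BLOB constants are existing CAAM descriptor helpers:

/*
 * Illustrative sketch: OPERATION command that encapsulates a black key
 * (CCM-covered, per OP_PCL_BLOB_EKT) into a normal-format blob.
 */
static inline void append_blob_encap_op(u32 * const desc)
{
	append_operation(desc, OP_TYPE_ENCAP_PROTOCOL | OP_PCLID_BLOB |
			 OP_PCL_BLOB_BLACK | OP_PCL_BLOB_EKT |
			 OP_PCL_BLOB_FMT_NORMAL);
}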
5016diff --git a/drivers/crypto/caam/desc_constr.h b/drivers/crypto/caam/desc_constr.h
5017index 62ce6421b..f98dec8f2 100644
5018--- a/drivers/crypto/caam/desc_constr.h
5019+++ b/drivers/crypto/caam/desc_constr.h
5020@@ -240,6 +240,7 @@ static inline u32 *append_##cmd(u32 * const desc, u32 options) \
5021 APPEND_CMD_RET(jump, JUMP)
5022 APPEND_CMD_RET(move, MOVE)
5023 APPEND_CMD_RET(move_len, MOVE_LEN)
5024+APPEND_CMD_RET(moveb, MOVEB)
5025
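append_moveb() emits the new CMD_MOVEB variant, which behaves like MOVE but byte-swaps data on its way through the math unit. A hedged usage sketch with existing MOVE_* selectors from desc.h (the 8 in the low bits is the transfer length in bytes):

/* Sketch: copy 8 bytes from MATH0 into the descriptor buffer, byte-reversed */
append_moveb(desc, MOVE_SRC_MATH0 | MOVE_DEST_DESCBUF | MOVE_WAITCOMP | 8);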
5026 static inline void set_jump_tgt_here(u32 * const desc, u32 *jump_cmd)
5027 {
5028@@ -500,6 +501,8 @@ do { \
5029  * @key_virt: virtual address where algorithm key resides
5030  * @key_inline: true - key can be inlined in the descriptor; false - key is
5031  *              referenced by the descriptor
5032+ * @key_real_len: size of the key to be loaded by the CAAM
5033+ * @key_cmd_opt: optional parameters for KEY command
5034  */
5035 struct alginfo {
5036 	u32 algtype;
5037@@ -508,6 +511,8 @@ struct alginfo {
5038 	dma_addr_t key_dma;
5039 	const void *key_virt;
5040 	bool key_inline;
5041+	u32 key_real_len;
5042+	u32 key_cmd_opt;
5043 };
5044
5045 /**
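The extended struct alginfo lets shared-descriptor builders emit a KEY command whose loaded length differs from the buffer length, as tagged/black keys require. A hedged sketch of how a caller might fill the new fields — the values are illustrative, not taken from this patch:

struct alginfo adata = { 0 };

adata.algtype      = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC;
adata.keylen       = black_key_len;      /* bytes present in the key buffer */
adata.key_real_len = AES_KEYSIZE_256;    /* bytes the CAAM actually loads */
adata.key_cmd_opt  = KEY_ENC | KEY_EKT;  /* CCM-covered black key */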
5046diff --git a/drivers/crypto/caam/dpseci.c b/drivers/crypto/caam/dpseci.c
5047index 039df6c57..23dbb87f7 100644
5048--- a/drivers/crypto/caam/dpseci.c
5049+++ b/drivers/crypto/caam/dpseci.c
5050@@ -5,6 +5,7 @@
5051  */
5052
5053 #include <linux/fsl/mc.h>
5054+#include <soc/fsl/dpaa2-io.h>
5055 #include "dpseci.h"
5056 #include "dpseci_cmd.h"
5057
5058@@ -16,8 +17,8 @@
5059  * @token:	Returned token; use in subsequent API calls
5060  *
5061  * This function can be used to open a control session for an already created
5062- * object; an object may have been declared statically in the DPL
5063- * or created dynamically.
5064+ * object; an object may have been declared in the DPL or by calling the
5065+ * dpseci_create() function.
5066  * This function returns a unique authentication token, associated with the
5067  * specific object ID and the specific MC portal; this token must be used in all
5068  * subsequent commands for this specific object.
5069@@ -66,6 +67,85 @@ int dpseci_close(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token)
5070 	return mc_send_command(mc_io, &cmd);
5071 }
5072
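A minimal control-session sketch matching the comment above — hedged, with error handling trimmed; mc_io and dpseci_id are assumed to come from the fsl-mc bus device:

u16 token;
int err;

err = dpseci_open(mc_io, 0, dpseci_id, &token);
if (err)
	return err;

/* All subsequent commands for this object use 'token' */
err = dpseci_enable(mc_io, 0, token);

dpseci_close(mc_io, 0, token);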
5073+/**
5074+ * dpseci_create() - Create the DPSECI object
5075+ * @mc_io:	Pointer to MC portal's I/O object
5076+ * @dprc_token:	Parent container token; '0' for default container
5077+ * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
5078+ * @cfg:	Configuration structure
5079+ * @obj_id:	returned object id
5080+ *
5081+ * Create the DPSECI object, allocate required resources and perform required
5082+ * initialization.
5083+ *
5084+ * The object can be created either by declaring it in the DPL file, or by
5085+ * calling this function.
5086+ *
5087+ * The function accepts an authentication token of a parent container that this
5088+ * object should be assigned to. The token can be '0' so the object will be
5089+ * assigned to the default container.
5090+ * The newly created object can be opened with the returned object id and using
5091+ * the container's associated tokens and MC portals.
5092+ *
5093+ * Return:	'0' on success, error code otherwise
5094+ */
5095+int dpseci_create(struct fsl_mc_io *mc_io, u16 dprc_token, u32 cmd_flags,
5096+		  const struct dpseci_cfg *cfg, u32 *obj_id)
5097+{
5098+	struct fsl_mc_command cmd = { 0 };
5099+	struct dpseci_cmd_create *cmd_params;
5100+	int i, err;
5101+
5102+	cmd.header = mc_encode_cmd_header(DPSECI_CMDID_CREATE,
5103+					  cmd_flags,
5104+					  dprc_token);
5105+	cmd_params = (struct dpseci_cmd_create *)cmd.params;
5106+	for (i = 0; i < 8; i++)
5107+		cmd_params->priorities[i] = cfg->priorities[i];
5108+	for (i = 0; i < 8; i++)
5109+		cmd_params->priorities2[i] = cfg->priorities[8 + i];
5110+	cmd_params->num_tx_queues = cfg->num_tx_queues;
5111+	cmd_params->num_rx_queues = cfg->num_rx_queues;
5112+	cmd_params->options = cpu_to_le32(cfg->options);
5113+	err = mc_send_command(mc_io, &cmd);
5114+	if (err)
5115+		return err;
5116+
5117+	*obj_id = mc_cmd_read_object_id(&cmd);
5118+
5119+	return 0;
5120+}
5121+
5122+/**
5123+ * dpseci_destroy() - Destroy the DPSECI object and release all its resources
5124+ * @mc_io:	Pointer to MC portal's I/O object
5125+ * @dprc_token: Parent container token; '0' for default container
5126+ * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
5127+ * @object_id:	The object id; it must be a valid id within the container that
5128+ *		created this object
5129+ *
5130+ * The function accepts the authentication token of the parent container that
5131+ * created the object (not the one that currently owns the object). The object
5132+ * is searched within parent using the provided 'object_id'.
5133+ * All tokens to the object must be closed before calling destroy.
5134+ *
5135+ * Return:	'0' on success, error code otherwise
5136+ */
5137+int dpseci_destroy(struct fsl_mc_io *mc_io, u16 dprc_token, u32 cmd_flags,
5138+		   u32 object_id)
5139+{
5140+	struct fsl_mc_command cmd = { 0 };
5141+	struct dpseci_cmd_destroy *cmd_params;
5142+
5143+	cmd.header = mc_encode_cmd_header(DPSECI_CMDID_DESTROY,
5144+					  cmd_flags,
5145+					  dprc_token);
5146+	cmd_params = (struct dpseci_cmd_destroy *)cmd.params;
5147+	cmd_params->object_id = cpu_to_le32(object_id);
5148+
5149+	return mc_send_command(mc_io, &cmd);
5150+}
5151+
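A hedged sketch of the dynamic-creation path the two functions above enable (the DPL-file path needs no code); the configuration values are illustrative:

struct dpseci_cfg cfg = {
	.options       = DPSECI_OPT_HAS_CG,
	.num_tx_queues = 2,
	.num_rx_queues = 2,
	.priorities    = { 1, 2 },
};
u32 obj_id;
int err;

err = dpseci_create(mc_io, 0 /* default container */, 0, &cfg, &obj_id);
if (err)
	return err;

/* ... open the object by obj_id, use it, close it ... */

err = dpseci_destroy(mc_io, 0, 0, obj_id);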
5152 /**
5153  * dpseci_enable() - Enable the DPSECI, allow sending and receiving frames
5154  * @mc_io:	Pointer to MC portal's I/O object
5155@@ -150,6 +230,198 @@ int dpseci_is_enabled(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
5156 	return 0;
5157 }
5158
5159+/**
5160+ * dpseci_get_irq_enable() - Get overall interrupt state
5161+ * @mc_io:	Pointer to MC portal's I/O object
5162+ * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
5163+ * @token:	Token of DPSECI object
5164+ * @irq_index:	The interrupt index to configure
5165+ * @en:		Returned Interrupt state - enable = 1, disable = 0
5166+ *
5167+ * Return:	'0' on success, error code otherwise
5168+ */
5169+int dpseci_get_irq_enable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
5170+			  u8 irq_index, u8 *en)
5171+{
5172+	struct fsl_mc_command cmd = { 0 };
5173+	struct dpseci_cmd_irq_enable *cmd_params;
5174+	struct dpseci_rsp_get_irq_enable *rsp_params;
5175+	int err;
5176+
5177+	cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_IRQ_ENABLE,
5178+					  cmd_flags,
5179+					  token);
5180+	cmd_params = (struct dpseci_cmd_irq_enable *)cmd.params;
5181+	cmd_params->irq_index = irq_index;
5182+	err = mc_send_command(mc_io, &cmd);
5183+	if (err)
5184+		return err;
5185+
5186+	rsp_params = (struct dpseci_rsp_get_irq_enable *)cmd.params;
5187+	*en = rsp_params->enable_state;
5188+
5189+	return 0;
5190+}
5191+
5192+/**
5193+ * dpseci_set_irq_enable() - Set overall interrupt state.
5194+ * @mc_io:	Pointer to MC portal's I/O object
5195+ * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
5196+ * @token:	Token of DPSECI object
5197+ * @irq_index:	The interrupt index to configure
5198+ * @en:		Interrupt state - enable = 1, disable = 0
5199+ *
5200+ * Allows GPP software to control when interrupts are generated.
5201+ * Each interrupt can have up to 32 causes. The enable/disable controls the
5202+ * overall interrupt state: if the interrupt is disabled, no cause can
5203+ * assert it.
5204+ *
5205+ * Return:	'0' on success, error code otherwise
5206+ */
5207+int dpseci_set_irq_enable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
5208+			  u8 irq_index, u8 en)
5209+{
5210+	struct fsl_mc_command cmd = { 0 };
5211+	struct dpseci_cmd_irq_enable *cmd_params;
5212+
5213+	cmd.header = mc_encode_cmd_header(DPSECI_CMDID_SET_IRQ_ENABLE,
5214+					  cmd_flags,
5215+					  token);
5216+	cmd_params = (struct dpseci_cmd_irq_enable *)cmd.params;
5217+	cmd_params->irq_index = irq_index;
5218+	cmd_params->enable_state = en;
5219+
5220+	return mc_send_command(mc_io, &cmd);
5221+}
5222+
5223+/**
5224+ * dpseci_get_irq_mask() - Get interrupt mask.
5225+ * @mc_io:	Pointer to MC portal's I/O object
5226+ * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
5227+ * @token:	Token of DPSECI object
5228+ * @irq_index:	The interrupt index to configure
5229+ * @mask:	Returned event mask to trigger interrupt
5230+ *
5231+ * Every interrupt can have up to 32 causes and the interrupt model supports
5232+ * masking/unmasking each cause independently.
5233+ *
5234+ * Return:	'0' on success, error code otherwise
5235+ */
5236+int dpseci_get_irq_mask(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
5237+			u8 irq_index, u32 *mask)
5238+{
5239+	struct fsl_mc_command cmd = { 0 };
5240+	struct dpseci_cmd_irq_mask *cmd_params;
5241+	int err;
5242+
5243+	cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_IRQ_MASK,
5244+					  cmd_flags,
5245+					  token);
5246+	cmd_params = (struct dpseci_cmd_irq_mask *)cmd.params;
5247+	cmd_params->irq_index = irq_index;
5248+	err = mc_send_command(mc_io, &cmd);
5249+	if (err)
5250+		return err;
5251+
5252+	*mask = le32_to_cpu(cmd_params->mask);
5253+
5254+	return 0;
5255+}
5256+
5257+/**
5258+ * dpseci_set_irq_mask() - Set interrupt mask.
5259+ * @mc_io:	Pointer to MC portal's I/O object
5260+ * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
5261+ * @token:	Token of DPSECI object
5262+ * @irq_index:	The interrupt index to configure
5263+ * @mask:	event mask to trigger interrupt;
5264+ *		each bit:
5265+ *			0 = ignore event
5266+ *			1 = consider event for asserting IRQ
5267+ *
5268+ * Every interrupt can have up to 32 causes and the interrupt model supports
5269+ * masking/unmasking each cause independently
5270+ *
5271+ * Return:	'0' on success, error code otherwise
5272+ */
5273+int dpseci_set_irq_mask(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
5274+			u8 irq_index, u32 mask)
5275+{
5276+	struct fsl_mc_command cmd = { 0 };
5277+	struct dpseci_cmd_irq_mask *cmd_params;
5278+
5279+	cmd.header = mc_encode_cmd_header(DPSECI_CMDID_SET_IRQ_MASK,
5280+					  cmd_flags,
5281+					  token);
5282+	cmd_params = (struct dpseci_cmd_irq_mask *)cmd.params;
5283+	cmd_params->mask = cpu_to_le32(mask);
5284+	cmd_params->irq_index = irq_index;
5285+
5286+	return mc_send_command(mc_io, &cmd);
5287+}
5288+
5289+/**
5290+ * dpseci_get_irq_status() - Get the current status of any pending interrupts
5291+ * @mc_io:	Pointer to MC portal's I/O object
5292+ * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
5293+ * @token:	Token of DPSECI object
5294+ * @irq_index:	The interrupt index to configure
5295+ * @status:	Returned interrupts status - one bit per cause:
5296+ *			0 = no interrupt pending
5297+ *			1 = interrupt pending
5298+ *
5299+ * Return:	'0' on success, error code otherwise
5300+ */
5301+int dpseci_get_irq_status(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
5302+			  u8 irq_index, u32 *status)
5303+{
5304+	struct fsl_mc_command cmd = { 0 };
5305+	struct dpseci_cmd_irq_status *cmd_params;
5306+	int err;
5307+
5308+	cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_IRQ_STATUS,
5309+					  cmd_flags,
5310+					  token);
5311+	cmd_params = (struct dpseci_cmd_irq_status *)cmd.params;
5312+	cmd_params->status = cpu_to_le32(*status);
5313+	cmd_params->irq_index = irq_index;
5314+	err = mc_send_command(mc_io, &cmd);
5315+	if (err)
5316+		return err;
5317+
5318+	*status = le32_to_cpu(cmd_params->status);
5319+
5320+	return 0;
5321+}
5322+
5323+/**
5324+ * dpseci_clear_irq_status() - Clear a pending interrupt's status
5325+ * @mc_io:	Pointer to MC portal's I/O object
5326+ * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
5327+ * @token:	Token of DPSECI object
5328+ * @irq_index:	The interrupt index to configure
5329+ * @status:	bits to clear (W1C) - one bit per cause:
5330+ *			0 = don't change
5331+ *			1 = clear status bit
5332+ *
5333+ * Return:	'0' on success, error code otherwise
5334+ */
5335+int dpseci_clear_irq_status(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
5336+			    u8 irq_index, u32 status)
5337+{
5338+	struct fsl_mc_command cmd = { 0 };
5339+	struct dpseci_cmd_irq_status *cmd_params;
5340+
5341+	cmd.header = mc_encode_cmd_header(DPSECI_CMDID_CLEAR_IRQ_STATUS,
5342+					  cmd_flags,
5343+					  token);
5344+	cmd_params = (struct dpseci_cmd_irq_status *)cmd.params;
5345+	cmd_params->status = cpu_to_le32(status);
5346+	cmd_params->irq_index = irq_index;
5347+
5348+	return mc_send_command(mc_io, &cmd);
5349+}
5350+
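Taken together, the six helpers above follow the usual MC interrupt model: unmask the causes of interest, enable the line, then read and W1C-acknowledge status when an event fires. A hedged sketch (irq_index 0 assumed):

u32 status = 0;
int err;

/* Unmask all 32 causes, then enable the interrupt line */
err = dpseci_set_irq_mask(mc_io, 0, token, 0, GENMASK(31, 0));
if (!err)
	err = dpseci_set_irq_enable(mc_io, 0, token, 0, 1);

/* Later, in the event handler: read pending causes and acknowledge them */
err = dpseci_get_irq_status(mc_io, 0, token, 0, &status);
if (!err && status)
	err = dpseci_clear_irq_status(mc_io, 0, token, 0, status);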
5351 /**
5352  * dpseci_get_attributes() - Retrieve DPSECI attributes
5353  * @mc_io:	Pointer to MC portal's I/O object
5354@@ -339,6 +611,42 @@ int dpseci_get_sec_attr(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
5355 	return 0;
5356 }
5357
5358+/**
5359+ * dpseci_get_sec_counters() - Retrieve SEC accelerator counters
5360+ * @mc_io:	Pointer to MC portal's I/O object
5361+ * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
5362+ * @token:	Token of DPSECI object
5363+ * @counters:	Returned SEC counters
5364+ *
5365+ * Return:	'0' on success, error code otherwise
5366+ */
5367+int dpseci_get_sec_counters(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
5368+			    struct dpseci_sec_counters *counters)
5369+{
5370+	struct fsl_mc_command cmd = { 0 };
5371+	struct dpseci_rsp_get_sec_counters *rsp_params;
5372+	int err;
5373+
5374+	cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_SEC_COUNTERS,
5375+					  cmd_flags,
5376+					  token);
5377+	err = mc_send_command(mc_io, &cmd);
5378+	if (err)
5379+		return err;
5380+
5381+	rsp_params = (struct dpseci_rsp_get_sec_counters *)cmd.params;
5382+	counters->dequeued_requests =
5383+		le64_to_cpu(rsp_params->dequeued_requests);
5384+	counters->ob_enc_requests = le64_to_cpu(rsp_params->ob_enc_requests);
5385+	counters->ib_dec_requests = le64_to_cpu(rsp_params->ib_dec_requests);
5386+	counters->ob_enc_bytes = le64_to_cpu(rsp_params->ob_enc_bytes);
5387+	counters->ob_prot_bytes = le64_to_cpu(rsp_params->ob_prot_bytes);
5388+	counters->ib_dec_bytes = le64_to_cpu(rsp_params->ib_dec_bytes);
5389+	counters->ib_valid_bytes = le64_to_cpu(rsp_params->ib_valid_bytes);
5390+
5391+	return 0;
5392+}
5393+
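A short usage sketch for the counter query above — hedged; dev is the caller's device, used only for logging:

struct dpseci_sec_counters c;
int err;

err = dpseci_get_sec_counters(mc_io, 0, token, &c);
if (!err)
	dev_info(dev, "SEC: %llu dequeued, %llu OB-enc, %llu IB-dec requests\n",
		 c.dequeued_requests, c.ob_enc_requests, c.ib_dec_requests);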
5394 /**
5395  * dpseci_get_api_version() - Get Data Path SEC Interface API version
5396  * @mc_io:	Pointer to MC portal's I/O object
5397@@ -368,6 +676,90 @@ int dpseci_get_api_version(struct fsl_mc_io *mc_io, u32 cmd_flags,
5398 	return 0;
5399 }
5400
5401+/**
5402+ * dpseci_set_opr() - Set Order Restoration configuration
5403+ * @mc_io:	Pointer to MC portal's I/O object
5404+ * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
5405+ * @token:	Token of DPSECI object
5406+ * @index:	The queue index
5407+ * @options:	Configuration mode options; can be OPR_OPT_CREATE or
5408+ *		OPR_OPT_RETIRE
5409+ * @cfg:	Configuration options for the OPR
5410+ *
5411+ * Return:	'0' on success, error code otherwise
5412+ */
5413+int dpseci_set_opr(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, u8 index,
5414+		   u8 options, struct opr_cfg *cfg)
5415+{
5416+	struct fsl_mc_command cmd = { 0 };
5417+	struct dpseci_cmd_opr *cmd_params;
5418+
5419+	cmd.header = mc_encode_cmd_header(
5420+			DPSECI_CMDID_SET_OPR,
5421+			cmd_flags,
5422+			token);
5423+	cmd_params = (struct dpseci_cmd_opr *)cmd.params;
5424+	cmd_params->index = index;
5425+	cmd_params->options = options;
5426+	cmd_params->oloe = cfg->oloe;
5427+	cmd_params->oeane = cfg->oeane;
5428+	cmd_params->olws = cfg->olws;
5429+	cmd_params->oa = cfg->oa;
5430+	cmd_params->oprrws = cfg->oprrws;
5431+
5432+	return mc_send_command(mc_io, &cmd);
5433+}
5434+
5435+/**
5436+ * dpseci_get_opr() - Retrieve Order Restoration config and query
5437+ * @mc_io:	Pointer to MC portal's I/O object
5438+ * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
5439+ * @token:	Token of DPSECI object
5440+ * @index:	The queue index
5441+ * @cfg:	Returned OPR configuration
5442+ * @qry:	Returned OPR query
5443+ *
5444+ * Return:	'0' on success, error code otherwise
5445+ */
5446+int dpseci_get_opr(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, u8 index,
5447+		   struct opr_cfg *cfg, struct opr_qry *qry)
5448+{
5449+	struct fsl_mc_command cmd = { 0 };
5450+	struct dpseci_cmd_opr *cmd_params;
5451+	struct dpseci_rsp_get_opr *rsp_params;
5452+	int err;
5453+
5454+	cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_OPR,
5455+					  cmd_flags,
5456+					  token);
5457+	cmd_params = (struct dpseci_cmd_opr *)cmd.params;
5458+	cmd_params->index = index;
5459+	err = mc_send_command(mc_io, &cmd);
5460+	if (err)
5461+		return err;
5462+
5463+	rsp_params = (struct dpseci_rsp_get_opr *)cmd.params;
5464+	qry->rip = dpseci_get_field(rsp_params->flags, OPR_RIP);
5465+	qry->enable = dpseci_get_field(rsp_params->flags, OPR_ENABLE);
5466+	cfg->oloe = rsp_params->oloe;
5467+	cfg->oeane = rsp_params->oeane;
5468+	cfg->olws = rsp_params->olws;
5469+	cfg->oa = rsp_params->oa;
5470+	cfg->oprrws = rsp_params->oprrws;
5471+	qry->nesn = le16_to_cpu(rsp_params->nesn);
5472+	qry->ndsn = le16_to_cpu(rsp_params->ndsn);
5473+	qry->ea_tseq = le16_to_cpu(rsp_params->ea_tseq);
5474+	qry->tseq_nlis = dpseci_get_field(rsp_params->tseq_nlis, OPR_TSEQ_NLIS);
5475+	qry->ea_hseq = le16_to_cpu(rsp_params->ea_hseq);
5476+	qry->hseq_nlis = dpseci_get_field(rsp_params->hseq_nlis, OPR_HSEQ_NLIS);
5477+	qry->ea_hptr = le16_to_cpu(rsp_params->ea_hptr);
5478+	qry->ea_tptr = le16_to_cpu(rsp_params->ea_tptr);
5479+	qry->opr_vid = le16_to_cpu(rsp_params->opr_vid);
5480+	qry->opr_id = le16_to_cpu(rsp_params->opr_id);
5481+
5482+	return 0;
5483+}
5484+
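A hedged sketch of order-restoration setup and query using the two helpers above; OPR_OPT_CREATE and struct opr_cfg/opr_qry come from the MC bus headers (forward-declared in dpseci.h), and the field values are illustrative:

struct opr_cfg cfg = {
	.oprrws = 3,	/* restoration window size, illustrative */
};
struct opr_qry qry;
int err;

/* Create an order point record on queue index 0 */
err = dpseci_set_opr(mc_io, 0, token, 0, OPR_OPT_CREATE, &cfg);
if (err)
	return err;

/* Later: query its state */
err = dpseci_get_opr(mc_io, 0, token, 0, &cfg, &qry);
if (!err && qry.enable)
	dev_dbg(dev, "OPR %u active, nesn=%u ndsn=%u\n",
		qry.opr_id, qry.nesn, qry.ndsn);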
5485 /**
5486  * dpseci_set_congestion_notification() - Set congestion group
5487  *	notification configuration
5488diff --git a/drivers/crypto/caam/dpseci.h b/drivers/crypto/caam/dpseci.h
5489index 6dcd9be81..d453baac7 100644
5490--- a/drivers/crypto/caam/dpseci.h
5491+++ b/drivers/crypto/caam/dpseci.h
5492@@ -12,6 +12,8 @@
5493  */
5494
5495 struct fsl_mc_io;
5496+struct opr_cfg;
5497+struct opr_qry;
5498
5499 /**
5500  * General DPSECI macros
5501@@ -37,10 +39,22 @@ int dpseci_close(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token);
5502  */
5503 #define DPSECI_OPT_HAS_CG		0x000020
5504
5505+/**
5506+ * Enable the Order Restoration support
5507+ */
5508+#define DPSECI_OPT_HAS_OPR		0x000040
5509+
5510+/**
5511+ * Order Point Records are shared for the entire DPSECI
5512+ */
5513+#define DPSECI_OPT_OPR_SHARED		0x000080
5514+
5515 /**
5516  * struct dpseci_cfg - Structure representing DPSECI configuration
5517- * @options: Any combination of the following flags:
5518+ * @options: Any combination of the following options:
5519  *		DPSECI_OPT_HAS_CG
5520+ *		DPSECI_OPT_HAS_OPR
5521+ *		DPSECI_OPT_OPR_SHARED
5522  * @num_tx_queues: num of queues towards the SEC
5523  * @num_rx_queues: num of queues back from the SEC
5524  * @priorities: Priorities for the SEC hardware processing;
5525@@ -55,6 +69,12 @@ struct dpseci_cfg {
5526 	u8 priorities[DPSECI_MAX_QUEUE_NUM];
5527 };
5528
5529+int dpseci_create(struct fsl_mc_io *mc_io, u16 dprc_token, u32 cmd_flags,
5530+		  const struct dpseci_cfg *cfg, u32 *obj_id);
5531+
5532+int dpseci_destroy(struct fsl_mc_io *mc_io, u16 dprc_token, u32 cmd_flags,
5533+		   u32 object_id);
5534+
5535 int dpseci_enable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token);
5536
5537 int dpseci_disable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token);
5538@@ -64,13 +84,33 @@ int dpseci_reset(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token);
5539 int dpseci_is_enabled(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
5540 		      int *en);
5541
5542+int dpseci_get_irq_enable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
5543+			  u8 irq_index, u8 *en);
5544+
5545+int dpseci_set_irq_enable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
5546+			  u8 irq_index, u8 en);
5547+
5548+int dpseci_get_irq_mask(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
5549+			u8 irq_index, u32 *mask);
5550+
5551+int dpseci_set_irq_mask(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
5552+			u8 irq_index, u32 mask);
5553+
5554+int dpseci_get_irq_status(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
5555+			  u8 irq_index, u32 *status);
5556+
5557+int dpseci_clear_irq_status(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
5558+			    u8 irq_index, u32 status);
5559+
5560 /**
5561  * struct dpseci_attr - Structure representing DPSECI attributes
5562  * @id: DPSECI object ID
5563  * @num_tx_queues: number of queues towards the SEC
5564  * @num_rx_queues: number of queues back from the SEC
5565- * @options: any combination of the following flags:
5566+ * @options: any combination of the following options:
5567  *		DPSECI_OPT_HAS_CG
5568+ *		DPSECI_OPT_HAS_OPR
5569+ *		DPSECI_OPT_OPR_SHARED
5570  */
5571 struct dpseci_attr {
5572 	int id;
5573@@ -250,9 +290,39 @@ struct dpseci_sec_attr {
5574 int dpseci_get_sec_attr(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
5575 			struct dpseci_sec_attr *attr);
5576
5577+/**
5578+ * struct dpseci_sec_counters - Structure representing global SEC counters
5579+ *				(not per-DPSECI counters)
5580+ * @dequeued_requests:	Number of Requests Dequeued
5581+ * @ob_enc_requests:	Number of Outbound Encrypt Requests
5582+ * @ib_dec_requests:	Number of Inbound Decrypt Requests
5583+ * @ob_enc_bytes:	Number of Outbound Bytes Encrypted
5584+ * @ob_prot_bytes:	Number of Outbound Bytes Protected
5585+ * @ib_dec_bytes:	Number of Inbound Bytes Decrypted
5586+ * @ib_valid_bytes:	Number of Inbound Bytes Validated
5587+ */
5588+struct dpseci_sec_counters {
5589+	u64 dequeued_requests;
5590+	u64 ob_enc_requests;
5591+	u64 ib_dec_requests;
5592+	u64 ob_enc_bytes;
5593+	u64 ob_prot_bytes;
5594+	u64 ib_dec_bytes;
5595+	u64 ib_valid_bytes;
5596+};
5597+
5598+int dpseci_get_sec_counters(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
5599+			    struct dpseci_sec_counters *counters);
5600+
5601 int dpseci_get_api_version(struct fsl_mc_io *mc_io, u32 cmd_flags,
5602 			   u16 *major_ver, u16 *minor_ver);
5603
5604+int dpseci_set_opr(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, u8 index,
5605+		   u8 options, struct opr_cfg *cfg);
5606+
5607+int dpseci_get_opr(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, u8 index,
5608+		   struct opr_cfg *cfg, struct opr_qry *qry);
5609+
5610 /**
5611  * enum dpseci_congestion_unit - DPSECI congestion units
5612  * @DPSECI_CONGESTION_UNIT_BYTES: bytes units
5613diff --git a/drivers/crypto/caam/dpseci_cmd.h b/drivers/crypto/caam/dpseci_cmd.h
5614index 71a007c85..9fec1c92e 100644
5615--- a/drivers/crypto/caam/dpseci_cmd.h
5616+++ b/drivers/crypto/caam/dpseci_cmd.h
5617@@ -17,6 +17,7 @@
5618 /* Command versioning */
5619 #define DPSECI_CMD_BASE_VERSION		1
5620 #define DPSECI_CMD_BASE_VERSION_V2	2
5621+#define DPSECI_CMD_BASE_VERSION_V3	3
5622 #define DPSECI_CMD_ID_OFFSET		4
5623
5624 #define DPSECI_CMD_V1(id)	(((id) << DPSECI_CMD_ID_OFFSET) | \
5625@@ -25,9 +26,14 @@
5626 #define DPSECI_CMD_V2(id)	(((id) << DPSECI_CMD_ID_OFFSET) | \
5627 				 DPSECI_CMD_BASE_VERSION_V2)
5628
5629+#define DPSECI_CMD_V3(id)	(((id) << DPSECI_CMD_ID_OFFSET) | \
5630+				 DPSECI_CMD_BASE_VERSION_V3)
5631+
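As a worked example of this encoding, DPSECI_CMDID_CREATE below is DPSECI_CMD_V3(0x909):

/* (0x909 << DPSECI_CMD_ID_OFFSET) | DPSECI_CMD_BASE_VERSION_V3
 *   = (0x909 << 4) | 3
 *   = 0x9090 | 0x3 = 0x9093
 * command ID in the upper bits, API version in the low nibble.
 */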
5632 /* Command IDs */
5633 #define DPSECI_CMDID_CLOSE				DPSECI_CMD_V1(0x800)
5634 #define DPSECI_CMDID_OPEN				DPSECI_CMD_V1(0x809)
5635+#define DPSECI_CMDID_CREATE				DPSECI_CMD_V3(0x909)
5636+#define DPSECI_CMDID_DESTROY				DPSECI_CMD_V1(0x989)
5637 #define DPSECI_CMDID_GET_API_VERSION			DPSECI_CMD_V1(0xa09)
5638
5639 #define DPSECI_CMDID_ENABLE				DPSECI_CMD_V1(0x002)
5640@@ -36,10 +42,20 @@
5641 #define DPSECI_CMDID_RESET				DPSECI_CMD_V1(0x005)
5642 #define DPSECI_CMDID_IS_ENABLED				DPSECI_CMD_V1(0x006)
5643
5644+#define DPSECI_CMDID_SET_IRQ_ENABLE			DPSECI_CMD_V1(0x012)
5645+#define DPSECI_CMDID_GET_IRQ_ENABLE			DPSECI_CMD_V1(0x013)
5646+#define DPSECI_CMDID_SET_IRQ_MASK			DPSECI_CMD_V1(0x014)
5647+#define DPSECI_CMDID_GET_IRQ_MASK			DPSECI_CMD_V1(0x015)
5648+#define DPSECI_CMDID_GET_IRQ_STATUS			DPSECI_CMD_V1(0x016)
5649+#define DPSECI_CMDID_CLEAR_IRQ_STATUS			DPSECI_CMD_V1(0x017)
5650+
5651 #define DPSECI_CMDID_SET_RX_QUEUE			DPSECI_CMD_V1(0x194)
5652 #define DPSECI_CMDID_GET_RX_QUEUE			DPSECI_CMD_V1(0x196)
5653 #define DPSECI_CMDID_GET_TX_QUEUE			DPSECI_CMD_V1(0x197)
5654 #define DPSECI_CMDID_GET_SEC_ATTR			DPSECI_CMD_V2(0x198)
5655+#define DPSECI_CMDID_GET_SEC_COUNTERS			DPSECI_CMD_V1(0x199)
5656+#define DPSECI_CMDID_SET_OPR				DPSECI_CMD_V1(0x19A)
5657+#define DPSECI_CMDID_GET_OPR				DPSECI_CMD_V1(0x19B)
5658 #define DPSECI_CMDID_SET_CONGESTION_NOTIFICATION	DPSECI_CMD_V1(0x170)
5659 #define DPSECI_CMDID_GET_CONGESTION_NOTIFICATION	DPSECI_CMD_V1(0x171)
5660
5661@@ -58,6 +74,20 @@ struct dpseci_cmd_open {
5662 	__le32 dpseci_id;
5663 };
5664
5665+struct dpseci_cmd_create {
5666+	u8 priorities[8];
5667+	u8 num_tx_queues;
5668+	u8 num_rx_queues;
5669+	u8 pad0[6];
5670+	__le32 options;
5671+	__le32 pad1;
5672+	u8 priorities2[8];
5673+};
5674+
5675+struct dpseci_cmd_destroy {
5676+	__le32 object_id;
5677+};
5678+
5679 #define DPSECI_ENABLE_SHIFT	0
5680 #define DPSECI_ENABLE_SIZE	1
5681
5682@@ -65,6 +95,26 @@ struct dpseci_rsp_is_enabled {
5683 	u8 is_enabled;
5684 };
5685
5686+struct dpseci_cmd_irq_enable {
5687+	u8 enable_state;
5688+	u8 pad[3];
5689+	u8 irq_index;
5690+};
5691+
5692+struct dpseci_rsp_get_irq_enable {
5693+	u8 enable_state;
5694+};
5695+
5696+struct dpseci_cmd_irq_mask {
5697+	__le32 mask;
5698+	u8 irq_index;
5699+};
5700+
5701+struct dpseci_cmd_irq_status {
5702+	__le32 status;
5703+	u8 irq_index;
5704+};
5705+
5706 struct dpseci_rsp_get_attributes {
5707 	__le32 id;
5708 	__le32 pad0;
5709@@ -126,11 +176,70 @@ struct dpseci_rsp_get_sec_attr {
5710 	u8 ptha_acc_num;
5711 };
5712
5713+struct dpseci_rsp_get_sec_counters {
5714+	__le64 dequeued_requests;
5715+	__le64 ob_enc_requests;
5716+	__le64 ib_dec_requests;
5717+	__le64 ob_enc_bytes;
5718+	__le64 ob_prot_bytes;
5719+	__le64 ib_dec_bytes;
5720+	__le64 ib_valid_bytes;
5721+};
5722+
5723 struct dpseci_rsp_get_api_version {
5724 	__le16 major;
5725 	__le16 minor;
5726 };
5727
5728+struct dpseci_cmd_opr {
5729+	__le16 pad;
5730+	u8 index;
5731+	u8 options;
5732+	u8 pad1[7];
5733+	u8 oloe;
5734+	u8 oeane;
5735+	u8 olws;
5736+	u8 oa;
5737+	u8 oprrws;
5738+};
5739+
5740+#define DPSECI_OPR_RIP_SHIFT		0
5741+#define DPSECI_OPR_RIP_SIZE		1
5742+#define DPSECI_OPR_ENABLE_SHIFT		1
5743+#define DPSECI_OPR_ENABLE_SIZE		1
5744+#define DPSECI_OPR_TSEQ_NLIS_SHIFT	0
5745+#define DPSECI_OPR_TSEQ_NLIS_SIZE	1
5746+#define DPSECI_OPR_HSEQ_NLIS_SHIFT	0
5747+#define DPSECI_OPR_HSEQ_NLIS_SIZE	1
5748+
5749+struct dpseci_rsp_get_opr {
5750+	__le64 pad;
5751+	u8 flags;
5752+	u8 pad0[2];
5753+	u8 oloe;
5754+	u8 oeane;
5755+	u8 olws;
5756+	u8 oa;
5757+	u8 oprrws;
5758+	__le16 nesn;
5759+	__le16 pad1;
5760+	__le16 ndsn;
5761+	__le16 pad2;
5762+	__le16 ea_tseq;
5763+	u8 tseq_nlis;
5764+	u8 pad3;
5765+	__le16 ea_hseq;
5766+	u8 hseq_nlis;
5767+	u8 pad4;
5768+	__le16 ea_hptr;
5769+	__le16 pad5;
5770+	__le16 ea_tptr;
5771+	__le16 pad6;
5772+	__le16 opr_vid;
5773+	__le16 pad7;
5774+	__le16 opr_id;
5775+};
5776+
5777 #define DPSECI_CGN_DEST_TYPE_SHIFT	0
5778 #define DPSECI_CGN_DEST_TYPE_SIZE	4
5779 #define DPSECI_CGN_UNITS_SHIFT		4
5780diff --git a/drivers/crypto/caam/fsl_jr_uio.c b/drivers/crypto/caam/fsl_jr_uio.c
5781new file mode 100644
5782index 000000000..2d3a0554a
5783--- /dev/null
5784+++ b/drivers/crypto/caam/fsl_jr_uio.c
5785@@ -0,0 +1,245 @@
5786+// SPDX-License-Identifier: GPL-2.0
5787+/*
5788+ * Copyright 2013 Freescale Semiconductor, Inc.
5789+ * Copyright 2018 NXP
5790+ */
5791+
5792+#include <linux/kernel.h>
5793+#include <linux/module.h>
5794+#include <linux/of_address.h>
5795+#include <linux/of_irq.h>
5796+#include <linux/of_platform.h>
5797+#include <linux/io.h>
5798+#include <linux/uio_driver.h>
5799+#include <linux/slab.h>
5800+#include <linux/list.h>
5801+#include "regs.h"
5802+#include "fsl_jr_uio.h"
5803+
5804+static const char jr_uio_version[] = "fsl JR UIO driver v1.0";
5805+
5806+#define NAME_LENGTH 30
5807+#define JR_INDEX_OFFSET 12
5808+
5809+static const char uio_device_name[] = "fsl-jr";
5810+static LIST_HEAD(jr_list);
5811+
5812+struct jr_uio_info {
5813+	atomic_t ref; /* exclusive, only one open() at a time */
5814+	struct uio_info uio;
5815+	char name[NAME_LENGTH];
5816+};
5817+
5818+struct jr_dev {
5819+	u32 revision;
5820+	u32 index;
5821+	u32 irq;
5822+	struct caam_job_ring __iomem *global_regs;
5823+	struct device *dev;
5824+	struct resource *res;
5825+	struct jr_uio_info info;
5826+	struct list_head node;
5827+	struct list_head jr_list;
5828+};
5829+
5830+static int jr_uio_open(struct uio_info *info, struct inode *inode)
5831+{
5832+	struct jr_uio_info *uio_info = container_of(info,
5833+					struct jr_uio_info, uio);
5834+
5835+	if (!atomic_dec_and_test(&uio_info->ref)) {
5836+		pr_err("%s: failing non-exclusive open()\n", uio_info->name);
5837+		atomic_inc(&uio_info->ref);
5838+		return -EBUSY;
5839+	}
5840+
5841+	return 0;
5842+}
5843+
5844+static int jr_uio_release(struct uio_info *info, struct inode *inode)
5845+{
5846+	struct jr_uio_info *uio_info = container_of(info,
5847+					struct jr_uio_info, uio);
5848+	atomic_inc(&uio_info->ref);
5849+
5850+	return 0;
5851+}
5852+
5853+static irqreturn_t jr_uio_irq_handler(int irq, struct uio_info *dev_info)
5854+{
5855+	struct jr_dev *jrdev = dev_info->priv;
5856+	u32 irqstate;
5857+
5858+	irqstate = rd_reg32(&jrdev->global_regs->jrintstatus);
5859+
5860+	if (!irqstate)
5861+		return IRQ_NONE;
5862+
5863+	if (irqstate & JRINT_JR_ERROR)
5864+		dev_info(jrdev->dev, "uio job ring error - irqstate: %08x\n",
5865+			 irqstate);
5866+
5867+	/* Mask valid interrupts */
5868+	clrsetbits_32(&jrdev->global_regs->rconfig_lo, 0, JRCFG_IMSK);
5869+
5870+	/* Have valid interrupt at this point, just ACK and trigger */
5871+	wr_reg32(&jrdev->global_regs->jrintstatus, irqstate);
5872+
5873+	return IRQ_HANDLED;
5874+}
5875+
5876+static int jr_uio_irqcontrol(struct uio_info *dev_info, int irqon)
5877+{
5878+	struct jr_dev *jrdev = dev_info->priv;
5879+
5880+	switch (irqon) {
5881+	case SEC_UIO_SIMULATE_IRQ_CMD:
5882+		uio_event_notify(dev_info);
5883+		break;
5884+	case SEC_UIO_ENABLE_IRQ_CMD:
5885+		/* Enable Job Ring interrupt */
5886+		clrsetbits_32(&jrdev->global_regs->rconfig_lo, JRCFG_IMSK, 0);
5887+		break;
5888+	case SEC_UIO_DISABLE_IRQ_CMD:
5889+		/* Disable Job Ring interrupt */
5890+		clrsetbits_32(&jrdev->global_regs->rconfig_lo, 0, JRCFG_IMSK);
5891+		break;
5892+	default:
5893+		break;
5894+	}
5895+	return 0;
5896+}
5897+
5898+static int __init jr_uio_init(struct jr_dev *uio_dev)
5899+{
5900+	int ret;
5901+	struct jr_uio_info *info;
5902+
5903+	info = &uio_dev->info;
5904+	atomic_set(&info->ref, 1);
5905+	info->uio.version = jr_uio_version;
5906+	info->uio.name = uio_dev->info.name;
5907+	info->uio.mem[0].name = "JR config space";
5908+	info->uio.mem[0].addr = uio_dev->res->start;
5909+	info->uio.mem[0].size = resource_size(uio_dev->res);
5910+	info->uio.mem[0].internal_addr = uio_dev->global_regs;
5911+	info->uio.mem[0].memtype = UIO_MEM_PHYS;
5912+	info->uio.irq = uio_dev->irq;
5913+	info->uio.irq_flags = IRQF_SHARED;
5914+	info->uio.handler = jr_uio_irq_handler;
5915+	info->uio.irqcontrol = jr_uio_irqcontrol;
5916+	info->uio.open = jr_uio_open;
5917+	info->uio.release = jr_uio_release;
5918+	info->uio.priv = uio_dev;
5919+
5920+	ret = uio_register_device(uio_dev->dev, &info->uio);
5921+	if (ret) {
5922+		dev_err(uio_dev->dev, "jr_uio: UIO registration failed\n");
5923+		return ret;
5924+	}
5925+
5926+	return 0;
5927+}
5928+
5929+static const struct of_device_id jr_ids[] = {
5930+	{ .compatible = "fsl,sec-v4.0-job-ring", },
5931+	{ .compatible = "fsl,sec-v4.4-job-ring", },
5932+	{ .compatible = "fsl,sec-v5.0-job-ring", },
5933+	{ .compatible = "fsl,sec-v6.0-job-ring", },
5934+	{},
5935+};
5936+
5937+static int fsl_jr_probe(struct platform_device *dev)
5938+{
5939+	struct jr_dev *jr_dev;
5940+	struct device_node *jr_node;
5941+	int ret, count = 0;
5942+	struct list_head *p;
5943+
5944+	jr_node = dev->dev.of_node;
5945+	if (!jr_node) {
5946+		dev_err(&dev->dev, "Device OF-Node is NULL\n");
5947+		return -EFAULT;
5948+	}
5949+
5950+	jr_dev = devm_kzalloc(&dev->dev, sizeof(*jr_dev), GFP_KERNEL);
5951+	if (!jr_dev)
5952+		return -ENOMEM;
5953+
5954+	/* Create name and index */
5955+	list_for_each(p, &jr_list) {
5956+		count++;
5957+	}
5958+	jr_dev->index = count;
5959+
5960+	snprintf(jr_dev->info.name, sizeof(jr_dev->info.name) - 1,
5961+		 "%s%d", uio_device_name, jr_dev->index);
5962+
5963+	jr_dev->dev = &dev->dev;
5964+	platform_set_drvdata(dev, jr_dev);
5965+
5966+	jr_dev->res = platform_get_resource(dev, IORESOURCE_MEM, 0);
5967+	if (unlikely(!jr_dev->res)) {
5968+		dev_err(jr_dev->dev, "platform_get_resource() failed\n");
5969+		ret = -ENOMEM;
5970+		goto abort;
5971+	}
5972+
5973+	jr_dev->global_regs =
5974+		devm_ioremap(&dev->dev, jr_dev->res->start,
5975+			     resource_size(jr_dev->res));
5976+	if (unlikely(!jr_dev->global_regs)) {
5977+		dev_err(jr_dev->dev, "devm_ioremap failed\n");
5978+		ret = -EIO;
5979+		goto abort;
5980+	}
5981+	jr_dev->irq = irq_of_parse_and_map(jr_node, 0);
5982+	dev_dbg(jr_dev->dev, "errirq: %d\n", jr_dev->irq);
5983+
5984+	/* Register UIO */
5985+	ret = jr_uio_init(jr_dev);
5986+	if (ret) {
5987+		dev_err(&dev->dev, "UIO init Failed\n");
5988+		goto abort;
5989+	}
5990+
5991+	list_add_tail(&jr_dev->node, &jr_list);
5992+
5993+	dev_info(jr_dev->dev, "UIO device full name %s initialized\n",
5994+		 jr_dev->info.name);
5995+
5996+	return 0;
5997+
5998+abort:
5999+	return ret;
6000+}
6001+
6002+static int fsl_jr_remove(struct platform_device *dev)
6003+{
6004+	struct jr_dev *jr_dev = platform_get_drvdata(dev);
6005+
6006+	if (!jr_dev)
6007+		return 0;
6008+
6009+	list_del(&jr_dev->node);
6010+	uio_unregister_device(&jr_dev->info.uio);
6011+
6012+	return 0;
6013+}
6014+
6015+MODULE_DEVICE_TABLE(of, jr_ids);
6016+
6017+static struct platform_driver fsl_jr_driver = {
6018+	.driver = {
6019+		.name = "fsl-jr-uio",
6020+		.of_match_table = jr_ids,
6021+	},
6022+	.probe = fsl_jr_probe,
6023+	.remove = fsl_jr_remove,
6024+};
6025+
6026+module_platform_driver(fsl_jr_driver);
6027+
6028+MODULE_LICENSE("GPL");
6029+MODULE_AUTHOR("NXP");
6030+MODULE_DESCRIPTION("FSL SEC UIO Driver");
6031diff --git a/drivers/crypto/caam/fsl_jr_uio.h b/drivers/crypto/caam/fsl_jr_uio.h
6032new file mode 100644
6033index 000000000..2956645e1
6034--- /dev/null
6035+++ b/drivers/crypto/caam/fsl_jr_uio.h
6036@@ -0,0 +1,25 @@
6037+/* SPDX-License-Identifier: GPL-2.0 */
6038+/*
6039+ * CAAM Job RING UIO support header file
6040+ *
6041+ * Copyright 2013 Freescale Semiconductor, Inc
6042+ * Copyright 2018 NXP
6043+ */
6044+
6045+#ifndef FSL_JR_UIO_H
6046+#define FSL_JR_UIO_H
6047+
6048+/** UIO command used by user-space driver to request
6049+ *  disabling IRQs on a certain job ring
6050+ */
6051+#define SEC_UIO_DISABLE_IRQ_CMD         0
6052+/** UIO command used by user-space driver to request
6053+ *  enabling IRQs on a certain job ring
6054+ */
6055+#define SEC_UIO_ENABLE_IRQ_CMD          1
6056+/** UIO command used by user-space driver to request SEC kernel driver
6057+ *  to simulate that an IRQ is generated on a certain job ring
6058+ */
6059+#define SEC_UIO_SIMULATE_IRQ_CMD        2
6060+
6061+#endif
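The three command codes above are consumed by jr_uio_irqcontrol() through the standard UIO interface: user space writes a 32-bit signed value to the /dev/uioN file, and a blocking read returns the event count. A hedged user-space sketch (device path and helper name are hypothetical):

#include <fcntl.h>
#include <stdint.h>
#include <unistd.h>

/* Hypothetical helper: enable JR IRQs and wait for one event */
int wait_jr_irq(const char *path)	/* e.g. "/dev/uio0" */
{
	int32_t cmd = SEC_UIO_ENABLE_IRQ_CMD;
	uint32_t count;
	int fd = open(path, O_RDWR);

	if (fd < 0)
		return -1;

	/* write() of an s32 reaches the driver's irqcontrol hook */
	if (write(fd, &cmd, sizeof(cmd)) != sizeof(cmd)) {
		close(fd);
		return -1;
	}

	/* Blocks until jr_uio_irq_handler() signals an event */
	if (read(fd, &count, sizeof(count)) != sizeof(count)) {
		close(fd);
		return -1;
	}

	close(fd);
	return 0;
}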
6062diff --git a/drivers/crypto/caam/intern.h b/drivers/crypto/caam/intern.h
6063index 9112279a4..ad83f3a83 100644
6064--- a/drivers/crypto/caam/intern.h
6065+++ b/drivers/crypto/caam/intern.h
6066@@ -39,6 +39,18 @@ struct caam_jrentry_info {
6067 	u32 desc_size;	/* Stored size for postprocessing, header derived */
6068 };
6069
6070+#ifdef CONFIG_PM_SLEEP
6071+struct caam_jr_state {
6072+	dma_addr_t inpbusaddr;
6073+	dma_addr_t outbusaddr;
6074+};
6075+#endif
6076+
6077+struct caam_jr_dequeue_params {
6078+	struct device *dev;
6079+	int enable_itr;
6080+};
6081+
6082 /* Private sub-storage for a single JobR */
6083 struct caam_drv_private_jr {
6084 	struct list_head	list_node;	/* Job Ring device list */
6085@@ -46,6 +58,7 @@ struct caam_drv_private_jr {
6086 	int ridx;
6087 	struct caam_job_ring __iomem *rregs;	/* JobR's register space */
6088 	struct tasklet_struct irqtask;
6089+	struct caam_jr_dequeue_params tasklet_params;
6090 	int irq;			/* One per queue */
6091 	bool hwrng;
6092
6093@@ -63,18 +76,35 @@ struct caam_drv_private_jr {
6094 	int tail;			/* entinfo (s/w ring) tail index */
6095 	void *outring;			/* Base of output ring, DMA-safe */
6096 	struct crypto_engine *engine;
6097+
6098+#ifdef CONFIG_PM_SLEEP
6099+	struct caam_jr_state state;	/* State of the JR during PM */
6100+#endif
6101 };
6102
6103+#ifdef CONFIG_PM_SLEEP
6104+struct caam_ctl_state {
6105+	struct masterid deco_mid[16];
6106+	struct masterid jr_mid[4];
6107+	u32 mcr;
6108+	u32 scfgr;
6109+};
6110+#endif
6111+
6112 /*
6113  * Driver-private storage for a single CAAM block instance
6114  */
6115 struct caam_drv_private {
6116+	struct device *smdev;
6117+
6118 	/* Physical-presence section */
6119 	struct caam_ctrl __iomem *ctrl; /* controller region */
6120 	struct caam_deco __iomem *deco; /* DECO/CCB views */
6121 	struct caam_assurance __iomem *assure;
6122 	struct caam_queue_if __iomem *qi; /* QI control region */
6123 	struct caam_job_ring __iomem *jr[4];	/* JobR's register space */
6124+	dma_addr_t __iomem *sm_base;	/* Secure memory storage base */
6125+	phys_addr_t sm_phy;		/* Secure memory storage physical */
6126
6127 	struct iommu_domain *domain;
6128
6129@@ -84,8 +114,11 @@ struct caam_drv_private {
6130 	 */
6131 	u8 total_jobrs;		/* Total Job Rings in device */
6132 	u8 qi_present;		/* Nonzero if QI present in device */
6133+	u8 sm_present;		/* Nonzero if Secure Memory is supported */
6134 	u8 mc_en;		/* Nonzero if MC f/w is active */
6135-	int secvio_irq;		/* Security violation interrupt number */
6136+	u8 scu_en;		/* Nonzero if SCU f/w is active */
6137+	u8 optee_en;		/* Nonzero if OP-TEE f/w is active */
6138+	bool pr_support;	/* RNG prediction resistance available */
6139 	int virt_en;		/* Virtualization enabled in CAAM */
6140 	int era;		/* CAAM Era (internal HW revision) */
6141
6142@@ -105,6 +138,11 @@ struct caam_drv_private {
6143 	struct dentry *ctl; /* controller dir */
6144 	struct debugfs_blob_wrapper ctl_kek_wrap, ctl_tkek_wrap, ctl_tdsk_wrap;
6145 #endif
6146+
6147+#ifdef CONFIG_PM_SLEEP
6148+	int caam_off_during_pm;		/* If the CAAM is reset after suspend */
6149+	struct caam_ctl_state state;	/* State of the CTL during PM */
6150+#endif
6151 };
6152
6153 #ifdef CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API
6154@@ -195,6 +233,42 @@ static inline void caam_qi_algapi_exit(void)
6155
6156 #endif /* CONFIG_CAAM_QI */
6157
6158+#ifdef CONFIG_CRYPTO_DEV_FSL_CAAM_SM
6159+
6160+int caam_sm_startup(struct device *dev);
6161+void caam_sm_shutdown(struct device *dev);
6162+
6163+#else
6164+
6165+static inline int caam_sm_startup(struct device *dev)
6166+{
6167+	return 0;
6168+}
6169+
6170+static inline void caam_sm_shutdown(struct device *dev)
6171+{
6172+}
6173+
6174+#endif /* CONFIG_CRYPTO_DEV_FSL_CAAM_SM */
6175+
6176+#ifdef CONFIG_CRYPTO_DEV_FSL_CAAM_TK_API
6177+
6178+int caam_keygen_init(void);
6179+void caam_keygen_exit(void);
6180+
6181+#else
6182+
6183+static inline int caam_keygen_init(void)
6184+{
6185+	return 0;
6186+}
6187+
6188+static inline void caam_keygen_exit(void)
6189+{
6190+}
6191+
6192+#endif /* CONFIG_CRYPTO_DEV_FSL_CAAM_TK_API */
6193+
6194 static inline u64 caam_get_dma_mask(struct device *dev)
6195 {
6196 	struct device_node *nprop = dev->of_node;
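For orientation, the new caam_jr_state above only needs the two ring base addresses; a hedged sketch of the save half of a suspend handler (the helper name is hypothetical, and the register field names are assumed from the existing struct caam_job_ring layout):

#ifdef CONFIG_PM_SLEEP
/* Hypothetical helper: capture JR ring bases before the CAAM loses power */
static void caam_jr_save_state(struct caam_drv_private_jr *jrp)
{
	jrp->state.inpbusaddr = rd_reg64(&jrp->rregs->inpring_base);
	jrp->state.outbusaddr = rd_reg64(&jrp->rregs->outring_base);
}
#endif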
6197diff --git a/drivers/crypto/caam/jr.c b/drivers/crypto/caam/jr.c
6198index 6f669966b..1572d31c9 100644
6199--- a/drivers/crypto/caam/jr.c
6200+++ b/drivers/crypto/caam/jr.c
6201@@ -4,7 +4,7 @@
6202  * JobR backend functionality
6203  *
6204  * Copyright 2008-2012 Freescale Semiconductor, Inc.
6205- * Copyright 2019 NXP
6206+ * Copyright 2019-2020 NXP
6207  */
6208
6209 #include <linux/of_irq.h>
6210@@ -27,8 +27,40 @@ static struct jr_driver_data driver_data;
6211 static DEFINE_MUTEX(algs_lock);
6212 static unsigned int active_devs;
6213
6214-static void register_algs(struct caam_drv_private_jr *jrpriv,
6215-			  struct device *dev)
6216+static void init_misc_func(struct caam_drv_private_jr *jrpriv,
6217+			   struct device *dev)
6218+{
6219+	mutex_lock(&algs_lock);
6220+
6221+	if (active_devs != 1)
6222+		goto algs_unlock;
6223+
6224+	jrpriv->hwrng = !caam_rng_init(dev);
6225+	caam_sm_startup(dev);
6226+	caam_keygen_init();
6227+
6228+algs_unlock:
6229+	mutex_unlock(&algs_lock);
6230+}
6231+
6232+static void exit_misc_func(struct caam_drv_private_jr *jrpriv,
6233+			   struct device *dev)
6234+{
6235+	mutex_lock(&algs_lock);
6236+
6237+	if (active_devs != 1)
6238+		goto algs_unlock;
6239+
6240+	caam_keygen_exit();
6241+	caam_sm_shutdown(dev);
6242+	if (jrpriv->hwrng)
6243+		caam_rng_exit(dev);
6244+
6245+algs_unlock:
6246+	mutex_unlock(&algs_lock);
6247+}
6248+
6249+static void register_algs(struct device *dev)
6250 {
6251 	mutex_lock(&algs_lock);
6252
6253@@ -38,14 +70,13 @@ static void register_algs(struct caam_drv_private_jr *jrpriv,
6254 	caam_algapi_init(dev);
6255 	caam_algapi_hash_init(dev);
6256 	caam_pkc_init(dev);
6257-	jrpriv->hwrng = !caam_rng_init(dev);
6258 	caam_qi_algapi_init(dev);
6259
6260 algs_unlock:
6261 	mutex_unlock(&algs_lock);
6262 }
6263
6264-static void unregister_algs(void)
6265+static void unregister_algs(struct device *dev)
6266 {
6267 	mutex_lock(&algs_lock);
6268
6269@@ -62,6 +93,14 @@ static void unregister_algs(void)
6270 	mutex_unlock(&algs_lock);
6271 }
6272
6273+static int jr_driver_probed;
6274+
6275+int caam_jr_driver_probed(void)
6276+{
6277+	return jr_driver_probed;
6278+}
6279+EXPORT_SYMBOL(caam_jr_driver_probed);
6280+
6281 static void caam_jr_crypto_engine_exit(void *data)
6282 {
6283 	struct device *jrdev = data;
6284@@ -71,19 +110,27 @@ static void caam_jr_crypto_engine_exit(void *data)
6285 	crypto_engine_exit(jrpriv->engine);
6286 }
6287
6288-static int caam_reset_hw_jr(struct device *dev)
6289+/*
6290+ * Put the CAAM in quiesce, i.e. stop job processing
6291+ *
6292+ * Must be called with the JR interrupt (itr) disabled
6293+ */
6294+static int caam_jr_stop_processing(struct device *dev, u32 jrcr_bits)
6295 {
6296 	struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
6297 	unsigned int timeout = 100000;
6298
6299-	/*
6300-	 * mask interrupts since we are going to poll
6301-	 * for reset completion status
6302-	 */
6303-	clrsetbits_32(&jrp->rregs->rconfig_lo, 0, JRCFG_IMSK);
6304+	/* Check the current status */
6305+	if (rd_reg32(&jrp->rregs->jrintstatus) & JRINT_ERR_HALT_INPROGRESS)
6306+		goto wait_quiesce_completion;
6307
6308-	/* initiate flush (required prior to reset) */
6309-	wr_reg32(&jrp->rregs->jrcommand, JRCR_RESET);
6310+	/* Reset the field */
6311+	clrsetbits_32(&jrp->rregs->jrintstatus, JRINT_ERR_HALT_MASK, 0);
6312+
6313+	/* initiate flush / park (required prior to reset) */
6314+	wr_reg32(&jrp->rregs->jrcommand, jrcr_bits);
6315+
6316+wait_quiesce_completion:
6317 	while (((rd_reg32(&jrp->rregs->jrintstatus) & JRINT_ERR_HALT_MASK) ==
6318 		JRINT_ERR_HALT_INPROGRESS) && --timeout)
6319 		cpu_relax();
6320@@ -94,8 +141,56 @@ static int caam_reset_hw_jr(struct device *dev)
6321 		return -EIO;
6322 	}
6323
6324+	return 0;
6325+}
6326+
6327+/*
6328+ * Flush the job ring: running jobs are stopped, queued jobs are invalidated,
6329+ * and the CAAM no longer fetches from the input ring.
6330+ *
6331+ * Must be called with the JR interrupt (itr) disabled
6332+ */
6333+static int caam_jr_flush(struct device *dev)
6334+{
6335+	return caam_jr_stop_processing(dev, JRCR_RESET);
6336+}
6337+
6338+#ifdef CONFIG_PM_SLEEP
6339+/* The resume can be used after a park or a flush if CAAM has not been reset */
6340+static int caam_jr_restart_processing(struct device *dev)
6341+{
6342+	struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
6343+	u32 halt_status = rd_reg32(&jrp->rregs->jrintstatus) &
6344+			  JRINT_ERR_HALT_MASK;
6345+
6346+	/* Check that the flush/park is completed */
6347+	if (halt_status != JRINT_ERR_HALT_COMPLETE)
6348+		return -1;
6349+
6350+	/* Resume processing of jobs */
6351+	clrsetbits_32(&jrp->rregs->jrintstatus, 0, JRINT_ERR_HALT_COMPLETE);
6352+
6353+	return 0;
6354+}
6355+#endif /* CONFIG_PM_SLEEP */
6356+
6357+static int caam_reset_hw_jr(struct device *dev)
6358+{
6359+	struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
6360+	unsigned int timeout = 100000;
6361+	int err;
6362+
6363+	/*
6364+	 * mask interrupts since we are going to poll
6365+	 * for reset completion status
6366+	 */
6367+	clrsetbits_32(&jrp->rregs->rconfig_lo, 0, JRCFG_IMSK);
6368+
6369+	err = caam_jr_flush(dev);
6370+	if (err)
6371+		return err;
6372+
6373 	/* initiate reset */
6374-	timeout = 100000;
6375 	wr_reg32(&jrp->rregs->jrcommand, JRCR_RESET);
6376 	while ((rd_reg32(&jrp->rregs->jrcommand) & JRCR_RESET) && --timeout)
6377 		cpu_relax();
6378@@ -135,8 +230,7 @@ static int caam_jr_remove(struct platform_device *pdev)
6379 	jrdev = &pdev->dev;
6380 	jrpriv = dev_get_drvdata(jrdev);
6381
6382-	if (jrpriv->hwrng)
6383-		caam_rng_exit(jrdev->parent);
6384+	exit_misc_func(jrpriv, jrdev->parent);
6385
6386 	/*
6387 	 * Return EBUSY if job ring already allocated.
6388@@ -147,7 +241,7 @@ static int caam_jr_remove(struct platform_device *pdev)
6389 	}
6390
6391 	/* Unregister JR-based RNG & crypto algorithms */
6392-	unregister_algs();
6393+	unregister_algs(jrdev->parent);
6394
6395 	/* Remove the node from Physical JobR list maintained by driver */
6396 	spin_lock(&driver_data.jr_alloc_lock);
6397@@ -159,6 +253,8 @@ static int caam_jr_remove(struct platform_device *pdev)
6398 	if (ret)
6399 		dev_err(jrdev, "Failed to shut down job ring\n");
6400
6401+	jr_driver_probed--;
6402+
6403 	return ret;
6404 }
6405
6406@@ -174,7 +270,7 @@ static irqreturn_t caam_jr_interrupt(int irq, void *st_dev)
6407 	 * tasklet if jobs done.
6408 	 */
6409 	irqstate = rd_reg32(&jrp->rregs->jrintstatus);
6410-	if (!irqstate)
6411+	if (!(irqstate & JRINT_JR_INT))
6412 		return IRQ_NONE;
6413
6414 	/*
6415@@ -204,7 +300,8 @@ static irqreturn_t caam_jr_interrupt(int irq, void *st_dev)
6416 static void caam_jr_dequeue(unsigned long devarg)
6417 {
6418 	int hw_idx, sw_idx, i, head, tail;
6419-	struct device *dev = (struct device *)devarg;
6420+	struct caam_jr_dequeue_params *params = (void *)devarg;
6421+	struct device *dev = params->dev;
6422 	struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
6423 	void (*usercall)(struct device *dev, u32 *desc, u32 status, void *arg);
6424 	u32 *userdesc, userstatus;
6425@@ -278,8 +375,9 @@ static void caam_jr_dequeue(unsigned long devarg)
6426 		outring_used--;
6427 	}
6428
6429-	/* reenable / unmask IRQs */
6430-	clrsetbits_32(&jrp->rregs->rconfig_lo, JRCFG_IMSK, 0);
6431+	if (params->enable_itr)
6432+		/* reenable / unmask IRQs */
6433+		clrsetbits_32(&jrp->rregs->rconfig_lo, JRCFG_IMSK, 0);
6434 }
6435
6436 /**
6437@@ -322,6 +420,36 @@ struct device *caam_jr_alloc(void)
6438 }
6439 EXPORT_SYMBOL(caam_jr_alloc);
6440
6441+/**
6442+ * caam_jridx_alloc() - Alloc a specific job ring based on its index.
6443+ *
6444+ * Returns: pointer to the job ring device on success, or
6445+ *	    ERR_PTR(-ENODEV) if no ring with that index is registered.
6446+ **/
6447+struct device *caam_jridx_alloc(int idx)
6448+{
6449+	struct caam_drv_private_jr *jrpriv;
6450+	struct device *dev = ERR_PTR(-ENODEV);
6451+
6452+	spin_lock(&driver_data.jr_alloc_lock);
6453+
6454+	if (list_empty(&driver_data.jr_list))
6455+		goto end;
6456+
6457+	list_for_each_entry(jrpriv, &driver_data.jr_list, list_node) {
6458+		if (jrpriv->ridx == idx) {
6459+			atomic_inc(&jrpriv->tfm_count);
6460+			dev = jrpriv->dev;
6461+			break;
6462+		}
6463+	}
6464+
6465+end:
6466+	spin_unlock(&driver_data.jr_alloc_lock);
6467+	return dev;
6468+}
6469+EXPORT_SYMBOL(caam_jridx_alloc);
6470+
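A brief usage sketch for the index-based allocator; the ring index and the error handling below are illustrative assumptions, not part of the patch:

	struct device *jrdev;

	jrdev = caam_jridx_alloc(0);	/* request the ring with index 0 */
	if (IS_ERR(jrdev))
		return PTR_ERR(jrdev);	/* -ENODEV: no such ring registered */
	/* ... enqueue jobs on jrdev ... */
	caam_jr_free(jrdev);		/* drop the reference taken above */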
6471 /**
6472  * caam_jr_free() - Free the Job Ring
6473  * @rdev:      points to the dev that identifies the Job ring to
6474@@ -404,8 +532,16 @@ int caam_jr_enqueue(struct device *dev, u32 *desc,
6475 	 * Guarantee that the descriptor's DMA address has been written to
6476 	 * the next slot in the ring before the write index is updated, since
6477 	 * other cores may update this index independently.
6478+	 *
6479+	 * Under heavy DDR load, smp_wmb() or dma_wmb() fail to ensure the
6480+	 * input ring is updated before the CAAM starts reading it. The CAAM
6481+	 * then processes a stale descriptor address again and puts it in the
6482+	 * output ring, which makes caam_jr_dequeue() fail, since that old
6483+	 * descriptor is no longer in the software ring.
6484+	 * To fix this, use wmb(), which orders across the full system rather
6485+	 * than only the inner/outer shareable domains.
6486 	 */
6487-	smp_wmb();
6488+	wmb();
6489
6490 	jrp->head = (head + 1) & (JOBR_DEPTH - 1);
6491
6492@@ -429,6 +565,80 @@ int caam_jr_enqueue(struct device *dev, u32 *desc,
6493 }
6494 EXPORT_SYMBOL(caam_jr_enqueue);
6495
6496+/**
6497+ * caam_jr_run_and_wait_for_completion() - Enqueue a job and wait for its
6498+ * completion. Returns 0 if OK, -ENOSPC if the queue is full,
6499+ * -EIO if it cannot map the caller's descriptor.
6500+ * @dev:  struct device of the job ring to be used
6501+ * @desc: points to a job descriptor that executes the request. All
6502+ *        descriptors (and all referenced data) must be in a DMAable
6503+ *        region, and all data references must be physical addresses
6504+ *        accessible to CAAM (i.e. within a PAMU window granted
6505+ *        to it).
6506+ * @cbk:  pointer to a callback function to be invoked upon completion
6507+ *        of this request. This has the form:
6508+ *        callback(struct device *dev, u32 *desc, u32 stat, void *arg)
6509+ *        where:
6510+ *        @dev:    contains the job ring device that processed this
6511+ *                 response.
6512+ *        @desc:   descriptor that initiated the request, same as
6513+ *                 "desc" passed to caam_jr_enqueue().
6514+ *        @status: untranslated status received from CAAM. See the
6515+ *                 reference manual for a detailed description of
6516+ *                 error meaning, or see the JRSTA definitions in the
6517+ *                 register header file
6518+ *        @areq:   optional pointer to an argument passed with the
6519+ *                 original request
6520+ **/
6521+int caam_jr_run_and_wait_for_completion(struct device *dev, u32 *desc,
6522+					void (*cbk)(struct device *dev,
6523+						    u32 *desc, u32 status,
6524+						    void *areq))
6525+{
6526+	int ret = 0;
6527+	struct jr_job_result jobres = {0};
6528+
6529+	/* Initialize the completion structure */
6530+	init_completion(&jobres.completion);
6531+
6532+	/* Enqueue job for execution */
6533+	ret = caam_jr_enqueue(dev, desc, cbk, &jobres);
6534+	if (ret != -EINPROGRESS)
6535+		return ret;
6536+
6537+	/* Wait for job completion */
6538+	wait_for_completion(&jobres.completion);
6539+
6540+	/* Get return code processed in cbk */
6541+	ret = jobres.error;
6542+
6543+	return ret;
6544+}
6545+EXPORT_SYMBOL(caam_jr_run_and_wait_for_completion);
6546+
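A minimal caller-side sketch. Only the signatures and struct jr_job_result come from this patch; the callback name and its status-to-errno mapping are hypothetical:

static void my_done_cbk(struct device *dev, u32 *desc, u32 status, void *areq)
{
	/* areq is the local jr_job_result the wrapper handed to enqueue */
	struct jr_job_result *res = areq;

	res->error = status ? -EIO : 0;	/* assumed error mapping */
	complete(&res->completion);
}

	/* desc: a complete job descriptor in DMA-able memory */
	ret = caam_jr_run_and_wait_for_completion(jrdev, desc, my_done_cbk);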
6547+static void caam_jr_init_hw(struct device *dev, dma_addr_t inpbusaddr,
6548+			    dma_addr_t outbusaddr)
6549+{
6550+	struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
6551+
6552+	wr_reg64(&jrp->rregs->inpring_base, inpbusaddr);
6553+	wr_reg64(&jrp->rregs->outring_base, outbusaddr);
6554+	wr_reg32(&jrp->rregs->inpring_size, JOBR_DEPTH);
6555+	wr_reg32(&jrp->rregs->outring_size, JOBR_DEPTH);
6556+
6557+	/* Select interrupt coalescing parameters */
6558+	clrsetbits_32(&jrp->rregs->rconfig_lo, 0, JOBR_INTC |
6559+		      (JOBR_INTC_COUNT_THLD << JRCFG_ICDCT_SHIFT) |
6560+		      (JOBR_INTC_TIME_THLD << JRCFG_ICTT_SHIFT));
6561+}
6562+
6563+static void caam_jr_reset_index(struct caam_drv_private_jr *jrp)
6564+{
6565+	jrp->out_ring_read_index = 0;
6566+	jrp->head = 0;
6567+	jrp->tail = 0;
6568+}
6569+
6570 /*
6571  * Init JobR independent of platform property detection
6572  */
6573@@ -465,25 +675,16 @@ static int caam_jr_init(struct device *dev)
6574 		jrp->entinfo[i].desc_addr_dma = !0;
6575
6576 	/* Setup rings */
6577-	jrp->out_ring_read_index = 0;
6578-	jrp->head = 0;
6579-	jrp->tail = 0;
6580-
6581-	wr_reg64(&jrp->rregs->inpring_base, inpbusaddr);
6582-	wr_reg64(&jrp->rregs->outring_base, outbusaddr);
6583-	wr_reg32(&jrp->rregs->inpring_size, JOBR_DEPTH);
6584-	wr_reg32(&jrp->rregs->outring_size, JOBR_DEPTH);
6585-
6586+	caam_jr_reset_index(jrp);
6587 	jrp->inpring_avail = JOBR_DEPTH;
6588+	caam_jr_init_hw(dev, inpbusaddr, outbusaddr);
6589
6590 	spin_lock_init(&jrp->inplock);
6591
6592-	/* Select interrupt coalescing parameters */
6593-	clrsetbits_32(&jrp->rregs->rconfig_lo, 0, JOBR_INTC |
6594-		      (JOBR_INTC_COUNT_THLD << JRCFG_ICDCT_SHIFT) |
6595-		      (JOBR_INTC_TIME_THLD << JRCFG_ICTT_SHIFT));
6596-
6597-	tasklet_init(&jrp->irqtask, caam_jr_dequeue, (unsigned long)dev);
6598+	jrp->tasklet_params.dev = dev;
6599+	jrp->tasklet_params.enable_itr = 1;
6600+	tasklet_init(&jrp->irqtask, caam_jr_dequeue,
6601+		     (unsigned long)&jrp->tasklet_params);
6602
6603 	/* Connect job ring interrupt handler. */
6604 	error = devm_request_irq(dev, jrp->irq, caam_jr_interrupt, IRQF_SHARED,
6605@@ -592,11 +793,138 @@ static int caam_jr_probe(struct platform_device *pdev)
6606
6607 	atomic_set(&jrpriv->tfm_count, 0);
6608
6609-	register_algs(jrpriv, jrdev->parent);
6610+	device_init_wakeup(&pdev->dev, 1);
6611+	device_set_wakeup_enable(&pdev->dev, false);
6612+
6613+	register_algs(jrdev->parent);
6614+	init_misc_func(jrpriv, jrdev->parent);
6615+	jr_driver_probed++;
6616
6617 	return 0;
6618 }
6619
6620+#ifdef CONFIG_PM_SLEEP
6621+static void caam_jr_get_hw_state(struct device *dev)
6622+{
6623+	struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
6624+
6625+	jrp->state.inpbusaddr = rd_reg64(&jrp->rregs->inpring_base);
6626+	jrp->state.outbusaddr = rd_reg64(&jrp->rregs->outring_base);
6627+}
6628+
6629+static int caam_jr_suspend(struct device *dev)
6630+{
6631+	struct platform_device *pdev = to_platform_device(dev);
6632+	struct caam_drv_private_jr *jrpriv = platform_get_drvdata(pdev);
6633+	struct caam_drv_private *ctrlpriv = dev_get_drvdata(dev->parent);
6634+	struct caam_jr_dequeue_params suspend_params = {
6635+		.dev = dev,
6636+		.enable_itr = 0,
6637+	};
6638+
6639+	/* Remove the node from Physical JobR list maintained by driver */
6640+	spin_lock(&driver_data.jr_alloc_lock);
6641+	list_del(&jrpriv->list_node);
6642+	spin_unlock(&driver_data.jr_alloc_lock);
6643+
6644+	if (jrpriv->hwrng)
6645+		caam_rng_exit(dev->parent);
6646+
6647+	if (ctrlpriv->caam_off_during_pm) {
6648+		int err;
6649+
6650+		tasklet_disable(&jrpriv->irqtask);
6651+
6652+		/* mask itr to call flush */
6653+		clrsetbits_32(&jrpriv->rregs->rconfig_lo, 0, JRCFG_IMSK);
6654+
6655+		/* Invalidate jobs in progress */
6656+		err = caam_jr_flush(dev);
6657+		if (err) {
6658+			dev_err(dev, "Failed to flush\n");
6659+			return err;
6660+		}
6661+
6662+		/* Dequeue the flushed jobs */
6663+		caam_jr_dequeue((unsigned long)&suspend_params);
6664+
6665+		/* Save state */
6666+		caam_jr_get_hw_state(dev);
6667+	} else if (device_may_wakeup(&pdev->dev)) {
6668+		enable_irq_wake(jrpriv->irq);
6669+	}
6670+
6671+	return 0;
6672+}
6673+
6674+static int caam_jr_resume(struct device *dev)
6675+{
6676+	struct platform_device *pdev = to_platform_device(dev);
6677+	struct caam_drv_private_jr *jrpriv = platform_get_drvdata(pdev);
6678+	struct caam_drv_private *ctrlpriv = dev_get_drvdata(dev->parent);
6679+
6680+	if (ctrlpriv->caam_off_during_pm) {
6681+		u64 inp_addr;
6682+		int err;
6683+
6684+		/*
6685+		 * Check whether the CAAM has been reset by reading the
6686+		 * address of the input ring
6687+		 */
6688+		inp_addr = rd_reg64(&jrpriv->rregs->inpring_base);
6689+		if (inp_addr != 0) {
6690+			/* JR still has some configuration */
6691+			if (inp_addr == jrpriv->state.inpbusaddr) {
6692+			/* JR has not been reset */
6693+				err = caam_jr_restart_processing(dev);
6694+				if (err) {
6695+					dev_err(dev,
6696+						"Restart processing failed\n");
6697+					return err;
6698+				}
6699+
6700+				tasklet_enable(&jrpriv->irqtask);
6701+
6702+				clrsetbits_32(&jrpriv->rregs->rconfig_lo,
6703+					      JRCFG_IMSK, 0);
6704+
6705+				goto add_jr;
6706+			} else if (ctrlpriv->optee_en) {
6707+				/* JR has been used by OPTEE, reset it */
6708+				err = caam_reset_hw_jr(dev);
6709+				if (err) {
6710+					dev_err(dev, "Failed to reset JR\n");
6711+					return err;
6712+				}
6713+			} else {
6714+				/* Unexplained state, return error */
6715+				return -EIO;
6716+			}
6717+		}
6718+
6719+		caam_jr_reset_index(jrpriv);
6720+		caam_jr_init_hw(dev, jrpriv->state.inpbusaddr,
6721+				jrpriv->state.outbusaddr);
6722+
6723+		tasklet_enable(&jrpriv->irqtask);
6724+	} else if (device_may_wakeup(&pdev->dev)) {
6725+		disable_irq_wake(jrpriv->irq);
6726+	}
6727+
6728+add_jr:
6729+	spin_lock(&driver_data.jr_alloc_lock);
6730+	list_add_tail(&jrpriv->list_node, &driver_data.jr_list);
6731+	spin_unlock(&driver_data.jr_alloc_lock);
6732+
6733+	if (jrpriv->hwrng)
6734+		jrpriv->hwrng = !caam_rng_init(dev->parent);
6735+
6736+	return 0;
6737+}
6738+
6739+SIMPLE_DEV_PM_OPS(caam_jr_pm_ops, caam_jr_suspend, caam_jr_resume);
6740+#endif /* CONFIG_PM_SLEEP */
6741+
6742 static const struct of_device_id caam_jr_match[] = {
6743 	{
6744 		.compatible = "fsl,sec-v4.0-job-ring",
6745@@ -612,6 +940,9 @@ static struct platform_driver caam_jr_driver = {
6746 	.driver = {
6747 		.name = "caam_jr",
6748 		.of_match_table = caam_jr_match,
6749+#ifdef CONFIG_PM_SLEEP
6750+		.pm = &caam_jr_pm_ops,
6751+#endif
6752 	},
6753 	.probe       = caam_jr_probe,
6754 	.remove      = caam_jr_remove,
6755diff --git a/drivers/crypto/caam/jr.h b/drivers/crypto/caam/jr.h
6756index eab611530..768df0a6a 100644
6757--- a/drivers/crypto/caam/jr.h
6758+++ b/drivers/crypto/caam/jr.h
6759@@ -3,17 +3,38 @@
6760  * CAAM public-level include definitions for the JobR backend
6761  *
6762  * Copyright 2008-2011 Freescale Semiconductor, Inc.
6763+ * Copyright 2020 NXP
6764  */
6765
6766 #ifndef JR_H
6767 #define JR_H
6768
6769+#include <linux/completion.h>
6770+
6771+ /**
6772+  * struct jr_job_result - Job Ring result structure, used for requests
6773+  *                        that need to run and wait for their completion
6774+  *
6775+  * @error               : The result returned after request was executed
6776+  * @completion          : Structure used to maintain state for a "completion"
6777+  */
6778+struct jr_job_result {
6779+	int error;
6780+	struct completion completion;
6781+};
6782+
6783 /* Prototypes for backend-level services exposed to APIs */
6784+int caam_jr_driver_probed(void);
6785 struct device *caam_jr_alloc(void);
6786+struct device *caam_jridx_alloc(int idx);
6787 void caam_jr_free(struct device *rdev);
6788 int caam_jr_enqueue(struct device *dev, u32 *desc,
6789 		    void (*cbk)(struct device *dev, u32 *desc, u32 status,
6790 				void *areq),
6791 		    void *areq);
6792+int caam_jr_run_and_wait_for_completion(struct device *dev, u32 *desc,
6793+					void (*cbk)(struct device *dev,
6794+						    u32 *desc, u32 status,
6795+						    void *areq));
6796
6797 #endif /* JR_H */
6798diff --git a/drivers/crypto/caam/qi.c b/drivers/crypto/caam/qi.c
6799index ec53528d8..f0762c2e3 100644
6800--- a/drivers/crypto/caam/qi.c
6801+++ b/drivers/crypto/caam/qi.c
6802@@ -9,7 +9,7 @@
6803
6804 #include <linux/cpumask.h>
6805 #include <linux/kthread.h>
6806-#include <soc/fsl/qman.h>
6807+#include <linux/fsl_qman.h>
6808
6809 #include "debugfs.h"
6810 #include "regs.h"
6811@@ -99,23 +99,21 @@ static void *caam_iova_to_virt(struct iommu_domain *domain,
6812 int caam_qi_enqueue(struct device *qidev, struct caam_drv_req *req)
6813 {
6814 	struct qm_fd fd;
6815-	dma_addr_t addr;
6816 	int ret;
6817 	int num_retries = 0;
6818
6819-	qm_fd_clear_fd(&fd);
6820-	qm_fd_set_compound(&fd, qm_sg_entry_get_len(&req->fd_sgt[1]));
6821-
6822-	addr = dma_map_single(qidev, req->fd_sgt, sizeof(req->fd_sgt),
6823+	fd.cmd = 0;
6824+	fd.format = qm_fd_compound;
6825+	fd.cong_weight = caam32_to_cpu(req->fd_sgt[1].length);
6826+	fd.addr = dma_map_single(qidev, req->fd_sgt, sizeof(req->fd_sgt),
6827 			      DMA_BIDIRECTIONAL);
6828-	if (dma_mapping_error(qidev, addr)) {
6829+	if (dma_mapping_error(qidev, fd.addr)) {
6830 		dev_err(qidev, "DMA mapping error for QI enqueue request\n");
6831 		return -EIO;
6832 	}
6833-	qm_fd_addr_set64(&fd, addr);
6834
6835 	do {
6836-		ret = qman_enqueue(req->drv_ctx->req_fq, &fd);
6837+		ret = qman_enqueue(req->drv_ctx->req_fq, &fd, 0);
6838 		if (likely(!ret)) {
6839 			refcount_inc(&req->drv_ctx->refcnt);
6840 			return 0;
6841@@ -133,7 +131,7 @@ int caam_qi_enqueue(struct device *qidev, struct caam_drv_req *req)
6842 EXPORT_SYMBOL(caam_qi_enqueue);
6843
6844 static void caam_fq_ern_cb(struct qman_portal *qm, struct qman_fq *fq,
6845-			   const union qm_mr_entry *msg)
6846+			   const struct qm_mr_entry *msg)
6847 {
6848 	const struct qm_fd *fd;
6849 	struct caam_drv_req *drv_req;
6850@@ -151,7 +149,7 @@ static void caam_fq_ern_cb(struct qman_portal *qm, struct qman_fq *fq,
6851
6852 	refcount_dec(&drv_req->drv_ctx->refcnt);
6853
6854-	if (qm_fd_get_format(fd) != qm_fd_compound) {
6855+	if (fd->format != qm_fd_compound) {
6856 		dev_err(qidev, "Non-compound FD from CAAM\n");
6857 		return;
6858 	}
6859@@ -182,20 +180,22 @@ static struct qman_fq *create_caam_req_fq(struct device *qidev,
6860 	req_fq->cb.fqs = NULL;
6861
6862 	ret = qman_create_fq(0, QMAN_FQ_FLAG_DYNAMIC_FQID |
6863-				QMAN_FQ_FLAG_TO_DCPORTAL, req_fq);
6864+				QMAN_FQ_FLAG_TO_DCPORTAL | QMAN_FQ_FLAG_LOCKED,
6865+			     req_fq);
6866 	if (ret) {
6867 		dev_err(qidev, "Failed to create session req FQ\n");
6868 		goto create_req_fq_fail;
6869 	}
6870
6871-	memset(&opts, 0, sizeof(opts));
6872-	opts.we_mask = cpu_to_be16(QM_INITFQ_WE_FQCTRL | QM_INITFQ_WE_DESTWQ |
6873-				   QM_INITFQ_WE_CONTEXTB |
6874-				   QM_INITFQ_WE_CONTEXTA | QM_INITFQ_WE_CGID);
6875-	opts.fqd.fq_ctrl = cpu_to_be16(QM_FQCTRL_CPCSTASH | QM_FQCTRL_CGE);
6876-	qm_fqd_set_destwq(&opts.fqd, qm_channel_caam, 2);
6877-	opts.fqd.context_b = cpu_to_be32(qman_fq_fqid(rsp_fq));
6878-	qm_fqd_context_a_set64(&opts.fqd, hwdesc);
6879+	opts.we_mask = QM_INITFQ_WE_FQCTRL | QM_INITFQ_WE_DESTWQ |
6880+		       QM_INITFQ_WE_CONTEXTB | QM_INITFQ_WE_CONTEXTA |
6881+		       QM_INITFQ_WE_CGID;
6882+	opts.fqd.fq_ctrl = QM_FQCTRL_CPCSTASH | QM_FQCTRL_CGE;
6883+	opts.fqd.dest.channel = qm_channel_caam;
6884+	opts.fqd.dest.wq = 2;
6885+	opts.fqd.context_b = qman_fq_fqid(rsp_fq);
6886+	opts.fqd.context_a.hi = upper_32_bits(hwdesc);
6887+	opts.fqd.context_a.lo = lower_32_bits(hwdesc);
6888 	opts.fqd.cgid = qipriv.cgr.cgrid;
6889
6890 	ret = qman_init_fq(req_fq, fq_sched_flag, &opts);
6891@@ -209,7 +209,7 @@ static struct qman_fq *create_caam_req_fq(struct device *qidev,
6892 	return req_fq;
6893
6894 init_req_fq_fail:
6895-	qman_destroy_fq(req_fq);
6896+	qman_destroy_fq(req_fq, 0);
6897 create_req_fq_fail:
6898 	kfree(req_fq);
6899 	return ERR_PTR(ret);
6900@@ -277,7 +277,7 @@ static int kill_fq(struct device *qidev, struct qman_fq *fq)
6901 	if (ret)
6902 		dev_err(qidev, "OOS of FQID: %u failed\n", fq->fqid);
6903
6904-	qman_destroy_fq(fq);
6905+	qman_destroy_fq(fq, 0);
6906 	kfree(fq);
6907
6908 	return ret;
6909@@ -295,7 +295,7 @@ static int empty_caam_fq(struct qman_fq *fq, struct caam_drv_ctx *drv_ctx)
6910 		if (ret)
6911 			return ret;
6912
6913-		if (!qm_mcr_np_get(&np, frm_cnt))
6914+		if (!np.frm_cnt)
6915 			break;
6916
6917 		msleep(20);
6918@@ -571,14 +571,13 @@ static enum qman_cb_dqrr_result caam_rsp_fq_dqrr_cb(struct qman_portal *p,
6919 	const struct qm_fd *fd;
6920 	struct device *qidev = &(raw_cpu_ptr(&pcpu_qipriv)->net_dev.dev);
6921 	struct caam_drv_private *priv = dev_get_drvdata(qidev);
6922-	u32 status;
6923
6924 	if (caam_qi_napi_schedule(p, caam_napi))
6925 		return qman_cb_dqrr_stop;
6926
6927 	fd = &dqrr->fd;
6928
6929-	drv_req = caam_iova_to_virt(priv->domain, qm_fd_addr_get64(fd));
6930+	drv_req = caam_iova_to_virt(priv->domain, fd->addr);
6931 	if (unlikely(!drv_req)) {
6932 		dev_err(qidev,
6933 			"Can't find original request for caam response\n");
6934@@ -587,19 +586,18 @@ static enum qman_cb_dqrr_result caam_rsp_fq_dqrr_cb(struct qman_portal *p,
6935
6936 	refcount_dec(&drv_req->drv_ctx->refcnt);
6937
6938-	status = be32_to_cpu(fd->status);
6939-	if (unlikely(status)) {
6940-		u32 ssrc = status & JRSTA_SSRC_MASK;
6941-		u8 err_id = status & JRSTA_CCBERR_ERRID_MASK;
6942+	if (unlikely(fd->status)) {
6943+		u32 ssrc = fd->status & JRSTA_SSRC_MASK;
6944+		u8 err_id = fd->status & JRSTA_CCBERR_ERRID_MASK;
6945
6946 		if (ssrc != JRSTA_SSRC_CCB_ERROR ||
6947 		    err_id != JRSTA_CCBERR_ERRID_ICVCHK)
6948 			dev_err_ratelimited(qidev,
6949 					    "Error: %#x in CAAM response FD\n",
6950-					    status);
6951+					    fd->status);
6952 	}
6953
6954-	if (unlikely(qm_fd_get_format(fd) != qm_fd_compound)) {
6955+	if (unlikely(fd->format != qm_fd_compound)) {
6956 		dev_err(qidev, "Non-compound FD from CAAM\n");
6957 		return qman_cb_dqrr_consume;
6958 	}
6959@@ -607,7 +605,7 @@ static enum qman_cb_dqrr_result caam_rsp_fq_dqrr_cb(struct qman_portal *p,
6960 	dma_unmap_single(drv_req->drv_ctx->qidev, qm_fd_addr(fd),
6961 			 sizeof(drv_req->fd_sgt), DMA_BIDIRECTIONAL);
6962
6963-	drv_req->cbk(drv_req, status);
6964+	drv_req->cbk(drv_req, fd->status);
6965 	return qman_cb_dqrr_consume;
6966 }
6967
6968@@ -631,17 +629,18 @@ static int alloc_rsp_fq_cpu(struct device *qidev, unsigned int cpu)
6969 		return -ENODEV;
6970 	}
6971
6972-	memset(&opts, 0, sizeof(opts));
6973-	opts.we_mask = cpu_to_be16(QM_INITFQ_WE_FQCTRL | QM_INITFQ_WE_DESTWQ |
6974-				   QM_INITFQ_WE_CONTEXTB |
6975-				   QM_INITFQ_WE_CONTEXTA | QM_INITFQ_WE_CGID);
6976-	opts.fqd.fq_ctrl = cpu_to_be16(QM_FQCTRL_CTXASTASHING |
6977-				       QM_FQCTRL_CPCSTASH | QM_FQCTRL_CGE);
6978-	qm_fqd_set_destwq(&opts.fqd, qman_affine_channel(cpu), 3);
6979+	opts.we_mask = QM_INITFQ_WE_FQCTRL | QM_INITFQ_WE_DESTWQ |
6980+		QM_INITFQ_WE_CONTEXTB | QM_INITFQ_WE_CONTEXTA |
6981+		QM_INITFQ_WE_CGID;
6982+	opts.fqd.fq_ctrl = QM_FQCTRL_CTXASTASHING | QM_FQCTRL_CPCSTASH |
6983+			   QM_FQCTRL_CGE;
6984+	opts.fqd.dest.channel = qman_affine_channel(cpu);
6985+	opts.fqd.dest.wq = 3;
6986 	opts.fqd.cgid = qipriv.cgr.cgrid;
6987 	opts.fqd.context_a.stashing.exclusive =	QM_STASHING_EXCL_CTX |
6988 						QM_STASHING_EXCL_DATA;
6989-	qm_fqd_set_stashing(&opts.fqd, 0, 1, 1);
6990+	opts.fqd.context_a.stashing.data_cl = 1;
6991+	opts.fqd.context_a.stashing.context_cl = 1;
6992
6993 	ret = qman_init_fq(fq, QMAN_INITFQ_FLAG_SCHED, &opts);
6994 	if (ret) {
6995@@ -671,8 +670,7 @@ static int init_cgr(struct device *qidev)
6996
6997 	qipriv.cgr.cb = cgr_cb;
6998 	memset(&opts, 0, sizeof(opts));
6999-	opts.we_mask = cpu_to_be16(QM_CGR_WE_CSCN_EN | QM_CGR_WE_CS_THRES |
7000-				   QM_CGR_WE_MODE);
7001+	opts.we_mask = QM_CGR_WE_CSCN_EN | QM_CGR_WE_CS_THRES | QM_CGR_WE_MODE;
7002 	opts.cgr.cscn_en = QM_CGR_EN;
7003 	opts.cgr.mode = QMAN_CGR_MODE_FRAME;
7004 	qm_cgr_cs_thres_set64(&opts.cgr.cs_thres, val, 1);
7005diff --git a/drivers/crypto/caam/qi.h b/drivers/crypto/caam/qi.h
7006index 5894f16f8..aa68e37c8 100644
7007--- a/drivers/crypto/caam/qi.h
7008+++ b/drivers/crypto/caam/qi.h
7009@@ -9,7 +9,7 @@
7010 #ifndef __QI_H__
7011 #define __QI_H__
7012
7013-#include <soc/fsl/qman.h>
7014+#include <linux/fsl_qman.h>
7015 #include "compat.h"
7016 #include "desc.h"
7017 #include "desc_constr.h"
7018diff --git a/drivers/crypto/caam/regs.h b/drivers/crypto/caam/regs.h
7019index 3738625c0..70ac8ad90 100644
7020--- a/drivers/crypto/caam/regs.h
7021+++ b/drivers/crypto/caam/regs.h
7022@@ -385,6 +385,12 @@ struct version_regs {
7023 #define CHA_VER_VID_MD_LP512	0x1ull
7024 #define CHA_VER_VID_MD_HP	0x2ull
7025
7026+/*
7027+ * caam_perfmon - Performance Monitor/Secure Memory Status/
7028+ *                CAAM Global Status/Component Version IDs
7029+ *
7030+ * Spans f00-fff wherever instantiated
7031+ */
7032 struct sec_vid {
7033 	u16 ip_id;
7034 	u8 maj_rev;
7035@@ -415,17 +421,22 @@ struct caam_perfmon {
7036 #define CTPR_MS_PG_SZ_SHIFT	4
7037 	u32 comp_parms_ms;	/* CTPR - Compile Parameters Register	*/
7038 	u32 comp_parms_ls;	/* CTPR - Compile Parameters Register	*/
7039-	u64 rsvd1[2];
7040+	/* Secure Memory State Visibility */
7041+	u32 rsvd1;
7042+	u32 smstatus;	/* Secure memory status */
7043+	u32 rsvd2;
7044+	u32 smpartown;	/* Secure memory partition owner */
7045
7046 	/* CAAM Global Status					fc0-fdf */
7047 	u64 faultaddr;	/* FAR  - Fault Address		*/
7048 	u32 faultliodn;	/* FALR - Fault Address LIODN	*/
7049 	u32 faultdetail;	/* FADR - Fault Addr Detail	*/
7050-	u32 rsvd2;
7051 #define CSTA_PLEND		BIT(10)
7052 #define CSTA_ALT_PLEND		BIT(18)
7053+	u32 rsvd3;
7054 	u32 status;		/* CSTA - CAAM Status */
7055-	u64 rsvd3;
7056+	u32 smpart;		/* Secure Memory Partition Parameters */
7057+	u32 smvid;		/* Secure Memory Version ID */
7058
7059 	/* Component Instantiation Parameters			fe0-fff */
7060 	u32 rtic_id;		/* RVID - RTIC Version ID	*/
7061@@ -444,6 +455,62 @@ struct caam_perfmon {
7062 	u32 caam_id_ls;		/* CAAMVID - CAAM Version ID LS	*/
7063 };
7064
7065+#define SMSTATUS_PART_SHIFT	28
7066+#define SMSTATUS_PART_MASK	(0xf << SMSTATUS_PART_SHIFT)
7067+#define SMSTATUS_PAGE_SHIFT	16
7068+#define SMSTATUS_PAGE_MASK	(0x7ff << SMSTATUS_PAGE_SHIFT)
7069+#define SMSTATUS_MID_SHIFT	8
7070+#define SMSTATUS_MID_MASK	(0x3f << SMSTATUS_MID_SHIFT)
7071+#define SMSTATUS_ACCERR_SHIFT	4
7072+#define SMSTATUS_ACCERR_MASK	(0xf << SMSTATUS_ACCERR_SHIFT)
7073+#define SMSTATUS_ACCERR_NONE	0
7074+#define SMSTATUS_ACCERR_ALLOC	1	/* Page not allocated */
7075+#define SMSTATUS_ACCESS_ID	2	/* Not granted by ID */
7076+#define SMSTATUS_ACCESS_WRITE	3	/* Writes not allowed */
7077+#define SMSTATUS_ACCESS_READ	4	/* Reads not allowed */
7078+#define SMSTATUS_ACCESS_NONKEY	6	/* Non-key reads not allowed */
7079+#define SMSTATUS_ACCESS_BLOB	9	/* Blob access not allowed */
7080+#define SMSTATUS_ACCESS_DESCB	10	/* Descriptor Blob access spans pages */
7081+#define SMSTATUS_ACCESS_NON_SM	11	/* Outside Secure Memory range */
7082+#define SMSTATUS_ACCESS_XPAGE	12	/* Access crosses pages */
7083+#define SMSTATUS_ACCESS_INITPG	13	/* Page still initializing */
7084+#define SMSTATUS_STATE_SHIFT	0
7085+#define SMSTATUS_STATE_MASK	(0xf << SMSTATUS_STATE_SHIFT)
7086+#define SMSTATUS_STATE_RESET	0
7087+#define SMSTATUS_STATE_INIT	1
7088+#define SMSTATUS_STATE_NORMAL	2
7089+#define SMSTATUS_STATE_FAIL	3
7090+
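A short field-decoding sketch; where the smstatus word is read from (the perfmon block) follows the structure above, but the surrounding code is assumed:

	u32 smstatus = rd_reg32(&perfmon->smstatus);
	u32 accerr = (smstatus & SMSTATUS_ACCERR_MASK) >> SMSTATUS_ACCERR_SHIFT;
	u32 state = (smstatus & SMSTATUS_STATE_MASK) >> SMSTATUS_STATE_SHIFT;

	/* e.g. SMSTATUS_ACCESS_READ here means a read was not allowed */
	if (accerr != SMSTATUS_ACCERR_NONE || state == SMSTATUS_STATE_FAIL)
		return -EACCES;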
7091+/* up to 15 rings, 2 bits shifted by ring number */
7092+#define SMPARTOWN_RING_SHIFT	2
7093+#define SMPARTOWN_RING_MASK	3
7094+#define SMPARTOWN_AVAILABLE	0
7095+#define SMPARTOWN_NOEXIST	1
7096+#define SMPARTOWN_UNAVAILABLE	2
7097+#define SMPARTOWN_OURS		3
7098+
7099+/* Maximum number of pages possible */
7100+#define SMPART_MAX_NUMPG_SHIFT	16
7101+#define SMPART_MAX_NUMPG_MASK	(0x3f << SMPART_MAX_NUMPG_SHIFT)
7102+
7103+/* Maximum partition number */
7104+#define SMPART_MAX_PNUM_SHIFT	12
7105+#define SMPART_MAX_PNUM_MASK	(0xf << SMPART_MAX_PNUM_SHIFT)
7106+
7107+/* Highest possible page number */
7108+#define SMPART_MAX_PG_SHIFT	0
7109+#define SMPART_MAX_PG_MASK	(0x3f << SMPART_MAX_PG_SHIFT)
7110+
7111+/* Max size of a page */
7112+#define SMVID_PG_SIZE_SHIFT	16
7113+#define SMVID_PG_SIZE_MASK	(0x7 << SMVID_PG_SIZE_SHIFT)
7114+
7115+/* Major/Minor Version ID */
7116+#define SMVID_MAJ_VERS_SHIFT	8
7117+#define SMVID_MAJ_VERS		(0xf << SMVID_MAJ_VERS_SHIFT)
7118+#define SMVID_MIN_VERS_SHIFT	0
7119+#define SMVID_MIN_VERS		(0xf << SMVID_MIN_VERS_SHIFT)
7120+
7121 /* LIODN programming for DMA configuration */
7122 #define MSTRID_LOCK_LIODN	0x80000000
7123 #define MSTRID_LOCK_MAKETRUSTED	0x00010000	/* only for JR masterid */
7124@@ -454,12 +521,6 @@ struct masterid {
7125 	u32 liodn_ls;	/* LIODN for non-sequence and seq access */
7126 };
7127
7128-/* Partition ID for DMA configuration */
7129-struct partid {
7130-	u32 rsvd1;
7131-	u32 pidr;	/* partition ID, DECO */
7132-};
7133-
7134 /* RNGB test mode (replicated twice in some configurations) */
7135 /* Padded out to 0x100 */
7136 struct rngtst {
7137@@ -518,6 +579,8 @@ struct rng4tst {
7138 #define RTSDCTL_ENT_DLY_MASK (0xffff << RTSDCTL_ENT_DLY_SHIFT)
7139 #define RTSDCTL_ENT_DLY_MIN 3200
7140 #define RTSDCTL_ENT_DLY_MAX 12800
7141+#define RTSDCTL_SAMP_SIZE_MASK 0xffff
7142+#define RTSDCTL_SAMP_SIZE_VAL 512
7143 	u32 rtsdctl;		/* seed control register */
7144 	union {
7145 		u32 rtsblim;	/* PRGM=1: sparse bit limit register */
7146@@ -529,7 +592,15 @@ struct rng4tst {
7147 		u32 rtfrqmax;	/* PRGM=1: freq. count max. limit register */
7148 		u32 rtfrqcnt;	/* PRGM=0: freq. count register */
7149 	};
7150-	u32 rsvd1[40];
7151+	union {
7152+		u32 rtscmc;	/* statistical check run monobit count */
7153+		u32 rtscml;	/* statistical check run monobit limit */
7154+	};
7155+	union {
7156+		u32 rtscrc[6];	/* statistical check run length count */
7157+		u32 rtscrl[6];	/* statistical check run length limit */
7158+	};
7159+	u32 rsvd1[33];
7160 #define RDSTA_SKVT 0x80000000
7161 #define RDSTA_SKVN 0x40000000
7162 #define RDSTA_PR0 BIT(4)
7163@@ -575,8 +646,7 @@ struct caam_ctrl {
7164 	u32 deco_rsr;			/* DECORSR - Deco Request Source */
7165 	u32 rsvd11;
7166 	u32 deco_rq;			/* DECORR - DECO Request */
7167-	struct partid deco_mid[5];	/* DECOxLIODNR - 1 per DECO */
7168-	u32 rsvd5[22];
7169+	struct masterid deco_mid[16];	/* DECOxLIODNR - 1 per DECO */
7170
7171 	/* DECO Availability/Reset Section			120-3ff */
7172 	u32 deco_avail;		/* DAR - DECO availability */
7173@@ -650,6 +720,35 @@ struct caam_ctrl {
7174 #define JRSTART_JR2_START       0x00000004 /* Start Job ring 2 */
7175 #define JRSTART_JR3_START       0x00000008 /* Start Job ring 3 */
7176
7177+/* Secure Memory Configuration - if you have it */
7178+/* Secure Memory Register Offset from JR Base Reg*/
7179+#define SM_V1_OFFSET 0x0f4
7180+#define SM_V2_OFFSET 0xa00
7181+
7182+/* Minimum SM Version ID requiring v2 SM register mapping */
7183+#define SMVID_V2 0x20105
7184+
7185+struct caam_secure_mem_v1 {
7186+	u32 sm_cmd;	/* SMCJRx - Secure memory command */
7187+	u32 rsvd1;
7188+	u32 sm_status;	/* SMCSJRx - Secure memory status */
7189+	u32 rsvd2;
7190+
7191+	u32 sm_perm;	/* SMAPJRx - Secure memory access perms */
7192+	u32 sm_group2;	/* SMAP2JRx - Secure memory access group 2 */
7193+	u32 sm_group1;	/* SMAP1JRx - Secure memory access group 1 */
7194+};
7195+
7196+struct caam_secure_mem_v2 {
7197+	u32 sm_perm;	/* SMAPJRx - Secure memory access perms */
7198+	u32 sm_group2;	/* SMAP2JRx - Secure memory access group 2 */
7199+	u32 sm_group1;	/* SMAP1JRx - Secure memory access group 1 */
7200+	u32 rsvd1[118];
7201+	u32 sm_cmd;	/* SMCJRx - Secure memory command */
7202+	u32 rsvd2;
7203+	u32 sm_status;	/* SMCSJRx - Secure memory status */
7204+};
7205+
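A sketch of choosing between the two layouts, assuming the SMVID register read below holds the full version word that SMVID_V2 is compared against (sm_send_cmd() in sm_store.c keys off the same offset):

	u32 smvid = rd_reg32(&perfmon->smvid);	/* perfmon pointer assumed */
	u32 sm_offset = smvid >= SMVID_V2 ? SM_V2_OFFSET : SM_V1_OFFSET;
	void __iomem *sm_base = (void __iomem *)jrpriv->rregs + sm_offset;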
7206 /*
7207  * caam_job_ring - direct job ring setup
7208  * 1-4 possible per instantiation, base + 1000/2000/3000/4000
7209@@ -820,6 +919,62 @@ struct caam_job_ring {
7210
7211 #define JRCR_RESET                  0x01
7212
7213+/* secure memory command */
7214+#define SMC_PAGE_SHIFT	16
7215+#define SMC_PAGE_MASK	(0xffff << SMC_PAGE_SHIFT)
7216+#define SMC_PART_SHIFT	8
7217+#define SMC_PART_MASK	(0x0f << SMC_PART_SHIFT)
7218+#define SMC_CMD_SHIFT	0
7219+#define SMC_CMD_MASK	(0x0f << SMC_CMD_SHIFT)
7220+
7221+#define SMC_CMD_ALLOC_PAGE	0x01	/* allocate page to this partition */
7222+#define SMC_CMD_DEALLOC_PAGE	0x02	/* deallocate page from partition */
7223+#define SMC_CMD_DEALLOC_PART	0x03	/* deallocate partition */
7224+#define SMC_CMD_PAGE_INQUIRY	0x05	/* find partition associated with page */
7225+
7226+/* secure memory (command) status */
7227+#define SMCS_PAGE_SHIFT		16
7228+#define SMCS_PAGE_MASK		(0x0fff << SMCS_PAGE_SHIFT)
7229+#define SMCS_CMDERR_SHIFT	14
7230+#define SMCS_CMDERR_MASK	(3 << SMCS_CMDERR_SHIFT)
7231+#define SMCS_ALCERR_SHIFT	12
7232+#define SMCS_ALCERR_MASK	(3 << SMCS_ALCERR_SHIFT)
7233+#define SMCS_PGOWN_SHIFT	6
7234+#define SMCS_PGWON_MASK		(3 << SMCS_PGOWN_SHIFT)
7235+#define SMCS_PART_SHIFT		0
7236+#define SMCS_PART_MASK		(0xf << SMCS_PART_SHIFT)
7237+
7238+#define SMCS_CMDERR_NONE	0
7239+#define SMCS_CMDERR_INCOMP	1	/* Command not yet complete */
7240+#define SMCS_CMDERR_SECFAIL	2	/* Security failure occurred */
7241+#define SMCS_CMDERR_OVERFLOW	3	/* Command overflow */
7242+
7243+#define SMCS_ALCERR_NONE	0
7244+#define SMCS_ALCERR_PSPERR	1	/* Partition marked PSP (dealloc only) */
7245+#define SMCS_ALCERR_PAGEAVAIL	2	/* Page not available */
7246+#define SMCS_ALCERR_PARTOWN	3	/* Partition ownership error */
7247+
7248+#define SMCS_PGOWN_AVAIL	0	/* Page is available */
7249+#define SMCS_PGOWN_NOEXIST	1	/* Page initializing or nonexistent */
7250+#define SMCS_PGOWN_NOOWN	2	/* Page owned by another processor */
7251+#define SMCS_PGOWN_OWNED	3	/* Page belongs to this processor */
7252+
7253+/* secure memory access permissions */
7254+#define SMCS_PERM_KEYMOD_SHIFT	16
7255+#define SMCA_PERM_KEYMOD_MASK	(0xff << SMCS_PERM_KEYMOD_SHIFT)
7256+#define SMCA_PERM_CSP_ZERO	0x8000	/* Zero when deallocated or released */
7257+#define SMCA_PERM_PSP_LOCK	0x4000	/* Part./pages can't be deallocated */
7258+#define SMCA_PERM_PERM_LOCK	0x2000	/* Lock permissions */
7259+#define SMCA_PERM_GRP_LOCK	0x1000	/* Lock access groups */
7260+#define SMCA_PERM_RINGID_SHIFT	10
7261+#define SMCA_PERM_RINGID_MASK	(3 << SMCA_PERM_RINGID_SHIFT)
7262+#define SMCA_PERM_G2_BLOB	0x0080	/* Group 2 blob import/export */
7263+#define SMCA_PERM_G2_WRITE	0x0020	/* Group 2 write */
7264+#define SMCA_PERM_G2_READ	0x0010	/* Group 2 read */
7265+#define SMCA_PERM_G1_BLOB	0x0008	/* Group 1... */
7266+#define SMCA_PERM_G1_WRITE	0x0002
7267+#define SMCA_PERM_G1_READ	0x0001
7268+
7269 /*
7270  * caam_assurance - Assurance Controller View
7271  * base + 0x6000 padded out to 0x1000
7272diff --git a/drivers/crypto/caam/secvio.c b/drivers/crypto/caam/secvio.c
7273new file mode 100644
7274index 000000000..d6ebe0af4
7275--- /dev/null
7276+++ b/drivers/crypto/caam/secvio.c
7277@@ -0,0 +1,341 @@
7278+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
7279+/*
7280+ * SNVS Security Violation Handler
7281+ *
7282+ * Copyright 2012-2016 Freescale Semiconductor, Inc.
7283+ * Copyright 2017-2019 NXP
7284+ */
7285+
7286+#include "compat.h"
7287+#include "secvio.h"
7288+#include "regs.h"
7289+#include "intern.h"
7290+#include <linux/of.h>
7291+#include <linux/of_irq.h>
7292+#include <linux/of_address.h>
7293+
7294+/* The driver is matched with the caam_snvs node to get its regmap.
7295+ * It then retrieves the interrupt and tamper alarm configuration from
7296+ * the caam-secvio node, searching for the compat string "fsl,imx6q-caam-secvio"
7297+ */
7298+#define DRIVER_NAME "caam-snvs"
7299+
7300+/*
7301+ * These names are associated with each violation handler.
7302+ * The source names were taken from MX6, and are based on recommendations
7303+ * for most common SoCs.
7304+ */
7305+static const u8 *violation_src_name[] = {
7306+	"CAAM Internal Security Violation",
7307+	"JTAG Alarm",
7308+	"Watchdog",
7309+	"(reserved)",
7310+	"External Boot",
7311+	"External Tamper Detect",
7312+};
7313+
7314+/* These names help describe security monitor state for the console */
7315+static const u8 *snvs_ssm_state_name[] = {
7316+	"init",
7317+	"hard fail",
7318+	"(undef:2)",
7319+	"soft fail",
7320+	"(undef:4)",
7321+	"(undef:5)",
7322+	"(undef:6)",
7323+	"(undef:7)",
7324+	"transition",
7325+	"check",
7326+	"(undef:10)",
7327+	"non-secure",
7328+	"(undef:12)",
7329+	"trusted",
7330+	"(undef:14)",
7331+	"secure",
7332+};
7333+
7334+/* Top-level security violation interrupt */
7335+static irqreturn_t snvs_secvio_interrupt(int irq, void *snvsdev)
7336+{
7337+	struct device *dev = snvsdev;
7338+	struct snvs_secvio_drv_private *svpriv = dev_get_drvdata(dev);
7339+
7340+	clk_enable(svpriv->clk);
7341+	/* Check the HP secvio status register */
7342+	svpriv->irqcause = rd_reg32(&svpriv->svregs->hp.secvio_status) &
7343+				    HP_SECVIOST_SECVIOMASK;
7344+
7345+	if (!svpriv->irqcause) {
7346+		clk_disable(svpriv->clk);
7347+		return IRQ_NONE;
7348+	}
7349+
7350+	/* Now ACK cause */
7351+	clrsetbits_32(&svpriv->svregs->hp.secvio_status, 0, svpriv->irqcause);
7352+
7353+	/* And run deferred service */
7354+	preempt_disable();
7355+	tasklet_schedule(&svpriv->irqtask[smp_processor_id()]);
7356+	preempt_enable();
7357+
7358+	clk_disable(svpriv->clk);
7359+
7360+	return IRQ_HANDLED;
7361+}
7362+
7363+/* Deferred service handler. Tasklet arg is simply the SNVS dev */
7364+static void snvs_secvio_dispatch(unsigned long indev)
7365+{
7366+	struct device *dev = (struct device *)indev;
7367+	struct snvs_secvio_drv_private *svpriv = dev_get_drvdata(dev);
7368+	unsigned long flags;
7369+	int i;
7370+
7371+
7372+	/* Look through stored causes, call each handler if exists */
7373+	for (i = 0; i < MAX_SECVIO_SOURCES; i++)
7374+		if (svpriv->irqcause & (1 << i)) {
7375+			spin_lock_irqsave(&svpriv->svlock, flags);
7376+			svpriv->intsrc[i].handler(dev, i,
7377+						  svpriv->intsrc[i].ext);
7378+			spin_unlock_irqrestore(&svpriv->svlock, flags);
7379+		}
7380+
7381+	/* Re-enable now-serviced interrupts */
7382+	clrsetbits_32(&svpriv->svregs->hp.secvio_intcfg, 0, svpriv->irqcause);
7383+}
7384+
7385+/*
7386+ * Default cause handler, used in lieu of an application-defined handler.
7387+ * All it does at this time is print a console message. It could force a halt.
7388+ */
7389+static void snvs_secvio_default(struct device *dev, u32 cause, void *ext)
7390+{
7391+	struct snvs_secvio_drv_private *svpriv = dev_get_drvdata(dev);
7392+
7393+	dev_err(dev, "Unhandled Security Violation Interrupt %d = %s\n",
7394+		cause, svpriv->intsrc[cause].intname);
7395+}
7396+
7397+/*
7398+ * Install an application-defined handler for a specified cause
7399+ * Arguments:
7400+ * - dev        points to SNVS-owning device
7401+ * - cause      interrupt source cause
7402+ * - handler    application-defined handler, gets called with the dev,
7403+ *              the source cause, and a locally-defined handler argument
7404+ * - cause_description   points to a string to override the default cause
7405+ *                       name, this can be used as an alternate for error
7406+ *                       messages and such. If left NULL, the default
7407+ *                       description string is used.
7408+ * - ext        pointer to any extra data needed by the handler.
7409+ */
7410+int snvs_secvio_install_handler(struct device *dev, enum secvio_cause cause,
7411+				void (*handler)(struct device *dev, u32 cause,
7412+						void *ext),
7413+				u8 *cause_description, void *ext)
7414+{
7415+	unsigned long flags;
7416+	struct snvs_secvio_drv_private *svpriv;
7417+
7418+	svpriv = dev_get_drvdata(dev);
7419+
7420+	if ((handler == NULL) || (cause > SECVIO_CAUSE_SOURCE_5))
7421+		return -EINVAL;
7422+
7423+	spin_lock_irqsave(&svpriv->svlock, flags);
7424+	svpriv->intsrc[cause].handler = handler;
7425+	if (cause_description != NULL)
7426+		svpriv->intsrc[cause].intname = cause_description;
7427+	if (ext != NULL)
7428+		svpriv->intsrc[cause].ext = ext;
7429+	spin_unlock_irqrestore(&svpriv->svlock, flags);
7430+
7431+	return 0;
7432+}
7433+EXPORT_SYMBOL(snvs_secvio_install_handler);
7434+
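A hedged usage sketch; the handler body and the choice of cause are illustrative:

static void my_tamper_handler(struct device *dev, u32 cause, void *ext)
{
	/* runs from the dispatch tasklet with svlock held */
	dev_warn(dev, "security violation, cause %u\n", cause);
}

	/* svdev: the SNVS device; NULLs keep the default name, no extra data */
	err = snvs_secvio_install_handler(svdev, SECVIO_CAUSE_TAMPER_DETECT,
					  my_tamper_handler, NULL, NULL);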
7435+/*
7436+ * Remove an application-defined handler for a specified cause (and, by
7437+ * implication, restore the "default").
7438+ * Arguments:
7439+ * - dev	points to SNVS-owning device
7440+ * - cause	interrupt source cause
7441+ */
7442+int snvs_secvio_remove_handler(struct device *dev, enum secvio_cause cause)
7443+{
7444+	unsigned long flags;
7445+	struct snvs_secvio_drv_private *svpriv;
7446+
7447+	svpriv = dev_get_drvdata(dev);
7448+
7449+	if (cause > SECVIO_CAUSE_SOURCE_5)
7450+		return -EINVAL;
7451+
7452+	spin_lock_irqsave(&svpriv->svlock, flags);
7453+	svpriv->intsrc[cause].intname = violation_src_name[cause];
7454+	svpriv->intsrc[cause].handler = snvs_secvio_default;
7455+	svpriv->intsrc[cause].ext = NULL;
7456+	spin_unlock_irqrestore(&svpriv->svlock, flags);
7457+	return 0;
7458+}
7459+EXPORT_SYMBOL(snvs_secvio_remove_handler);
7460+
7461+static int snvs_secvio_remove(struct platform_device *pdev)
7462+{
7463+	struct device *svdev;
7464+	struct snvs_secvio_drv_private *svpriv;
7465+	int i;
7466+
7467+	svdev = &pdev->dev;
7468+	svpriv = dev_get_drvdata(svdev);
7469+
7470+	clk_enable(svpriv->clk);
7471+	/* Set all sources to nonfatal */
7472+	wr_reg32(&svpriv->svregs->hp.secvio_intcfg, 0);
7473+
7474+	/* Remove tasklets and release interrupt */
7475+	for_each_possible_cpu(i)
7476+		tasklet_kill(&svpriv->irqtask[i]);
7477+
7478+	clk_disable_unprepare(svpriv->clk);
7479+	free_irq(svpriv->irq, svdev);
7480+	iounmap(svpriv->svregs);
7481+	kfree(svpriv);
7482+
7483+	return 0;
7484+}
7485+
7486+static int snvs_secvio_probe(struct platform_device *pdev)
7487+{
7488+	struct device *svdev;
7489+	struct snvs_secvio_drv_private *svpriv;
7490+	struct device_node *np, *npirq;
7491+	struct snvs_full __iomem *snvsregs;
7492+	int i, error;
7493+	u32 hpstate;
7494+	const void *jtd, *wtd, *itd, *etd;
7495+	u32 td_en;
7496+
7497+	svpriv = kzalloc(sizeof(struct snvs_secvio_drv_private), GFP_KERNEL);
7498+	if (!svpriv)
7499+		return -ENOMEM;
7500+
7501+	svdev = &pdev->dev;
7502+	dev_set_drvdata(svdev, svpriv);
7503+	svpriv->pdev = pdev;
7504+	spin_lock_init(&svpriv->svlock);
7505+	np = pdev->dev.of_node;
7506+
7507+	npirq = of_find_compatible_node(NULL, NULL, "fsl,imx6q-caam-secvio");
7508+	if (!npirq) {
7509+		dev_err(svdev, "can't find secvio node\n");
7510+		kfree(svpriv);
7511+		return -EINVAL;
7512+	}
7513+	svpriv->irq = irq_of_parse_and_map(npirq, 0);
7514+	if (svpriv->irq <= 0) {
7515+		dev_err(svdev, "can't identify secvio interrupt\n");
7516+		kfree(svpriv);
7517+		return -EINVAL;
7518+	}
7519+
7520+	jtd = of_get_property(npirq, "jtag-tamper", NULL);
7521+	wtd = of_get_property(npirq, "watchdog-tamper", NULL);
7522+	itd = of_get_property(npirq, "internal-boot-tamper", NULL);
7523+	etd = of_get_property(npirq, "external-pin-tamper", NULL);
7524+	if (!jtd || !wtd || !itd || !etd) {
7525+		dev_err(svdev, "can't identify all tamper alarm configuration\n");
7526+		kfree(svpriv);
7527+		return -EINVAL;
7528+	}
7529+
7530+	/*
7531+	 * Configure all sources according to their device tree properties.
7532+	 * If a property is enabled, the corresponding source is set as a
7533+	 * fatal violation, except for the LP section,
7534+	 * source #5 (typically used as an external tamper detect), and
7535+	 * source #3 (typically unused). Whenever the transition to
7536+	 * secure mode has occurred, these will now be "fatal" violations
7537+	 */
7538+	td_en = HP_SECVIO_INTEN_SRC0;
7539+	if (!strcmp(jtd, "enabled"))
7540+		td_en |= HP_SECVIO_INTEN_SRC1;
7541+	if (!strcmp(wtd, "enabled"))
7542+		td_en |= HP_SECVIO_INTEN_SRC2;
7543+	if (!strcmp(itd, "enabled"))
7544+		td_en |= HP_SECVIO_INTEN_SRC4;
7545+	if (!strcmp(etd, "enabled"))
7546+		td_en |= HP_SECVIO_INTEN_SRC5;
7547+
7548+	snvsregs = of_iomap(np, 0);
7549+	if (!snvsregs) {
7550+		dev_err(svdev, "register mapping failed\n");
7551+		return -ENOMEM;
7552+	}
7553+	svpriv->svregs = (struct snvs_full __force *)snvsregs;
7554+
7555+	svpriv->clk = devm_clk_get_optional(&pdev->dev, "ipg");
7556+	if (IS_ERR(svpriv->clk))
7557+		return PTR_ERR(svpriv->clk);
7558+
7559+	clk_prepare_enable(svpriv->clk);
7560+
7561+	/* Write the secvio enable config to the SVCR */
7562+	wr_reg32(&svpriv->svregs->hp.secvio_ctl, td_en);
7563+	wr_reg32(&svpriv->svregs->hp.secvio_intcfg, td_en);
7564+
7565+	/* Device data set up. Now init interrupt source descriptions */
7566+	for (i = 0; i < MAX_SECVIO_SOURCES; i++) {
7567+		svpriv->intsrc[i].intname = violation_src_name[i];
7568+		svpriv->intsrc[i].handler = snvs_secvio_default;
7569+	}
7570+	/* Connect main handler */
7571+	for_each_possible_cpu(i)
7572+		tasklet_init(&svpriv->irqtask[i], snvs_secvio_dispatch,
7573+			     (unsigned long)svdev);
7574+
7575+	error = request_irq(svpriv->irq, snvs_secvio_interrupt,
7576+			    IRQF_SHARED, DRIVER_NAME, svdev);
7577+	if (error) {
7578+		dev_err(svdev, "can't connect secvio interrupt\n");
7579+		irq_dispose_mapping(svpriv->irq);
7580+		svpriv->irq = 0;
7581+		iounmap(svpriv->svregs);
7582+		kfree(svpriv);
7583+		return -EINVAL;
7584+	}
7585+
7586+	hpstate = (rd_reg32(&svpriv->svregs->hp.status) &
7587+			    HP_STATUS_SSM_ST_MASK) >> HP_STATUS_SSM_ST_SHIFT;
7588+	dev_info(svdev, "violation handlers armed - %s state\n",
7589+		 snvs_ssm_state_name[hpstate]);
7590+
7591+	clk_disable(svpriv->clk);
7592+
7593+	return 0;
7594+}
7595+
7596+static struct of_device_id snvs_secvio_match[] = {
7597+	{
7598+		.compatible = "fsl,imx6q-caam-snvs",
7599+	},
7600+	{},
7601+};
7602+MODULE_DEVICE_TABLE(of, snvs_secvio_match);
7603+
7604+static struct platform_driver snvs_secvio_driver = {
7605+	.driver = {
7606+		.name = DRIVER_NAME,
7607+		.owner = THIS_MODULE,
7608+		.of_match_table = snvs_secvio_match,
7609+	},
7610+	.probe       = snvs_secvio_probe,
7611+	.remove      = snvs_secvio_remove,
7612+};
7613+
7614+module_platform_driver(snvs_secvio_driver);
7615+
7616+MODULE_LICENSE("Dual BSD/GPL");
7617+MODULE_DESCRIPTION("FSL SNVS Security Violation Handler");
7618+MODULE_AUTHOR("Freescale Semiconductor - MCU");
7619diff --git a/drivers/crypto/caam/secvio.h b/drivers/crypto/caam/secvio.h
7620new file mode 100644
7621index 000000000..a7a245a3d
7622--- /dev/null
7623+++ b/drivers/crypto/caam/secvio.h
7624@@ -0,0 +1,69 @@
7625+/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
7626+/*
7627+ * CAAM Security Violation Handler
7628+ *
7629+ * Copyright 2012-2015 Freescale Semiconductor, Inc.
7630+ * Copyright 2016-2019 NXP
7631+ */
7632+
7633+#ifndef SECVIO_H
7634+#define SECVIO_H
7635+
7636+#include "snvsregs.h"
7637+
7638+
7639+/*
7640+ * Defines the published interfaces to install/remove application-specified
7641+ * handlers for catching violations
7642+ */
7643+
7644+#define MAX_SECVIO_SOURCES 6
7645+
7646+/* these are the untranslated causes */
7647+enum secvio_cause {
7648+	SECVIO_CAUSE_SOURCE_0,
7649+	SECVIO_CAUSE_SOURCE_1,
7650+	SECVIO_CAUSE_SOURCE_2,
7651+	SECVIO_CAUSE_SOURCE_3,
7652+	SECVIO_CAUSE_SOURCE_4,
7653+	SECVIO_CAUSE_SOURCE_5
7654+};
7655+
7656+/* These are common "recommended" cause definitions for most devices */
7657+#define SECVIO_CAUSE_CAAM_VIOLATION	SECVIO_CAUSE_SOURCE_0
7658+#define SECVIO_CAUSE_JTAG_ALARM		SECVIO_CAUSE_SOURCE_1
7659+#define SECVIO_CAUSE_WATCHDOG		SECVIO_CAUSE_SOURCE_2
7660+#define SECVIO_CAUSE_EXTERNAL_BOOT	SECVIO_CAUSE_SOURCE_4
7661+#define SECVIO_CAUSE_TAMPER_DETECT	SECVIO_CAUSE_SOURCE_5
7662+
7663+int snvs_secvio_install_handler(struct device *dev, enum secvio_cause cause,
7664+				void (*handler)(struct device *dev, u32 cause,
7665+						void *ext),
7666+				u8 *cause_description, void *ext);
7667+int snvs_secvio_remove_handler(struct device *dev, enum  secvio_cause cause);
7668+
7669+/*
7670+ * Private data definitions for the secvio "driver"
7671+ */
7672+
7673+struct secvio_int_src {
7674+	const u8 *intname;	/* Points to a descriptive name for source */
7675+	void *ext;		/* Extended data to pass to the handler */
7676+	void (*handler)(struct device *dev, u32 cause, void *ext);
7677+};
7678+
7679+struct snvs_secvio_drv_private {
7680+	struct platform_device *pdev;
7681+	spinlock_t svlock ____cacheline_aligned;
7682+	struct tasklet_struct irqtask[NR_CPUS];
7683+	struct snvs_full __iomem *svregs;	/* both HP and LP domains */
7684+	struct clk *clk;
7685+	int irq;
7686+	u32 irqcause; /* stashed cause of violation interrupt */
7687+
7688+	/* Registered handlers for each violation */
7689+	struct secvio_int_src intsrc[MAX_SECVIO_SOURCES];
7690+
7691+};
7692+
7693+#endif /* SECVIO_H */
7694diff --git a/drivers/crypto/caam/sg_sw_qm.h b/drivers/crypto/caam/sg_sw_qm.h
7695index d56cc7efb..9465b5773 100644
7696--- a/drivers/crypto/caam/sg_sw_qm.h
7697+++ b/drivers/crypto/caam/sg_sw_qm.h
7698@@ -7,46 +7,61 @@
7699 #ifndef __SG_SW_QM_H
7700 #define __SG_SW_QM_H
7701
7702-#include <soc/fsl/qman.h>
7703+#include <linux/fsl_qman.h>
7704 #include "regs.h"
7705
7706+static inline void cpu_to_hw_sg(struct qm_sg_entry *qm_sg_ptr)
7707+{
7708+	dma_addr_t addr = qm_sg_ptr->opaque;
7709+
7710+	qm_sg_ptr->opaque = cpu_to_caam64(addr);
7711+	qm_sg_ptr->sgt_efl = cpu_to_caam32(qm_sg_ptr->sgt_efl);
7712+}
7713+
7714 static inline void __dma_to_qm_sg(struct qm_sg_entry *qm_sg_ptr, dma_addr_t dma,
7715-				  u16 offset)
7716+				  u32 len, u16 offset)
7717 {
7718-	qm_sg_entry_set64(qm_sg_ptr, dma);
7719+	qm_sg_ptr->addr = dma;
7720+	qm_sg_ptr->length = len;
7721 	qm_sg_ptr->__reserved2 = 0;
7722 	qm_sg_ptr->bpid = 0;
7723-	qm_sg_ptr->offset = cpu_to_be16(offset & QM_SG_OFF_MASK);
7724+	qm_sg_ptr->__reserved3 = 0;
7725+	qm_sg_ptr->offset = offset & QM_SG_OFFSET_MASK;
7726+
7727+	cpu_to_hw_sg(qm_sg_ptr);
7728 }
7729
7730 static inline void dma_to_qm_sg_one(struct qm_sg_entry *qm_sg_ptr,
7731 				    dma_addr_t dma, u32 len, u16 offset)
7732 {
7733-	__dma_to_qm_sg(qm_sg_ptr, dma, offset);
7734-	qm_sg_entry_set_len(qm_sg_ptr, len);
7735+	qm_sg_ptr->extension = 0;
7736+	qm_sg_ptr->final = 0;
7737+	__dma_to_qm_sg(qm_sg_ptr, dma, len, offset);
7738 }
7739
7740 static inline void dma_to_qm_sg_one_last(struct qm_sg_entry *qm_sg_ptr,
7741 					 dma_addr_t dma, u32 len, u16 offset)
7742 {
7743-	__dma_to_qm_sg(qm_sg_ptr, dma, offset);
7744-	qm_sg_entry_set_f(qm_sg_ptr, len);
7745+	qm_sg_ptr->extension = 0;
7746+	qm_sg_ptr->final = 1;
7747+	__dma_to_qm_sg(qm_sg_ptr, dma, len, offset);
7748 }
7749
7750 static inline void dma_to_qm_sg_one_ext(struct qm_sg_entry *qm_sg_ptr,
7751 					dma_addr_t dma, u32 len, u16 offset)
7752 {
7753-	__dma_to_qm_sg(qm_sg_ptr, dma, offset);
7754-	qm_sg_ptr->cfg = cpu_to_be32(QM_SG_EXT | (len & QM_SG_LEN_MASK));
7755+	qm_sg_ptr->extension = 1;
7756+	qm_sg_ptr->final = 0;
7757+	__dma_to_qm_sg(qm_sg_ptr, dma, len, offset);
7758 }
7759
7760 static inline void dma_to_qm_sg_one_last_ext(struct qm_sg_entry *qm_sg_ptr,
7761 					     dma_addr_t dma, u32 len,
7762 					     u16 offset)
7763 {
7764-	__dma_to_qm_sg(qm_sg_ptr, dma, offset);
7765-	qm_sg_ptr->cfg = cpu_to_be32(QM_SG_EXT | QM_SG_FIN |
7766-				     (len & QM_SG_LEN_MASK));
7767+	qm_sg_ptr->extension = 1;
7768+	qm_sg_ptr->final = 1;
7769+	__dma_to_qm_sg(qm_sg_ptr, dma, len, offset);
7770 }
7771
7772 /*
7773@@ -79,7 +94,10 @@ static inline void sg_to_qm_sg_last(struct scatterlist *sg, int len,
7774 				    struct qm_sg_entry *qm_sg_ptr, u16 offset)
7775 {
7776 	qm_sg_ptr = sg_to_qm_sg(sg, len, qm_sg_ptr, offset);
7777-	qm_sg_entry_set_f(qm_sg_ptr, qm_sg_entry_get_len(qm_sg_ptr));
7778+
7779+	qm_sg_ptr->sgt_efl = caam32_to_cpu(qm_sg_ptr->sgt_efl);
7780+	qm_sg_ptr->final = 1;
7781+	qm_sg_ptr->sgt_efl = cpu_to_caam32(qm_sg_ptr->sgt_efl);
7782 }
7783
7784 #endif /* __SG_SW_QM_H */
7785diff --git a/drivers/crypto/caam/sm.h b/drivers/crypto/caam/sm.h
7786new file mode 100644
7787index 000000000..614c9b4d3
7788--- /dev/null
7789+++ b/drivers/crypto/caam/sm.h
7790@@ -0,0 +1,126 @@
7791+/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
7792+/*
7793+ * CAAM Secure Memory/Keywrap API Definitions
7794+ *
7795+ * Copyright 2008-2015 Freescale Semiconductor, Inc.
7796+ * Copyright 2016-2019 NXP
7797+ */
7798+
7799+#ifndef SM_H
7800+#define SM_H
7801+
7802+
7803+/* Storage access permissions */
7804+#define SM_PERM_READ 0x01
7805+#define SM_PERM_WRITE 0x02
7806+#define SM_PERM_BLOB 0x03
7807+
7808+/* Define treatment of secure memory vs. general memory blobs */
7809+#define SM_SECMEM 0
7810+#define SM_GENMEM 1
7811+
7812+/* Define treatment of red/black keys */
7813+#define RED_KEY 0
7814+#define BLACK_KEY 1
7815+
7816+/* Define key encryption/covering options */
7817+#define KEY_COVER_ECB 0	/* cover key in AES-ECB */
7818+#define KEY_COVER_CCM 1 /* cover key with AES-CCM */
7819+
7820+/*
7821+ * Round a key size up to an AES blocksize boundary so to allow for
7822+ * padding out to a full block
7823+ */
7824+#define AES_BLOCK_PAD(x) ((x % 16) ? ((x >> 4) + 1) << 4 : x)
7825+
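Two worked values of the padding macro, for reference:

/*
 * AES_BLOCK_PAD(20) == 32 (20 % 16 != 0, so round up to the next block);
 * AES_BLOCK_PAD(16) == 16 (already a multiple of the 16-byte AES block).
 */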
7826+/* Define space required for BKEK + MAC tag storage in any blob */
7827+#define BLOB_OVERHEAD (32 + 16)
7828+
7829+/* Keystore maintenance functions */
7830+void sm_init_keystore(struct device *dev);
7831+u32 sm_detect_keystore_units(struct device *dev);
7832+int sm_establish_keystore(struct device *dev, u32 unit);
7833+void sm_release_keystore(struct device *dev, u32 unit);
7834+int caam_sm_example_init(struct platform_device *pdev);
7835+
7836+/* Keystore accessor functions */
7837+extern int sm_keystore_slot_alloc(struct device *dev, u32 unit, u32 size,
7838+				  u32 *slot);
7839+extern int sm_keystore_slot_dealloc(struct device *dev, u32 unit, u32 slot);
7840+extern int sm_keystore_slot_load(struct device *dev, u32 unit, u32 slot,
7841+				 const u8 *key_data, u32 key_length);
7842+extern int sm_keystore_slot_read(struct device *dev, u32 unit, u32 slot,
7843+				 u32 key_length, u8 *key_data);
7844+extern int sm_keystore_cover_key(struct device *dev, u32 unit, u32 slot,
7845+				 u16 key_length, u8 keyauth);
7846+extern int sm_keystore_slot_export(struct device *dev, u32 unit, u32 slot,
7847+				   u8 keycolor, u8 keyauth, u8 *outbuf,
7848+				   u16 keylen, u8 *keymod);
7849+extern int sm_keystore_slot_import(struct device *dev, u32 unit, u32 slot,
7850+				   u8 keycolor, u8 keyauth, u8 *inbuf,
7851+				   u16 keylen, u8 *keymod);
7852+
7853+/* Prior functions from legacy API, deprecated */
7854+extern int sm_keystore_slot_encapsulate(struct device *dev, u32 unit,
7855+					u32 inslot, u32 outslot, u16 secretlen,
7856+					u8 *keymod, u16 keymodlen);
7857+extern int sm_keystore_slot_decapsulate(struct device *dev, u32 unit,
7858+					u32 inslot, u32 outslot, u16 secretlen,
7859+					u8 *keymod, u16 keymodlen);
7860+
7861+/* Data structure to hold per-slot information */
7862+struct keystore_data_slot_info {
7863+	u8	allocated;	/* Track slot assignments */
7864+	u32	key_length;	/* Size of the key */
7865+};
7866+
7867+/* Data structure to hold keystore information */
7868+struct keystore_data {
7869+	void	*base_address;	/* Virtual base of secure memory pages */
7870+	void	*phys_address;	/* Physical base of secure memory pages */
7871+	u32	slot_count;	/* Number of slots in the keystore */
7872+	struct keystore_data_slot_info *slot; /* Per-slot information */
7873+};
7874+
7875+/* store the detected attributes of a secure memory page */
7876+struct sm_page_descriptor {
7877+	u16 phys_pagenum;	/* may be discontiguous */
7878+	u16 own_part;		/* Owning partition */
7879+	void *pg_base;		/* Calculated virtual address */
7880+	void *pg_phys;		/* Calculated physical address */
7881+	struct keystore_data *ksdata;
7882+};
7883+
7884+struct caam_drv_private_sm {
7885+	struct device *parentdev;	/* this ends up as the controller */
7886+	struct device *smringdev;	/* ring that owns this instance */
7887+	struct platform_device *sm_pdev;  /* Secure Memory platform device */
7888+	spinlock_t kslock ____cacheline_aligned;
7889+
7890+	/* SM Register offset from JR base address */
7891+	u32 sm_reg_offset;
7892+
7893+	/* Default parameters for geometry */
7894+	u32 max_pages;		/* maximum pages this instance can support */
7895+	u32 top_partition;	/* highest partition number in this instance */
7896+	u32 top_page;		/* highest page number in this instance */
7897+	u32 page_size;		/* page size */
7898+	u32 slot_size;		/* selected size of each storage block */
7899+
7900+	/* Partition/Page Allocation Map */
7901+	u32 localpages;		/* Number of pages we can access */
7902+	struct sm_page_descriptor *pagedesc;	/* Allocated per-page */
7903+
7904+	/* Installed handlers for keystore access */
7905+	int (*data_init)(struct device *dev, u32 unit);
7906+	void (*data_cleanup)(struct device *dev, u32 unit);
7907+	int (*slot_alloc)(struct device *dev, u32 unit, u32 size, u32 *slot);
7908+	int (*slot_dealloc)(struct device *dev, u32 unit, u32 slot);
7909+	void *(*slot_get_address)(struct device *dev, u32 unit, u32 handle);
7910+	void *(*slot_get_physical)(struct device *dev, u32 unit, u32 handle);
7911+	u32 (*slot_get_base)(struct device *dev, u32 unit, u32 handle);
7912+	u32 (*slot_get_offset)(struct device *dev, u32 unit, u32 handle);
7913+	u32 (*slot_get_slot_size)(struct device *dev, u32 unit, u32 handle);
7914+};
7915+
7916+#endif /* SM_H */
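A sketch of the intended call flow through this keystore API; ksdev, the unit choice, and the 16-byte key size are illustrative, and error checking is omitted:

	u32 unit = 0, slot;
	u8 key_data[16] = { 0 };	/* plaintext key material */

	sm_init_keystore(ksdev);
	if (!sm_detect_keystore_units(ksdev))
		return -ENODEV;
	sm_establish_keystore(ksdev, unit);
	sm_keystore_slot_alloc(ksdev, unit, sizeof(key_data), &slot);
	sm_keystore_slot_load(ksdev, unit, slot, key_data, sizeof(key_data));
	/* cover (blacken) the key in place, AES-ECB variant */
	sm_keystore_cover_key(ksdev, unit, slot, sizeof(key_data), KEY_COVER_ECB);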
7917diff --git a/drivers/crypto/caam/sm_store.c b/drivers/crypto/caam/sm_store.c
7918new file mode 100644
7919index 000000000..88e0706e9
7920--- /dev/null
7921+++ b/drivers/crypto/caam/sm_store.c
7922@@ -0,0 +1,1270 @@
7923+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
7924+/*
7925+ * CAAM Secure Memory Storage Interface
7926+ *
7927+ * Copyright 2008-2015 Freescale Semiconductor, Inc.
7928+ * Copyright 2016-2019 NXP
7929+ *
7930+ * Loosely based on the SHW Keystore API for SCC/SCC2
7931+ * Experimental implementation and NOT intended for upstream use. Expect
7932+ * this interface to be amended significantly in the future once it becomes
7933+ * integrated into live applications.
7934+ *
7935+ * Known issues:
7936+ *
7937+ * - Executes one instance of a secure memory "driver". This is tied to the
7938+ *   fact that job rings can't run as standalone instances in the present
7939+ *   configuration.
7940+ *
7941+ * - It does not expose a userspace interface. The value of a userspace
7942+ *   interface for access to secrets is a point for further architectural
7943+ *   discussion.
7944+ *
7945+ * - Partition/permission management is not part of this interface. It
7946+ *   depends on some level of "knowledge" agreed upon between bootloader,
7947+ *   provisioning applications, and OS-hosted software (which uses this
7948+ *   driver).
7949+ *
7950+ * - No means of identifying the location or purpose of secrets managed by
7951+ *   this interface exists; "slot location" and format of a given secret
7952+ *   needs to be agreed upon between bootloader, provisioner, and OS-hosted
7953+ *   application.
7954+ */
7955+
7956+#include "compat.h"
7957+#include "regs.h"
7958+#include "jr.h"
7959+#include "desc.h"
7960+#include "intern.h"
7961+#include "error.h"
7962+#include "sm.h"
7963+#include <linux/of_address.h>
7964+
7965+#define SECMEM_KEYMOD_LEN 8
7966+#define GENMEM_KEYMOD_LEN 16
7967+
7968+#ifdef SM_DEBUG_CONT
7969+void sm_show_page(struct device *dev, struct sm_page_descriptor *pgdesc)
7970+{
7971+	struct caam_drv_private_sm *smpriv = dev_get_drvdata(dev);
7972+	u32 i, *smdata;
7973+
7974+	dev_info(dev, "physical page %d content at 0x%08x\n",
7975+		 pgdesc->phys_pagenum, pgdesc->pg_base);
7976+	smdata = pgdesc->pg_base;
7977+	for (i = 0; i < (smpriv->page_size / sizeof(u32)); i += 4)
7978+		dev_info(dev, "[0x%08x] 0x%08x 0x%08x 0x%08x 0x%08x\n",
7979+			 (u32)&smdata[i], smdata[i], smdata[i+1], smdata[i+2],
7980+			 smdata[i+3]);
7981+}
7982+#endif
7983+
7984+#define INITIAL_DESCSZ 16	/* size of tmp buffer for descriptor construction */
7985+
7986+static __always_inline u32 sm_send_cmd(struct caam_drv_private_sm *smpriv,
7987+					     struct caam_drv_private_jr *jrpriv,
7988+					     u32 cmd, u32 *status)
7989+{
7990+	void __iomem *write_address;
7991+	void __iomem *read_address;
7992+
7993+	if (smpriv->sm_reg_offset == SM_V1_OFFSET) {
7994+		struct caam_secure_mem_v1 *sm_regs_v1;
7995+
7996+		sm_regs_v1 = (struct caam_secure_mem_v1 *)
7997+			((void *)jrpriv->rregs + SM_V1_OFFSET);
7998+		write_address = &sm_regs_v1->sm_cmd;
7999+		read_address = &sm_regs_v1->sm_status;
8000+
8001+	} else if (smpriv->sm_reg_offset == SM_V2_OFFSET) {
8002+		struct caam_secure_mem_v2 *sm_regs_v2;
8003+
8004+		sm_regs_v2 = (struct caam_secure_mem_v2 *)
8005+			((void *)jrpriv->rregs + SM_V2_OFFSET);
8006+		write_address = &sm_regs_v2->sm_cmd;
8007+		read_address = &sm_regs_v2->sm_status;
8008+
8009+	} else {
8010+		return -EINVAL;
8011+	}
8012+
8013+	wr_reg32(write_address, cmd);
8014+
8015+	udelay(10);
8016+
8017+	/* Poll status until the command is no longer reported as incomplete */
8018+	do {
8019+		*status = rd_reg32(read_address);
8020+	} while (((*status & SMCS_CMDERR_MASK) >>  SMCS_CMDERR_SHIFT)
8021+				   == SMCS_CMDERR_INCOMP);
8022+
8023+	return 0;
8024+}
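
For reference, a minimal sketch of how a caller composes the command word for sm_send_cmd(); it mirrors the page-inquiry loop in caam_sm_startup() further down, and the SMC_* masks and command codes come from sm.h:

    /* Sketch: inquire about ownership of one secure memory page. Assumes
     * smpriv and jrpriv are already set up, as in caam_sm_startup().
     */
    u32 pgstat;
    u32 cmd = ((page << SMC_PAGE_SHIFT) & SMC_PAGE_MASK) |
              (SMC_CMD_PAGE_INQUIRY & SMC_CMD_MASK);

    if (sm_send_cmd(smpriv, jrpriv, cmd, &pgstat))
            return -EINVAL;     /* unrecognized register map version */
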
8025+
8026+/*
8027+ * Construct a black key conversion job descriptor
8028+ *
8029+ * This function constructs a job descriptor capable of performing
8030+ * a key blackening operation on a plaintext secure memory resident object.
8031+ *
8032+ * - desc	pointer to a pointer to the descriptor generated by this
8033+ *		function. Caller will be responsible to kfree() this
8034+ *		descriptor after execution.
8035+ * - key	physical pointer to the plaintext, which will also hold
8036+ *		the result. Since encryption occurs in place, caller must
8037+ *              ensure that the space is large enough to accommodate the
8038+ *              blackened key
8039+ * - keysz	size of the plaintext
8040+ * - auth	if a CCM-covered key is required, use KEY_COVER_CCM, else
8041+ *		use KEY_COVER_ECB.
8042+ *
8043+ * KEY to key1 from @key_addr LENGTH 16 BYTES;
8044+ * FIFO STORE from key1[ecb] TO @key_addr LENGTH 16 BYTES;
8045+ *
8046+ * Note that this variant uses the JDKEK only; it does not accommodate the
8047+ * trusted key encryption key at this time.
8048+ *
8049+ */
8050+static int blacken_key_jobdesc(u32 **desc, void *key, u16 keysz, bool auth)
8051+{
8052+	u32 *tdesc, tmpdesc[INITIAL_DESCSZ];
8053+	u16 dsize, idx;
8054+
8055+	memset(tmpdesc, 0, INITIAL_DESCSZ * sizeof(u32));
8056+	idx = 1;
8057+
8058+	/* Load key to class 1 key register */
8059+	tmpdesc[idx++] = CMD_KEY | CLASS_1 | (keysz & KEY_LENGTH_MASK);
8060+	tmpdesc[idx++] = (uintptr_t)key;
8061+
8062+	/* ...and write back out via FIFO store*/
8063+	tmpdesc[idx] = CMD_FIFO_STORE | CLASS_1 | (keysz & KEY_LENGTH_MASK);
8064+
8065+	/* plus account for ECB/CCM option in FIFO_STORE */
8066+	if (auth == KEY_COVER_ECB)
8067+		tmpdesc[idx] |= FIFOST_TYPE_KEY_KEK;
8068+	else
8069+		tmpdesc[idx] |= FIFOST_TYPE_KEY_CCM_JKEK;
8070+
8071+	idx++;
8072+	tmpdesc[idx++] = (uintptr_t)key;
8073+
8074+	/* finish off the job header */
8075+	tmpdesc[0] = CMD_DESC_HDR | HDR_ONE | (idx & HDR_DESCLEN_MASK);
8076+	dsize = idx * sizeof(u32);
8077+
8078+	/* now allocate execution buffer and coat it with executable */
8079+	tdesc = kmalloc(dsize, GFP_KERNEL | GFP_DMA);
8080+	if (tdesc == NULL)
8081+		return 0;
8082+
8083+	memcpy(tdesc, tmpdesc, dsize);
8084+	*desc = tdesc;
8085+
8086+	return dsize;
8087+}
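
The returned size doubles as the error signal: zero means the descriptor buffer could not be allocated. A hedged sketch of the calling pattern, mirroring sm_keystore_cover_key() below (slotphys and key_length stand in for the caller's slot data):

    u32 *desc = NULL;
    int retval = 0;
    u32 dsize = blacken_key_jobdesc(&desc, slotphys, key_length,
                                    KEY_COVER_ECB);

    if (!dsize)
            return -ENOMEM;        /* zero length => allocation failed */
    if (sm_key_job(dev, desc))     /* run synchronously on the SM ring */
            retval = -EIO;
    kfree(desc);                   /* caller owns the descriptor buffer */
    return retval;
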
8088+
8089+/*
8090+ * Construct a blob encapsulation job descriptor
8091+ *
8092+ * This function dynamically constructs a blob encapsulation job descriptor
8093+ * from the following arguments:
8094+ *
8095+ * - desc	pointer to a pointer to the descriptor generated by this
8096+ *		function. Caller will be responsible to kfree() this
8097+ *		descriptor after execution.
8098+ * - keymod	Physical pointer to a key modifier, which must reside in a
8099+ *		contiguous piece of memory. Modifier will be assumed to be
8100+ *		8 bytes long for a blob of type SM_SECMEM, or 16 bytes long
8101+ *		for a blob of type SM_GENMEM (see blobtype argument).
8102+ * - secretbuf	Physical pointer to a secret, normally a black or red key,
8103+ *		possibly residing within an accessible secure memory page,
8104+ *		that is to be encapsulated into an output blob.
8105+ * - outbuf	Physical pointer to the destination buffer to receive the
8106+ *		encapsulated output. This buffer will need to be 48 bytes
8107+ *		larger than the input because of the added encapsulation data.
8108+ *		The generated descriptor will account for the increase in size,
8109+ *		but the caller must also account for this increase in the
8110+ *		buffer allocator.
8111+ * - secretsz	Size of input secret, in bytes. This is limited to 65536
8112+ *		less the size of blob overhead, since the length embeds into
8113+ *		DECO pointer in/out instructions.
8114+ * - keycolor   Determines if the source data is covered (black key) or
8115+ *		plaintext (red key). RED_KEY and BLACK_KEY are defined
8116+ *		for this purpose.
8117+ * - blobtype	Determines whether the encapsulated blob should be a secure
8118+ *		memory blob (SM_SECMEM), with partition data embedded with
8119+ *		key material, or a general memory blob (SM_GENMEM).
8120+ * - auth	If the BLACK_KEY source is covered via AES-CCM, specify
8121+ *		KEY_COVER_CCM, else use AES-ECB (KEY_COVER_ECB).
8122+ *
8123+ * Upon completion, desc points to a buffer containing a CAAM job
8124+ * descriptor which encapsulates data into an externally-storable blob
8125+ * suitable for use across power cycles.
8126+ *
8127+ * This is an example of a black key encapsulation job into a general memory
8128+ * blob. Notice the 16-byte key modifier in the LOAD instruction. Also note
8129+ * that the output is 48 bytes longer than the input:
8130+ *
8131+ * [00] B0800008       jobhdr: stidx=0 len=8
8132+ * [01] 14400010           ld: ccb2-key len=16 offs=0
8133+ * [02] 08144891               ptr->@0x08144891
8134+ * [03] F800003A    seqoutptr: len=58
8135+ * [04] 01000000               out_ptr->@0x01000000
8136+ * [05] F000000A     seqinptr: len=10
8137+ * [06] 09745090               in_ptr->@0x09745090
8138+ * [07] 870D0004    operation: encap blob  reg=memory, black, format=normal
8139+ *
8140+ * This is an example of a red key encapsulation job for storing a red key
8141+ * into a secure memory blob. Note the 8 byte modifier on the 12 byte offset
8142+ * in the LOAD instruction; this accounts for blob permission storage:
8143+ *
8144+ * [00] B0800008       jobhdr: stidx=0 len=8
8145+ * [01] 14400C08           ld: ccb2-key len=8 offs=12
8146+ * [02] 087D0784               ptr->@0x087d0784
8147+ * [03] F8000050    seqoutptr: len=80
8148+ * [04] 09251BB2               out_ptr->@0x09251bb2
8149+ * [05] F0000020     seqinptr: len=32
8150+ * [06] 40000F31               in_ptr->@0x40000f31
8151+ * [07] 870D0008    operation: encap blob  reg=memory, red, sec_mem,
8152+ *                             format=normal
8153+ *
8154+ * Note: this function only generates 32-bit pointers at present, and should
8155+ * be refactored using a scheme that allows both 32- and 64-bit addressing
8156+ */
8157+
8158+static int blob_encap_jobdesc(u32 **desc, dma_addr_t keymod,
8159+			      void *secretbuf, dma_addr_t outbuf,
8160+			      u16 secretsz, u8 keycolor, u8 blobtype, u8 auth)
8161+{
8162+	u32 *tdesc, tmpdesc[INITIAL_DESCSZ];
8163+	u16 dsize, idx;
8164+
8165+	memset(tmpdesc, 0, INITIAL_DESCSZ * sizeof(u32));
8166+	idx = 1;
8167+
8168+	/*
8169+	 * Key modifier works differently for secure/general memory blobs
8170+	 * This accounts for the permission/protection data encapsulated
8171+	 * within the blob if a secure memory blob is requested
8172+	 */
8173+	if (blobtype == SM_SECMEM)
8174+		tmpdesc[idx++] = CMD_LOAD | LDST_CLASS_2_CCB |
8175+				 LDST_SRCDST_BYTE_KEY |
8176+				 ((12 << LDST_OFFSET_SHIFT) & LDST_OFFSET_MASK)
8177+				 | (8 & LDST_LEN_MASK);
8178+	else /* is general memory blob */
8179+		tmpdesc[idx++] = CMD_LOAD | LDST_CLASS_2_CCB |
8180+				 LDST_SRCDST_BYTE_KEY | (16 & LDST_LEN_MASK);
8181+
8182+	tmpdesc[idx++] = (u32)keymod;
8183+
8184+	/*
8185+	 * Encapsulation output must include space for blob key encryption
8186+	 * key and MAC tag
8187+	 */
8188+	tmpdesc[idx++] = CMD_SEQ_OUT_PTR | (secretsz + BLOB_OVERHEAD);
8189+	tmpdesc[idx++] = (u32)outbuf;
8190+
8191+	/* Input data, should be somewhere in secure memory */
8192+	tmpdesc[idx++] = CMD_SEQ_IN_PTR | secretsz;
8193+	tmpdesc[idx++] = (uintptr_t)secretbuf;
8194+
8195+	/* Set blob encap, then color */
8196+	tmpdesc[idx] = CMD_OPERATION | OP_TYPE_ENCAP_PROTOCOL | OP_PCLID_BLOB;
8197+
8198+	if (blobtype == SM_SECMEM)
8199+		tmpdesc[idx] |= OP_PCL_BLOB_PTXT_SECMEM;
8200+
8201+	if (auth == KEY_COVER_CCM)
8202+		tmpdesc[idx] |= OP_PCL_BLOB_EKT;
8203+
8204+	if (keycolor == BLACK_KEY)
8205+		tmpdesc[idx] |= OP_PCL_BLOB_BLACK;
8206+
8207+	idx++;
8208+	tmpdesc[0] = CMD_DESC_HDR | HDR_ONE | (idx & HDR_DESCLEN_MASK);
8209+	dsize = idx * sizeof(u32);
8210+
8211+	tdesc = kmalloc(dsize, GFP_KERNEL | GFP_DMA);
8212+	if (tdesc == NULL)
8213+		return 0;
8214+
8215+	memcpy(tdesc, tmpdesc, dsize);
8216+	*desc = tdesc;
8217+	return dsize;
8218+}
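
The descriptor encodes the output length as secretsz + BLOB_OVERHEAD, but the caller still has to size the destination buffer for that 48-byte overhead itself. A minimal sizing sketch (keylen stands in for the caller's secret size):

    /* Sketch: allocate a DMA-able destination large enough for the blob */
    u8 *outbuf = kzalloc(keylen + BLOB_OVERHEAD, GFP_KERNEL | GFP_DMA);

    if (!outbuf)
            return -ENOMEM;
    /* ... DMA-map outbuf, then pass keylen (not the padded size) as
     * secretsz when building the descriptor ...
     */
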
8219+
8220+/*
8221+ * Construct a blob decapsulation job descriptor
8222+ *
8223+ * This function dynamically constructs a blob decapsulation job descriptor
8224+ * from the following arguments:
8225+ *
8226+ * - desc	pointer to a pointer to the descriptor generated by this
8227+ *		function. Caller will be responsible to kfree() this
8228+ *		descriptor after execution.
8229+ * - keymod	Physical pointer to a key modifier, which must reside in a
8230+ *		contiguous piece of memory. Modifier will be assumed to be
8231+ *		8 bytes long for a blob of type SM_SECMEM, or 16 bytes long
8232+ *		for a blob of type SM_GENMEM (see blobtype argument).
8233+ * - blobbuf	Physical pointer (into external memory) of the blob to
8234+ *		be decapsulated. Blob must reside in a contiguous memory
8235+ *		segment.
8236+ * - outbuf	Physical pointer of the decapsulated output, possibly into
8237+ *		a location within a secure memory page. Must be contiguous.
8238+ * - secretsz	Size of encapsulated secret in bytes (not the size of the
8239+ *		input blob).
8240+ * - keycolor   Determines if decapsulated content is encrypted (BLACK_KEY)
8241+ *		or left as plaintext (RED_KEY).
8242+ * - blobtype	Determines whether the blob to be decapsulated is a secure
8243+ *		memory blob (SM_SECMEM), with partition data embedded with
8244+ *		key material, or a general memory blob (SM_GENMEM).
8245+ * - auth	If decapsulation path is specified by BLACK_KEY, then if
8246+ *		AES-CCM is requested for key covering use KEY_COVER_CCM, else
8247+ *		use AES-ECB (KEY_COVER_ECB).
8248+ *
8249+ * Upon completion, desc points to a buffer containing a CAAM job descriptor
8250+ * that decapsulates a key blob from external memory into a black (encrypted)
8251+ * key or red (plaintext) content.
8252+ *
8253+ * This is an example of a black key decapsulation job from a general memory
8254+ * blob. Notice the 16-byte key modifier in the LOAD instruction.
8255+ *
8256+ * [00] B0800008       jobhdr: stidx=0 len=8
8257+ * [01] 14400010           ld: ccb2-key len=16 offs=0
8258+ * [02] 08A63B7F               ptr->@0x08a63b7f
8259+ * [03] F8000010    seqoutptr: len=16
8260+ * [04] 01000000               out_ptr->@0x01000000
8261+ * [05] F000003A     seqinptr: len=58
8262+ * [06] 01000010               in_ptr->@0x01000010
8263+ * [07] 860D0004    operation: decap blob  reg=memory, black, format=normal
8264+ *
8265+ * This is an example of a red key decapsulation job for restoring a red key
8266+ * from a secure memory blob. Note the 8 byte modifier on the 12 byte offset
8267+ * in the LOAD instruction:
8268+ *
8269+ * [00] B0800008       jobhdr: stidx=0 len=8
8270+ * [01] 14400C08           ld: ccb2-key len=8 offs=12
8271+ * [02] 01000000               ptr->@0x01000000
8272+ * [03] F8000020    seqoutptr: len=32
8273+ * [04] 400000E6               out_ptr->@0x400000e6
8274+ * [05] F0000050     seqinptr: len=80
8275+ * [06] 08F0C0EA               in_ptr->@0x08f0c0ea
8276+ * [07] 860D0008    operation: decap blob  reg=memory, red, sec_mem,
8277+ *			       format=normal
8278+ *
8279+ * Note: this function only generates 32-bit pointers at present, and should
8280+ * be refactored using a scheme that allows both 32- and 64-bit addressing
8281+ */
8282+
8283+static int blob_decap_jobdesc(u32 **desc, dma_addr_t keymod, dma_addr_t blobbuf,
8284+			      u8 *outbuf, u16 secretsz, u8 keycolor,
8285+			      u8 blobtype, u8 auth)
8286+{
8287+	u32 *tdesc, tmpdesc[INITIAL_DESCSZ];
8288+	u16 dsize, idx;
8289+
8290+	memset(tmpdesc, 0, INITIAL_DESCSZ * sizeof(u32));
8291+	idx = 1;
8292+
8293+	/* Load key modifier */
8294+	if (blobtype == SM_SECMEM)
8295+		tmpdesc[idx++] = CMD_LOAD | LDST_CLASS_2_CCB |
8296+				 LDST_SRCDST_BYTE_KEY |
8297+				 ((12 << LDST_OFFSET_SHIFT) & LDST_OFFSET_MASK)
8298+				 | (8 & LDST_LEN_MASK);
8299+	else /* is general memory blob */
8300+		tmpdesc[idx++] = CMD_LOAD | LDST_CLASS_2_CCB |
8301+				 LDST_SRCDST_BYTE_KEY | (16 & LDST_LEN_MASK);
8302+
8303+	tmpdesc[idx++] = (u32)keymod;
8304+
8305+	/* Input length covers the secret plus the BKEK + MAC tag overhead */
8306+	tmpdesc[idx++] = CMD_SEQ_IN_PTR | (secretsz + BLOB_OVERHEAD);
8307+	tmpdesc[idx++] = (u32)blobbuf;
8308+	tmpdesc[idx++] = CMD_SEQ_OUT_PTR | secretsz;
8309+	tmpdesc[idx++] = (uintptr_t)outbuf;
8310+
8311+	/* Set blob decap, then color */
8312+	tmpdesc[idx] = CMD_OPERATION | OP_TYPE_DECAP_PROTOCOL | OP_PCLID_BLOB;
8313+
8314+	if (blobtype == SM_SECMEM)
8315+		tmpdesc[idx] |= OP_PCL_BLOB_PTXT_SECMEM;
8316+
8317+	if (auth == KEY_COVER_CCM)
8318+		tmpdesc[idx] |= OP_PCL_BLOB_EKT;
8319+
8320+	if (keycolor == BLACK_KEY)
8321+		tmpdesc[idx] |= OP_PCL_BLOB_BLACK;
8322+
8323+	idx++;
8324+	tmpdesc[0] = CMD_DESC_HDR | HDR_ONE | (idx & HDR_DESCLEN_MASK);
8325+	dsize = idx * sizeof(u32);
8326+
8327+	tdesc = kmalloc(dsize, GFP_KERNEL | GFP_DMA);
8328+	if (tdesc == NULL)
8329+		return 0;
8330+
8331+	memcpy(tdesc, tmpdesc, dsize);
8332+	*desc = tdesc;
8333+	return dsize;
8334+}
8335+
8336+/*
8337+ * Pseudo-synchronous ring access functions for carrying out key
8338+ * encapsulation and decapsulation
8339+ */
8340+
8341+struct sm_key_job_result {
8342+	int error;
8343+	struct completion completion;
8344+};
8345+
8346+void sm_key_job_done(struct device *dev, u32 *desc, u32 err, void *context)
8347+{
8348+	struct sm_key_job_result *res = context;
8349+
8350+	if (err)
8351+		caam_jr_strstatus(dev, err);
8352+
8353+	res->error = err;	/* save off the error for postprocessing */
8354+
8355+	complete(&res->completion);	/* mark us complete */
8356+}
8357+
8358+static int sm_key_job(struct device *ksdev, u32 *jobdesc)
8359+{
8360+	struct sm_key_job_result testres = {0};
8361+	struct caam_drv_private_sm *kspriv;
8362+	int rtn = 0;
8363+
8364+	kspriv = dev_get_drvdata(ksdev);
8365+
8366+	init_completion(&testres.completion);
8367+
8368+	rtn = caam_jr_enqueue(kspriv->smringdev, jobdesc, sm_key_job_done,
8369+			      &testres);
8370+	if (rtn != -EINPROGRESS)
8371+		goto exit;
8372+
8373+	wait_for_completion(&testres.completion);
8374+	rtn = testres.error;
8375+
8376+exit:
8377+	return rtn;
8378+}
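
Since caam_jr_enqueue() reports a successfully queued job as -EINPROGRESS, any other return value is an immediate failure; otherwise sm_key_job() blocks on the completion and returns the CAAM status captured by sm_key_job_done(). The net effect is a synchronous call, sketched here (jobdesc would come from one of the *_jobdesc() builders above):

    int err = sm_key_job(ksdev, jobdesc);

    if (err)    /* nonzero: enqueue failed, or CAAM flagged a job error */
            dev_err(ksdev, "secure memory job failed: %d\n", err);
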
8379+
8380+/*
8381+ * The following section establishes the default methods for keystore access.
8382+ * They are NOT intended for use external to this module.
8383+ *
8384+ * In the present version, these are the only means for the higher-level
8385+ * interface to deal with the mechanics of accessing the physical keystore
8386+ */
8387+
8388+
8389+int slot_alloc(struct device *dev, u32 unit, u32 size, u32 *slot)
8390+{
8391+	struct caam_drv_private_sm *smpriv = dev_get_drvdata(dev);
8392+	struct keystore_data *ksdata = smpriv->pagedesc[unit].ksdata;
8393+	u32 i;
8394+#ifdef SM_DEBUG
8395+	dev_info(dev, "slot_alloc(): requesting slot for %d bytes\n", size);
8396+#endif
8397+
8398+	if (size > smpriv->slot_size)
8399+		return -EKEYREJECTED;
8400+
8401+	for (i = 0; i < ksdata->slot_count; i++) {
8402+		if (ksdata->slot[i].allocated == 0) {
8403+			ksdata->slot[i].allocated = 1;
8404+			(*slot) = i;
8405+#ifdef SM_DEBUG
8406+			dev_info(dev, "slot_alloc(): new slot %d allocated\n",
8407+				 *slot);
8408+#endif
8409+			return 0;
8410+		}
8411+	}
8412+
8413+	return -ENOSPC;
8414+}
8415+EXPORT_SYMBOL(slot_alloc);
8416+
8417+int slot_dealloc(struct device *dev, u32 unit, u32 slot)
8418+{
8419+	struct caam_drv_private_sm *smpriv = dev_get_drvdata(dev);
8420+	struct keystore_data *ksdata = smpriv->pagedesc[unit].ksdata;
8421+	u8 __iomem *slotdata;
8422+
8423+#ifdef SM_DEBUG
8424+	dev_info(dev, "slot_dealloc(): releasing slot %d\n", slot);
8425+#endif
8426+	if (slot >= ksdata->slot_count)
8427+		return -EINVAL;
8428+	slotdata = ksdata->base_address + slot * smpriv->slot_size;
8429+
8430+	if (ksdata->slot[slot].allocated == 1) {
8431+		/* Forcibly overwrite the data from the keystore */
8432+		memset_io(slotdata, 0, smpriv->slot_size);
8434+
8435+		ksdata->slot[slot].allocated = 0;
8436+#ifdef SM_DEBUG
8437+		dev_info(dev, "slot_dealloc(): slot %d released\n", slot);
8438+#endif
8439+		return 0;
8440+	}
8441+
8442+	return -EINVAL;
8443+}
8444+EXPORT_SYMBOL(slot_dealloc);
8445+
8446+void *slot_get_address(struct device *dev, u32 unit, u32 slot)
8447+{
8448+	struct caam_drv_private_sm *smpriv = dev_get_drvdata(dev);
8449+	struct keystore_data *ksdata = smpriv->pagedesc[unit].ksdata;
8450+
8451+	if (slot >= ksdata->slot_count)
8452+		return NULL;
8453+
8454+#ifdef SM_DEBUG
8455+	dev_info(dev, "slot_get_address(): slot %d is 0x%08x\n", slot,
8456+		 (u32)ksdata->base_address + slot * smpriv->slot_size);
8457+#endif
8458+
8459+	return ksdata->base_address + slot * smpriv->slot_size;
8460+}
8461+
8462+void *slot_get_physical(struct device *dev, u32 unit, u32 slot)
8463+{
8464+	struct caam_drv_private_sm *smpriv = dev_get_drvdata(dev);
8465+	struct keystore_data *ksdata = smpriv->pagedesc[unit].ksdata;
8466+
8467+	if (slot >= ksdata->slot_count)
8468+		return NULL;
8469+
8470+#ifdef SM_DEBUG
8471+	dev_info(dev, "%s: slot %d is 0x%08x\n", __func__, slot,
8472+		 (u32)ksdata->phys_address + slot * smpriv->slot_size);
8473+#endif
8474+
8475+	return ksdata->phys_address + slot * smpriv->slot_size;
8476+}
8477+
8478+u32 slot_get_base(struct device *dev, u32 unit, u32 slot)
8479+{
8480+	struct caam_drv_private_sm *smpriv = dev_get_drvdata(dev);
8481+	struct keystore_data *ksdata = smpriv->pagedesc[unit].ksdata;
8482+
8483+	/*
8484+	 * There could potentially be more than one secure partition object
8485+	 * associated with this keystore.  For now, there is just one.
8486+	 */
8487+
8488+	(void)slot;
8489+
8490+#ifdef SM_DEBUG
8491+	dev_info(dev, "slot_get_base(): slot %d = 0x%08x\n",
8492+		slot, (u32)ksdata->base_address);
8493+#endif
8494+
8495+	return (uintptr_t)(ksdata->base_address);
8496+}
8497+
8498+u32 slot_get_offset(struct device *dev, u32 unit, u32 slot)
8499+{
8500+	struct caam_drv_private_sm *smpriv = dev_get_drvdata(dev);
8501+	struct keystore_data *ksdata = smpriv->pagedesc[unit].ksdata;
8502+
8503+	if (slot >= ksdata->slot_count)
8504+		return -EINVAL;
8505+
8506+#ifdef SM_DEBUG
8507+	dev_info(dev, "slot_get_offset(): slot %d = %d\n", slot,
8508+		slot * smpriv->slot_size);
8509+#endif
8510+
8511+	return slot * smpriv->slot_size;
8512+}
8513+
8514+u32 slot_get_slot_size(struct device *dev, u32 unit, u32 slot)
8515+{
8516+	struct caam_drv_private_sm *smpriv = dev_get_drvdata(dev);
8517+
8518+
8519+#ifdef SM_DEBUG
8520+	dev_info(dev, "slot_get_slot_size(): slot %d = %d\n", slot,
8521+		 smpriv->slot_size);
8522+#endif
8523+	/* All slots are the same size in the default implementation */
8524+	return smpriv->slot_size;
8525+}
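
All of the default accessors reduce to the same linear arithmetic: a slot lives at the page base plus slot * slot_size, with slot_size fixed at build time by CONFIG_CRYPTO_DEV_FSL_CAAM_SM_SLOTSIZE. A sketch of the invariant, assuming for illustration the default slot size of 2^7 = 128 bytes and a 4 KiB page (the real page size is derived from SMVID at probe time):

    /* 4096 / 128 = 32 slots per unit; slot 3 sits at base + 0x180 */
    void *addr = slot_get_address(dev, unit, 3);
    u32 offset = slot_get_offset(dev, unit, 3);        /* 0x180 */
    u32 size   = slot_get_slot_size(dev, unit, 3);     /* 128   */
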
8526+
8527+
8528+
8529+int kso_init_data(struct device *dev, u32 unit)
8530+{
8531+	struct caam_drv_private_sm *smpriv = dev_get_drvdata(dev);
8532+	struct keystore_data *keystore_data = NULL;
8533+	u32 slot_count;
8534+	u32 keystore_data_size;
8535+
8536+	/*
8537+	 * Calculate the required size of the keystore data structure, based
8538+	 * on the number of keys that can fit in the partition.
8539+	 */
8540+	slot_count = smpriv->page_size / smpriv->slot_size;
8541+#ifdef SM_DEBUG
8542+	dev_info(dev, "kso_init_data: %d slots initializing\n", slot_count);
8543+#endif
8544+
8545+	keystore_data_size = sizeof(struct keystore_data) +
8546+				slot_count *
8547+				sizeof(struct keystore_data_slot_info);
8548+
8549+	keystore_data = kzalloc(keystore_data_size, GFP_KERNEL);
8550+
8551+	if (!keystore_data)
8552+		return -ENOMEM;
8553+
8554+#ifdef SM_DEBUG
8555+	dev_info(dev, "kso_init_data: keystore data size = %d\n",
8556+		 keystore_data_size);
8557+#endif
8558+
8559+	/*
8560+	 * Place the slot information structure directly after the keystore data
8561+	 * structure.
8562+	 */
8563+	keystore_data->slot = (struct keystore_data_slot_info *)
8564+			      (keystore_data + 1);
8565+	keystore_data->slot_count = slot_count;
8566+
8567+	smpriv->pagedesc[unit].ksdata = keystore_data;
8568+	smpriv->pagedesc[unit].ksdata->base_address =
8569+		smpriv->pagedesc[unit].pg_base;
8570+	smpriv->pagedesc[unit].ksdata->phys_address =
8571+		smpriv->pagedesc[unit].pg_phys;
8572+
8573+	return 0;
8574+}
8575+
8576+void kso_cleanup_data(struct device *dev, u32 unit)
8577+{
8578+	struct caam_drv_private_sm *smpriv = dev_get_drvdata(dev);
8579+
8580+	/* Release the allocated keystore management data; kfree(NULL) is a
8581+	 * safe no-op. Clear the stale pointer so the sm_keystore_slot_*()
8582+	 * accessors see the unit as uninitialized afterwards.
8583+	 */
8584+	kfree(smpriv->pagedesc[unit].ksdata);
8585+	smpriv->pagedesc[unit].ksdata = NULL;
8588+}
8589+
8590+
8591+
8592+/*
8593+ * Keystore management section
8594+ */
8595+
8596+void sm_init_keystore(struct device *dev)
8597+{
8598+	struct caam_drv_private_sm *smpriv = dev_get_drvdata(dev);
8599+
8600+	smpriv->data_init = kso_init_data;
8601+	smpriv->data_cleanup = kso_cleanup_data;
8602+	smpriv->slot_alloc = slot_alloc;
8603+	smpriv->slot_dealloc = slot_dealloc;
8604+	smpriv->slot_get_address = slot_get_address;
8605+	smpriv->slot_get_physical = slot_get_physical;
8606+	smpriv->slot_get_base = slot_get_base;
8607+	smpriv->slot_get_offset = slot_get_offset;
8608+	smpriv->slot_get_slot_size = slot_get_slot_size;
8609+#ifdef SM_DEBUG
8610+	dev_info(dev, "sm_init_keystore(): handlers installed\n");
8611+#endif
8612+}
8613+EXPORT_SYMBOL(sm_init_keystore);
8614+
8615+/* Return available pages/units */
8616+u32 sm_detect_keystore_units(struct device *dev)
8617+{
8618+	struct caam_drv_private_sm *smpriv = dev_get_drvdata(dev);
8619+
8620+	return smpriv->localpages;
8621+}
8622+EXPORT_SYMBOL(sm_detect_keystore_units);
8623+
8624+/*
8625+ * Do any keystore specific initializations
8626+ */
8627+int sm_establish_keystore(struct device *dev, u32 unit)
8628+{
8629+	struct caam_drv_private_sm *smpriv = dev_get_drvdata(dev);
8630+
8631+#ifdef SM_DEBUG
8632+	dev_info(dev, "sm_establish_keystore(): unit %d initializing\n", unit);
8633+#endif
8634+
8635+	if (smpriv->data_init == NULL)
8636+		return -EINVAL;
8637+
8638+	/* Call the data_init function for any user setup */
8639+	return smpriv->data_init(dev, unit);
8640+}
8641+EXPORT_SYMBOL(sm_establish_keystore);
8642+
8643+void sm_release_keystore(struct device *dev, u32 unit)
8644+{
8645+	struct caam_drv_private_sm *smpriv = dev_get_drvdata(dev);
8646+
8647+#ifdef SM_DEBUG
8648+	dev_info(dev, "sm_release_keystore(): unit %d releasing\n", unit);
8649+#endif
8650+	if ((smpriv != NULL) && (smpriv->data_cleanup != NULL))
8651+		smpriv->data_cleanup(dev, unit);
8652+
8653+	return;
8654+}
8655+EXPORT_SYMBOL(sm_release_keystore);
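
Taken together, the exported entry points form a small lifecycle, which sm_test.c below exercises end to end. A condensed, hedged sketch (ksdev is the secure memory platform device; the default handlers are already installed by caam_sm_startup(), and unit 1 is chosen because bootloaders commonly claim unit 0, as the test module notes):

    u32 units = sm_detect_keystore_units(ksdev);

    if (units < 2)
            return -ENODEV;        /* unit 0 may belong to the bootloader */
    if (sm_establish_keystore(ksdev, 1))   /* per-unit slot bookkeeping */
            return -EINVAL;
    /* ... sm_keystore_slot_alloc/load/cover/export ... */
    sm_release_keystore(ksdev, 1);
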
8656+
8657+/*
8658+ * The subsequent interface (sm_keystore_*) forms the accessor interface to
8659+ * the keystore
8660+ */
8661+int sm_keystore_slot_alloc(struct device *dev, u32 unit, u32 size, u32 *slot)
8662+{
8663+	struct caam_drv_private_sm *smpriv = dev_get_drvdata(dev);
8664+	int retval = -EINVAL;
8665+
8666+	spin_lock(&smpriv->kslock);
8667+
8668+	if ((smpriv->slot_alloc == NULL) ||
8669+	    (smpriv->pagedesc[unit].ksdata == NULL))
8670+		goto out;
8671+
8672+	retval =  smpriv->slot_alloc(dev, unit, size, slot);
8673+
8674+out:
8675+	spin_unlock(&smpriv->kslock);
8676+	return retval;
8677+}
8678+EXPORT_SYMBOL(sm_keystore_slot_alloc);
8679+
8680+int sm_keystore_slot_dealloc(struct device *dev, u32 unit, u32 slot)
8681+{
8682+	struct caam_drv_private_sm *smpriv = dev_get_drvdata(dev);
8683+	int retval = -EINVAL;
8684+
8685+	spin_lock(&smpriv->kslock);
8686+
8687+	if ((smpriv->slot_dealloc == NULL) ||
8688+	    (smpriv->pagedesc[unit].ksdata == NULL))
8689+		goto out;
8690+
8691+	retval = smpriv->slot_dealloc(dev, unit, slot);
8692+out:
8693+	spin_unlock(&smpriv->kslock);
8694+	return retval;
8695+}
8696+EXPORT_SYMBOL(sm_keystore_slot_dealloc);
8697+
8698+int sm_keystore_slot_load(struct device *dev, u32 unit, u32 slot,
8699+			  const u8 *key_data, u32 key_length)
8700+{
8701+	struct caam_drv_private_sm *smpriv = dev_get_drvdata(dev);
8702+	int retval = -EINVAL;
8703+	u32 slot_size;
8704+	u8 __iomem *slot_location;
8705+
8706+	spin_lock(&smpriv->kslock);
8707+
8708+	slot_size = smpriv->slot_get_slot_size(dev, unit, slot);
8709+
8710+	if (key_length > slot_size) {
8711+		retval = -EFBIG;
8712+		goto out;
8713+	}
8714+
8715+	slot_location = smpriv->slot_get_address(dev, unit, slot);
8716+
8717+	memcpy_toio(slot_location, key_data, key_length);
8718+
8719+	retval = 0;
8720+
8721+out:
8722+	spin_unlock(&smpriv->kslock);
8723+	return retval;
8724+}
8725+EXPORT_SYMBOL(sm_keystore_slot_load);
8726+
8727+int sm_keystore_slot_read(struct device *dev, u32 unit, u32 slot,
8728+			  u32 key_length, u8 *key_data)
8729+{
8730+	struct caam_drv_private_sm *smpriv = dev_get_drvdata(dev);
8731+	int retval = -EINVAL;
8732+	u8 __iomem *slot_addr;
8733+	u32 slot_size;
8734+
8735+	spin_lock(&smpriv->kslock);
8736+
8737+	slot_addr = smpriv->slot_get_address(dev, unit, slot);
8738+	slot_size = smpriv->slot_get_slot_size(dev, unit, slot);
8739+
8740+	if (key_length > slot_size) {
8741+		retval = -EKEYREJECTED;
8742+		goto out;
8743+	}
8744+
8745+	memcpy_fromio(key_data, slot_addr, key_length);
8746+	retval = 0;
8747+
8748+out:
8749+	spin_unlock(&smpriv->kslock);
8750+	return retval;
8751+}
8752+EXPORT_SYMBOL(sm_keystore_slot_read);
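
A hedged sketch of a load/read round trip through these accessors; the length must fit the configured slot size or the calls fail with -EFBIG and -EKEYREJECTED respectively (clrkey16 stands in for any 16-byte caller buffer):

    u32 slot;
    u8 readback[16];

    if (sm_keystore_slot_alloc(ksdev, unit, 16, &slot))
            return -ENOSPC;
    if (!sm_keystore_slot_load(ksdev, unit, slot, clrkey16, 16) &&
        !sm_keystore_slot_read(ksdev, unit, slot, 16, readback)) {
            /* readback now mirrors clrkey16; nothing is covered yet */
    }
    sm_keystore_slot_dealloc(ksdev, unit, slot);
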
8753+
8754+/*
8755+ * Blacken a clear key in a slot. Operates "in place".
8756+ * Limited to class 1 keys at the present time
8757+ */
8758+int sm_keystore_cover_key(struct device *dev, u32 unit, u32 slot,
8759+			  u16 key_length, u8 keyauth)
8760+{
8761+	struct caam_drv_private_sm *smpriv = dev_get_drvdata(dev);
8762+	int retval = 0;
8763+	u8 __iomem *slotaddr;
8764+	void *slotphys;
8765+	u32 dsize, jstat;
8766+	u32 __iomem *coverdesc = NULL;
8767+
8768+	/* Get the address of the object in the slot */
8769+	slotaddr = (u8 *)smpriv->slot_get_address(dev, unit, slot);
8770+	slotphys = (u8 *)smpriv->slot_get_physical(dev, unit, slot);
8771+
8772+	dsize = blacken_key_jobdesc(&coverdesc, slotphys, key_length, keyauth);
8773+	if (!dsize)
8774+		return -ENOMEM;
8775+	jstat = sm_key_job(dev, coverdesc);
8776+	if (jstat)
8777+		retval = -EIO;
8778+
8779+	kfree(coverdesc);
8780+	return retval;
8781+}
8782+EXPORT_SYMBOL(sm_keystore_cover_key);
8783+
8784+/* Export a black/red key to a blob in external memory */
8785+int sm_keystore_slot_export(struct device *dev, u32 unit, u32 slot, u8 keycolor,
8786+			    u8 keyauth, u8 *outbuf, u16 keylen, u8 *keymod)
8787+{
8788+	struct caam_drv_private_sm *smpriv = dev_get_drvdata(dev);
8789+	int retval = 0;
8790+	u8 __iomem *slotaddr, *lkeymod;
8791+	u8 __iomem *slotphys;
8792+	dma_addr_t keymod_dma, outbuf_dma;
8793+	u32 dsize, jstat;
8794+	u32 __iomem *encapdesc = NULL;
8795+	struct device *dev_for_dma_op;
8796+
8797+	/* Use the ring as device for DMA operations */
8798+	dev_for_dma_op = smpriv->smringdev;
8799+
8800+	/* Get the base address(es) of the specified slot */
8801+	slotaddr = (u8 *)smpriv->slot_get_address(dev, unit, slot);
8802+	slotphys = smpriv->slot_get_physical(dev, unit, slot);
8803+
8804+	/* Allocate memory for key modifier compatible with DMA */
8805+	lkeymod = kmalloc(SECMEM_KEYMOD_LEN, GFP_KERNEL | GFP_DMA);
8806+	if (!lkeymod) {
8807+		retval = (-ENOMEM);
8808+		goto exit;
8809+	}
8810+
8811+	/* Get DMA address for the key modifier */
8812+	keymod_dma = dma_map_single(dev_for_dma_op, lkeymod,
8813+					SECMEM_KEYMOD_LEN, DMA_TO_DEVICE);
8814+	if (dma_mapping_error(dev_for_dma_op, keymod_dma)) {
8815+		dev_err(dev, "unable to map keymod: %p\n", lkeymod);
8816+		retval = (-ENOMEM);
8817+		goto free_keymod;
8818+	}
8819+
8820+	/* Copy the keymod and synchronize the DMA */
8821+	memcpy(lkeymod, keymod, SECMEM_KEYMOD_LEN);
8822+	dma_sync_single_for_device(dev_for_dma_op, keymod_dma,
8823+					SECMEM_KEYMOD_LEN, DMA_TO_DEVICE);
8824+
8825+	/* Get DMA address for the destination */
8826+	outbuf_dma = dma_map_single(dev_for_dma_op, outbuf,
8827+				keylen + BLOB_OVERHEAD, DMA_FROM_DEVICE);
8828+	if (dma_mapping_error(dev_for_dma_op, outbuf_dma)) {
8829+		dev_err(dev, "unable to map outbuf: %p\n", outbuf);
8830+		retval = (-ENOMEM);
8831+		goto unmap_keymod;
8832+	}
8833+
8834+	/* Build the encapsulation job descriptor */
8835+	dsize = blob_encap_jobdesc(&encapdesc, keymod_dma, slotphys, outbuf_dma,
8836+				   keylen, keycolor, SM_SECMEM, keyauth);
8837+	if (!dsize) {
8838+		dev_err(dev, "can't alloc an encapsulation descriptor\n");
8839+		retval = -ENOMEM;
8840+		goto unmap_outbuf;
8841+	}
8842+
8843+	/* Run the job */
8844+	jstat = sm_key_job(dev, encapdesc);
8845+	if (jstat) {
8846+		retval = (-EIO);
8847+		goto free_desc;
8848+	}
8849+
8850+	/* Synchronize the data received */
8851+	dma_sync_single_for_cpu(dev_for_dma_op, outbuf_dma,
8852+			keylen + BLOB_OVERHEAD, DMA_FROM_DEVICE);
8853+
8854+free_desc:
8855+	kfree(encapdesc);
8856+
8857+unmap_outbuf:
8858+	dma_unmap_single(dev_for_dma_op, outbuf_dma, keylen + BLOB_OVERHEAD,
8859+			DMA_FROM_DEVICE);
8860+
8861+unmap_keymod:
8862+	dma_unmap_single(dev_for_dma_op, keymod_dma, SECMEM_KEYMOD_LEN,
8863+			DMA_TO_DEVICE);
8864+
8865+free_keymod:
8866+	kfree(lkeymod);
8867+
8868+exit:
8869+	return retval;
8870+}
8871+EXPORT_SYMBOL(sm_keystore_slot_export);
8872+
8873+/* Import a black/red key from a blob residing in external memory */
8874+int sm_keystore_slot_import(struct device *dev, u32 unit, u32 slot, u8 keycolor,
8875+			    u8 keyauth, u8 *inbuf, u16 keylen, u8 *keymod)
8876+{
8877+	struct caam_drv_private_sm *smpriv = dev_get_drvdata(dev);
8878+	int retval = 0;
8879+	u8 __iomem *slotaddr, *lkeymod;
8880+	u8 __iomem *slotphys;
8881+	dma_addr_t keymod_dma, inbuf_dma;
8882+	u32 dsize, jstat;
8883+	u32 __iomem *decapdesc = NULL;
8884+	struct device *dev_for_dma_op;
8885+
8886+	/* Use the ring as device for DMA operations */
8887+	dev_for_dma_op = smpriv->smringdev;
8888+
8889+	/* Get the base address(es) of the specified slot */
8890+	slotaddr = (u8 *)smpriv->slot_get_address(dev, unit, slot);
8891+	slotphys = smpriv->slot_get_physical(dev, unit, slot);
8892+
8893+	/* Allocate memory for key modifier compatible with DMA */
8894+	lkeymod = kmalloc(SECMEM_KEYMOD_LEN, GFP_KERNEL | GFP_DMA);
8895+	if (!lkeymod) {
8896+		retval = (-ENOMEM);
8897+		goto exit;
8898+	}
8899+
8900+	/* Get DMA address for the key modifier */
8901+	keymod_dma = dma_map_single(dev_for_dma_op, lkeymod,
8902+					SECMEM_KEYMOD_LEN, DMA_TO_DEVICE);
8903+	if (dma_mapping_error(dev_for_dma_op, keymod_dma)) {
8904+		dev_err(dev, "unable to map keymod: %p\n", lkeymod);
8905+		retval = (-ENOMEM);
8906+		goto free_keymod;
8907+	}
8908+
8909+	/* Copy the keymod and synchronize the DMA */
8910+	memcpy(lkeymod, keymod, SECMEM_KEYMOD_LEN);
8911+	dma_sync_single_for_device(dev_for_dma_op, keymod_dma,
8912+					SECMEM_KEYMOD_LEN, DMA_TO_DEVICE);
8913+
8914+	/* Get DMA address for the input */
8915+	inbuf_dma = dma_map_single(dev_for_dma_op, inbuf,
8916+					keylen + BLOB_OVERHEAD, DMA_TO_DEVICE);
8917+	if (dma_mapping_error(dev_for_dma_op, inbuf_dma)) {
8918+		dev_err(dev, "unable to map inbuf: %p\n", inbuf);
8919+		retval = (-ENOMEM);
8920+		goto unmap_keymod;
8921+	}
8922+
8923+	/* synchronize the DMA */
8924+	dma_sync_single_for_device(dev_for_dma_op, inbuf_dma,
8925+					keylen + BLOB_OVERHEAD, DMA_TO_DEVICE);
8926+
8927+	/* Build the decapsulation job descriptor */
8928+	dsize = blob_decap_jobdesc(&decapdesc, keymod_dma, inbuf_dma, slotphys,
8929+				   keylen, keycolor, SM_SECMEM, keyauth);
8930+	if (!dsize) {
8931+		dev_err(dev, "can't alloc a decapsulation descriptor\n");
8932+		retval = -ENOMEM;
8933+		goto unmap_inbuf;
8934+	}
8935+
8936+	/* Run the job */
8937+	jstat = sm_key_job(dev, decapdesc);
8938+
8939+	/*
8940+	 * May want to expand upon error meanings a bit. Any CAAM status
8941+	 * is reported as EIO, but we might want to look for something more
8942+	 * meaningful for something like an ICV error on restore, otherwise
8943+	 * the caller is left guessing.
8944+	 */
8945+	if (jstat) {
8946+		retval = (-EIO);
8947+		goto free_desc;
8948+	}
8949+
8950+free_desc:
8951+	kfree(decapdesc);
8952+
8953+unmap_inbuf:
8954+	dma_unmap_single(dev_for_dma_op, inbuf_dma, keylen + BLOB_OVERHEAD,
8955+			DMA_TO_DEVICE);
8956+
8957+unmap_keymod:
8958+	dma_unmap_single(dev_for_dma_op, keymod_dma, SECMEM_KEYMOD_LEN,
8959+			DMA_TO_DEVICE);
8960+
8961+free_keymod:
8962+	kfree(lkeymod);
8963+
8964+exit:
8965+	return retval;
8966+}
8967+EXPORT_SYMBOL(sm_keystore_slot_import);
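
Export and import are symmetric, and within a single power cycle a black key blobbed out and pulled back in should compare equal (modulo AES-block padding), which is exactly what sm_test.c verifies below. A hedged round-trip sketch reusing the BLOB_OVERHEAD sizing rule (slot, keylen, and keymod come from the caller's context):

    int err;
    u8 *blob = kzalloc(keylen + BLOB_OVERHEAD, GFP_KERNEL | GFP_DMA);

    if (!blob)
            return -ENOMEM;
    /* black key in the slot -> external blob */
    err = sm_keystore_slot_export(ksdev, unit, slot, BLACK_KEY,
                                  KEY_COVER_ECB, blob, keylen, keymod);
    if (!err)
            /* external blob -> black key back into the same slot */
            err = sm_keystore_slot_import(ksdev, unit, slot, BLACK_KEY,
                                          KEY_COVER_ECB, blob, keylen,
                                          keymod);
    kfree(blob);
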
8968+
8969+/*
8970+ * Initialization/shutdown subsystem
8971+ * Assumes statically-invoked startup/shutdown from the controller driver
8972+ * for the present time, to be reworked when a device tree becomes
8973+ * available. This code will not modularize in present form.
8974+ *
8975+ * Also, simply uses ring 0 for execution at present
8976+ */
8977+
8978+int caam_sm_startup(struct device *ctrldev)
8979+{
8980+	struct device *smdev;
8981+	struct caam_drv_private *ctrlpriv;
8982+	struct caam_drv_private_sm *smpriv;
8983+	struct caam_drv_private_jr *jrpriv;	/* need this for reg page */
8984+	struct platform_device *sm_pdev;
8985+	struct sm_page_descriptor *lpagedesc;
8986+	u32 page, pgstat, lpagect, detectedpage, smvid, smpart;
8987+	int ret = 0;
8988+	struct device_node *np;
8989+
8990+	ctrlpriv = dev_get_drvdata(ctrldev);
8991+
8992+	if (!ctrlpriv->sm_present)
8993+		return 0;
8994+
8995+	/*
8996+	 * Set up the private block for secure memory
8997+	 * Only one instance is possible
8998+	 */
8999+	smpriv = kzalloc(sizeof(struct caam_drv_private_sm), GFP_KERNEL);
9000+	if (smpriv == NULL) {
9001+		dev_err(ctrldev, "can't alloc private mem for secure memory\n");
9002+		ret = -ENOMEM;
9003+		goto exit;
9004+	}
9005+	smpriv->parentdev = ctrldev; /* copy of parent dev is handy */
9006+	spin_lock_init(&smpriv->kslock);
9007+
9008+	/* Create the dev */
9009+	np = of_find_compatible_node(NULL, NULL, "fsl,imx6q-caam-sm");
9010+	if (np)
9011+		of_node_clear_flag(np, OF_POPULATED);
9012+	sm_pdev = of_platform_device_create(np, "caam_sm", ctrldev);
9013+
9014+	if (sm_pdev == NULL) {
9015+		ret = -EINVAL;
9016+		goto free_smpriv;
9017+	}
9018+
9019+	/* Save a pointer to the platform device for Secure Memory */
9020+	smpriv->sm_pdev = sm_pdev;
9021+	smdev = &sm_pdev->dev;
9022+	dev_set_drvdata(smdev, smpriv);
9023+	ctrlpriv->smdev = smdev;
9024+
9025+	/* Set the Secure Memory Register Map Version */
9026+	smvid = rd_reg32(&ctrlpriv->jr[0]->perfmon.smvid);
9027+	smpart = rd_reg32(&ctrlpriv->jr[0]->perfmon.smpart);
9028+
9029+	if (smvid < SMVID_V2)
9030+		smpriv->sm_reg_offset = SM_V1_OFFSET;
9031+	else
9032+		smpriv->sm_reg_offset = SM_V2_OFFSET;
9033+
9034+	/*
9035+	 * Collect configuration limit data for reference
9036+	 * This batch comes from the partition data/vid registers in perfmon
9037+	 */
9038+	smpriv->max_pages = ((smpart & SMPART_MAX_NUMPG_MASK) >>
9039+			    SMPART_MAX_NUMPG_SHIFT) + 1;
9040+	smpriv->top_partition = ((smpart & SMPART_MAX_PNUM_MASK) >>
9041+				SMPART_MAX_PNUM_SHIFT) + 1;
9042+	smpriv->top_page =  ((smpart & SMPART_MAX_PG_MASK) >>
9043+				SMPART_MAX_PG_SHIFT) + 1;
9044+	smpriv->page_size = 1024 << ((smvid & SMVID_PG_SIZE_MASK) >>
9045+				SMVID_PG_SIZE_SHIFT);
9046+	smpriv->slot_size = 1 << CONFIG_CRYPTO_DEV_FSL_CAAM_SM_SLOTSIZE;
9047+
9048+#ifdef SM_DEBUG
9049+	dev_info(smdev, "max pages = %d, top partition = %d\n",
9050+			smpriv->max_pages, smpriv->top_partition);
9051+	dev_info(smdev, "top page = %d, page size = %d (total = %d)\n",
9052+			smpriv->top_page, smpriv->page_size,
9053+			smpriv->top_page * smpriv->page_size);
9054+	dev_info(smdev, "selected slot size = %d\n", smpriv->slot_size);
9055+#endif
9056+
9057+	/*
9058+	 * Now probe for partitions/pages to which we have access. Note that
9059+	 * these have likely been set up by a bootloader or platform
9060+	 * provisioning application, so we have to assume that we "inherit"
9061+	 * a configuration and work within the constraints of what it might be.
9062+	 *
9063+	 * Assume use of the zeroth ring in the present iteration (until
9064+	 * we can divorce the controller and ring drivers, and then assign
9065+	 * an SM instance to any ring instance).
9066+	 */
9067+	smpriv->smringdev = caam_jr_alloc();
9068+	if (!smpriv->smringdev) {
9069+		dev_err(smdev, "Device for job ring not created\n");
9070+		ret = -ENODEV;
9071+		goto unregister_smpdev;
9072+	}
9073+
9074+	jrpriv = dev_get_drvdata(smpriv->smringdev);
9075+	lpagect = 0;
9076+	pgstat = 0;
9077+	lpagedesc = kzalloc(sizeof(struct sm_page_descriptor)
9078+			    * smpriv->max_pages, GFP_KERNEL);
9079+	if (lpagedesc == NULL) {
9080+		ret = -ENOMEM;
9081+		goto free_smringdev;
9082+	}
9083+
9084+	for (page = 0; page < smpriv->max_pages; page++) {
9085+		u32 page_ownership;
9086+
9087+		if (sm_send_cmd(smpriv, jrpriv,
9088+				((page << SMC_PAGE_SHIFT) & SMC_PAGE_MASK) |
9089+				(SMC_CMD_PAGE_INQUIRY & SMC_CMD_MASK),
9090+				&pgstat)) {
9091+			ret = -EINVAL;
9092+			goto free_lpagedesc;
9093+		}
9094+
9095+		page_ownership = (pgstat & SMCS_PGWON_MASK) >> SMCS_PGOWN_SHIFT;
9096+		if ((page_ownership == SMCS_PGOWN_OWNED)
9097+			|| (page_ownership == SMCS_PGOWN_NOOWN)) {
9098+			/* page allocated */
9099+			lpagedesc[page].phys_pagenum =
9100+				(pgstat & SMCS_PAGE_MASK) >> SMCS_PAGE_SHIFT;
9101+			lpagedesc[page].own_part =
9102+				(pgstat & SMCS_PART_MASK) >> SMCS_PART_SHIFT;
9103+			lpagedesc[page].pg_base = (u8 *)ctrlpriv->sm_base +
9104+				(smpriv->page_size * page);
9105+			if (ctrlpriv->scu_en) {
9106+/* FIXME: get different addresses viewed by CPU and CAAM from
9107+ * platform property
9108+ */
9109+				lpagedesc[page].pg_phys = (u8 *)0x20800000 +
9110+					(smpriv->page_size * page);
9111+			} else {
9112+				lpagedesc[page].pg_phys =
9113+					(u8 *) ctrlpriv->sm_phy +
9114+					(smpriv->page_size * page);
9115+			}
9116+			lpagect++;
9117+#ifdef SM_DEBUG
9118+			dev_info(smdev,
9119+				"physical page %d, owning partition = %d\n",
9120+				lpagedesc[page].phys_pagenum,
9121+				lpagedesc[page].own_part);
9122+#endif
9123+		}
9124+	}
9125+
9126+	smpriv->pagedesc = kzalloc(sizeof(struct sm_page_descriptor) * lpagect,
9127+				   GFP_KERNEL);
9128+	if (smpriv->pagedesc == NULL) {
9129+		ret = -ENOMEM;
9130+		goto free_lpagedesc;
9131+	}
9132+	smpriv->localpages = lpagect;
9133+
9134+	detectedpage = 0;
9135+	for (page = 0; page < smpriv->max_pages; page++) {
9136+		if (lpagedesc[page].pg_base != NULL) {	/* e.g. live entry */
9137+			memcpy(&smpriv->pagedesc[detectedpage],
9138+			       &lpagedesc[page],
9139+			       sizeof(struct sm_page_descriptor));
9140+#ifdef SM_DEBUG_CONT
9141+			sm_show_page(smdev, &smpriv->pagedesc[detectedpage]);
9142+#endif
9143+			detectedpage++;
9144+		}
9145+	}
9146+
9147+	kfree(lpagedesc);
9148+
9149+	sm_init_keystore(smdev);
9150+
9151+	goto exit;
9152+
9153+free_lpagedesc:
9154+	kfree(lpagedesc);
9155+free_smringdev:
9156+	caam_jr_free(smpriv->smringdev);
9157+unregister_smpdev:
9158+	of_device_unregister(smpriv->sm_pdev);
9159+free_smpriv:
9160+	kfree(smpriv);
9161+
9162+exit:
9163+	return ret;
9164+}
9165+
9166+void caam_sm_shutdown(struct device *ctrldev)
9167+{
9168+	struct device *smdev;
9169+	struct caam_drv_private *priv;
9170+	struct caam_drv_private_sm *smpriv;
9171+
9172+	priv = dev_get_drvdata(ctrldev);
9173+	if (!priv->sm_present)
9174+		return;
9175+
9176+	smdev = priv->smdev;
9177+
9178+	/* Return if resource not initialized by startup */
9179+	if (smdev == NULL)
9180+		return;
9181+
9182+	smpriv = dev_get_drvdata(smdev);
9183+
9184+	caam_jr_free(smpriv->smringdev);
9185+
9186+	/* Remove Secure Memory Platform Device */
9187+	of_device_unregister(smpriv->sm_pdev);
9188+
9189+	kfree(smpriv->pagedesc);
9190+	kfree(smpriv);
9191+}
9192+EXPORT_SYMBOL(caam_sm_shutdown);
9193diff --git a/drivers/crypto/caam/sm_test.c b/drivers/crypto/caam/sm_test.c
9194new file mode 100644
9195index 000000000..2a84a91a0
9196--- /dev/null
9197+++ b/drivers/crypto/caam/sm_test.c
9198@@ -0,0 +1,586 @@
9199+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
9200+/*
9201+ * Secure Memory / Keystore Exemplification Module
9202+ *
9203+ * Copyright 2012-2015 Freescale Semiconductor, Inc.
9204+ * Copyright 2016-2019 NXP
9205+ *
9206+ * This module has been overloaded as an example to show:
9207+ * - Secure memory subsystem initialization/shutdown
9208+ * - Allocation/deallocation of "slots" in a secure memory page
9209+ * - Loading and unloading of key material into slots
9210+ * - Covering of secure memory objects into "black keys" (ECB only at present)
9211+ * - Verification of key covering (by differentiation only)
9212+ * - Exportation of keys into secure memory blobs (with display of result)
9213+ * - Importation of keys from secure memory blobs (with display of result)
9214+ * - Verification of re-imported keys where possible.
9215+ *
9216+ * The module does not show the use of key objects as working key register
9217+ * source material at this time.
9218+ *
9219+ * This module could use a substantial amount of refactoring, which may occur
9220+ * after the API gets some mileage. Furthermore, expect this module to
9221+ * eventually disappear once the API is integrated into "real" software.
9222+ */
9223+
9224+#include "compat.h"
9225+#include "regs.h"
9226+#include "intern.h"
9227+#include "desc.h"
9228+#include "error.h"
9229+#include "jr.h"
9230+#include "sm.h"
9231+
9232+/* Fixed known pattern for a key modifier */
9233+static u8 skeymod[] = {
9234+	0x0f, 0x0e, 0x0d, 0x0c, 0x0b, 0x0a, 0x09, 0x08,
9235+	0x07, 0x06, 0x05, 0x04, 0x03, 0x02, 0x01, 0x00
9236+};
9237+
9238+/* Fixed known pattern for a key */
9239+static u8 clrkey[] = {
9240+	0x00, 0x01, 0x02, 0x03, 0x04, 0x0f, 0x06, 0x07,
9241+	0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
9242+	0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
9243+	0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f,
9244+	0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27,
9245+	0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f,
9246+	0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37,
9247+	0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f,
9248+	0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47,
9249+	0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f,
9250+	0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57,
9251+	0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f,
9252+	0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67,
9253+	0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f,
9254+	0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77,
9255+	0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f,
9256+	0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
9257+	0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
9258+	0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
9259+	0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f,
9260+	0xa0, 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7,
9261+	0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf,
9262+	0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7,
9263+	0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0xbf,
9264+	0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7,
9265+	0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf,
9266+	0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7,
9267+	0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xdf,
9268+	0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7,
9269+	0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef,
9270+	0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7,
9271+	0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff
9272+};
9273+
9274+static void key_display(struct device *dev, const char *label, u16 size,
9275+			u8 *key)
9276+{
9277+	unsigned i;
9278+
9279+	dev_dbg(dev, "%s", label);
9280+	for (i = 0; i < size; i += 8)
9281+		dev_dbg(dev,
9282+			"[%04d] %02x %02x %02x %02x %02x %02x %02x %02x\n",
9283+			i, key[i], key[i + 1], key[i + 2], key[i + 3],
9284+			key[i + 4], key[i + 5], key[i + 6], key[i + 7]);
9285+}
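
The key buffers below are sized with AES_BLOCK_PAD() because, as the comments in this file note, keys covered in-place are padded out to the next AES block boundary. A hedged illustration of the resulting sizes (the macro itself lives in sm.h):

    /* 8-byte key  -> AES_BLOCK_PAD(8)  = 16-byte black key buffer
     * 24-byte key -> AES_BLOCK_PAD(24) = 32-byte black key buffer
     * 32-byte key -> AES_BLOCK_PAD(32) = 32 bytes, already aligned
     */
    u8 blkkey24[AES_BLOCK_PAD(24)];    /* 32 bytes */
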
9286+
9287+int caam_sm_example_init(struct platform_device *pdev)
9288+{
9289+	struct device *ctrldev, *ksdev;
9290+	struct caam_drv_private *ctrlpriv;
9291+	struct caam_drv_private_sm *kspriv;
9292+	u32 unit, units;
9293+	int rtnval;
9294+	u8 clrkey8[8], clrkey16[16], clrkey24[24], clrkey32[32];
9295+	u8 blkkey8[AES_BLOCK_PAD(8)], blkkey16[AES_BLOCK_PAD(16)];
9296+	u8 blkkey24[AES_BLOCK_PAD(24)], blkkey32[AES_BLOCK_PAD(32)];
9297+	u8 rstkey8[AES_BLOCK_PAD(8)], rstkey16[AES_BLOCK_PAD(16)];
9298+	u8 rstkey24[AES_BLOCK_PAD(24)], rstkey32[AES_BLOCK_PAD(32)];
9299+	u8 __iomem *blob8, *blob16, *blob24, *blob32;
9300+	u32 keyslot8, keyslot16, keyslot24, keyslot32 = 0;
9301+
9302+	blob8 = blob16 = blob24 = blob32 = NULL;
9303+
9304+	/*
9305+	 * 3.5.x and later revs for MX6 should be able to ditch this
9306+	 * and detect via dts property
9307+	 */
9308+	ctrldev = &pdev->dev;
9309+	ctrlpriv = dev_get_drvdata(ctrldev);
9310+
9311+	/*
9312+	 * If ctrlpriv is NULL, it's probably because the caam driver wasn't
9313+	 * properly initialized (e.g. RNG4 init failed). Thus, bail out here.
9314+	 */
9315+	if (!ctrlpriv)
9316+		return -ENODEV;
9317+
9318+	ksdev = ctrlpriv->smdev;
9319+	kspriv = dev_get_drvdata(ksdev);
9320+	if (kspriv == NULL)
9321+		return -ENODEV;
9322+
9323+	/* What keystores are available? */
9324+	units = sm_detect_keystore_units(ksdev);
9325+	if (!units)
9326+		dev_err(ksdev, "blkkey_ex: no keystore units available\n");
9327+
9328+	/*
9329+	 * MX6 bootloader stores some stuff in unit 0, so let's
9330+	 * use 1 or above
9331+	 */
9332+	if (units < 2) {
9333+		dev_err(ksdev, "blkkey_ex: insufficient keystore units\n");
9334+		return -ENODEV;
9335+	}
9336+	unit = 1;
9337+
9338+	dev_info(ksdev, "blkkey_ex: %d keystore units available\n", units);
9339+
9340+	/* Initialize/Establish Keystore */
9341+	sm_establish_keystore(ksdev, unit);	/* Initialize store in #1 */
9342+
9343+	/*
9344+	 * Now let's set up buffers for blobs in DMA-able memory. All are
9345+	 * larger than they need to be so that the blob size can be seen.
9346+	 */
9347+	blob8 = kzalloc(128, GFP_KERNEL | GFP_DMA);
9348+	blob16 = kzalloc(128, GFP_KERNEL | GFP_DMA);
9349+	blob24 = kzalloc(128, GFP_KERNEL | GFP_DMA);
9350+	blob32 = kzalloc(128, GFP_KERNEL | GFP_DMA);
9351+
9352+	if ((blob8 == NULL) || (blob16 == NULL) || (blob24 == NULL) ||
9353+	    (blob32 == NULL)) {
9354+		rtnval = -ENOMEM;
9355+		dev_err(ksdev, "blkkey_ex: can't get blob buffers\n");
9356+		goto freemem;
9357+	}
9358+
9359+	/* Initialize clear keys with a known and recognizable pattern */
9360+	memcpy(clrkey8, clrkey, 8);
9361+	memcpy(clrkey16, clrkey, 16);
9362+	memcpy(clrkey24, clrkey, 24);
9363+	memcpy(clrkey32, clrkey, 32);
9364+
9365+	memset(blkkey8, 0, AES_BLOCK_PAD(8));
9366+	memset(blkkey16, 0, AES_BLOCK_PAD(16));
9367+	memset(blkkey24, 0, AES_BLOCK_PAD(24));
9368+	memset(blkkey32, 0, AES_BLOCK_PAD(32));
9369+
9370+	memset(rstkey8, 0, AES_BLOCK_PAD(8));
9371+	memset(rstkey16, 0, AES_BLOCK_PAD(16));
9372+	memset(rstkey24, 0, AES_BLOCK_PAD(24));
9373+	memset(rstkey32, 0, AES_BLOCK_PAD(32));
9374+
9375+	/*
9376+	 * Allocate keyslots. Since we're going to blacken keys in-place,
9377+	 * we want slots big enough to pad out to the next larger AES blocksize
9378+	 * so pad them out.
9379+	 */
9380+	rtnval = sm_keystore_slot_alloc(ksdev, unit, AES_BLOCK_PAD(8),
9381+					&keyslot8);
9382+	if (rtnval)
9383+		goto freemem;
9384+
9385+	rtnval = sm_keystore_slot_alloc(ksdev, unit, AES_BLOCK_PAD(16),
9386+					&keyslot16);
9387+	if (rtnval)
9388+		goto dealloc_slot8;
9389+
9390+	rtnval = sm_keystore_slot_alloc(ksdev, unit, AES_BLOCK_PAD(24),
9391+					&keyslot24);
9392+	if (rtnval)
9393+		goto dealloc_slot16;
9394+
9395+	rtnval = sm_keystore_slot_alloc(ksdev, unit, AES_BLOCK_PAD(32),
9396+					&keyslot32);
9397+	if (rtnval)
9398+		goto dealloc_slot24;
9399+
9400+
9401+	/* Now load clear key data into the newly allocated slots */
9402+	rtnval = sm_keystore_slot_load(ksdev, unit, keyslot8, clrkey8, 8);
9403+	if (rtnval)
9404+		goto dealloc;
9405+
9406+	rtnval = sm_keystore_slot_load(ksdev, unit, keyslot16, clrkey16, 16);
9407+	if (rtnval)
9408+		goto dealloc;
9409+
9410+	rtnval = sm_keystore_slot_load(ksdev, unit, keyslot24, clrkey24, 24);
9411+	if (rtnval)
9412+		goto dealloc;
9413+
9414+	rtnval = sm_keystore_slot_load(ksdev, unit, keyslot32, clrkey32, 32);
9415+	if (rtnval)
9416+		goto dealloc;
9417+
9418+	/*
9419+	 * All cleartext keys are loaded into slots (in an unprotected
9420+	 * partition at this time)
9421+	 *
9422+	 * Cover keys in-place
9423+	 */
9424+	rtnval = sm_keystore_cover_key(ksdev, unit, keyslot8, 8, KEY_COVER_ECB);
9425+	if (rtnval) {
9426+		dev_err(ksdev, "blkkey_ex: can't cover 64-bit key\n");
9427+		goto dealloc;
9428+	}
9429+
9430+	rtnval = sm_keystore_cover_key(ksdev, unit, keyslot16, 16,
9431+				       KEY_COVER_ECB);
9432+	if (rtnval) {
9433+		dev_err(ksdev, "blkkey_ex: can't cover 128-bit key\n");
9434+		goto dealloc;
9435+	}
9436+
9437+	rtnval = sm_keystore_cover_key(ksdev, unit, keyslot24, 24,
9438+				       KEY_COVER_ECB);
9439+	if (rtnval) {
9440+		dev_err(ksdev, "blkkey_ex: can't cover 192-bit key\n");
9441+		goto dealloc;
9442+	}
9443+
9444+	rtnval = sm_keystore_cover_key(ksdev, unit, keyslot32, 32,
9445+				       KEY_COVER_ECB);
9446+	if (rtnval) {
9447+		dev_err(ksdev, "blkkey_ex: can't cover 256-bit key\n");
9448+		goto dealloc;
9449+	}
9450+
9451+	/*
9452+	 * Keys should be covered and appear sufficiently "random"
9453+	 * as a result of the covering (blackening) process. Assuming
9454+	 * non-secure mode, read them back out for examination; they should
9455+	 * appear as random data, completely differing from the clear
9456+	 * inputs. So, this will read them back from secure memory and
9457+	 * compare them. If they match the clear key, then the covering
9458+	 * operation didn't occur.
9459+	 */
9460+
9461+	rtnval = sm_keystore_slot_read(ksdev, unit, keyslot8, AES_BLOCK_PAD(8),
9462+				       blkkey8);
9463+	if (rtnval) {
9464+		dev_err(ksdev, "blkkey_ex: can't read 64-bit black key\n");
9465+		goto dealloc;
9466+	}
9467+
9468+	rtnval = sm_keystore_slot_read(ksdev, unit, keyslot16,
9469+				       AES_BLOCK_PAD(16), blkkey16);
9470+	if (rtnval) {
9471+		dev_err(ksdev, "blkkey_ex: can't read 128-bit black key\n");
9472+		goto dealloc;
9473+	}
9474+
9475+	rtnval = sm_keystore_slot_read(ksdev, unit, keyslot24,
9476+				       AES_BLOCK_PAD(24), blkkey24);
9477+	if (rtnval) {
9478+		dev_err(ksdev, "blkkey_ex: can't read 192-bit black key\n");
9479+		goto dealloc;
9480+	}
9481+
9482+	rtnval = sm_keystore_slot_read(ksdev, unit, keyslot32,
9483+				       AES_BLOCK_PAD(32), blkkey32);
9484+	if (rtnval) {
9485+		dev_err(ksdev, "blkkey_ex: can't read 256-bit black key\n");
9486+		goto dealloc;
9487+	}
9488+
9489+	rtnval = -EINVAL;
9490+	if (!memcmp(blkkey8, clrkey8, 8)) {
9491+		dev_err(ksdev, "blkkey_ex: 64-bit key cover failed\n");
9492+		goto dealloc;
9493+	}
9494+
9495+	if (!memcmp(blkkey16, clrkey16, 16)) {
9496+		dev_err(ksdev, "blkkey_ex: 128-bit key cover failed\n");
9497+		goto dealloc;
9498+	}
9499+
9500+	if (!memcmp(blkkey24, clrkey24, 24)) {
9501+		dev_err(ksdev, "blkkey_ex: 192-bit key cover failed\n");
9502+		goto dealloc;
9503+	}
9504+
9505+	if (!memcmp(blkkey32, clrkey32, 32)) {
9506+		dev_err(ksdev, "blkkey_ex: 256-bit key cover failed\n");
9507+		goto dealloc;
9508+	}
9509+
9510+
9511+	key_display(ksdev, "64-bit clear key:", 8, clrkey8);
9512+	key_display(ksdev, "64-bit black key:", AES_BLOCK_PAD(8), blkkey8);
9513+
9514+	key_display(ksdev, "128-bit clear key:", 16, clrkey16);
9515+	key_display(ksdev, "128-bit black key:", AES_BLOCK_PAD(16), blkkey16);
9516+
9517+	key_display(ksdev, "192-bit clear key:", 24, clrkey24);
9518+	key_display(ksdev, "192-bit black key:", AES_BLOCK_PAD(24), blkkey24);
9519+
9520+	key_display(ksdev, "256-bit clear key:", 32, clrkey32);
9521+	key_display(ksdev, "256-bit black key:", AES_BLOCK_PAD(32), blkkey32);
9522+
9523+	/*
9524+	 * Now encapsulate all keys as SM blobs out to external memory
9525+	 * Blobs will appear as random-looking blocks of data different
9526+	 * from the original source key, and 48 bytes longer than the
9527+	 * original key, to account for the extra data encapsulated within.
9528+	 */
9529+	key_display(ksdev, "64-bit unwritten blob:", 96, blob8);
9530+	key_display(ksdev, "128-bit unwritten blob:", 96, blob16);
9531+	key_display(ksdev, "192-bit unwritten blob:", 96, blob24);
9532+	key_display(ksdev, "256-bit unwritten blob:", 96, blob32);
9533+
9534+	rtnval = sm_keystore_slot_export(ksdev, unit, keyslot8, BLACK_KEY,
9535+					 KEY_COVER_ECB, blob8, 8, skeymod);
9536+	if (rtnval) {
9537+		dev_err(ksdev, "blkkey_ex: can't encapsulate 64-bit key\n");
9538+		goto dealloc;
9539+	}
9540+
9541+	rtnval = sm_keystore_slot_export(ksdev, unit, keyslot16, BLACK_KEY,
9542+					 KEY_COVER_ECB, blob16, 16, skeymod);
9543+	if (rtnval) {
9544+		dev_err(ksdev, "blkkey_ex: can't encapsulate 128-bit key\n");
9545+		goto dealloc;
9546+	}
9547+
9548+	rtnval = sm_keystore_slot_export(ksdev, unit, keyslot24, BLACK_KEY,
9549+					 KEY_COVER_ECB, blob24, 24, skeymod);
9550+	if (rtnval) {
9551+		dev_err(ksdev, "blkkey_ex: can't encapsulate 192-bit key\n");
9552+		goto dealloc;
9553+	}
9554+
9555+	rtnval = sm_keystore_slot_export(ksdev, unit, keyslot32, BLACK_KEY,
9556+					 KEY_COVER_ECB, blob32, 32, skeymod);
9557+	if (rtnval) {
9558+		dev_err(ksdev, "blkkey_ex: can't encapsulate 256-bit key\n");
9559+		goto dealloc;
9560+	}
9561+
9562+	key_display(ksdev, "64-bit black key in blob:", 96, blob8);
9563+	key_display(ksdev, "128-bit black key in blob:", 96, blob16);
9564+	key_display(ksdev, "192-bit black key in blob:", 96, blob24);
9565+	key_display(ksdev, "256-bit black key in blob:", 96, blob32);
9566+
9567+	/*
9568+	 * Now re-import black keys from secure-memory blobs stored
9569+	 * in general memory from the previous operation. Since we are
9570+	 * working with black keys, and since power has not cycled, the
9571+	 * restored black keys should match the original blackened keys
9572+	 * (this would not be true if the blobs were saved in some non-volatile
9573+	 * store, and power was cycled between the save and restore)
9574+	 */
9575+	rtnval = sm_keystore_slot_import(ksdev, unit, keyslot8, BLACK_KEY,
9576+					 KEY_COVER_ECB, blob8, 8, skeymod);
9577+	if (rtnval) {
9578+		dev_err(ksdev, "blkkey_ex: can't decapsulate 64-bit blob\n");
9579+		goto dealloc;
9580+	}
9581+
9582+	rtnval = sm_keystore_slot_import(ksdev, unit, keyslot16, BLACK_KEY,
9583+					 KEY_COVER_ECB, blob16, 16, skeymod);
9584+	if (rtnval) {
9585+		dev_err(ksdev, "blkkey_ex: can't decapsulate 128-bit blob\n");
9586+		goto dealloc;
9587+	}
9588+
9589+	rtnval = sm_keystore_slot_import(ksdev, unit, keyslot24, BLACK_KEY,
9590+					 KEY_COVER_ECB, blob24, 24, skeymod);
9591+	if (rtnval) {
9592+		dev_err(ksdev, "blkkey_ex: can't decapsulate 192-bit blob\n");
9593+		goto dealloc;
9594+	}
9595+
9596+	rtnval = sm_keystore_slot_import(ksdev, unit, keyslot32, BLACK_KEY,
9597+					 KEY_COVER_ECB, blob32, 32, skeymod);
9598+	if (rtnval) {
9599+		dev_err(ksdev, "blkkey_ex: can't decapsulate 256-bit blob\n");
9600+		goto dealloc;
9601+	}
9602+
9603+
9604+	/*
9605+	 * Blobs are now restored as black keys. Read those black keys back
9606+	 * for a comparison with the original black key, they should match
9607+	 */
9608+	rtnval = sm_keystore_slot_read(ksdev, unit, keyslot8, AES_BLOCK_PAD(8),
9609+				       rstkey8);
9610+	if (rtnval) {
9611+		dev_err(ksdev,
9612+			"blkkey_ex: can't read restored 64-bit black key\n");
9613+		goto dealloc;
9614+	}
9615+
9616+	rtnval = sm_keystore_slot_read(ksdev, unit, keyslot16,
9617+				       AES_BLOCK_PAD(16), rstkey16);
9618+	if (rtnval) {
9619+		dev_err(ksdev,
9620+			"blkkey_ex: can't read restored 128-bit black key\n");
9621+		goto dealloc;
9622+	}
9623+
9624+	rtnval = sm_keystore_slot_read(ksdev, unit, keyslot24,
9625+				       AES_BLOCK_PAD(24), rstkey24);
9626+	if (rtnval) {
9627+		dev_err(ksdev,
9628+			"blkkey_ex: can't read restored 192-bit black key\n");
9629+		goto dealloc;
9630+	}
9631+
9632+	rtnval = sm_keystore_slot_read(ksdev, unit, keyslot32,
9633+				       AES_BLOCK_PAD(32), rstkey32);
9634+	if (rtnval) {
9635+		dev_err(ksdev,
9636+			"blkkey_ex: can't read restored 256-bit black key\n");
9637+		goto dealloc;
9638+	}
9639+
9640+	key_display(ksdev, "restored 64-bit black key:", AES_BLOCK_PAD(8),
9641+		    rstkey8);
9642+	key_display(ksdev, "restored 128-bit black key:", AES_BLOCK_PAD(16),
9643+		    rstkey16);
9644+	key_display(ksdev, "restored 192-bit black key:", AES_BLOCK_PAD(24),
9645+		    rstkey24);
9646+	key_display(ksdev, "restored 256-bit black key:", AES_BLOCK_PAD(32),
9647+		    rstkey32);
9648+
9649+	/*
9650+	 * Compare the restored black keys with the original blackened keys.
9651+	 * As long as we're operating within the same power cycle, a black
9652+	 * key restored from a blob should match the original black key IF
9653+	 * the key size is a multiple of the AES block size. A padded key
9654+	 * will not match in full: the padding is not preserved, so only
9655+	 * the leading full blocks match (assuming ECB covering).
9656+	 *
9657+	 * Therefore, compare the 16- and 32-byte (128/256-bit) keys in full;
9658+	 * they should match. The 24-byte (192-bit) key, padded to 32 bytes,
9659+	 * can only match within its first 16-byte block.
9660+	 */
9661+
9662+	if (memcmp(rstkey16, blkkey16, AES_BLOCK_PAD(16))) {
9663+		dev_err(ksdev, "blkkey_ex: 128-bit restored key mismatch\n");
9664+		rtnval = -EINVAL;
9665+	}
9666+
9667+	/* Only first AES block will match, remainder subject to padding */
9668+	if (memcmp(rstkey24, blkkey24, 16)) {
9669+		dev_err(ksdev, "blkkey_ex: 192-bit restored key mismatch\n");
9670+		rtnval = -EINVAL;
9671+	}
9672+
9673+	if (memcmp(rstkey32, blkkey32, AES_BLOCK_PAD(32))) {
9674+		dev_err(ksdev, "blkkey_ex: 256-bit restored key mismatch\n");
9675+		rtnval = -EINVAL;
9676+	}
9677+
9679+	/* Remove keys from keystore */
9680+dealloc:
9681+	sm_keystore_slot_dealloc(ksdev, unit, keyslot32);
9682+dealloc_slot24:
9683+	sm_keystore_slot_dealloc(ksdev, unit, keyslot24);
9684+dealloc_slot16:
9685+	sm_keystore_slot_dealloc(ksdev, unit, keyslot16);
9686+dealloc_slot8:
9687+	sm_keystore_slot_dealloc(ksdev, unit, keyslot8);
9688+
9689+	/* Free resources */
9690+freemem:
9691+	kfree(blob8);
9692+	kfree(blob16);
9693+	kfree(blob24);
9694+	kfree(blob32);
9695+
9696+	/* Disconnect from keystore and leave */
9697+	sm_release_keystore(ksdev, unit);
9698+
9699+	return rtnval;
9700+}
9701+EXPORT_SYMBOL(caam_sm_example_init);
9702+
9703+void caam_sm_example_shutdown(void)
9704+{
9705+	/* No teardown needed in the present version; just locate the device */
9706+	struct device_node *dev_node;
9707+	struct platform_device *pdev;
9708+
9709+	/*
9710+	 * Do of_find_compatible_node() then of_find_device_by_node()
9711+	 * once a functional device tree is available
9712+	 */
9713+	dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
9714+	if (!dev_node) {
9715+		dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
9716+		if (!dev_node)
9717+			return;
9718+	}
9719+
9720+	pdev = of_find_device_by_node(dev_node);
9721+	if (!pdev)
9722+		return;
9723+
9724+	of_node_put(dev_node);
9725+
9726+}
9727+
9728+static int __init caam_sm_test_init(void)
9729+{
9730+	struct device_node *dev_node;
9731+	struct platform_device *pdev;
9732+	struct caam_drv_private *priv;
9733+	int ret;
9734+
9735+	/*
9736+	 * Do of_find_compatible_node() then of_find_device_by_node()
9737+	 * once a functional device tree is available
9738+	 */
9739+	dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
9740+	if (!dev_node) {
9741+		dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
9742+		if (!dev_node)
9743+			return -ENODEV;
9744+	}
9745+
9746+	pdev = of_find_device_by_node(dev_node);
9747+	if (!pdev)
9748+		return -ENODEV;
9749+
9750+	of_node_put(dev_node);
9751+
9752+	priv = dev_get_drvdata(&pdev->dev);
9753+	if (!priv) {
9754+		dev_info(&pdev->dev, "SM driver not ready, aborting tests\n");
9755+		return -ENODEV;
9756+	}
9757+	if (!priv->sm_present) {
9758+		dev_info(&pdev->dev, "No SM support, skipping tests\n");
9759+		return -ENODEV;
9760+	}
9761+	if (!priv->smdev) {
9762+		dev_info(&pdev->dev, "SM not initialized (no job rings?), skipping tests\n");
9763+		return -ENODEV;
9764+	}
9765+
9766+	ret = caam_sm_example_init(pdev);
9767+	if (ret)
9768+		dev_err(&pdev->dev, "SM test failed: %d\n", ret);
9769+	else
9770+		dev_info(&pdev->dev, "SM test passed\n");
9771+
9772+	return ret;
9773+}
9774+
9776+/* Module-based initialization needs to wait for dev tree */
9777+#ifdef CONFIG_OF
9778+module_init(caam_sm_test_init);
9779+module_exit(caam_sm_example_shutdown);
9780+
9781+MODULE_LICENSE("Dual BSD/GPL");
9782+MODULE_DESCRIPTION("FSL CAAM Black Key Usage Example");
9783+MODULE_AUTHOR("Freescale Semiconductor - NMSG/MAD");
9784+#endif
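
The test above leans on two size relationships: a black key occupies AES_BLOCK_PAD() bytes in its slot, and a blob adds a fixed overhead on top of the key material. Below is a minimal, self-contained sketch of that arithmetic; AES_BLOCK_PAD() is assumed to round up to a 16-byte AES block multiple, and the 48-byte blob overhead (random blob key plus MAC) is an assumption for illustration, not a definition taken from this patch.

#include <stdio.h>

#define AES_BLOCK_SIZE	16
/* assumed definition: round len up to a multiple of the AES block size */
#define AES_BLOCK_PAD(len) \
	(((len) + AES_BLOCK_SIZE - 1) & ~(AES_BLOCK_SIZE - 1))
#define BLOB_OVERHEAD	48	/* assumed: blob key + MAC */

int main(void)
{
	int sizes[] = { 8, 16, 24, 32 };
	unsigned int i;

	for (i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++)
		printf("%2d-byte key -> padded black key %2d bytes, blob <= %2d bytes\n",
		       sizes[i], (int)AES_BLOCK_PAD(sizes[i]),
		       (int)AES_BLOCK_PAD(sizes[i]) + BLOB_OVERHEAD);

	/* Worst case is 32 + 48 = 80 bytes, inside the 96-byte buffers. */
	return 0;
}
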
9785diff --git a/drivers/crypto/caam/snvsregs.h b/drivers/crypto/caam/snvsregs.h
9786new file mode 100644
9787index 000000000..4a7e76933
9788--- /dev/null
9789+++ b/drivers/crypto/caam/snvsregs.h
9790@@ -0,0 +1,239 @@
9791+/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
9792+/*
9793+ * SNVS hardware register-level view
9794+ *
9795+ * Copyright 2012-2015 Freescale Semiconductor, Inc.
9796+ * Copyright 2016-2019 NXP
9797+ */
9798+
9799+#ifndef SNVSREGS_H
9800+#define SNVSREGS_H
9801+
9802+#include <linux/types.h>
9803+#include <linux/io.h>
9804+
9805+/*
9806+ * SNVS High Power Domain
9807+ * Includes security violations, HA counter, RTC, alarm
9808+ */
9809+struct snvs_hp {
9810+	u32 lock;		/* HPLR - HP Lock */
9811+	u32 cmd;		/* HPCOMR - HP Command */
9812+	u32 ctl;		/* HPCR - HP Control */
9813+	u32 secvio_intcfg;	/* HPSICR - Security Violation Int Config */
9814+	u32 secvio_ctl;		/* HPSVCR - Security Violation Control */
9815+	u32 status;		/* HPSR - HP Status */
9816+	u32 secvio_status;	/* HPSVSR - Security Violation Status */
9817+	u32 ha_counteriv;	/* High Assurance Counter IV */
9818+	u32 ha_counter;		/* High Assurance Counter */
9819+	u32 rtc_msb;		/* Real Time Clock/Counter MSB */
9820+	u32 rtc_lsb;		/* Real Time Counter LSB */
9821+	u32 time_alarm_msb;	/* Time Alarm MSB */
9822+	u32 time_alarm_lsb;	/* Time Alarm LSB */
9823+};
9824+
9825+#define HP_LOCK_HAC_LCK		0x00040000
9826+#define HP_LOCK_HPSICR_LCK	0x00020000
9827+#define HP_LOCK_HPSVCR_LCK	0x00010000
9828+#define HP_LOCK_MKEYSEL_LCK	0x00000200
9829+#define HP_LOCK_TAMPCFG_LCK	0x00000100
9830+#define HP_LOCK_TAMPFLT_LCK	0x00000080
9831+#define HP_LOCK_SECVIO_LCK	0x00000040
9832+#define HP_LOCK_GENP_LCK	0x00000020
9833+#define HP_LOCK_MONOCTR_LCK	0x00000010
9834+#define HP_LOCK_CALIB_LCK	0x00000008
9835+#define HP_LOCK_SRTC_LCK	0x00000004
9836+#define HP_LOCK_ZMK_RD_LCK	0x00000002
9837+#define HP_LOCK_ZMK_WT_LCK	0x00000001
9838+
9839+#define HP_CMD_NONPRIV_AXS	0x80000000
9840+#define HP_CMD_HAC_STOP		0x00080000
9841+#define HP_CMD_HAC_CLEAR	0x00040000
9842+#define HP_CMD_HAC_LOAD		0x00020000
9843+#define HP_CMD_HAC_CFG_EN	0x00010000
9844+#define HP_CMD_SNVS_MSTR_KEY	0x00002000
9845+#define HP_CMD_PROG_ZMK		0x00001000
9846+#define HP_CMD_SW_LPSV		0x00000400
9847+#define HP_CMD_SW_FSV		0x00000200
9848+#define HP_CMD_SW_SV		0x00000100
9849+#define HP_CMD_LP_SWR_DIS	0x00000020
9850+#define HP_CMD_LP_SWR		0x00000010
9851+#define HP_CMD_SSM_SFNS_DIS	0x00000004
9852+#define HP_CMD_SSM_ST_DIS	0x00000002
9853+#define HP_CMD_SMM_ST		0x00000001
9854+
9855+#define HP_CTL_TIME_SYNC	0x00010000
9856+#define HP_CTL_CAL_VAL_SHIFT	10
9857+#define HP_CTL_CAL_VAL_MASK	(0x1f << HP_CTL_CAL_VAL_SHIFT)
9858+#define HP_CTL_CALIB_EN		0x00000100
9859+#define HP_CTL_PI_FREQ_SHIFT	4
9860+#define HP_CTL_PI_FREQ_MASK	(0xf << HP_CTL_PI_FREQ_SHIFT)
9861+#define HP_CTL_PI_EN		0x00000008
9862+#define HP_CTL_TIMEALARM_EN	0x00000002
9863+#define HP_CTL_RTC_EN		0x00000001
9864+
9865+#define HP_SECVIO_INTEN_EN	0x10000000
9866+#define HP_SECVIO_INTEN_SRC5	0x00000020
9867+#define HP_SECVIO_INTEN_SRC4	0x00000010
9868+#define HP_SECVIO_INTEN_SRC3	0x00000008
9869+#define HP_SECVIO_INTEN_SRC2	0x00000004
9870+#define HP_SECVIO_INTEN_SRC1	0x00000002
9871+#define HP_SECVIO_INTEN_SRC0	0x00000001
9872+#define HP_SECVIO_INTEN_ALL	0x8000003f
9873+
9874+#define HP_SECVIO_ICTL_CFG_SHIFT	30
9875+#define HP_SECVIO_ICTL_CFG_MASK		(0x3 << HP_SECVIO_ICTL_CFG_SHIFT)
9876+#define HP_SECVIO_ICTL_CFG5_SHIFT	5
9877+#define HP_SECVIO_ICTL_CFG5_MASK	(0x3 << HP_SECVIO_ICTL_CFG5_SHIFT)
9878+#define HP_SECVIO_ICTL_CFG_DISABLE	0
9879+#define HP_SECVIO_ICTL_CFG_NONFATAL	1
9880+#define HP_SECVIO_ICTL_CFG_FATAL	2
9881+#define HP_SECVIO_ICTL_CFG4_FATAL	0x00000010
9882+#define HP_SECVIO_ICTL_CFG3_FATAL	0x00000008
9883+#define HP_SECVIO_ICTL_CFG2_FATAL	0x00000004
9884+#define HP_SECVIO_ICTL_CFG1_FATAL	0x00000002
9885+#define HP_SECVIO_ICTL_CFG0_FATAL	0x00000001
9886+
9887+#define HP_STATUS_ZMK_ZERO		0x80000000
9888+#define HP_STATUS_OTPMK_ZERO		0x08000000
9889+#define HP_STATUS_OTPMK_SYN_SHIFT	16
9890+#define HP_STATUS_OTPMK_SYN_MASK	(0x1ff << HP_STATUS_OTPMK_SYN_SHIFT)
9891+#define HP_STATUS_SSM_ST_SHIFT		8
9892+#define HP_STATUS_SSM_ST_MASK		(0xf << HP_STATUS_SSM_ST_SHIFT)
9893+#define HP_STATUS_SSM_ST_INIT		0
9894+#define HP_STATUS_SSM_ST_HARDFAIL	1
9895+#define HP_STATUS_SSM_ST_SOFTFAIL	3
9896+#define HP_STATUS_SSM_ST_INITINT	8
9897+#define HP_STATUS_SSM_ST_CHECK		9
9898+#define HP_STATUS_SSM_ST_NONSECURE	11
9899+#define HP_STATUS_SSM_ST_TRUSTED	13
9900+#define HP_STATUS_SSM_ST_SECURE		15
9901+
9902+#define HP_SECVIOST_ZMK_ECC_FAIL	0x08000000	/* write to clear */
9903+#define HP_SECVIOST_ZMK_SYN_SHIFT	16
9904+#define HP_SECVIOST_ZMK_SYN_MASK	(0x1ff << HP_SECVIOST_ZMK_SYN_SHIFT)
9905+#define HP_SECVIOST_SECVIO5		0x00000020
9906+#define HP_SECVIOST_SECVIO4		0x00000010
9907+#define HP_SECVIOST_SECVIO3		0x00000008
9908+#define HP_SECVIOST_SECVIO2		0x00000004
9909+#define HP_SECVIOST_SECVIO1		0x00000002
9910+#define HP_SECVIOST_SECVIO0		0x00000001
9911+#define HP_SECVIOST_SECVIOMASK		0x0000003f
9912+
9913+/*
9914+ * SNVS Low Power Domain
9915+ * Includes glitch detector, SRTC, alarm, monotonic counter, ZMK
9916+ */
9917+struct snvs_lp {
9918+	u32 lock;
9919+	u32 ctl;
9920+	u32 mstr_key_ctl;	/* Master Key Control */
9921+	u32 secvio_ctl;		/* Security Violation Control */
9922+	u32 tamper_filt_cfg;	/* Tamper Glitch Filters Configuration */
9923+	u32 tamper_det_cfg;	/* Tamper Detectors Configuration */
9924+	u32 status;
9925+	u32 srtc_msb;		/* Secure Real Time Clock/Counter MSB */
9926+	u32 srtc_lsb;		/* Secure Real Time Clock/Counter LSB */
9927+	u32 time_alarm;		/* Time Alarm */
9928+	u32 smc_msb;		/* Secure Monotonic Counter MSB */
9929+	u32 smc_lsb;		/* Secure Monotonic Counter LSB */
9930+	u32 pwr_glitch_det;	/* Power Glitch Detector */
9931+	u32 gen_purpose;
9932+	u32 zmk[8];		/* Zeroizable Master Key */
9933+};
9934+
9935+#define LP_LOCK_MKEYSEL_LCK	0x00000200
9936+#define LP_LOCK_TAMPDET_LCK	0x00000100
9937+#define LP_LOCK_TAMPFLT_LCK	0x00000080
9938+#define LP_LOCK_SECVIO_LCK	0x00000040
9939+#define LP_LOCK_GENP_LCK	0x00000020
9940+#define LP_LOCK_MONOCTR_LCK	0x00000010
9941+#define LP_LOCK_CALIB_LCK	0x00000008
9942+#define LP_LOCK_SRTC_LCK	0x00000004
9943+#define LP_LOCK_ZMK_RD_LCK	0x00000002
9944+#define LP_LOCK_ZMK_WT_LCK	0x00000001
9945+
9946+#define LP_CTL_CAL_VAL_SHIFT	10
9947+#define LP_CTL_CAL_VAL_MASK	(0x1f << LP_CTL_CAL_VAL_SHIFT)
9948+#define LP_CTL_CALIB_EN		0x00000100
9949+#define LP_CTL_SRTC_INVAL_EN	0x00000010
9950+#define LP_CTL_WAKE_INT_EN	0x00000008
9951+#define LP_CTL_MONOCTR_EN	0x00000004
9952+#define LP_CTL_TIMEALARM_EN	0x00000002
9953+#define LP_CTL_SRTC_EN		0x00000001
9954+
9955+#define LP_MKEYCTL_ZMKECC_SHIFT	8
9956+#define LP_MKEYCTL_ZMKECC_MASK	(0xff << LP_MKEYCTL_ZMKECC_SHIFT)
9957+#define LP_MKEYCTL_ZMKECC_EN	0x00000010
9958+#define LP_MKEYCTL_ZMKECC_VAL	0x00000008
9959+#define LP_MKEYCTL_ZMKECC_PROG	0x00000004
9960+#define LP_MKEYCTL_MKSEL_SHIFT	0
9961+#define LP_MKEYCTL_MKSEL_MASK	(3 << LP_MKEYCTL_MKSEL_SHIFT)
9962+#define LP_MKEYCTL_MK_OTP	0
9963+#define LP_MKEYCTL_MK_ZMK	2
9964+#define LP_MKEYCTL_MK_COMB	3
9965+
9966+#define LP_SECVIO_CTL_SRC5	0x20
9967+#define LP_SECVIO_CTL_SRC4	0x10
9968+#define LP_SECVIO_CTL_SRC3	0x08
9969+#define LP_SECVIO_CTL_SRC2	0x04
9970+#define LP_SECVIO_CTL_SRC1	0x02
9971+#define LP_SECVIO_CTL_SRC0	0x01
9972+
9973+#define LP_TAMPFILT_EXT2_EN	0x80000000
9974+#define LP_TAMPFILT_EXT2_SHIFT	24
9975+#define LP_TAMPFILT_EXT2_MASK	(0x1f << LP_TAMPFILT_EXT2_SHIFT)
9976+#define LP_TAMPFILT_EXT1_EN	0x00800000
9977+#define LP_TAMPFILT_EXT1_SHIFT	16
9978+#define LP_TAMPFILT_EXT1_MASK	(0x1f << LP_TAMPFILT_EXT1_SHIFT)
9979+#define LP_TAMPFILT_WM_EN	0x00000080
9980+#define LP_TAMPFILT_WM_SHIFT	0
9981+#define LP_TAMPFILT_WM_MASK	(0x1f << LP_TAMPFILT_WM_SHIFT)
9982+
9983+#define LP_TAMPDET_OSC_BPS	0x10000000
9984+#define LP_TAMPDET_VRC_SHIFT	24
9985+#define LP_TAMPDET_VRC_MASK	(3 << LP_TAMPDET_VRC_SHIFT)
9986+#define LP_TAMPDET_HTDC_SHIFT	20
9987+#define LP_TAMPDET_HTDC_MASK	(3 << LP_TAMPDET_HTDC_SHIFT)
9988+#define LP_TAMPDET_LTDC_SHIFT	16
9989+#define LP_TAMPDET_LTDC_MASK	(3 << LP_TAMPDET_LTDC_SHIFT)
9990+#define LP_TAMPDET_POR_OBS	0x00008000
9991+#define LP_TAMPDET_PFD_OBS	0x00004000
9992+#define LP_TAMPDET_ET2_EN	0x00000400
9993+#define LP_TAMPDET_ET1_EN	0x00000200
9994+#define LP_TAMPDET_WMT2_EN	0x00000100
9995+#define LP_TAMPDET_WMT1_EN	0x00000080
9996+#define LP_TAMPDET_VT_EN	0x00000040
9997+#define LP_TAMPDET_TT_EN	0x00000020
9998+#define LP_TAMPDET_CT_EN	0x00000010
9999+#define LP_TAMPDET_MCR_EN	0x00000004
10000+#define LP_TAMPDET_SRTCR_EN	0x00000002
10001+
10002+#define LP_STATUS_SECURE
10003+#define LP_STATUS_NONSECURE
10004+#define LP_STATUS_SCANEXIT	0x00100000	/* this and all bits below are write-1-to-clear */
10005+#define LP_STATUS_EXT_SECVIO	0x00010000
10006+#define LP_STATUS_ET2		0x00000400
10007+#define LP_STATUS_ET1		0x00000200
10008+#define LP_STATUS_WMT2		0x00000100
10009+#define LP_STATUS_WMT1		0x00000080
10010+#define LP_STATUS_VTD		0x00000040
10011+#define LP_STATUS_TTD		0x00000020
10012+#define LP_STATUS_CTD		0x00000010
10013+#define LP_STATUS_PGD		0x00000008
10014+#define LP_STATUS_MCR		0x00000004
10015+#define LP_STATUS_SRTCR		0x00000002
10016+#define LP_STATUS_LPTA		0x00000001
10017+
10018+/* Full SNVS register page, including version/options */
10019+struct snvs_full {
10020+	struct snvs_hp hp;
10021+	struct snvs_lp lp;
10022+	u32 rsvd[731];		/* deadspace 0x08c-0xbf7 */
10023+
10024+	/* Version / Revision / Option ID space - end of register page */
10025+	u32 vid;		/* 0xbf8 HP Version ID (VID 1) */
10026+	u32 opt_rev;		/* 0xbfc HP Options / Revision (VID 2) */
10027+};
10028+
10029+#endif /* SNVSREGS_H */
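
These register definitions are consumed by the secvio and secure-memory drivers elsewhere in this patch. As a hedged illustration (not code from the patch), a driver holding an ioremapped pointer to the HP block could decode the System Security Monitor state like this; the helper name and the devm_ioremap_resource() origin of 'hp' are assumptions:

/* hypothetical helper; assumes 'hp' came from devm_ioremap_resource() */
static const char *snvs_ssm_state_name(struct snvs_hp __iomem *hp)
{
	u32 st = (ioread32(&hp->status) & HP_STATUS_SSM_ST_MASK) >>
		 HP_STATUS_SSM_ST_SHIFT;

	switch (st) {
	case HP_STATUS_SSM_ST_SECURE:
	case HP_STATUS_SSM_ST_TRUSTED:
		return "secure/trusted";
	case HP_STATUS_SSM_ST_NONSECURE:
		return "non-secure";
	case HP_STATUS_SSM_ST_SOFTFAIL:
		return "soft fail";
	case HP_STATUS_SSM_ST_HARDFAIL:
		return "hard fail";
	default:
		return "transitional";
	}
}

A similar read of secvio_status against HP_SECVIOST_SECVIOMASK would tell a violation handler which of the six sources fired.
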
10030diff --git a/drivers/crypto/caam/tag_object.c b/drivers/crypto/caam/tag_object.c
10031new file mode 100644
10032index 000000000..53f70129e
10033--- /dev/null
10034+++ b/drivers/crypto/caam/tag_object.c
10035@@ -0,0 +1,164 @@
10036+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
10037+/*
10038+ * Copyright 2018-2020 NXP
10039+ */
10040+
10041+#include <linux/export.h>
10042+#include <linux/string.h>
10043+#include <linux/errno.h>
10044+
10045+#include "tag_object.h"
10046+#include "desc.h"
10047+
10048+/**
10049+ * is_key_type -	Check if the object is a key
10050+ *
10051+ * @type:		The object type
10052+ *
10053+ * Return:		True if the object is a key (of black or red color),
10054+ *			false otherwise
10055+ */
10056+bool is_key_type(u32 type)
10057+{
10058+	/* Check type bitfield from object type */
10059+	return ((type >> TAG_OBJ_TYPE_OFFSET) & TAG_OBJ_TYPE_MASK) == 0;
10060+}
10061+EXPORT_SYMBOL(is_key_type);
10062+
10063+/**
10064+ * is_trusted_type -	Check if the object is a trusted key
10065+ *			Trusted Descriptor Key Encryption Key (TDKEK)
10066+ *
10067+ * @type:		The object type
10068+ *
10069+ * Return:		True if the object is a trusted key,
10070+ *			false otherwise
10071+ */
10072+bool is_trusted_type(u32 type)
10073+{
10074+	/* Check type bitfield from object type */
10075+	return ((type >> TAG_OBJ_TK_OFFSET) & TAG_OBJ_TK_MASK) == 1;
10076+}
10077+EXPORT_SYMBOL(is_trusted_type);
10078+
10079+/**
10080+ * is_black_key -	Check if the tag object header is a black key
10081+ * @header:		The tag object header configuration
10082+ *
10083+ * Return:		True if is a black key, false otherwise
10084+ */
10085+bool is_black_key(const struct header_conf *header)
10086+{
10087+	u32 type = header->type;
10088+	/* Check type and color bitfields from tag object type */
10089+	return (type & (BIT(TAG_OBJ_COLOR_OFFSET) |
10090+			BIT(TAG_OBJ_TYPE_OFFSET))) == BIT(TAG_OBJ_COLOR_OFFSET);
10091+}
10092+EXPORT_SYMBOL(is_black_key);
10093+
10094+/**
10095+ * is_valid_header_conf - Check if the header configuration is valid
10096+ * @header:		The header configuration
10097+ *
10098+ * Return:		True if the header of the tag object configuration
10099+ *			contains the TAG_OBJECT_MAGIC number,
10100+ *			false otherwise
10101+ */
10102+bool is_valid_header_conf(const struct header_conf *header)
10103+{
10104+	return (header->_magic_number == TAG_OBJECT_MAGIC);
10105+}
10106+EXPORT_SYMBOL(is_valid_header_conf);
10107+
10108+/**
10109+ * get_key_conf -	Retrieve the key configuration,
10110+ *			meaning the length of the black key and
10111+ *			the KEY command parameters needed for CAAM
10112+ * @header:		The tag object header configuration
10113+ * @red_key_len:	Red key length
10114+ * @obj_len:		Black/Red key/blob length
10115+ * @load_param:		Load parameters for KEY command:
10116+ *			- indicator for encrypted keys: plaintext or black
10117+ *			- indicator for encryption mode: AES-ECB or AES-CCM
10118+ *			- indicator for encryption keys: JDKEK or TDKEK
10119+ */
10120+void get_key_conf(const struct header_conf *header,
10121+		  u32 *red_key_len, u32 *obj_len, u32 *load_param)
10122+{
10123+	*red_key_len = header->red_key_len;
10124+	*obj_len = header->obj_len;
10125+	/* Based on the color of the key, set key encryption bit (ENC) */
10126+	*load_param = ((header->type >> TAG_OBJ_COLOR_OFFSET) &
10127+		       TAG_OBJ_COLOR_MASK) << KEY_ENC_OFFSET;
10128+	/*
10129+	 * For red keys, the TK and EKT bits are ignored.
10130+	 * So we set them anyway, to be valid when the key is black.
10131+	 */
10132+	*load_param |= ((header->type >> TAG_OBJ_TK_OFFSET) &
10133+			 TAG_OBJ_TK_MASK) << KEY_TK_OFFSET;
10134+	*load_param |= ((header->type >> TAG_OBJ_EKT_OFFSET) &
10135+			 TAG_OBJ_EKT_MASK) << KEY_EKT_OFFSET;
10136+}
10137+EXPORT_SYMBOL(get_key_conf);
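+/*
+ * Illustration (editor's sketch, not part of the patch): for a normal
+ * black key covered with AES-ECB, 'type' has only the color bit set, so
+ * get_key_conf() reduces to load_param = 1 << KEY_ENC_OFFSET, with the
+ * TK and EKT bits left clear (JDKEK, AES-ECB).
+ */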
10138+
10139+/**
10140+ * init_tag_object_header - Initialize the tag object header by setting up
10141+ *			the TAG_OBJECT_MAGIC number, tag object version,
10142+ *			a valid type and the object's length
10143+ * @header:		The header configuration to initialize
10144+ * @version:		The tag object version
10145+ * @type:		The tag object type
10146+ * @red_key_len:	The red key length
10147+ * @obj_len:		The object (actual data) length
10148+ */
10149+void init_tag_object_header(struct header_conf *header, u32 version,
10150+			    u32 type, size_t red_key_len, size_t obj_len)
10151+{
10152+	header->_magic_number = TAG_OBJECT_MAGIC;
10153+	header->version = version;
10154+	header->type = type;
10155+	header->red_key_len = red_key_len;
10156+	header->obj_len = obj_len;
10157+}
10158+EXPORT_SYMBOL(init_tag_object_header);
10159+
10160+/**
10161+ * set_tag_object_header_conf - Set tag object header configuration
10162+ * @header:			The tag object header configuration to set
10163+ * @buffer:			The buffer needed to be tagged
10164+ * @buf_size:			The buffer size
10165+ * @tag_obj_size:		The tagged object size
10166+ *
10167+ * Return:			'0' on success, error code otherwise
10168+ */
10169+int set_tag_object_header_conf(const struct header_conf *header,
10170+			       void *buffer, size_t buf_size, u32 *tag_obj_size)
10171+{
10172+	/* Retrieve the tag object */
10173+	struct tagged_object *tag_obj = (struct tagged_object *)buffer;
10174+	/*
10175+	 * Requested size for the tagged object is the buffer size
10176+	 * and the header configuration size (TAG_OVERHEAD_SIZE)
10177+	 */
10178+	size_t req_size = buf_size + TAG_OVERHEAD_SIZE;
10179+
10180+	/*
10181+	 * Check if the configuration can be set,
10182+	 * based on the size of the tagged object
10183+	 */
10184+	if (*tag_obj_size < req_size)
10185+		return -EINVAL;
10186+
10187+	/*
10188+	 * Buffers might overlap, use memmove to
10189+	 * copy the buffer into the tagged object
10190+	 */
10191+	memmove(&tag_obj->object, buffer, buf_size);
10192+	/* Copy the tag object header configuration into the tagged object */
10193+	memcpy(&tag_obj->header, header, TAG_OVERHEAD_SIZE);
10194+	/* Set tagged object size */
10195+	*tag_obj_size = req_size;
10196+
10197+	return 0;
10198+}
10199+EXPORT_SYMBOL(set_tag_object_header_conf);
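
Taken together, init_tag_object_header() and set_tag_object_header_conf() let a caller tag a key buffer in place. A hedged usage sketch follows, assuming a 16-byte black key already sits at the start of 'buf'; the function name is hypothetical and the version value 0 is a placeholder, since the patch defines no version numbers:

#include <linux/bitops.h>
#include "tag_object.h"

static int tag_black_key_sketch(void *buf, size_t buf_len)
{
	struct header_conf hdr;
	u32 total = buf_len;	/* in: space available, out: tagged size */

	/* color = black, type = key, EKT = ECB, TK = JDKEK */
	init_tag_object_header(&hdr, 0, BIT(TAG_OBJ_COLOR_OFFSET), 16, 16);

	/* fails with -EINVAL unless buf_len >= 16 + TAG_OVERHEAD_SIZE */
	return set_tag_object_header_conf(&hdr, buf, 16, &total);
}
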
10200diff --git a/drivers/crypto/caam/tag_object.h b/drivers/crypto/caam/tag_object.h
10201new file mode 100644
10202index 000000000..6c840c30c
10203--- /dev/null
10204+++ b/drivers/crypto/caam/tag_object.h
10205@@ -0,0 +1,111 @@
10206+/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
10207+/*
10208+ * Copyright 2018-2020 NXP
10209+ */
10210+
10211+#ifndef _TAG_OBJECT_H_
10212+#define _TAG_OBJECT_H_
10213+
10214+#include <linux/types.h>
10215+#include <linux/bitops.h>
10216+
10217+/**
10218+ * Magic number to identify the tag object structure
10219+ * 0x54 = 'T'
10220+ * 0x61 = 'a'
10221+ * 0x67 = 'g'
10222+ * 0x4f = 'O'
10223+ */
10224+#define TAG_OBJECT_MAGIC	0x5461674f
10225+#define TAG_OVERHEAD_SIZE	sizeof(struct header_conf)
10226+#define MIN_KEY_SIZE		16
10227+#define TAG_MIN_SIZE		(MIN_KEY_SIZE + TAG_OVERHEAD_SIZE)
10228+/*
10229+ * Tag object type is a bitfield:
10230+ *
10231+ * EKT:	Encrypted Key Type (AES-ECB or AES-CCM)
10232+ * TK:	Trusted Key (use Job Descriptor Key Encryption Key (JDKEK)
10233+ *	or Trusted Descriptor Key Encryption Key (TDKEK) to
10234+ *	decrypt the key to be loaded into a Key Register).
10235+ *
10236+ *| Denomination | Security state | Memory  | EKT | TK    | Type | Color |
10237+ *| ------------ | -------------- | ------- | --- | ----- | ---- | ----- |
10238+ *| bit(s)       | 5-6            | 4       | 3   | 2     | 1    | 0     |
10239+ *| option 0     | non-secure     | general | ECB | JDKEK | key  | red   |
10240+ *| option 1     | secure         | secure  | CCM | TDKEK | blob | black |
10241+ *| option 2     | trusted        |         |     |       |      |       |
10242+ *
10243+ * CAAM supports two different Black Key encapsulation schemes,
10244+ * one intended for quick decryption (uses AES-ECB encryption),
10245+ * and another intended for high assurance (uses AES-CCM encryption).
10246+ *
10247+ * CAAM implements both Trusted and normal (non-Trusted) Black Keys,
10248+ * which are encrypted with different key-encryption keys.
10249+ * Both Trusted and normal Descriptors are allowed to encrypt or decrypt
10250+ * normal Black Keys, but only Trusted Descriptors are allowed to
10251+ * encrypt or decrypt Trusted Black Keys.
10252+ */
10253+#define TAG_OBJ_COLOR_OFFSET		0
10254+#define TAG_OBJ_COLOR_MASK		0x1
10255+#define TAG_OBJ_TYPE_OFFSET		1
10256+#define TAG_OBJ_TYPE_MASK		0x1
10257+#define TAG_OBJ_TK_OFFSET		2
10258+#define TAG_OBJ_TK_MASK			0x1
10259+#define TAG_OBJ_EKT_OFFSET		3
10260+#define TAG_OBJ_EKT_MASK		0x1
10261+#define TAG_OBJ_MEM_OFFSET		4
10262+#define TAG_OBJ_MEM_MASK		0x1
10263+#define TAG_OBJ_SEC_STATE_OFFSET	5
10264+
10265+/**
10266+ * struct header_conf - Header configuration structure, which represents
10267+ *			the metadata (or simply a header) applied to the
10268+ *			actual data (e.g. black key)
10269+ * @_magic_number     : A magic number to identify the structure
10270+ * @version           : The version of the data contained (e.g. tag object)
10271+ * @type              : The type of data contained (e.g. black key, blob, etc.)
10272+ * @red_key_len       : Length of the red key to be loaded by CAAM (for key
10273+ *                      generation or blob encapsulation)
10274+ * @obj_len           : The total length of the (black/red) object (key/blob),
10275+ *                      after encryption/encapsulation
10276+ */
10277+struct header_conf {
10278+	u32 _magic_number;
10279+	u32 version;
10280+	u32 type;
10281+	u32 red_key_len;
10282+	u32 obj_len;
10283+};
10284+
10285+/**
10286+ * struct tagged_object - Tag object structure, which represents the metadata
10287+ *                        (or simply a header) and the actual data
10288+ *                        (e.g. black key) obtained from hardware
10289+ * @header              : The configuration of the data (e.g. header)
10290+ * @object              : The actual data (e.g. black key)
10291+ */
10292+struct tagged_object {
10293+	struct header_conf header;
10294+	char object;
10295+};
10296+
10297+bool is_key_type(u32 type);
10298+
10299+bool is_trusted_type(u32 type);
10300+
10301+bool is_black_key(const struct header_conf * const header);
10302+
10305+bool is_valid_header_conf(const struct header_conf *header);
10306+
10307+void get_key_conf(const struct header_conf *header,
10308+		  u32 *red_key_len, u32 *obj_len, u32 *load_param);
10309+
10310+void init_tag_object_header(struct header_conf *header, u32 version,
10311+			    u32 type, size_t red_key_len, size_t obj_len);
10312+
10313+int set_tag_object_header_conf(const struct header_conf *header,
10314+			       void *buffer, size_t buf_size, u32 *tag_obj_size);
10315+
10316+#endif /* _TAG_OBJECT_H_ */
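
On the consuming side, a descriptor builder would validate the tag before trusting it. A minimal sketch under the same assumptions (the function name is hypothetical, and the descriptor-construction step is only indicated by a comment):

#include <linux/errno.h>
#include "tag_object.h"

static int load_tagged_key_sketch(const void *buf, size_t len)
{
	const struct tagged_object *to = buf;
	u32 red_key_len, obj_len, load_param;

	if (len < TAG_MIN_SIZE || !is_valid_header_conf(&to->header))
		return -EINVAL;

	if (!is_black_key(&to->header))
		return -EINVAL;	/* this sketch only handles black keys */

	get_key_conf(&to->header, &red_key_len, &obj_len, &load_param);

	/* ... feed &to->object, obj_len and load_param to the KEY command */
	return 0;
}
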
10317diff --git a/drivers/crypto/mxs-dcp.c b/drivers/crypto/mxs-dcp.c
10318index 5edc91cdb..37266abbd 100644
10319--- a/drivers/crypto/mxs-dcp.c
10320+++ b/drivers/crypto/mxs-dcp.c
10321@@ -16,6 +16,10 @@
10322 #include <linux/stmp_device.h>
10323 #include <linux/clk.h>
10324
10325+#ifdef CONFIG_PM_SLEEP
10326+#include <linux/freezer.h>
10327+#endif
10328+
10329 #include <crypto/aes.h>
10330 #include <crypto/sha.h>
10331 #include <crypto/internal/hash.h>
10332@@ -123,7 +127,10 @@ struct dcp_export_state {
10333  * design of Linux Crypto API.
10334  */
10335 static struct dcp *global_sdcp;
10336-
10337+#ifdef CONFIG_PM_SLEEP
10338+static u32 ctrl_bak;
10339+static int dcp_vmi_irq_bak, dcp_irq_bak;
10340+#endif
10341 /* DCP register layout. */
10342 #define MXS_DCP_CTRL				0x00
10343 #define MXS_DCP_CTRL_GATHER_RESIDUAL_WRITES	(1 << 23)
10344@@ -316,6 +323,9 @@ static int mxs_dcp_aes_block_crypt(struct crypto_async_request *arq)
10345 	int init = 0;
10346 	bool limit_hit = false;
10347
10348+	if (!req->cryptlen)
10349+		return 0;
10350+
10351 	actx->fill = 0;
10352
10353 	/* Copy the key from the temporary location. */
10354@@ -396,9 +406,15 @@ static int dcp_chan_thread_aes(void *data)
10355
10356 	int ret;
10357
10358+#ifdef CONFIG_PM_SLEEP
10359+	set_freezable();
10360+#endif
10361 	while (!kthread_should_stop()) {
10362 		set_current_state(TASK_INTERRUPTIBLE);
10363
10364+#ifdef CONFIG_PM_SLEEP
10365+		try_to_freeze();
10366+#endif
10367 		spin_lock(&sdcp->lock[chan]);
10368 		backlog = crypto_get_backlog(&sdcp->queue[chan]);
10369 		arq = crypto_dequeue_request(&sdcp->queue[chan]);
10370@@ -436,6 +452,10 @@ static int mxs_dcp_block_fallback(struct skcipher_request *req, int enc)
10371 	skcipher_request_set_crypt(&rctx->fallback_req, req->src, req->dst,
10372 				   req->cryptlen, req->iv);
10373
10374+#ifdef CONFIG_PM_SLEEP
10375+	set_freezable();
10376+	try_to_freeze();
10377+#endif
10378 	if (enc)
10379 		ret = crypto_skcipher_encrypt(&rctx->fallback_req);
10380 	else
10381@@ -692,9 +712,15 @@ static int dcp_chan_thread_sha(void *data)
10382 	struct crypto_async_request *arq;
10383 	int ret;
10384
10385+#ifdef CONFIG_PM_SLEEP
10386+	set_freezable();
10387+#endif
10388 	while (!kthread_should_stop()) {
10389 		set_current_state(TASK_INTERRUPTIBLE);
10390
10391+#ifdef CONFIG_PM_SLEEP
10392+		try_to_freeze();
10393+#endif
10394 		spin_lock(&sdcp->lock[chan]);
10395 		backlog = crypto_get_backlog(&sdcp->queue[chan]);
10396 		arq = crypto_dequeue_request(&sdcp->queue[chan]);
10397@@ -967,6 +993,49 @@ static irqreturn_t mxs_dcp_irq(int irq, void *context)
10398 	return IRQ_HANDLED;
10399 }
10400
10401+#ifdef CONFIG_PM_SLEEP
10402+static int mxs_dcp_resume(struct device *dev)
10403+{
10404+	struct dcp *sdcp = global_sdcp;
10405+	int ret;
10406+
10407+	/* Restart the DCP block */
10408+	ret = stmp_reset_block(sdcp->base);
10409+	if (ret) {
10410+		dev_err(dev, "Failed reset\n");
10411+		clk_disable_unprepare(sdcp->dcp_clk);
10412+		return ret;
10413+	}
10414+
10415+	/* Restore control register */
10416+	writel(ctrl_bak, sdcp->base + MXS_DCP_CTRL);
10417+	/* Enable all DCP DMA channels */
10418+	writel(MXS_DCP_CHANNELCTRL_ENABLE_CHANNEL_MASK,
10419+	       sdcp->base + MXS_DCP_CHANNELCTRL);
10420+
10421+	/* Re-enable DCP interrupts */
10422+	enable_irq(dcp_irq_bak);
10423+	enable_irq(dcp_vmi_irq_bak);
10424+
10425+	return 0;
10426+}
10427+
10428+static int mxs_dcp_suspend(struct device *dev)
10429+{
10430+	struct dcp *sdcp = global_sdcp;
10431+
10432+	/* Backup control register */
10433+	ctrl_bak = readl(sdcp->base + MXS_DCP_CTRL);
10434+	/* Temporarily disable DCP interrupts */
10435+	disable_irq(dcp_irq_bak);
10436+	disable_irq(dcp_vmi_irq_bak);
10437+
10438+	return 0;
10439+}
10440+
10441+static SIMPLE_DEV_PM_OPS(mxs_dcp_pm_ops, mxs_dcp_suspend, mxs_dcp_resume);
10442+#endif
10443+
10444 static int mxs_dcp_probe(struct platform_device *pdev)
10445 {
10446 	struct device *dev = &pdev->dev;
10447@@ -986,7 +1055,10 @@ static int mxs_dcp_probe(struct platform_device *pdev)
10448 	dcp_irq = platform_get_irq(pdev, 1);
10449 	if (dcp_irq < 0)
10450 		return dcp_irq;
10451-
10452+#ifdef CONFIG_PM_SLEEP
10453+	dcp_vmi_irq_bak = dcp_vmi_irq;
10454+	dcp_irq_bak = dcp_irq;
10455+#endif
10456 	sdcp = devm_kzalloc(dev, sizeof(*sdcp), GFP_KERNEL);
10457 	if (!sdcp)
10458 		return -ENOMEM;
10459@@ -1178,6 +1250,9 @@ static struct platform_driver mxs_dcp_driver = {
10460 	.driver	= {
10461 		.name		= "mxs-dcp",
10462 		.of_match_table	= mxs_dcp_dt_ids,
10463+#ifdef CONFIG_PM_SLEEP
10464+		.pm = &mxs_dcp_pm_ops,
10465+#endif
10466 	},
10467 };
10468
10469
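
The CONFIG_PM_SLEEP hunks above apply the standard freezable-kthread pattern so the DCP channel threads park cleanly across suspend. A condensed, hedged sketch of that pattern is below; the real threads also manage per-channel queues and completions, omitted here:

#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/sched.h>

static int dcp_chan_thread_sketch(void *data)
{
	set_freezable();	/* opt this kthread into the freezer */

	while (!kthread_should_stop()) {
		set_current_state(TASK_INTERRUPTIBLE);
		try_to_freeze();	/* park here during suspend */

		/* ... dequeue and run one crypto request, or sleep ... */
		schedule();
	}
	__set_current_state(TASK_RUNNING);

	return 0;
}
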