• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1commit c2145621e6d02757694378a18bdeebe1a5afd5e6
2Author: zhaoxc0502 <zhaoxc0502@thundersoft.com>
3Date:   Thu Jun 16 17:19:24 2022 +0800
4
5    linux_drivers_firmware
6
7    Change-Id: I36b7f7c1c8a25ed6657505008ad1251ef7273f99
8
9diff --git a/drivers/firmware/arm_scmi/common.h b/drivers/firmware/arm_scmi/common.h
10index 34b7ae798..9c1f06759 100644
11--- a/drivers/firmware/arm_scmi/common.h
12+++ b/drivers/firmware/arm_scmi/common.h
13@@ -169,6 +169,7 @@ DECLARE_SCMI_REGISTER_UNREGISTER(perf);
14 DECLARE_SCMI_REGISTER_UNREGISTER(power);
15 DECLARE_SCMI_REGISTER_UNREGISTER(reset);
16 DECLARE_SCMI_REGISTER_UNREGISTER(sensors);
17+DECLARE_SCMI_REGISTER_UNREGISTER(voltage);
18 DECLARE_SCMI_REGISTER_UNREGISTER(system);
19
20 #define DEFINE_SCMI_PROTOCOL_REGISTER_UNREGISTER(id, name) \
21diff --git a/drivers/firmware/arm_scmi/smc.c b/drivers/firmware/arm_scmi/smc.c
22index 82a82a5dc..fcbe2677f 100644
23--- a/drivers/firmware/arm_scmi/smc.c
24+++ b/drivers/firmware/arm_scmi/smc.c
25@@ -9,9 +9,11 @@
26 #include <linux/arm-smccc.h>
27 #include <linux/device.h>
28 #include <linux/err.h>
29+#include <linux/interrupt.h>
30 #include <linux/mutex.h>
31 #include <linux/of.h>
32 #include <linux/of_address.h>
33+#include <linux/of_irq.h>
34 #include <linux/slab.h>
35
36 #include "common.h"
37@@ -23,6 +25,8 @@
38  * @shmem: Transmit/Receive shared memory area
39  * @shmem_lock: Lock to protect access to Tx/Rx shared memory area
40  * @func_id: smc/hvc call function id
41+ * @irq: Optional; used when the platform signals message completion by interrupt.
42+ * @tx_complete: Optional; used only when @irq is valid.
43  */
44
45 struct scmi_smc {
46@@ -30,8 +34,19 @@ struct scmi_smc {
47 	struct scmi_shared_mem __iomem *shmem;
48 	struct mutex shmem_lock;
49 	u32 func_id;
50+	int irq;
51+	struct completion tx_complete;
52 };
53
54+static irqreturn_t smc_msg_done_isr(int irq, void *data)
55+{
56+	struct scmi_smc *scmi_info = data;
57+
58+	complete(&scmi_info->tx_complete);
59+
60+	return IRQ_HANDLED;
61+}
62+
63 static bool smc_chan_available(struct device *dev, int idx)
64 {
65 	struct device_node *np = of_parse_phandle(dev->of_node, "shmem", 0);
66@@ -51,7 +66,7 @@ static int smc_chan_setup(struct scmi_chan_info *cinfo, struct device *dev,
67 	struct resource res;
68 	struct device_node *np;
69 	u32 func_id;
70-	int ret;
71+	int ret, irq;
72
73 	if (!tx)
74 		return -ENODEV;
75@@ -79,6 +94,24 @@ static int smc_chan_setup(struct scmi_chan_info *cinfo, struct device *dev,
76 	if (ret < 0)
77 		return ret;
78
79+	/*
80+	 * If there is an interrupt named "a2p", then the service and
81+	 * completion of a message are signaled by an interrupt rather than by
82+	 * the return of the SMC call.
83+	 */
84+	irq = of_irq_get_byname(cdev->of_node, "a2p");
85+	if (irq > 0) {
86+		ret = devm_request_irq(dev, irq, smc_msg_done_isr,
87+				       IRQF_NO_SUSPEND,
88+				       dev_name(dev), scmi_info);
89+		if (ret) {
90+			dev_err(dev, "failed to setup SCMI smc irq\n");
91+			return ret;
92+		}
93+		init_completion(&scmi_info->tx_complete);
94+		scmi_info->irq = irq;
95+	}
96+
97 	scmi_info->func_id = func_id;
98 	scmi_info->cinfo = cinfo;
99 	mutex_init(&scmi_info->shmem_lock);
100@@ -110,7 +143,14 @@ static int smc_send_message(struct scmi_chan_info *cinfo,
101
102 	shmem_tx_prepare(scmi_info->shmem, xfer);
103
104+	if (scmi_info->irq)
105+		reinit_completion(&scmi_info->tx_complete);
106+
107 	arm_smccc_1_1_invoke(scmi_info->func_id, 0, 0, 0, 0, 0, 0, 0, &res);
108+
109+	if (scmi_info->irq)
110+		wait_for_completion(&scmi_info->tx_complete);
111+
112 	scmi_rx_callback(scmi_info->cinfo, shmem_read_header(scmi_info->shmem));
113
114 	mutex_unlock(&scmi_info->shmem_lock);
115diff --git a/drivers/firmware/imx/Kconfig b/drivers/firmware/imx/Kconfig
116index c027d99f2..d25cc43b6 100644
117--- a/drivers/firmware/imx/Kconfig
118+++ b/drivers/firmware/imx/Kconfig
119@@ -28,3 +28,25 @@ config IMX_SCU_PD
120 	depends on IMX_SCU
121 	help
122 	  The System Controller Firmware (SCFW) based power domain driver.
123+
124+config IMX_SECO_MU
125+	tristate "i.MX Security Controller (SECO) support"
126+	depends on IMX_MBOX
127+	default y if IMX_SCU
128+
129+	help
130+	  It is possible to use APIs exposed by the SECO like HSM and SHE using the
131+	  SAB protocol via the shared Messaging Unit. This driver exposes these
132+	  interfaces via a set of file descriptors that allow configuring shared
133+	  memory and sending and receiving messages.
134+
135+config IMX_SENTNL_MU
136+	tristate "i.MX Embedded Security Element (SENTINEL) support."
137+	depends on IMX_MBOX
138+	default y if ARM64
139+
140+	help
141+	  It is possible to use APIs exposed by the SENTINEL like base, HSM & SHE
142+	  using the SAB protocol via the shared Messaging Unit. This driver exposes
143+	  these interfaces via a set of file descriptors that allow configuring shared
144+	  memory and sending and receiving messages.
145diff --git a/drivers/firmware/imx/Makefile b/drivers/firmware/imx/Makefile
146index b76acbade..e52598bfc 100644
147--- a/drivers/firmware/imx/Makefile
148+++ b/drivers/firmware/imx/Makefile
149@@ -1,4 +1,6 @@
150 # SPDX-License-Identifier: GPL-2.0
151 obj-$(CONFIG_IMX_DSP)		+= imx-dsp.o
152-obj-$(CONFIG_IMX_SCU)		+= imx-scu.o misc.o imx-scu-irq.o rm.o imx-scu-soc.o
153+obj-$(CONFIG_IMX_SCU)		+= imx-scu.o misc.o imx-scu-irq.o rm.o imx-scu-soc.o seco.o
154 obj-$(CONFIG_IMX_SCU_PD)	+= scu-pd.o
155+obj-${CONFIG_IMX_SECO_MU}	+= seco_mu.o
156+obj-${CONFIG_IMX_SENTNL_MU}	+= sentnl_mu.o sentnl_base_msg.o
157diff --git a/drivers/firmware/imx/imx-dsp.c b/drivers/firmware/imx/imx-dsp.c
158index 4265e9dbe..a6c06d747 100644
159--- a/drivers/firmware/imx/imx-dsp.c
160+++ b/drivers/firmware/imx/imx-dsp.c
161@@ -60,22 +60,40 @@ static void imx_dsp_handle_rx(struct mbox_client *c, void *msg)
162 	}
163 }
164
165-static int imx_dsp_probe(struct platform_device *pdev)
166+struct mbox_chan *imx_dsp_request_channel(struct imx_dsp_ipc *dsp_ipc, int idx)
167 {
168-	struct device *dev = &pdev->dev;
169-	struct imx_dsp_ipc *dsp_ipc;
170+	struct imx_dsp_chan *dsp_chan;
171+
172+	if (idx >= DSP_MU_CHAN_NUM)
173+		return ERR_PTR(-EINVAL);
174+
175+	dsp_chan = &dsp_ipc->chans[idx];
176+	dsp_chan->ch = mbox_request_channel_byname(&dsp_chan->cl, dsp_chan->name);
177+	return dsp_chan->ch;
178+}
179+EXPORT_SYMBOL(imx_dsp_request_channel);
180+
181+void imx_dsp_free_channel(struct imx_dsp_ipc *dsp_ipc, int idx)
182+{
183+	struct imx_dsp_chan *dsp_chan;
184+
185+	if (idx >= DSP_MU_CHAN_NUM)
186+		return;
187+
188+	dsp_chan = &dsp_ipc->chans[idx];
189+	mbox_free_channel(dsp_chan->ch);
190+}
191+EXPORT_SYMBOL(imx_dsp_free_channel);
192+
193+static int imx_dsp_setup_channels(struct imx_dsp_ipc *dsp_ipc)
194+{
195+	struct device *dev = dsp_ipc->dev;
196 	struct imx_dsp_chan *dsp_chan;
197 	struct mbox_client *cl;
198 	char *chan_name;
199 	int ret;
200 	int i, j;
201
202-	device_set_of_node_from_dev(&pdev->dev, pdev->dev.parent);
203-
204-	dsp_ipc = devm_kzalloc(dev, sizeof(*dsp_ipc), GFP_KERNEL);
205-	if (!dsp_ipc)
206-		return -ENOMEM;
207-
208 	for (i = 0; i < DSP_MU_CHAN_NUM; i++) {
209 		if (i < 2)
210 			chan_name = kasprintf(GFP_KERNEL, "txdb%d", i);
211@@ -86,6 +104,7 @@ static int imx_dsp_probe(struct platform_device *pdev)
212 			return -ENOMEM;
213
214 		dsp_chan = &dsp_ipc->chans[i];
215+		dsp_chan->name = chan_name;
216 		cl = &dsp_chan->cl;
217 		cl->dev = dev;
218 		cl->tx_block = false;
219@@ -104,27 +123,43 @@ static int imx_dsp_probe(struct platform_device *pdev)
220 		}
221
222 		dev_dbg(dev, "request mbox chan %s\n", chan_name);
223-		/* chan_name is not used anymore by framework */
224-		kfree(chan_name);
225 	}
226
227-	dsp_ipc->dev = dev;
228-
229-	dev_set_drvdata(dev, dsp_ipc);
230-
231-	dev_info(dev, "NXP i.MX DSP IPC initialized\n");
232-
233 	return 0;
234 out:
235-	kfree(chan_name);
236 	for (j = 0; j < i; j++) {
237 		dsp_chan = &dsp_ipc->chans[j];
238 		mbox_free_channel(dsp_chan->ch);
239+		kfree(dsp_chan->name);
240 	}
241
242 	return ret;
243 }
244
245+static int imx_dsp_probe(struct platform_device *pdev)
246+{
247+	struct device *dev = &pdev->dev;
248+	struct imx_dsp_ipc *dsp_ipc;
249+	int ret;
250+
251+	device_set_of_node_from_dev(&pdev->dev, pdev->dev.parent);
252+
253+	dsp_ipc = devm_kzalloc(dev, sizeof(*dsp_ipc), GFP_KERNEL);
254+	if (!dsp_ipc)
255+		return -ENOMEM;
256+
257+	dsp_ipc->dev = dev;
258+	dev_set_drvdata(dev, dsp_ipc);
259+
260+	ret = imx_dsp_setup_channels(dsp_ipc);
261+	if (ret < 0)
262+		return ret;
263+
264+	dev_info(dev, "NXP i.MX DSP IPC initialized\n");
265+
266+	return 0;
267+}
268+
269 static int imx_dsp_remove(struct platform_device *pdev)
270 {
271 	struct imx_dsp_chan *dsp_chan;
272@@ -136,6 +171,7 @@ static int imx_dsp_remove(struct platform_device *pdev)
273 	for (i = 0; i < DSP_MU_CHAN_NUM; i++) {
274 		dsp_chan = &dsp_ipc->chans[i];
275 		mbox_free_channel(dsp_chan->ch);
276+		kfree(dsp_chan->name);
277 	}
278
279 	return 0;
280diff --git a/drivers/firmware/imx/imx-scu-irq.c b/drivers/firmware/imx/imx-scu-irq.c
281index d9dcc2094..cc081a6a1 100644
282--- a/drivers/firmware/imx/imx-scu-irq.c
283+++ b/drivers/firmware/imx/imx-scu-irq.c
284@@ -1,6 +1,6 @@
285 // SPDX-License-Identifier: GPL-2.0+
286 /*
287- * Copyright 2019 NXP
288+ * Copyright 2019-2020 NXP
289  *
290  * Implementation of the SCU IRQ functions using MU.
291  *
292@@ -14,7 +14,7 @@
293
294 #define IMX_SC_IRQ_FUNC_ENABLE	1
295 #define IMX_SC_IRQ_FUNC_STATUS	2
296-#define IMX_SC_IRQ_NUM_GROUP	4
297+#define IMX_SC_IRQ_NUM_GROUP	7
298
299 static u32 mu_resource_id;
300
301@@ -42,53 +42,42 @@ struct imx_sc_msg_irq_enable {
302
303 static struct imx_sc_ipc *imx_sc_irq_ipc_handle;
304 static struct work_struct imx_sc_irq_work;
305-static ATOMIC_NOTIFIER_HEAD(imx_scu_irq_notifier_chain);
306+static BLOCKING_NOTIFIER_HEAD(imx_scu_irq_notifier_chain);
307
308 int imx_scu_irq_register_notifier(struct notifier_block *nb)
309 {
310-	return atomic_notifier_chain_register(
311+	return blocking_notifier_chain_register(
312 		&imx_scu_irq_notifier_chain, nb);
313 }
314 EXPORT_SYMBOL(imx_scu_irq_register_notifier);
315
316 int imx_scu_irq_unregister_notifier(struct notifier_block *nb)
317 {
318-	return atomic_notifier_chain_unregister(
319+	return blocking_notifier_chain_unregister(
320 		&imx_scu_irq_notifier_chain, nb);
321 }
322 EXPORT_SYMBOL(imx_scu_irq_unregister_notifier);
323
324 static int imx_scu_irq_notifier_call_chain(unsigned long status, u8 *group)
325 {
326-	return atomic_notifier_call_chain(&imx_scu_irq_notifier_chain,
327+	return blocking_notifier_call_chain(&imx_scu_irq_notifier_chain,
328 		status, (void *)group);
329 }
330
331 static void imx_scu_irq_work_handler(struct work_struct *work)
332 {
333-	struct imx_sc_msg_irq_get_status msg;
334-	struct imx_sc_rpc_msg *hdr = &msg.hdr;
335 	u32 irq_status;
336 	int ret;
337 	u8 i;
338
339 	for (i = 0; i < IMX_SC_IRQ_NUM_GROUP; i++) {
340-		hdr->ver = IMX_SC_RPC_VERSION;
341-		hdr->svc = IMX_SC_RPC_SVC_IRQ;
342-		hdr->func = IMX_SC_IRQ_FUNC_STATUS;
343-		hdr->size = 2;
344-
345-		msg.data.req.resource = mu_resource_id;
346-		msg.data.req.group = i;
347-
348-		ret = imx_scu_call_rpc(imx_sc_irq_ipc_handle, &msg, true);
349+		ret = imx_scu_irq_get_status(i, &irq_status);
350 		if (ret) {
351 			pr_err("get irq group %d status failed, ret %d\n",
352 			       i, ret);
353 			return;
354 		}
355
356-		irq_status = msg.data.resp.status;
357 		if (!irq_status)
358 			continue;
359
360@@ -97,6 +86,31 @@ static void imx_scu_irq_work_handler(struct work_struct *work)
361 	}
362 }
363
364+int imx_scu_irq_get_status(u8 group, u32 *irq_status)
365+{
366+	struct imx_sc_msg_irq_get_status msg;
367+	struct imx_sc_rpc_msg *hdr = &msg.hdr;
368+	int ret;
369+
370+	hdr->ver = IMX_SC_RPC_VERSION;
371+	hdr->svc = IMX_SC_RPC_SVC_IRQ;
372+	hdr->func = IMX_SC_IRQ_FUNC_STATUS;
373+	hdr->size = 2;
374+
375+	msg.data.req.resource = mu_resource_id;
376+	msg.data.req.group = group;
377+
378+	ret = imx_scu_call_rpc(imx_sc_irq_ipc_handle, &msg, true);
379+	if (ret)
380+		return ret;
381+
382+	if (irq_status)
383+		*irq_status = msg.data.resp.status;
384+
385+	return 0;
386+}
387+EXPORT_SYMBOL(imx_scu_irq_get_status);
388+
389 int imx_scu_irq_group_enable(u8 group, u32 mask, u8 enable)
390 {
391 	struct imx_sc_msg_irq_enable msg;
392diff --git a/drivers/firmware/imx/imx-scu-soc.c b/drivers/firmware/imx/imx-scu-soc.c
393index 2f32353de..c8d14315d 100644
394--- a/drivers/firmware/imx/imx-scu-soc.c
395+++ b/drivers/firmware/imx/imx-scu-soc.c
396@@ -12,6 +12,8 @@
397
398 static struct imx_sc_ipc *imx_sc_soc_ipc_handle;
399
400+extern bool TKT340553_SW_WORKAROUND;
401+
402 struct imx_sc_msg_misc_get_soc_id {
403 	struct imx_sc_rpc_msg hdr;
404 	union {
405@@ -35,18 +37,15 @@ static int imx_scu_soc_uid(u64 *soc_uid)
406 {
407 	struct imx_sc_msg_misc_get_soc_uid msg;
408 	struct imx_sc_rpc_msg *hdr = &msg.hdr;
409-	int ret;
410+
411+	memset(&msg, 0, sizeof(msg));
412
413 	hdr->ver = IMX_SC_RPC_VERSION;
414 	hdr->svc = IMX_SC_RPC_SVC_MISC;
415 	hdr->func = IMX_SC_MISC_FUNC_UNIQUE_ID;
416 	hdr->size = 1;
417
418-	ret = imx_scu_call_rpc(imx_sc_soc_ipc_handle, &msg, true);
419-	if (ret) {
420-		pr_err("%s: get soc uid failed, ret %d\n", __func__, ret);
421-		return ret;
422-	}
423+	imx_scu_call_rpc(imx_sc_soc_ipc_handle, &msg, true);
424
425 	*soc_uid = msg.uid_high;
426 	*soc_uid <<= 32;
427@@ -113,9 +112,13 @@ int imx_scu_soc_init(struct device *dev)
428
429 	/* format soc_id value passed from SCU firmware */
430 	val = id & 0x1f;
431-	soc_dev_attr->soc_id = devm_kasprintf(dev, GFP_KERNEL, "0x%x", val);
432-	if (!soc_dev_attr->soc_id)
433-		return -ENOMEM;
434+	if (of_machine_is_compatible("fsl,imx8qm")) {
435+		soc_dev_attr->soc_id = "i.MX8QM";
436+		TKT340553_SW_WORKAROUND = true;
437+	} else if (of_machine_is_compatible("fsl,imx8qxp"))
438+		soc_dev_attr->soc_id = "i.MX8QXP";
439+	else if (of_machine_is_compatible("fsl,imx8dxl"))
440+		soc_dev_attr->soc_id = "i.MX8DXL";
441
442 	/* format revision value passed from SCU firmware */
443 	val = (id >> 5) & 0xf;
444diff --git a/drivers/firmware/imx/imx-scu.c b/drivers/firmware/imx/imx-scu.c
445index dca79cacc..fd6de5771 100644
446--- a/drivers/firmware/imx/imx-scu.c
447+++ b/drivers/firmware/imx/imx-scu.c
448@@ -7,6 +7,7 @@
449  *
450  */
451
452+#include <linux/arm-smccc.h>
453 #include <linux/err.h>
454 #include <linux/firmware/imx/ipc.h>
455 #include <linux/firmware/imx/sci.h>
456@@ -19,8 +20,11 @@
457 #include <linux/of_platform.h>
458 #include <linux/platform_device.h>
459
460+#include <xen/xen.h>
461+
462+#define FSL_HVC_SC                      0xC6000000
463 #define SCU_MU_CHAN_NUM		8
464-#define MAX_RX_TIMEOUT		(msecs_to_jiffies(30))
465+#define MAX_RX_TIMEOUT		(msecs_to_jiffies(3000))
466
467 struct imx_sc_chan {
468 	struct imx_sc_ipc *sc_ipc;
469@@ -204,6 +208,7 @@ int imx_scu_call_rpc(struct imx_sc_ipc *sc_ipc, void *msg, bool have_resp)
470 {
471 	uint8_t saved_svc, saved_func;
472 	struct imx_sc_rpc_msg *hdr;
473+	struct arm_smccc_res res;
474 	int ret;
475
476 	if (WARN_ON(!sc_ipc || !msg))
477@@ -218,33 +223,45 @@ int imx_scu_call_rpc(struct imx_sc_ipc *sc_ipc, void *msg, bool have_resp)
478 		saved_func = ((struct imx_sc_rpc_msg *)msg)->func;
479 	}
480 	sc_ipc->count = 0;
481-	ret = imx_scu_ipc_write(sc_ipc, msg);
482-	if (ret < 0) {
483-		dev_err(sc_ipc->dev, "RPC send msg failed: %d\n", ret);
484-		goto out;
485-	}
486-
487-	if (have_resp) {
488-		if (!wait_for_completion_timeout(&sc_ipc->done,
489-						 MAX_RX_TIMEOUT)) {
490-			dev_err(sc_ipc->dev, "RPC send msg timeout\n");
491-			mutex_unlock(&sc_ipc->lock);
492-			return -ETIMEDOUT;
493+	sc_ipc->rx_size = 0;
494+	if (xen_initial_domain()) {
495+		arm_smccc_hvc(FSL_HVC_SC, (uint64_t)msg, !have_resp, 0, 0, 0,
496+			      0, 0, &res);
497+		if (res.a0)
498+			printk("Error FSL_HVC_SC %ld\n", res.a0);
499+
500+		ret = res.a0;
501+
502+	} else {
503+		ret = imx_scu_ipc_write(sc_ipc, msg);
504+		if (ret < 0) {
505+			dev_err(sc_ipc->dev, "RPC send msg failed: %d\n", ret);
506+			goto out;
507 		}
508
509-		/* response status is stored in hdr->func field */
510-		hdr = msg;
511-		ret = hdr->func;
512-		/*
513-		 * Some special SCU firmware APIs do NOT have return value
514-		 * in hdr->func, but they do have response data, those special
515-		 * APIs are defined as void function in SCU firmware, so they
516-		 * should be treated as return success always.
517-		 */
518-		if ((saved_svc == IMX_SC_RPC_SVC_MISC) &&
519-			(saved_func == IMX_SC_MISC_FUNC_UNIQUE_ID ||
520-			 saved_func == IMX_SC_MISC_FUNC_GET_BUTTON_STATUS))
521-			ret = 0;
522+		if (have_resp) {
523+			if (!wait_for_completion_timeout(&sc_ipc->done,
524+							 MAX_RX_TIMEOUT)) {
525+				dev_err(sc_ipc->dev, "RPC send msg timeout\n");
526+				mutex_unlock(&sc_ipc->lock);
527+				return -ETIMEDOUT;
528+			}
529+
530+			/* response status is stored in hdr->func field */
531+			hdr = msg;
532+			ret = hdr->func;
533+
534+			/*
535+			 * Some special SCU firmware APIs do NOT have return value
536+			 * in hdr->func, but they do have response data, those special
537+			 * APIs are defined as void function in SCU firmware, so they
538+			 * should be treated as return success always.
539+			 */
540+			if ((saved_svc == IMX_SC_RPC_SVC_MISC) &&
541+				(saved_func == IMX_SC_MISC_FUNC_UNIQUE_ID ||
542+				 saved_func == IMX_SC_MISC_FUNC_GET_BUTTON_STATUS))
543+				ret = 0;
544+		}
545 	}
546
547 out:
548@@ -354,7 +371,12 @@ static struct platform_driver imx_scu_driver = {
549 	},
550 	.probe = imx_scu_probe,
551 };
552-builtin_platform_driver(imx_scu_driver);
553+
554+static int __init imx_scu_driver_init(void)
555+{
556+	return platform_driver_register(&imx_scu_driver);
557+}
558+subsys_initcall_sync(imx_scu_driver_init);
559
560 MODULE_AUTHOR("Dong Aisheng <aisheng.dong@nxp.com>");
561 MODULE_DESCRIPTION("IMX SCU firmware protocol driver");
562diff --git a/drivers/firmware/imx/misc.c b/drivers/firmware/imx/misc.c
563index d073cb3ce..01878451d 100644
564--- a/drivers/firmware/imx/misc.c
565+++ b/drivers/firmware/imx/misc.c
566@@ -18,6 +18,13 @@ struct imx_sc_msg_req_misc_set_ctrl {
567 	u16 resource;
568 } __packed __aligned(4);
569
570+
571+struct imx_sc_msg_req_misc_set_dma_group {
572+	struct imx_sc_rpc_msg hdr;
573+	u16 resource;
574+	u8 val;
575+} __packed __aligned(4);
576+
577 struct imx_sc_msg_req_cpu_start {
578 	struct imx_sc_rpc_msg hdr;
579 	u32 address_hi;
580@@ -67,6 +74,24 @@ int imx_sc_misc_set_control(struct imx_sc_ipc *ipc, u32 resource,
581 }
582 EXPORT_SYMBOL(imx_sc_misc_set_control);
583
584+int imx_sc_misc_set_dma_group(struct imx_sc_ipc *ipc, u32 resource,
585+			    u32 val)
586+{
587+	struct imx_sc_msg_req_misc_set_dma_group msg;
588+	struct imx_sc_rpc_msg *hdr = &msg.hdr;
589+
590+	hdr->ver = IMX_SC_RPC_VERSION;
591+	hdr->svc = (uint8_t)IMX_SC_RPC_SVC_MISC;
592+	hdr->func = (uint8_t)IMX_SC_MISC_FUNC_SET_DMA_GROUP;
593+	hdr->size = 2;
594+
595+	msg.val = val;
596+	msg.resource = resource;
597+
598+	return imx_scu_call_rpc(ipc, &msg, true);
599+}
600+EXPORT_SYMBOL(imx_sc_misc_set_dma_group);
601+
602 /*
603  * This function gets a miscellaneous control value.
604  *
605diff --git a/drivers/firmware/imx/rm.c b/drivers/firmware/imx/rm.c
606index a12db6ff3..6dd4db386 100644
607--- a/drivers/firmware/imx/rm.c
608+++ b/drivers/firmware/imx/rm.c
609@@ -13,6 +13,11 @@ struct imx_sc_msg_rm_rsrc_owned {
610 	u16 resource;
611 } __packed __aligned(4);
612
613+struct imx_sc_msg_rm_pt {
614+	struct imx_sc_rpc_msg hdr;
615+	u8 val;
616+} __packed __aligned(4);
617+
618 /*
619  * This function check @resource is owned by current partition or not
620  *
621@@ -43,3 +48,160 @@ bool imx_sc_rm_is_resource_owned(struct imx_sc_ipc *ipc, u16 resource)
622 	return hdr->func;
623 }
624 EXPORT_SYMBOL(imx_sc_rm_is_resource_owned);
625+
626+/*
627+ * This function returns the current partition number
628+ *
629+ * @param[in]     ipc         IPC handle
630+ * @param[out]    pt          holding the partition number
631+ *
632+ * @return Returns 0 for success and < 0 for errors.
633+ */
634+int imx_sc_rm_get_partition(struct imx_sc_ipc *ipc, u8 *pt)
635+{
636+	struct imx_sc_msg_rm_pt msg;
637+	struct imx_sc_rpc_msg *hdr = &msg.hdr;
638+	int ret;
639+
640+	hdr->ver = IMX_SC_RPC_VERSION;
641+	hdr->svc = IMX_SC_RPC_SVC_RM;
642+	hdr->func = IMX_SC_RM_FUNC_GET_PARTITION;
643+	hdr->size = 1;
644+
645+	ret = imx_scu_call_rpc(ipc, &msg, true);
646+	if (ret)
647+		return ret;
648+
649+	if (pt != NULL)
650+		*pt = msg.val;
651+
652+	return 0;
653+}
654+EXPORT_SYMBOL(imx_sc_rm_get_partition);
655+
656+struct imx_sc_msg_rm_find_memreg {
657+	struct imx_sc_rpc_msg hdr;
658+	union {
659+		struct {
660+			u32 add_start_hi;
661+			u32 add_start_lo;
662+			u32 add_end_hi;
663+			u32 add_end_lo;
664+		} req;
665+		struct {
666+			u8 val;
667+		} resp;
668+	} data;
669+}  __packed __aligned(4);
670+
671+int imx_sc_rm_find_memreg(struct imx_sc_ipc *ipc, u8 *mr, u64 addr_start,
672+			  u64 addr_end)
673+{
674+	struct imx_sc_msg_rm_find_memreg msg;
675+	struct imx_sc_rpc_msg *hdr = &msg.hdr;
676+	int ret;
677+
678+	hdr->ver = IMX_SC_RPC_VERSION;
679+	hdr->svc = IMX_SC_RPC_SVC_RM;
680+	hdr->func = IMX_SC_RM_FUNC_FIND_MEMREG;
681+	hdr->size = 5;
682+
683+	msg.data.req.add_start_hi = addr_start >> 32;
684+	msg.data.req.add_start_lo = addr_start;
685+	msg.data.req.add_end_hi = addr_end >> 32;
686+	msg.data.req.add_end_lo = addr_end;
687+
688+	ret = imx_scu_call_rpc(ipc, &msg, true);
689+	if (ret)
690+		return ret;
691+
692+	if (mr)
693+		*mr = msg.data.resp.val;
694+
695+	return 0;
696+}
697+EXPORT_SYMBOL(imx_sc_rm_find_memreg);
698+
699+struct imx_sc_msg_rm_get_resource_owner {
700+	struct imx_sc_rpc_msg hdr;
701+	union {
702+		struct {
703+			u16 resource;
704+		} req;
705+		struct {
706+			u8 val;
707+		} resp;
708+	} data;
709+} __packed __aligned(4);
710+
711+int imx_sc_rm_get_resource_owner(struct imx_sc_ipc *ipc, u16 resource, u8 *pt)
712+{
713+	struct imx_sc_msg_rm_get_resource_owner msg;
714+	struct imx_sc_rpc_msg *hdr = &msg.hdr;
715+	int ret;
716+
717+	hdr->ver = IMX_SC_RPC_VERSION;
718+	hdr->svc = IMX_SC_RPC_SVC_RM;
719+	hdr->func = IMX_SC_RM_FUNC_GET_RESOURCE_OWNER;
720+	hdr->size = 2;
721+
722+	msg.data.req.resource = resource;
723+
724+	ret = imx_scu_call_rpc(ipc, &msg, true);
725+	if (ret)
726+		return ret;
727+
728+	if (pt)
729+		*pt = msg.data.resp.val;
730+
731+	return 0;
732+}
733+EXPORT_SYMBOL(imx_sc_rm_get_resource_owner);
734+
735+struct imx_sc_msg_set_memreg_permissions {
736+	struct imx_sc_rpc_msg hdr;
737+	u8 mr;
738+	u8 pt;
739+	u8 perm;
740+} __packed __aligned(4);
741+
742+int imx_sc_rm_set_memreg_permissions(struct imx_sc_ipc *ipc, u8 mr,
743+				     u8 pt, u8 perm)
744+{
745+	struct imx_sc_msg_set_memreg_permissions msg;
746+	struct imx_sc_rpc_msg *hdr = &msg.hdr;
747+
748+	hdr->ver = IMX_SC_RPC_VERSION;
749+	hdr->svc = IMX_SC_RPC_SVC_RM;
750+	hdr->func = IMX_SC_RM_FUNC_SET_MEMREG_PERMISSIONS;
751+	hdr->size = 2;
752+
753+	msg.mr = mr;
754+	msg.pt = pt;
755+	msg.perm = perm;
756+
757+	return imx_scu_call_rpc(ipc, &msg, true);
758+}
759+EXPORT_SYMBOL(imx_sc_rm_set_memreg_permissions);
760+
761+int imx_sc_rm_get_did(struct imx_sc_ipc *ipc, u8 *did)
762+{
763+	struct imx_sc_rpc_msg msg;
764+	struct imx_sc_rpc_msg *hdr = &msg;
765+	int ret;
766+
767+	hdr->ver = IMX_SC_RPC_VERSION;
768+	hdr->svc = IMX_SC_RPC_SVC_RM;
769+	hdr->func = IMX_SC_RM_FUNC_GET_DID;
770+	hdr->size = 1;
771+
772+	ret = imx_scu_call_rpc(ipc, &msg, true);
773+	if (ret < 0)
774+		return ret;
775+
776+	if (did)
777+		*did = msg.func;
778+
779+	return 0;
780+}
781+EXPORT_SYMBOL(imx_sc_rm_get_did);
782diff --git a/drivers/firmware/imx/scu-pd.c b/drivers/firmware/imx/scu-pd.c
783old mode 100644
784new mode 100755
785index 946eea292..2328a9ed6
786--- a/drivers/firmware/imx/scu-pd.c
787+++ b/drivers/firmware/imx/scu-pd.c
788@@ -1,7 +1,7 @@
789 // SPDX-License-Identifier: GPL-2.0+
790 /*
791  * Copyright (C) 2016 Freescale Semiconductor, Inc.
792- * Copyright 2017-2018 NXP
793+ * Copyright 2017-2018,2020 NXP
794  *	Dong Aisheng <aisheng.dong@nxp.com>
795  *
796  * Implementation of the SCU based Power Domains
797@@ -44,10 +44,13 @@
798  *
799  */
800
801+#include <linux/arm-smccc.h>
802 #include <dt-bindings/firmware/imx/rsrc.h>
803+#include <linux/console.h>
804 #include <linux/firmware/imx/sci.h>
805 #include <linux/firmware/imx/svc/rm.h>
806 #include <linux/io.h>
807+#include <linux/irqchip/arm-gic-v3.h>
808 #include <linux/module.h>
809 #include <linux/of.h>
810 #include <linux/of_address.h>
811@@ -56,6 +59,17 @@
812 #include <linux/pm.h>
813 #include <linux/pm_domain.h>
814 #include <linux/slab.h>
815+#include <linux/syscore_ops.h>
816+
817+#define IMX_WU_MAX_IRQS	(((IMX_SC_R_LAST + 31) / 32 ) * 32 )
818+
819+#define IMX_SIP_WAKEUP_SRC              0xc2000009
820+#define IMX_SIP_WAKEUP_SRC_SCU          0x1
821+#define IMX_SIP_WAKEUP_SRC_IRQSTEER     0x2
822+
823+static u32 wu[IMX_WU_MAX_IRQS];
824+static int wu_num;
825+static void __iomem *gic_dist_base;
826
827 /* SCU Power Mode Protocol definition */
828 struct imx_sc_msg_req_set_resource_power_mode {
829@@ -86,6 +100,8 @@ struct imx_sc_pd_soc {
830 	u8 num_ranges;
831 };
832
833+int imx_con_rsrc;
834+
835 static const struct imx_sc_pd_range imx8qxp_scu_pd_ranges[] = {
836 	/* LSIO SS */
837 	{ "pwm", IMX_SC_R_PWM_0, 8, true, 0 },
838@@ -99,24 +115,29 @@ static const struct imx_sc_pd_range imx8qxp_scu_pd_ranges[] = {
839 	/* CONN SS */
840 	{ "usb", IMX_SC_R_USB_0, 2, true, 0 },
841 	{ "usb0phy", IMX_SC_R_USB_0_PHY, 1, false, 0 },
842+	{ "usb1phy", IMX_SC_R_USB_1_PHY, 1, false, 0},
843 	{ "usb2", IMX_SC_R_USB_2, 1, false, 0 },
844 	{ "usb2phy", IMX_SC_R_USB_2_PHY, 1, false, 0 },
845 	{ "sdhc", IMX_SC_R_SDHC_0, 3, true, 0 },
846 	{ "enet", IMX_SC_R_ENET_0, 2, true, 0 },
847 	{ "nand", IMX_SC_R_NAND, 1, false, 0 },
848-	{ "mlb", IMX_SC_R_MLB_0, 1, true, 0 },
849
850 	/* AUDIO SS */
851 	{ "audio-pll0", IMX_SC_R_AUDIO_PLL_0, 1, false, 0 },
852 	{ "audio-pll1", IMX_SC_R_AUDIO_PLL_1, 1, false, 0 },
853 	{ "audio-clk-0", IMX_SC_R_AUDIO_CLK_0, 1, false, 0 },
854 	{ "audio-clk-1", IMX_SC_R_AUDIO_CLK_1, 1, false, 0 },
855-	{ "dma0-ch", IMX_SC_R_DMA_0_CH0, 16, true, 0 },
856+	{ "mclk-out-0", IMX_SC_R_MCLK_OUT_0, 1, false, 0 },
857+	{ "mclk-out-1", IMX_SC_R_MCLK_OUT_1, 1, false, 0 },
858+	{ "dma0-ch", IMX_SC_R_DMA_0_CH0, 32, true, 0 },
859 	{ "dma1-ch", IMX_SC_R_DMA_1_CH0, 16, true, 0 },
860-	{ "dma2-ch", IMX_SC_R_DMA_2_CH0, 5, true, 0 },
861+	{ "dma2-ch-0", IMX_SC_R_DMA_2_CH0, 5, true, 0 },
862+	{ "dma2-ch-1", IMX_SC_R_DMA_2_CH5, 27, true, 0 },
863+	{ "dma3-ch", IMX_SC_R_DMA_3_CH0, 32, true, 0 },
864 	{ "asrc0", IMX_SC_R_ASRC_0, 1, false, 0 },
865 	{ "asrc1", IMX_SC_R_ASRC_1, 1, false, 0 },
866 	{ "esai0", IMX_SC_R_ESAI_0, 1, false, 0 },
867+	{ "esai1", IMX_SC_R_ESAI_1, 1, false, 0 },
868 	{ "spdif0", IMX_SC_R_SPDIF_0, 1, false, 0 },
869 	{ "spdif1", IMX_SC_R_SPDIF_1, 1, false, 0 },
870 	{ "sai", IMX_SC_R_SAI_0, 3, true, 0 },
871@@ -133,11 +154,13 @@ static const struct imx_sc_pd_range imx8qxp_scu_pd_ranges[] = {
872 	/* DMA SS */
873 	{ "can", IMX_SC_R_CAN_0, 3, true, 0 },
874 	{ "ftm", IMX_SC_R_FTM_0, 2, true, 0 },
875-	{ "lpi2c", IMX_SC_R_I2C_0, 4, true, 0 },
876+	{ "lpi2c", IMX_SC_R_I2C_0, 5, true, 0 },
877 	{ "adc", IMX_SC_R_ADC_0, 1, true, 0 },
878 	{ "lcd", IMX_SC_R_LCD_0, 1, true, 0 },
879+	{ "lcd-pll", IMX_SC_R_ELCDIF_PLL, 1, true, 0 },
880 	{ "lcd0-pwm", IMX_SC_R_LCD_0_PWM_0, 1, true, 0 },
881-	{ "lpuart", IMX_SC_R_UART_0, 4, true, 0 },
882+	{ "lpuart", IMX_SC_R_UART_0, 5, true, 0 },
883+	{ "sim", IMX_SC_R_EMVSIM_0, 2, true, 0 },
884 	{ "lpspi", IMX_SC_R_SPI_0, 4, true, 0 },
885 	{ "irqstr_dsp", IMX_SC_R_IRQSTR_DSP, 1, false, 0 },
886
887@@ -146,13 +169,22 @@ static const struct imx_sc_pd_range imx8qxp_scu_pd_ranges[] = {
888 	{ "vpu-pid", IMX_SC_R_VPU_PID0, 8, true, 0 },
889 	{ "vpu-dec0", IMX_SC_R_VPU_DEC_0, 1, false, 0 },
890 	{ "vpu-enc0", IMX_SC_R_VPU_ENC_0, 1, false, 0 },
891+	{ "vpu-enc1", IMX_SC_R_VPU_ENC_1, 1, false, 0 },
892+	{ "vpu-mu0", IMX_SC_R_VPU_MU_0, 1, false, 0 },
893+	{ "vpu-mu1", IMX_SC_R_VPU_MU_1, 1, false, 0 },
894+	{ "vpu-mu2", IMX_SC_R_VPU_MU_2, 1, false, 0 },
895
896 	/* GPU SS */
897 	{ "gpu0-pid", IMX_SC_R_GPU_0_PID0, 4, true, 0 },
898+	{ "gpu1-pid", IMX_SC_R_GPU_1_PID0, 4, true, 0 },
899+
900
901 	/* HSIO SS */
902+	{ "pcie-a", IMX_SC_R_PCIE_A, 1, false, 0 },
903+	{ "serdes-0", IMX_SC_R_SERDES_0, 1, false, 0 },
904 	{ "pcie-b", IMX_SC_R_PCIE_B, 1, false, 0 },
905 	{ "serdes-1", IMX_SC_R_SERDES_1, 1, false, 0 },
906+	{ "sata-0", IMX_SC_R_SATA_0, 1, false, 0 },
907 	{ "hsio-gpio", IMX_SC_R_HSIO_GPIO, 1, false, 0 },
908
909 	/* MIPI SS */
910@@ -160,12 +192,27 @@ static const struct imx_sc_pd_range imx8qxp_scu_pd_ranges[] = {
911 	{ "mipi0-pwm0", IMX_SC_R_MIPI_0_PWM_0, 1, false, 0 },
912 	{ "mipi0-i2c", IMX_SC_R_MIPI_0_I2C_0, 2, true, 0 },
913
914+	{ "mipi1", IMX_SC_R_MIPI_1, 1, 0 },
915+	{ "mipi1-pwm0", IMX_SC_R_MIPI_1_PWM_0, 1, 0 },
916+	{ "mipi1-i2c", IMX_SC_R_MIPI_1_I2C_0, 2, 1 },
917+
918 	/* LVDS SS */
919 	{ "lvds0", IMX_SC_R_LVDS_0, 1, false, 0 },
920+	{ "lvds0-i2c0", IMX_SC_R_LVDS_0_I2C_0, 1, false, 0 },
921+	{ "lvds0-pwm0", IMX_SC_R_LVDS_0_PWM_0, 1, false, 0 },
922+
923+	{ "lvds1", IMX_SC_R_LVDS_1, 1, false, 0 },
924+	{ "lvds1-i2c0", IMX_SC_R_LVDS_1_I2C_0, 1, false, 0 },
925+	{ "lvds1-pwm0", IMX_SC_R_LVDS_1_PWM_0, 1, false, 0 },
926
927 	/* DC SS */
928 	{ "dc0", IMX_SC_R_DC_0, 1, false, 0 },
929 	{ "dc0-pll", IMX_SC_R_DC_0_PLL_0, 2, true, 0 },
930+	{ "dc0-video", IMX_SC_R_DC_0_VIDEO0, 2, true, 0 },
931+
932+	{ "dc1", IMX_SC_R_DC_1, 1, false, 0 },
933+	{ "dc1-pll", IMX_SC_R_DC_1_PLL_0, 2, true, 0 },
934+	{ "dc1-video", IMX_SC_R_DC_1_VIDEO0, 2, true, 0 },
935
936 	/* CM40 SS */
937 	{ "cm40-i2c", IMX_SC_R_M4_0_I2C, 1, false, 0 },
938@@ -180,6 +227,53 @@ static const struct imx_sc_pd_range imx8qxp_scu_pd_ranges[] = {
939 	{ "cm41-pid", IMX_SC_R_M4_1_PID0, 5, true, 0},
940 	{ "cm41-mu-a1", IMX_SC_R_M4_1_MU_1A, 1, false, 0},
941 	{ "cm41-lpuart", IMX_SC_R_M4_1_UART, 1, false, 0},
942+
943+	/* SECO SS */
944+	{ "seco_mu", IMX_SC_R_SECO_MU_2, 3, true, 2},
945+
946+	/* V2X SS */
947+	{ "v2x_mu", IMX_SC_R_V2X_MU_0, 2, true, 0},
948+	{ "v2x_mu", IMX_SC_R_V2X_MU_2, 1, true, 2},
949+	{ "v2x_mu", IMX_SC_R_V2X_MU_3, 2, true, 3},
950+
951+	/* DB SS */
952+	{ "perf", IMX_SC_R_PERF, 1, false, 0},
953+
954+	/* IMAGE SS */
955+	{ "img-pdma", IMX_SC_R_ISI_CH0, 8, true, 0 },
956+	{ "img-csi0", IMX_SC_R_CSI_0, 1, false, 0 },
957+	{ "img-csi0-i2c0", IMX_SC_R_CSI_0_I2C_0, 1, false, 0 },
958+	{ "img-csi0-pwm0", IMX_SC_R_CSI_0_PWM_0, 1, false, 0 },
959+	{ "img-csi1", IMX_SC_R_CSI_1, 1, false, 0 },
960+	{ "img-csi1-i2c0", IMX_SC_R_CSI_1_I2C_0, 1, false, 0 },
961+	{ "img-csi1-pwm0", IMX_SC_R_CSI_1_PWM_0, 1, false, 0 },
962+	{ "img-parallel", IMX_SC_R_PI_0, 1, false, 0 },
963+	{ "img-parallel-i2c0", IMX_SC_R_PI_0_I2C_0, 1, false, 0 },
964+	{ "img-parallel-pwm0", IMX_SC_R_PI_0_PWM_0, 2, true, 0 },
965+	{ "img-parallel-pll", IMX_SC_R_PI_0_PLL, 1, false, 0 },
966+	{ "img-jpegdec-mp", IMX_SC_R_MJPEG_DEC_MP, 1, false, 0 },
967+	{ "img-jpegdec-s0", IMX_SC_R_MJPEG_DEC_S0, 4, true, 0 },
968+	{ "img-jpegenc-mp", IMX_SC_R_MJPEG_ENC_MP, 1, false, 0 },
969+	{ "img-jpegenc-s0", IMX_SC_R_MJPEG_ENC_S0, 4, true, 0 },
970+
971+	/* HDMI TX SS */
972+	{ "hdmi-tx", IMX_SC_R_HDMI, 1, false, 0},
973+	{ "hdmi-tx-i2s", IMX_SC_R_HDMI_I2S, 1, false, 0},
974+	{ "hdmi-tx-i2c0", IMX_SC_R_HDMI_I2C_0, 1, false, 0},
975+	{ "hdmi-tx-pll0", IMX_SC_R_HDMI_PLL_0, 1, false, 0},
976+	{ "hdmi-tx-pll1", IMX_SC_R_HDMI_PLL_1, 1, false, 0},
977+
978+	/* HDMI RX SS */
979+	{ "hdmi-rx", IMX_SC_R_HDMI_RX, 1, false, 0},
980+	{ "hdmi-rx-pwm", IMX_SC_R_HDMI_RX_PWM_0, 1, false, 0},
981+	{ "hdmi-rx-i2c0", IMX_SC_R_HDMI_RX_I2C_0, 1, false, 0},
982+	{ "hdmi-rx-bypass", IMX_SC_R_HDMI_RX_BYPASS, 1, false, 0},
983+
984+	/* SECURITY SS */
985+	{ "sec-jr", IMX_SC_R_CAAM_JR2, 2, true, 2},
986+
987+	/* BOARD SS */
988+	{ "board", IMX_SC_R_BOARD_R0, 8, true, 0},
989 };
990
991 static const struct imx_sc_pd_soc imx8qxp_scu_pd = {
992@@ -195,6 +289,73 @@ to_imx_sc_pd(struct generic_pm_domain *genpd)
993 	return container_of(genpd, struct imx_sc_pm_domain, pd);
994 }
995
996+static int imx_pm_domains_suspend(void)
997+{
998+	struct arm_smccc_res res;
999+	u32 offset;
1000+	int i;
1001+
1002+	for (i = 0; i < wu_num; i++) {
1003+		offset = GICD_ISENABLER + ((wu[i] + 32) / 32) * 4;
1004+		if (BIT(wu[i] % 32) & readl_relaxed(gic_dist_base + offset)) {
1005+			arm_smccc_smc(IMX_SIP_WAKEUP_SRC,
1006+				      IMX_SIP_WAKEUP_SRC_IRQSTEER,
1007+				      0, 0, 0, 0, 0, 0, &res);
1008+			return 0;
1009+		}
1010+	}
1011+
1012+	arm_smccc_smc(IMX_SIP_WAKEUP_SRC,
1013+		      IMX_SIP_WAKEUP_SRC_SCU,
1014+		      0, 0, 0, 0, 0, 0, &res);
1015+
1016+	return 0;
1017+}
1018+
1019+struct syscore_ops imx_pm_domains_syscore_ops = {
1020+	.suspend = imx_pm_domains_suspend,
1021+};
1022+
1023+static void imx_sc_pd_enable_irqsteer_wakeup(struct device_node *np)
1024+{
1025+	struct device_node *gic_node;
1026+	unsigned int i;
1027+
1028+	wu_num = of_property_count_u32_elems(np, "wakeup-irq");
1029+	if (wu_num <= 0) {
1030+		pr_warn("no irqsteer wakeup source supported!\n");
1031+		return;
1032+	}
1033+
1034+	gic_node = of_find_compatible_node(NULL, NULL, "arm,gic-v3");
1035+	WARN_ON(!gic_node);
1036+
1037+	gic_dist_base = of_iomap(gic_node, 0);
1038+	WARN_ON(!gic_dist_base);
1039+
1040+	for (i = 0; i < wu_num; i++)
1041+		WARN_ON(of_property_read_u32_index(np, "wakeup-irq", i, &wu[i]));
1042+
1043+	register_syscore_ops(&imx_pm_domains_syscore_ops);
1044+}
1045+
1046+static void imx_sc_pd_get_console_rsrc(void)
1047+{
1048+	struct of_phandle_args specs;
1049+	int ret;
1050+
1051+	if (!of_stdout)
1052+		return;
1053+
1054+	ret = of_parse_phandle_with_args(of_stdout, "power-domains",
1055+					 "#power-domain-cells",
1056+					 0, &specs);
1057+	if (ret)
1058+		return;
1059+
1060+	imx_con_rsrc = specs.args[0];
1061+}
1062+
1063 static int imx_sc_pd_power(struct generic_pm_domain *domain, bool power_on)
1064 {
1065 	struct imx_sc_msg_req_set_resource_power_mode msg;
1066@@ -210,7 +371,12 @@ static int imx_sc_pd_power(struct generic_pm_domain *domain, bool power_on)
1067 	hdr->size = 2;
1068
1069 	msg.resource = pd->rsrc;
1070-	msg.mode = power_on ? IMX_SC_PM_PW_MODE_ON : IMX_SC_PM_PW_MODE_LP;
1071+	msg.mode = power_on ? IMX_SC_PM_PW_MODE_ON : pd->pd.state_idx ?
1072+		   IMX_SC_PM_PW_MODE_OFF : IMX_SC_PM_PW_MODE_LP;
1073+
1074+	/* keep uart console power on for no_console_suspend */
1075+        if (imx_con_rsrc == pd->rsrc && !console_suspend_enabled && !power_on)
1076+                return 0;
1077
1078 	ret = imx_scu_call_rpc(pm_ipc_handle, &msg, true);
1079 	if (ret)
1080@@ -255,6 +421,8 @@ imx_scu_add_pm_domain(struct device *dev, int idx,
1081 		      const struct imx_sc_pd_range *pd_ranges)
1082 {
1083 	struct imx_sc_pm_domain *sc_pd;
1084+	struct genpd_power_state *states;
1085+	bool is_off = true;
1086 	int ret;
1087
1088 	if (!imx_sc_rm_is_resource_owned(pm_ipc_handle, pd_ranges->rsrc + idx))
1089@@ -264,9 +432,23 @@ imx_scu_add_pm_domain(struct device *dev, int idx,
1090 	if (!sc_pd)
1091 		return ERR_PTR(-ENOMEM);
1092
1093+	states = devm_kcalloc(dev, 2, sizeof(*states), GFP_KERNEL);
1094+	if (!states) {
1095+		devm_kfree(dev, sc_pd);
1096+		return ERR_PTR(-ENOMEM);
1097+	}
1098+
1099 	sc_pd->rsrc = pd_ranges->rsrc + idx;
1100 	sc_pd->pd.power_off = imx_sc_pd_power_off;
1101 	sc_pd->pd.power_on = imx_sc_pd_power_on;
1102+	sc_pd->pd.flags |= GENPD_FLAG_ACTIVE_WAKEUP;
1103+	states[0].power_off_latency_ns = 25000;
1104+	states[0].power_on_latency_ns =  25000;
1105+	states[1].power_off_latency_ns = 2500000;
1106+	states[1].power_on_latency_ns =  2500000;
1107+
1108+	sc_pd->pd.states = states;
1109+	sc_pd->pd.state_count = 2;
1110
1111 	if (pd_ranges->postfix)
1112 		snprintf(sc_pd->name, sizeof(sc_pd->name),
1113@@ -276,20 +458,26 @@ imx_scu_add_pm_domain(struct device *dev, int idx,
1114 			 "%s", pd_ranges->name);
1115
1116 	sc_pd->pd.name = sc_pd->name;
1117+	if (imx_con_rsrc == sc_pd->rsrc) {
1118+		sc_pd->pd.flags |= GENPD_FLAG_RPM_ALWAYS_ON;
1119+		is_off = false;
1120+	}
1121
1122 	if (sc_pd->rsrc >= IMX_SC_R_LAST) {
1123 		dev_warn(dev, "invalid pd %s rsrc id %d found",
1124 			 sc_pd->name, sc_pd->rsrc);
1125
1126 		devm_kfree(dev, sc_pd);
1127+		devm_kfree(dev, states);
1128 		return NULL;
1129 	}
1130
1131-	ret = pm_genpd_init(&sc_pd->pd, NULL, true);
1132+	ret = pm_genpd_init(&sc_pd->pd, NULL, is_off);
1133 	if (ret) {
1134 		dev_warn(dev, "failed to init pd %s rsrc id %d",
1135 			 sc_pd->name, sc_pd->rsrc);
1136 		devm_kfree(dev, sc_pd);
1137+		devm_kfree(dev, states);
1138 		return NULL;
1139 	}
1140
1141@@ -351,6 +539,9 @@ static int imx_sc_pd_probe(struct platform_device *pdev)
1142 	if (!pd_soc)
1143 		return -ENODEV;
1144
1145+	imx_sc_pd_get_console_rsrc();
1146+	imx_sc_pd_enable_irqsteer_wakeup(pdev->dev.of_node);
1147+
1148 	return imx_scu_init_pm_domains(&pdev->dev, pd_soc);
1149 }
1150
1151@@ -367,7 +558,12 @@ static struct platform_driver imx_sc_pd_driver = {
1152 	},
1153 	.probe = imx_sc_pd_probe,
1154 };
1155-builtin_platform_driver(imx_sc_pd_driver);
1156+
/*
 * Register at subsys_initcall level (instead of the device-level
 * initcall that builtin_platform_driver() would use) so the SCU power
 * domains are available before consumer drivers probe.
 */
static int __init imx_sc_pd_driver_init(void)
{
	return platform_driver_register(&imx_sc_pd_driver);
}
subsys_initcall(imx_sc_pd_driver_init);
1162
1163 MODULE_AUTHOR("Dong Aisheng <aisheng.dong@nxp.com>");
1164 MODULE_DESCRIPTION("IMX SCU Power Domain driver");
1165diff --git a/drivers/firmware/imx/seco.c b/drivers/firmware/imx/seco.c
1166new file mode 100644
1167index 000000000..18232c700
1168--- /dev/null
1169+++ b/drivers/firmware/imx/seco.c
1170@@ -0,0 +1,249 @@
1171+// SPDX-License-Identifier: GPL-2.0+
1172+/*
1173+ * Copyright 2020 NXP
1174+ *
1175+ * File containing client-side RPC functions for the SECO service. These
1176+ * function are ported to clients that communicate to the SC.
1177+ */
1178+
1179+#include <linux/firmware/imx/sci.h>
1180+
1181+struct imx_sc_msg_seco_get_build_id {
1182+	struct imx_sc_rpc_msg hdr;
1183+	u32 version;
1184+	u32 commit;
1185+} __packed __aligned(4);
1186+
1187+int imx_sc_seco_build_info(struct imx_sc_ipc *ipc, uint32_t *version,
1188+			   uint32_t *commit)
1189+{
1190+	struct imx_sc_msg_seco_get_build_id msg = {0};
1191+	struct imx_sc_rpc_msg *hdr = &msg.hdr;
1192+
1193+	hdr->ver = IMX_SC_RPC_VERSION;
1194+	hdr->svc = IMX_SC_RPC_SVC_SECO;
1195+	hdr->func = IMX_SC_SECO_FUNC_BUILD_INFO;
1196+	hdr->size = 1;
1197+
1198+	imx_scu_call_rpc(ipc, &msg, true);
1199+
1200+	if (version)
1201+		*version = msg.version;
1202+	if (commit)
1203+		*commit = msg.commit;
1204+
1205+	return 0;
1206+}
1207+EXPORT_SYMBOL(imx_sc_seco_build_info);
1208+
1209+struct imx_sc_msg_seco_sab_msg {
1210+	struct imx_sc_rpc_msg hdr;
1211+	u32 smsg_addr_hi;
1212+	u32 smsg_addr_lo;
1213+} __packed __aligned(4);
1214+
1215+int imx_sc_seco_sab_msg(struct imx_sc_ipc *ipc, u64 smsg_addr)
1216+{
1217+	struct imx_sc_msg_seco_sab_msg msg;
1218+	struct imx_sc_rpc_msg *hdr = &msg.hdr;
1219+	int ret;
1220+
1221+	hdr->ver = IMX_SC_RPC_VERSION;
1222+	hdr->svc = IMX_SC_RPC_SVC_SECO;
1223+	hdr->func = IMX_SC_SECO_FUNC_SAB_MSG;
1224+	hdr->size = 3;
1225+
1226+	msg.smsg_addr_hi = smsg_addr >> 32;
1227+	msg.smsg_addr_lo = smsg_addr;
1228+
1229+	ret = imx_scu_call_rpc(ipc, &msg, true);
1230+	return ret;
1231+}
1232+EXPORT_SYMBOL(imx_sc_seco_sab_msg);
1233+
1234+int imx_sc_seco_secvio_enable(struct imx_sc_ipc *ipc)
1235+{
1236+	struct imx_sc_rpc_msg msg;
1237+	struct imx_sc_rpc_msg *hdr = &msg;
1238+	int ret;
1239+
1240+	hdr->ver = IMX_SC_RPC_VERSION;
1241+	hdr->svc = (uint8_t)IMX_SC_RPC_SVC_SECO;
1242+	hdr->func = (uint8_t)IMX_SC_SECO_FUNC_SECVIO_ENABLE;
1243+	hdr->size = 1;
1244+
1245+	ret = imx_scu_call_rpc(ipc, &msg, true);
1246+	if (ret)
1247+		return ret;
1248+
1249+	return 0;
1250+}
1251+EXPORT_SYMBOL(imx_sc_seco_secvio_enable);
1252+
/* Request layout: five data words plus record id/access/size selectors. */
struct imx_sc_msg_req_seco_config {
	struct imx_sc_rpc_msg hdr;
	u32 data0;
	u32 data1;
	u32 data2;
	u32 data3;
	u32 data4;
	u8 id;
	u8 access;
	u8 size;
} __packed __aligned(4);

/* Response layout: the same buffer reinterpreted as five data words. */
struct imx_sc_msg_resp_seco_config {
	struct imx_sc_rpc_msg hdr;
	u32 data0;
	u32 data1;
	u32 data2;
	u32 data3;
	u32 data4;
} __packed __aligned(4);

/*
 * imx_sc_seco_secvio_config() - read or write a security-violation
 * configuration record in the SECO.
 * @ipc:    IPC handle obtained from the SCU driver
 * @id:     configuration record id
 * @access: 0 = read (results copied back to *data0..*data4),
 *          non-zero = write (*data0..*data4 sent to the SECO)
 * @data0:  first data word (required for any @size >= 1)
 * @data1..@data4: further data words, required up to @size entries
 * @size:   number of data words used (1..5)
 *
 * Returns 0 on success, -EINVAL on a NULL @ipc, an out-of-range @size
 * or a NULL data pointer within the first @size words, or the error
 * code from the RPC call.
 *
 * NOTE(review): on the read path (access == 0) the msg.data* words are
 * sent uninitialized; presumably the firmware ignores them for reads —
 * confirm against the SECO API spec before changing.
 */
int imx_sc_seco_secvio_config(struct imx_sc_ipc *ipc, u8 id, u8 access,
			      u32 *data0, u32 *data1, u32 *data2, u32 *data3,
			      u32 *data4, u8 size)
{
	struct imx_sc_msg_req_seco_config msg;
	struct imx_sc_msg_resp_seco_config *resp;
	struct imx_sc_rpc_msg *hdr = &msg.hdr;
	int ret;

	if (!ipc)
		return -EINVAL;

	hdr->ver = IMX_SC_RPC_VERSION;
	hdr->svc = (uint8_t)IMX_SC_RPC_SVC_SECO;
	hdr->func = (uint8_t)IMX_SC_SECO_FUNC_SECVIO_CONFIG;
	hdr->size = 7;

	/* Check the pointers on data are valid and set it if doing a write */
	/* Falls through from the highest used word down to data0. */
	switch (size) {
	case 5:
		if (data4) {
			if (access)
				msg.data4 = *data4;
		} else {
			return -EINVAL;
		}
		fallthrough;
	case 4:
		if (data3) {
			if (access)
				msg.data3 = *data3;
		} else {
			return -EINVAL;
		}
		fallthrough;
	case 3:
		if (data2) {
			if (access)
				msg.data2 = *data2;
		} else {
			return -EINVAL;
		}
		fallthrough;
	case 2:
		if (data1) {
			if (access)
				msg.data1 = *data1;
		} else {
			return -EINVAL;
		}
		fallthrough;
	case 1:
		if (data0) {
			if (access)
				msg.data0 = *data0;
		} else {
			return -EINVAL;
		}
		break;
	default:
		return -EINVAL;
	}

	msg.id = id;
	msg.access = access;
	msg.size = size;

	ret = imx_scu_call_rpc(ipc, &msg, true);
	if (ret)
		return ret;

	/* The RPC call rewrote the message buffer with the response. */
	resp = (struct imx_sc_msg_resp_seco_config *)&msg;

	/* Pointers already checked so we just copy the data if reading */
	if (!access)
		switch (size) {
		case 5:
			*data4 = resp->data4;
		fallthrough;
		case 4:
			*data3 = resp->data3;
		fallthrough;
		case 3:
			*data2 = resp->data2;
		fallthrough;
		case 2:
			*data1 = resp->data1;
		fallthrough;
		case 1:
			*data0 = resp->data0;
		}

	return 0;
}
EXPORT_SYMBOL(imx_sc_seco_secvio_config);
1369+
/* Request layout: one data word plus DGO register id and access mode. */
struct imx_sc_msg_req_seco_dgo_config {
	struct imx_sc_rpc_msg hdr;
	u32 data;
	u8 id;
	u8 access;
} __packed __aligned(4);

/* Response layout: the same buffer reinterpreted as one data word. */
struct imx_sc_msg_resp_seco_dgo_config {
	struct imx_sc_rpc_msg hdr;
	u32 data;
} __packed __aligned(4);

/*
 * imx_sc_seco_secvio_dgo_config() - read or write a security-violation
 * DGO (Digital Glitch Output) register in the SECO.
 * @ipc:    IPC handle obtained from the SCU driver
 * @id:     DGO register id
 * @access: 0 = read (result copied back to *data), non-zero = write
 * @data:   in/out data word; required for writes, optional for reads
 *
 * Returns 0 on success, -EINVAL on a NULL @ipc or a write with a NULL
 * @data, or the error code from the RPC call.
 *
 * NOTE(review): on the read path msg.data is sent uninitialized;
 * presumably the firmware ignores it for reads — confirm.
 */
int imx_sc_seco_secvio_dgo_config(struct imx_sc_ipc *ipc, u8 id, u8 access,
				  u32 *data)
{
	struct imx_sc_msg_req_seco_dgo_config msg;
	struct imx_sc_msg_resp_seco_dgo_config *resp;
	struct imx_sc_rpc_msg *hdr = &msg.hdr;
	int ret;

	if (!ipc)
		return -EINVAL;

	hdr->ver = IMX_SC_RPC_VERSION;
	hdr->svc = (uint8_t)IMX_SC_RPC_SVC_SECO;
	hdr->func = (uint8_t)IMX_SC_SECO_FUNC_SECVIO_DGO_CONFIG;
	hdr->size = 3;

	if (access) {
		if (data)
			msg.data = *data;
		else
			return -EINVAL;
	}

	msg.access = access;
	msg.id = id;

	ret = imx_scu_call_rpc(ipc, &msg, true);
	if (ret)
		return ret;

	/* The RPC call rewrote the message buffer with the response. */
	resp = (struct imx_sc_msg_resp_seco_dgo_config *)&msg;

	if (!access && data)
		*data = resp->data;

	return 0;
}
EXPORT_SYMBOL(imx_sc_seco_secvio_dgo_config);
1420diff --git a/drivers/firmware/imx/seco_mu.c b/drivers/firmware/imx/seco_mu.c
1421new file mode 100644
1422index 000000000..a6c8e06cc
1423--- /dev/null
1424+++ b/drivers/firmware/imx/seco_mu.c
1425@@ -0,0 +1,1212 @@
1426+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
1427+/*
1428+ * Copyright 2019-2020 NXP
1429+ */
1430+
1431+/*
1432+ * This driver allows to send messages to the SECO using a shared mailbox. The
1433+ * messages must follow the protocol defined.
1434+ */
1435+
1436+/*
1437+ * Architecture of the driver:
1438+ *
1439+ *                                     Non-Secure           +   Secure
1440+ *                                                          |
1441+ *                                                          |
1442+ *                   +---------+      +-------------+       |
1443+ *                   |seco_mu.c+<---->+imx-mailbox.c|       |
1444+ *                   |         |      |  mailbox.c  +<-->+------+    +------+
1445+ *                   +---+-----+      +-------------+    | MU X +<-->+ SECO |
1446+ *                       |                               +------+    +------+
1447+ *                       +----------------+                 |
1448+ *                       |                |                 |
1449+ *                       v                v                 |
1450+ *                   logical           logical              |
1451+ *                   receiver          waiter               |
1452+ *                      +                 +                 |
1453+ *                      |                 |                 |
1454+ *                      |                 |                 |
1455+ *                      |            +----+------+          |
1456+ *                      |            |           |          |
1457+ *                      |            |           |          |
1458+ *               device_ctx     device_ctx     device_ctx   |
1459+ *                                                          |
1460+ *                 User 0        User 1       User Y        |
1461+ *                 +------+      +------+     +------+      |
1462+ *                 |misc.c|      |misc.c|     |misc.c|      |
1463+ * kernel space    +------+      +------+     +------+      |
1464+ *                                                          |
1465+ *  +------------------------------------------------------ |
1466+ *                     |             |           |          |
1467+ * userspace     /dev/seco_muXch0    |           |          |
1468+ *                          /dev/seco_muXch1     |          |
1469+ *                                        /dev/seco_muXchY  |
1470+ *                                                          |
1471+ *
1472+ * When a user sends a command to the seco, it registers its device_ctx as
1473+ * waiter of a response from SECO
1474+ *
1475+ * A user can be registered as receiver of command by the SECO.
1476+ *
1477+ * When a message is received, the driver select the device_ctx receiving the
1478+ * message depending on the tag in the message. It selects the device_ctx
1479+ * accordingly.
1480+ */
1481+
1482+#include <linux/dma-mapping.h>
1483+#include <linux/interrupt.h>
1484+#include <linux/miscdevice.h>
1485+#include <linux/mm.h>
1486+#include <linux/module.h>
1487+#include <linux/of_address.h>
1488+#include <linux/of_device.h>
1489+#include <linux/of_irq.h>
1490+#include <linux/uaccess.h>
1491+#include <linux/firmware/imx/sci.h>
1492+#include <dt-bindings/firmware/imx/rsrc.h>
1493+#include <linux/firmware/imx/seco_mu_ioctl.h>
1494+#include <linux/mailbox_client.h>
1495+
1496+#define MAX_RECV_SIZE 31
1497+#define MAX_RECV_SIZE_BYTES (MAX_RECV_SIZE * sizeof(u32))
1498+#define MAX_MESSAGE_SIZE 31
1499+#define MAX_MESSAGE_SIZE_BYTES (MAX_MESSAGE_SIZE * sizeof(u32))
1500+#define MESSAGE_SIZE(hdr) (((struct she_mu_hdr *)(&(hdr)))->size)
1501+#define MESSAGE_TAG(hdr) (((struct she_mu_hdr *)(&(hdr)))->tag)
1502+
1503+#define DEFAULT_MESSAGING_TAG_COMMAND           (0x17u)
1504+#define DEFAULT_MESSAGING_TAG_RESPONSE          (0xe1u)
1505+
1506+#define SECURE_RAM_BASE_ADDRESS	(0x31800000ULL)
1507+#define SECURE_RAM_BASE_ADDRESS_SCU	(0x20800000u)
1508+#define SECURE_RAM_SIZE	(0x10000ULL)
1509+
1510+#define SECO_MU_DEFAULT_MAX_USERS 4
1511+
1512+#define SECO_MU_INTERRUPT_INDEX	(0u)
1513+#define SECO_DEFAULT_MU_INDEX	(1u)
1514+#define SECO_DEFAULT_TZ		(0u)
1515+#define DEFAULT_DID		(0u)
1516+
1517+#define MAX_DATA_SIZE_PER_USER  (65 * 1024)
1518+
1519+#define SC_IRQ_V2X_RESET (1<<7)
1520+
1521+/* Header of the messages exchange with the SECO */
1522+struct she_mu_hdr {
1523+	u8 ver;
1524+	u8 size;
1525+	u8 command;
1526+	u8 tag;
1527+}  __packed;
1528+
1529+/* Status of a char device */
1530+enum mu_device_status_t {
1531+	MU_FREE,
1532+	MU_OPENED
1533+};
1534+
/* A memory window used to exchange data buffers with the SECO. */
struct seco_shared_mem {
	dma_addr_t dma_addr;	/* bus address (secure mem: base offset) */
	u32 size;		/* total size of the window in bytes */
	u32 pos;		/* current allocation position in the window */
	u8 *ptr;		/* CPU-side mapping of the window */
};

/* Descriptor of an output buffer to copy back to user space on read. */
struct seco_out_buffer_desc {
	u8 *out_ptr;		/* kernel address holding the output data */
	u8 *out_usr_ptr;	/* user address to copy the data back to */
	u32 out_size;		/* number of bytes to copy */
	struct list_head link;	/* entry in seco_mu_device_ctx.pending_out */
};
1548+
/* Private struct for each char device instance. */
struct seco_mu_device_ctx {
	struct device *dev;		/* device owning this context */
	struct seco_mu_priv *mu_priv;	/* shared per-MU driver state */
	struct miscdevice miscdev;	/* the /dev/seco_muXchY node */

	enum mu_device_status_t status;	/* MU_FREE until opened */
	wait_queue_head_t wq;		/* readers block here for a message */
	struct semaphore fops_lock;	/* serializes open/close/read/write */

	u32 pending_hdr;		/* header of received msg; 0 = none */
	struct list_head pending_out;	/* seco_out_buffer_desc to copy back */

	struct seco_shared_mem secure_mem;	/* secure RAM window (ioremap) */
	struct seco_shared_mem non_secure_mem;	/* DMA-coherent buffer */

	u32 temp_cmd[MAX_MESSAGE_SIZE];	/* staging area for outgoing msg */
	u32 temp_resp[MAX_RECV_SIZE];	/* staging area for received msg */
	u32 temp_resp_size;		/* received size, in 32-bit words */
	struct notifier_block scu_notify;	/* SCU IRQ notifier (V2X reset) */
	bool v2x_reset;			/* set when a V2X reset occurred */
};
1571+
/* Private struct for seco MU driver. */
struct seco_mu_priv {
	struct seco_mu_device_ctx *cmd_receiver_dev;	/* gets SECO commands */
	struct seco_mu_device_ctx *waiting_rsp_dev;	/* awaits a response */
	/*
	 * prevent parallel access to the MU registers
	 * e.g. a user trying to send a command while the other one is
	 * sending a response.
	 */
	struct mutex mu_lock;
	/*
	 * prevent a command to be sent on the MU while another one is still
	 * processing. (response to a command is allowed)
	 */
	struct mutex mu_cmd_lock;
	struct device *dev;	/* platform device backing the MU */
	u32 seco_mu_id;		/* index of the MU used to talk to SECO */
	u8 cmd_tag;		/* tag marking outgoing commands */
	u8 rsp_tag;		/* tag marking responses */

	struct mbox_client cl;		/* mailbox client for the MU */
	struct mbox_chan *tx_chan;	/* transmit channel */
	struct mbox_chan *rx_chan;	/* receive channel */

	struct imx_sc_ipc *ipc_scu;	/* SCU IPC handle for RM calls */
	u8 seco_part_owner;		/* partition id owning the SECO */
};
1599+
1600+/* macro to log operation of a misc device */
1601+#define miscdev_dbg(p_miscdev, fmt, va_args...)                                \
1602+	({                                                                     \
1603+		struct miscdevice *_p_miscdev = p_miscdev;                     \
1604+		dev_dbg((_p_miscdev)->parent, "%s: " fmt, (_p_miscdev)->name,  \
1605+		##va_args);                                                    \
1606+	})
1607+
1608+#define miscdev_info(p_miscdev, fmt, va_args...)                               \
1609+	({                                                                     \
1610+		struct miscdevice *_p_miscdev = p_miscdev;                     \
1611+		dev_info((_p_miscdev)->parent, "%s: " fmt, (_p_miscdev)->name, \
1612+		##va_args);                                                    \
1613+	})
1614+
1615+#define miscdev_err(p_miscdev, fmt, va_args...)                                \
1616+	({                                                                     \
1617+		struct miscdevice *_p_miscdev = p_miscdev;                     \
1618+		dev_err((_p_miscdev)->parent, "%s: " fmt, (_p_miscdev)->name,  \
1619+		##va_args);                                                    \
1620+	})
1621+
1622+/* macro to log operation of a device context */
1623+#define devctx_dbg(p_devctx, fmt, va_args...) \
1624+	miscdev_dbg(&((p_devctx)->miscdev), fmt, ##va_args)
1625+#define devctx_info(p_devctx, fmt, va_args...) \
1626+	miscdev_info(&((p_devctx)->miscdev), fmt, ##va_args)
1627+#define devctx_err(p_devctx, fmt, va_args...) \
1628+	miscdev_err((&(p_devctx)->miscdev), fmt, ##va_args)
1629+
1630+#define IMX_SC_RM_PERM_FULL         7U	/* Full access */
1631+
1632+/* Give access to SECU to the memory we want to share */
1633+static int seco_mu_setup_seco_memory_access(struct seco_mu_device_ctx *dev_ctx,
1634+					    u64 addr, u32 len)
1635+{
1636+	struct seco_mu_priv *priv = dev_get_drvdata(dev_ctx->dev);
1637+	int ret;
1638+	u8 mr;
1639+
1640+	ret = imx_sc_rm_find_memreg(priv->ipc_scu, &mr, addr, addr + len);
1641+	if (ret) {
1642+		devctx_err(dev_ctx, "Fail find memreg\n");
1643+		goto exit;
1644+	}
1645+
1646+	ret = imx_sc_rm_set_memreg_permissions(priv->ipc_scu, mr,
1647+					       priv->seco_part_owner,
1648+					       IMX_SC_RM_PERM_FULL);
1649+	if (ret) {
1650+		devctx_err(dev_ctx, "Fail set permission for resource\n");
1651+		goto exit;
1652+	}
1653+
1654+exit:
1655+	return ret;
1656+}
1657+
1658+/*
1659+ * File operations for user-space
1660+ */
1661+/* Open a char device. */
1662+static int seco_mu_fops_open(struct inode *nd, struct file *fp)
1663+{
1664+	struct seco_mu_device_ctx *dev_ctx = container_of(fp->private_data,
1665+					struct seco_mu_device_ctx, miscdev);
1666+	int err;
1667+
1668+	/* Avoid race if opened at the same time */
1669+	if (down_trylock(&dev_ctx->fops_lock))
1670+		return -EBUSY;
1671+
1672+	/* Authorize only 1 instance. */
1673+	if (dev_ctx->status != MU_FREE) {
1674+		err = -EBUSY;
1675+		goto exit;
1676+	}
1677+
1678+	/*
1679+	 * Allocate some memory for data exchanges with SECO.
1680+	 * This will be used for data not requiring secure memory.
1681+	 */
1682+	dev_ctx->non_secure_mem.ptr = dmam_alloc_coherent(dev_ctx->dev,
1683+					MAX_DATA_SIZE_PER_USER,
1684+					&dev_ctx->non_secure_mem.dma_addr,
1685+					GFP_KERNEL);
1686+	if (!dev_ctx->non_secure_mem.ptr) {
1687+		err = -ENOMEM;
1688+		devctx_err(dev_ctx, "Failed to map shared memory with SECO\n");
1689+		goto exit;
1690+	}
1691+
1692+	err = seco_mu_setup_seco_memory_access(dev_ctx,
1693+					       dev_ctx->non_secure_mem.dma_addr,
1694+					       MAX_DATA_SIZE_PER_USER);
1695+	if (err) {
1696+		err = -EPERM;
1697+		devctx_err(dev_ctx,
1698+			   "Failed to share access to shared memory\n");
1699+		goto free_coherent;
1700+	}
1701+
1702+	dev_ctx->non_secure_mem.size = MAX_DATA_SIZE_PER_USER;
1703+	dev_ctx->non_secure_mem.pos = 0;
1704+	dev_ctx->status = MU_OPENED;
1705+
1706+	dev_ctx->pending_hdr = 0;
1707+	dev_ctx->v2x_reset = 0;
1708+
1709+	goto exit;
1710+
1711+free_coherent:
1712+	dmam_free_coherent(dev_ctx->mu_priv->dev, MAX_DATA_SIZE_PER_USER,
1713+			   dev_ctx->non_secure_mem.ptr,
1714+			   dev_ctx->non_secure_mem.dma_addr);
1715+
1716+exit:
1717+	up(&dev_ctx->fops_lock);
1718+	return err;
1719+}
1720+
1721+/* Close a char device. */
1722+static int seco_mu_fops_close(struct inode *nd, struct file *fp)
1723+{
1724+	struct seco_mu_device_ctx *dev_ctx = container_of(fp->private_data,
1725+					struct seco_mu_device_ctx, miscdev);
1726+	struct seco_mu_priv *mu_priv = dev_ctx->mu_priv;
1727+	struct seco_out_buffer_desc *out_buf_desc;
1728+
1729+	/* Avoid race if closed at the same time */
1730+	if (down_trylock(&dev_ctx->fops_lock))
1731+		return -EBUSY;
1732+
1733+	/* The device context has not been opened */
1734+	if (dev_ctx->status != MU_OPENED)
1735+		goto exit;
1736+
1737+	/* check if this device was registered as command receiver. */
1738+	if (mu_priv->cmd_receiver_dev == dev_ctx)
1739+		mu_priv->cmd_receiver_dev = NULL;
1740+
1741+	/* check if this device was registered as waiting response. */
1742+	if (mu_priv->waiting_rsp_dev == dev_ctx) {
1743+		mu_priv->waiting_rsp_dev = NULL;
1744+		mutex_unlock(&mu_priv->mu_cmd_lock);
1745+	}
1746+
1747+	/* Unmap secure memory shared buffer. */
1748+	if (dev_ctx->secure_mem.ptr)
1749+		devm_iounmap(dev_ctx->dev, dev_ctx->secure_mem.ptr);
1750+
1751+	dev_ctx->secure_mem.ptr = NULL;
1752+	dev_ctx->secure_mem.dma_addr = 0;
1753+	dev_ctx->secure_mem.size = 0;
1754+	dev_ctx->secure_mem.pos = 0;
1755+
1756+	/* Free non-secure shared buffer. */
1757+	dmam_free_coherent(dev_ctx->mu_priv->dev, MAX_DATA_SIZE_PER_USER,
1758+			   dev_ctx->non_secure_mem.ptr,
1759+			   dev_ctx->non_secure_mem.dma_addr);
1760+
1761+	dev_ctx->non_secure_mem.ptr = NULL;
1762+	dev_ctx->non_secure_mem.dma_addr = 0;
1763+	dev_ctx->non_secure_mem.size = 0;
1764+	dev_ctx->non_secure_mem.pos = 0;
1765+
1766+	while (!list_empty(&dev_ctx->pending_out)) {
1767+		out_buf_desc = list_first_entry_or_null(&dev_ctx->pending_out,
1768+						struct seco_out_buffer_desc,
1769+						link);
1770+		__list_del_entry(&out_buf_desc->link);
1771+		devm_kfree(dev_ctx->dev, out_buf_desc);
1772+	}
1773+
1774+	dev_ctx->status = MU_FREE;
1775+
1776+exit:
1777+	up(&dev_ctx->fops_lock);
1778+	return 0;
1779+}
1780+
1781+/* Write a message to the MU. */
1782+static ssize_t seco_mu_fops_write(struct file *fp, const char __user *buf,
1783+				  size_t size, loff_t *ppos)
1784+{
1785+	struct seco_mu_device_ctx *dev_ctx = container_of(fp->private_data,
1786+					struct seco_mu_device_ctx, miscdev);
1787+	struct seco_mu_priv *mu_priv = dev_ctx->mu_priv;
1788+	u32 nb_words = 0, header;
1789+	int err;
1790+
1791+	devctx_dbg(dev_ctx, "write from buf (%p)%ld, ppos=%lld\n", buf, size,
1792+		   ((ppos) ? *ppos : 0));
1793+
1794+	if (down_interruptible(&dev_ctx->fops_lock))
1795+		return -EBUSY;
1796+
1797+	if (dev_ctx->status != MU_OPENED) {
1798+		err = -EINVAL;
1799+		goto exit;
1800+	}
1801+
1802+	if (size < sizeof(struct she_mu_hdr)) {
1803+		devctx_err(dev_ctx, "User buffer too small(%ld < %lu)\n", size,
1804+			   sizeof(struct she_mu_hdr));
1805+		err = -ENOSPC;
1806+		goto exit;
1807+	}
1808+
1809+	if (size > MAX_MESSAGE_SIZE_BYTES) {
1810+		devctx_err(dev_ctx, "User buffer too big(%ld > %lu)\n", size,
1811+			   MAX_MESSAGE_SIZE_BYTES);
1812+		err = -ENOSPC;
1813+		goto exit;
1814+	}
1815+
1816+	/* Copy data to buffer */
1817+	err = (int)copy_from_user(dev_ctx->temp_cmd, buf, size);
1818+	if (err) {
1819+		err = -EFAULT;
1820+		devctx_err(dev_ctx, "Fail copy message from user\n");
1821+		goto exit;
1822+	}
1823+
1824+	print_hex_dump_debug("from user ", DUMP_PREFIX_OFFSET, 4, 4,
1825+			     dev_ctx->temp_cmd, size, false);
1826+
1827+	header = dev_ctx->temp_cmd[0];
1828+
1829+	/* Check the message is valid according to tags */
1830+	if (MESSAGE_TAG(header) == mu_priv->cmd_tag) {
1831+		/*
1832+		 * unlocked in seco_mu_receive_work_handler when the
1833+		 * response to this command is received.
1834+		 */
1835+		mutex_lock(&mu_priv->mu_cmd_lock);
1836+		mu_priv->waiting_rsp_dev = dev_ctx;
1837+	} else if (MESSAGE_TAG(header) == mu_priv->rsp_tag) {
1838+		/* Check the device context can send the command */
1839+		if (dev_ctx != mu_priv->cmd_receiver_dev) {
1840+			devctx_err(dev_ctx,
1841+				   "This channel is not configured to send response to SECO\n");
1842+			err = -EPERM;
1843+			goto exit;
1844+		}
1845+	} else {
1846+		devctx_err(dev_ctx, "The message does not have a valid TAG\n");
1847+		err = -EINVAL;
1848+		goto exit;
1849+	}
1850+
1851+	/*
1852+	 * Check that the size passed as argument matches the size
1853+	 * carried in the message.
1854+	 */
1855+	nb_words = MESSAGE_SIZE(header);
1856+	if (nb_words * sizeof(u32) != size) {
1857+		devctx_err(dev_ctx, "User buffer too small\n");
1858+		goto exit;
1859+	}
1860+
1861+	mutex_lock(&mu_priv->mu_lock);
1862+
1863+	/* Send message */
1864+	devctx_dbg(dev_ctx, "sending message\n");
1865+	err = mbox_send_message(mu_priv->tx_chan, dev_ctx->temp_cmd);
1866+	if (err < 0) {
1867+		devctx_err(dev_ctx, "Failed to send message\n");
1868+		goto unlock;
1869+	}
1870+
1871+	err = nb_words * (u32)sizeof(u32);
1872+
1873+unlock:
1874+	mutex_unlock(&mu_priv->mu_lock);
1875+
1876+exit:
1877+	up(&dev_ctx->fops_lock);
1878+	return err;
1879+}
1880+
/*
 * Read a message from the MU.
 * Blocking until a message is available.
 *
 * Sleeps until the receive path publishes a message (pending_hdr set
 * to a non-zero header — presumably by the mailbox receive handler,
 * not visible here; confirm). Any pending output buffers queued for
 * this context are copied back to user space first, then the response
 * itself. If the user buffer is smaller than the message, the copy is
 * silently truncated and the truncated size is returned.
 */
static ssize_t seco_mu_fops_read(struct file *fp, char __user *buf,
				 size_t size, loff_t *ppos)
{
	struct seco_mu_device_ctx *dev_ctx = container_of(fp->private_data,
					struct seco_mu_device_ctx, miscdev);
	u32 data_size = 0, size_to_copy = 0;
	struct seco_out_buffer_desc *b_desc;
	int err;

	devctx_dbg(dev_ctx, "read to buf %p(%ld), ppos=%lld\n", buf, size,
		   ((ppos) ? *ppos : 0));

	if (down_interruptible(&dev_ctx->fops_lock))
		return -EBUSY;

	if (dev_ctx->status != MU_OPENED) {
		err = -EINVAL;
		goto exit;
	}

	/* Abort early if a V2X reset was signalled before we slept. */
	if (dev_ctx->v2x_reset) {
		err = -EINVAL;
		goto exit;
	}

	/* Wait until the complete message is received on the MU. */
	err = wait_event_interruptible(dev_ctx->wq, dev_ctx->pending_hdr != 0);
	if (err) {
		devctx_err(dev_ctx, "Interrupted by signal\n");
		goto exit;
	}

	/* A V2X reset may also have arrived while we were sleeping. */
	if (dev_ctx->v2x_reset) {
		err = -EINVAL;
		dev_ctx->v2x_reset = 0;
		goto exit;
	}

	devctx_dbg(dev_ctx, "%s %s\n", __func__,
		   "message received, start transmit to user");

	/* Check that the size passed as argument is larger than
	 * the one carried in the message.
	 */
	data_size = dev_ctx->temp_resp_size * sizeof(u32);
	size_to_copy = data_size;
	if (size_to_copy > size) {
		devctx_dbg(dev_ctx, "User buffer too small (%ld < %d)\n",
			   size, size_to_copy);
		size_to_copy = size;
	}

	/* We may need to copy the output data to user before
	 * delivering the completion message.
	 */
	while (!list_empty(&dev_ctx->pending_out)) {
		b_desc = list_first_entry_or_null(&dev_ctx->pending_out,
						  struct seco_out_buffer_desc,
						  link);
		if (b_desc->out_usr_ptr && b_desc->out_ptr) {
			devctx_dbg(dev_ctx, "Copy output data to user\n");
			err = (int)copy_to_user(b_desc->out_usr_ptr,
						b_desc->out_ptr,
						b_desc->out_size);
			if (err) {
				devctx_err(dev_ctx,
					   "Failed to copy output data to user\n");
				err = -EFAULT;
				goto exit;
			}
		}
		__list_del_entry(&b_desc->link);
		devm_kfree(dev_ctx->dev, b_desc);
	}

	/* Copy data from the buffer */
	print_hex_dump_debug("to user ", DUMP_PREFIX_OFFSET, 4, 4,
			     dev_ctx->temp_resp, size_to_copy, false);
	err = (int)copy_to_user(buf, dev_ctx->temp_resp, size_to_copy);
	if (err) {
		devctx_err(dev_ctx, "Failed to copy to user\n");
		err = -EFAULT;
		goto exit;
	}

	err = size_to_copy;

	/* free memory allocated on the shared buffers. */
	dev_ctx->secure_mem.pos = 0;
	dev_ctx->non_secure_mem.pos = 0;

	/* Consume the message so the next read blocks again. */
	dev_ctx->pending_hdr = 0;

exit:
	up(&dev_ctx->fops_lock);
	return err;
}
1982+
1983+/* Configure the shared memory according to user config */
1984+static int
1985+seco_mu_ioctl_shared_mem_cfg_handler(struct seco_mu_device_ctx *dev_ctx,
1986+				     unsigned long arg)
1987+{
1988+	struct seco_mu_ioctl_shared_mem_cfg cfg;
1989+	int err = -EINVAL;
1990+	u64 high_boundary;
1991+
1992+	/* Check if not already configured. */
1993+	if (dev_ctx->secure_mem.dma_addr != 0u) {
1994+		devctx_err(dev_ctx, "Shared memory not configured\n");
1995+		goto exit;
1996+	}
1997+
1998+	err = (int)copy_from_user(&cfg, (u8 *)arg,
1999+		sizeof(cfg));
2000+	if (err) {
2001+		devctx_err(dev_ctx, "Fail copy shared memory config to user\n");
2002+		err = -EFAULT;
2003+		goto exit;
2004+	}
2005+
2006+	devctx_dbg(dev_ctx, "cfg offset: %u(%d)\n", cfg.base_offset, cfg.size);
2007+
2008+	high_boundary = cfg.base_offset;
2009+	if (high_boundary > SECURE_RAM_SIZE) {
2010+		devctx_err(dev_ctx, "base offset is over secure memory\n");
2011+		err = -ENOMEM;
2012+		goto exit;
2013+	}
2014+
2015+	high_boundary += cfg.size;
2016+	if (high_boundary > SECURE_RAM_SIZE) {
2017+		devctx_err(dev_ctx, "total memory is over secure memory\n");
2018+		err = -ENOMEM;
2019+		goto exit;
2020+	}
2021+
2022+	dev_ctx->secure_mem.dma_addr = (dma_addr_t)cfg.base_offset;
2023+	dev_ctx->secure_mem.size = cfg.size;
2024+	dev_ctx->secure_mem.pos = 0;
2025+	dev_ctx->secure_mem.ptr = devm_ioremap(dev_ctx->dev,
2026+					(phys_addr_t)(SECURE_RAM_BASE_ADDRESS +
2027+					(u64)dev_ctx->secure_mem.dma_addr),
2028+					dev_ctx->secure_mem.size);
2029+	if (!dev_ctx->secure_mem.ptr) {
2030+		devctx_err(dev_ctx, "Failed to map secure memory\n");
2031+		err = -ENOMEM;
2032+		goto exit;
2033+	}
2034+
2035+exit:
2036+	return err;
2037+}
2038+
2039+/*
2040+ * Copy a buffer of data to/from the user and return the address to use in
2041+ * messages
2042+ */
2043+static int seco_mu_ioctl_setup_iobuf_handler(struct seco_mu_device_ctx *dev_ctx,
2044+					     unsigned long arg)
2045+{
2046+	struct seco_out_buffer_desc *out_buf_desc;
2047+	struct seco_mu_ioctl_setup_iobuf io;
2048+	struct seco_shared_mem *shared_mem;
2049+	int err = -EINVAL;
2050+	u32 pos;
2051+
2052+	err = (int)copy_from_user(&io,
2053+		(u8 *)arg,
2054+		sizeof(io));
2055+	if (err) {
2056+		devctx_err(dev_ctx, "Failed copy iobuf config from user\n");
2057+		err = -EFAULT;
2058+		goto exit;
2059+	}
2060+
2061+	devctx_dbg(dev_ctx, "io [buf: %p(%d) flag: %x]\n",
2062+		   io.user_buf, io.length, io.flags);
2063+
2064+	if (io.length == 0 || !io.user_buf) {
2065+		/*
2066+		 * Accept NULL pointers since some buffers are optional
2067+		 * in SECO commands. In this case we should return 0 as
2068+		 * pointer to be embedded into the message.
2069+		 * Skip all data copy part of code below.
2070+		 */
2071+		io.seco_addr = 0;
2072+		goto copy;
2073+	}
2074+
2075+	/* Select the shared memory to be used for this buffer. */
2076+	if (io.flags & SECO_MU_IO_FLAGS_USE_SEC_MEM) {
2077+		/* App requires to use secure memory for this buffer.*/
2078+		shared_mem = &dev_ctx->secure_mem;
2079+	} else {
2080+		/* No specific requirement for this buffer. */
2081+		shared_mem = &dev_ctx->non_secure_mem;
2082+	}
2083+
2084+	/* Check there is enough space in the shared memory. */
2085+	if (io.length >= shared_mem->size - shared_mem->pos) {
2086+		devctx_err(dev_ctx, "Not enough space in shared memory\n");
2087+		err = -ENOMEM;
2088+		goto exit;
2089+	}
2090+
2091+	/* Allocate space in shared memory. 8 bytes aligned. */
2092+	pos = shared_mem->pos;
2093+	shared_mem->pos += round_up(io.length, 8u);
2094+	io.seco_addr = (u64)shared_mem->dma_addr + pos;
2095+
2096+	if ((io.flags & SECO_MU_IO_FLAGS_USE_SEC_MEM) &&
2097+	    !(io.flags & SECO_MU_IO_FLAGS_USE_SHORT_ADDR))
2098+		/*Add base address to get full address.*/
2099+		io.seco_addr += SECURE_RAM_BASE_ADDRESS_SCU;
2100+
2101+	if (io.flags & SECO_MU_IO_FLAGS_IS_INPUT) {
2102+		/*
2103+		 * buffer is input:
2104+		 * copy data from user space to this allocated buffer.
2105+		 */
2106+		err = (int)copy_from_user(shared_mem->ptr + pos, io.user_buf,
2107+					  io.length);
2108+		if (err) {
2109+			devctx_err(dev_ctx,
2110+				   "Failed copy data to shared memory\n");
2111+			err = -EFAULT;
2112+			goto exit;
2113+		}
2114+	} else {
2115+		/*
2116+		 * buffer is output:
2117+		 * add an entry in the "pending buffers" list so data
2118+		 * can be copied to user space when receiving SECO
2119+		 * response.
2120+		 */
2121+		out_buf_desc = devm_kmalloc(dev_ctx->dev, sizeof(*out_buf_desc),
2122+					    GFP_KERNEL);
2123+		if (!out_buf_desc) {
2124+			err = -ENOMEM;
2125+			devctx_err(dev_ctx,
2126+				   "Failed allocating mem for pending buffer\n"
2127+				   );
2128+			goto exit;
2129+		}
2130+
2131+		out_buf_desc->out_ptr = shared_mem->ptr + pos;
2132+		out_buf_desc->out_usr_ptr = io.user_buf;
2133+		out_buf_desc->out_size = io.length;
2134+		list_add_tail(&out_buf_desc->link, &dev_ctx->pending_out);
2135+	}
2136+
2137+copy:
2138+	/* Provide the seco address to user space only if success. */
2139+	err = (int)copy_to_user((u8 *)arg, &io,
2140+		sizeof(io));
2141+	if (err) {
2142+		devctx_err(dev_ctx, "Failed to copy iobuff setup to user\n");
2143+		err = -EFAULT;
2144+		goto exit;
2145+	}
2146+
2147+exit:
2148+	return err;
2149+}
2150+
2151+/* Retrieve info about the MU */
2152+static int seco_mu_ioctl_get_mu_info_handler(struct seco_mu_device_ctx *dev_ctx,
2153+					     unsigned long arg)
2154+{
2155+	struct seco_mu_priv *priv = dev_get_drvdata(dev_ctx->dev);
2156+	struct seco_mu_ioctl_get_mu_info info;
2157+	int err = -EINVAL;
2158+
2159+	info.seco_mu_idx = (u8)priv->seco_mu_id;
2160+	info.interrupt_idx = SECO_MU_INTERRUPT_INDEX;
2161+	info.tz = SECO_DEFAULT_TZ;
2162+
2163+	err = imx_sc_rm_get_did(priv->ipc_scu, &info.did);
2164+	if (err) {
2165+		devctx_err(dev_ctx, "Get did failed\n");
2166+		goto exit;
2167+	}
2168+
2169+	devctx_dbg(dev_ctx,
2170+		   "info [mu_idx: %d, irq_idx: %d, tz: 0x%x, did: 0x%x]\n",
2171+		   info.seco_mu_idx, info.interrupt_idx, info.tz, info.did);
2172+
2173+	err = (int)copy_to_user((u8 *)arg, &info,
2174+		sizeof(info));
2175+	if (err) {
2176+		devctx_err(dev_ctx, "Failed to copy mu info to user\n");
2177+		err = -EFAULT;
2178+		goto exit;
2179+	}
2180+
2181+exit:
2182+	return err;
2183+}
2184+
2185+static int seco_mu_ioctl_signed_msg_handler(struct seco_mu_device_ctx *dev_ctx,
2186+					    unsigned long arg)
2187+{
2188+	struct seco_shared_mem *shared_mem = &dev_ctx->non_secure_mem;
2189+	struct seco_mu_priv *priv = dev_get_drvdata(dev_ctx->dev);
2190+	struct seco_mu_ioctl_signed_message msg;
2191+	int err = -EINVAL;
2192+	u64 addr;
2193+	u32 pos;
2194+
2195+	err = (int)copy_from_user(&msg,
2196+		(u8 *)arg,
2197+		sizeof(msg));
2198+	if (err) {
2199+		devctx_err(dev_ctx, "Failed to copy from user: %d\n", err);
2200+		err = -EFAULT;
2201+		goto exit;
2202+	}
2203+
2204+	/* Check there is enough space in the shared memory. */
2205+	if (msg.msg_size >= shared_mem->size - shared_mem->pos) {
2206+		devctx_err(dev_ctx, "Not enough mem: %d left, %d required\n",
2207+			   shared_mem->size - shared_mem->pos, msg.msg_size);
2208+		err = -ENOMEM;
2209+		goto exit;
2210+	}
2211+
2212+	/* Allocate space in shared memory. 8 bytes aligned. */
2213+	pos = shared_mem->pos;
2214+
2215+	/* get physical address from the pos */
2216+	addr = (u64)shared_mem->dma_addr + pos;
2217+
2218+	/* copy signed message from user space to this allocated buffer */
2219+	err = (int)copy_from_user(shared_mem->ptr + pos, msg.message,
2220+				  msg.msg_size);
2221+	if (err) {
2222+		devctx_err(dev_ctx, "Failed to copy signed message from user: %d\n",
2223+			   err);
2224+		err = -EFAULT;
2225+		goto exit;
2226+	}
2227+
2228+	/* Send the message to SECO through SCU */
2229+	msg.error_code = imx_sc_seco_sab_msg(priv->ipc_scu, addr);
2230+
2231+	err = (int)copy_to_user((u8 *)arg, &msg,
2232+		sizeof(msg));
2233+	if (err) {
2234+		devctx_err(dev_ctx, "Failed to copy to user: %d\n", err);
2235+		err = -EFAULT;
2236+		goto exit;
2237+	}
2238+
2239+exit:
2240+	return err;
2241+}
2242+
2243+/* IOCTL entry point of a char device */
2244+static long seco_mu_ioctl(struct file *fp, unsigned int cmd, unsigned long arg)
2245+{
2246+	struct seco_mu_device_ctx *dev_ctx = container_of(fp->private_data,
2247+					struct seco_mu_device_ctx, miscdev);
2248+	struct seco_mu_priv *mu_priv = dev_ctx->mu_priv;
2249+	int err = -EINVAL;
2250+
2251+	/* Prevent race during change of device context */
2252+	if (down_interruptible(&dev_ctx->fops_lock))
2253+		return -EBUSY;
2254+
2255+	switch (cmd) {
2256+	case SECO_MU_IOCTL_ENABLE_CMD_RCV:
2257+		if (!mu_priv->cmd_receiver_dev) {
2258+			devctx_dbg(dev_ctx, "setting as receiver\n");
2259+			mu_priv->cmd_receiver_dev = dev_ctx;
2260+			err = 0;
2261+		};
2262+		break;
2263+	case SECO_MU_IOCTL_SHARED_BUF_CFG:
2264+		err = seco_mu_ioctl_shared_mem_cfg_handler(dev_ctx, arg);
2265+		break;
2266+	case SECO_MU_IOCTL_SETUP_IOBUF:
2267+		err = seco_mu_ioctl_setup_iobuf_handler(dev_ctx, arg);
2268+		break;
2269+	case SECO_MU_IOCTL_GET_MU_INFO:
2270+		err = seco_mu_ioctl_get_mu_info_handler(dev_ctx, arg);
2271+		break;
2272+	case SECO_MU_IOCTL_SIGNED_MESSAGE:
2273+		err = seco_mu_ioctl_signed_msg_handler(dev_ctx, arg);
2274+		break;
2275+	default:
2276+		err = -EINVAL;
2277+		devctx_dbg(dev_ctx, "IOCTL %.8x not supported\n", cmd);
2278+	}
2279+
2280+	up(&dev_ctx->fops_lock);
2281+	return (long)err;
2282+}
2283+
2284+/*
2285+ * Callback called by mailbox FW when data are received
2286+ */
2287+static void seco_mu_rx_callback(struct mbox_client *c, void *msg)
2288+{
2289+	struct device *dev = c->dev;
2290+	struct seco_mu_priv *priv = dev_get_drvdata(dev);
2291+	struct seco_mu_device_ctx *dev_ctx;
2292+	bool is_response = false;
2293+	int msg_size;
2294+	u32 header;
2295+
2296+	dev_dbg(dev, "Message received on mailbox\n");
2297+
2298+	/* The function can be called with NULL msg */
2299+	if (!msg) {
2300+		dev_err(dev, "Message is invalid\n");
2301+		return;
2302+	}
2303+
2304+	if (IS_ERR(msg)) {
2305+		dev_err(dev, "Error during reception of message: %ld\n",
2306+			PTR_ERR(msg));
2307+		return;
2308+	}
2309+
2310+	header = *(u32 *)msg;
2311+
2312+	dev_dbg(dev, "Selecting device\n");
2313+
2314+	/* Incoming command: wake up the receiver if any. */
2315+	if (MESSAGE_TAG(header) == priv->cmd_tag) {
2316+		dev_dbg(dev, "Selecting cmd receiver\n");
2317+		dev_ctx = priv->cmd_receiver_dev;
2318+	} else if (MESSAGE_TAG(header) == priv->rsp_tag) {
2319+		dev_dbg(dev, "Selecting rsp waiter\n");
2320+		dev_ctx = priv->waiting_rsp_dev;
2321+		is_response = true;
2322+	} else {
2323+		dev_err(dev, "Failed to select a device for message: %.8x\n",
2324+			header);
2325+		return;
2326+	}
2327+
2328+	if (!dev_ctx) {
2329+		dev_err(dev, "No device context selected for message: %.8x\n",
2330+			header);
2331+		return;
2332+	}
2333+
2334+	/* Init reception */
2335+	msg_size = MESSAGE_SIZE(header);
2336+	if (msg_size > MAX_RECV_SIZE) {
2337+		devctx_err(dev_ctx, "Message is too big (%d > %d)", msg_size,
2338+			   MAX_RECV_SIZE);
2339+		return;
2340+	}
2341+
2342+	memcpy(dev_ctx->temp_resp, msg, msg_size * sizeof(u32));
2343+	dev_ctx->temp_resp_size = msg_size;
2344+
2345+	/* Allow user to read */
2346+	dev_ctx->pending_hdr = dev_ctx->temp_resp[0];
2347+	wake_up_interruptible(&dev_ctx->wq);
2348+
2349+	if (is_response) {
2350+		/* Allow user to send new command */
2351+		mutex_unlock(&priv->mu_cmd_lock);
2352+	}
2353+}
2354+
2355+#define SECO_FW_VER_FEAT_MASK		(0x0000FFF0u)
2356+#define SECO_FW_VER_FEAT_SHIFT		(0x04u)
2357+#define SECO_FW_VER_FEAT_MIN_ALL_MU	(0x04u)
2358+
2359+/*
2360+ * Get SECO FW version and check if it supports receiving commands on all MUs
2361+ * The version is retrieved through SCU since this is the only communication
2362+ * channel to SECO always present.
2363+ */
2364+static int seco_mu_check_all_mu_supported(struct device *dev)
2365+{
2366+	struct seco_mu_priv *priv = dev_get_drvdata(dev);
2367+	u32 seco_ver;
2368+	int ret;
2369+
2370+	ret = imx_sc_seco_build_info(priv->ipc_scu, &seco_ver, NULL);
2371+	if (ret) {
2372+		dev_err(dev, "failed to retrieve SECO build info\n");
2373+		goto exit;
2374+	}
2375+
2376+	if (((seco_ver & SECO_FW_VER_FEAT_MASK) >> SECO_FW_VER_FEAT_SHIFT)
2377+		< SECO_FW_VER_FEAT_MIN_ALL_MU) {
2378+		dev_err(dev, "current SECO FW does not support MU with Linux\n");
2379+		ret = -ENOTSUPP;
2380+		goto exit;
2381+	}
2382+
2383+exit:
2384+	return ret;
2385+}
2386+
2387+/* Char driver setup */
2388+static const struct file_operations seco_mu_fops = {
2389+	.open		= seco_mu_fops_open,
2390+	.owner		= THIS_MODULE,
2391+	.read		= seco_mu_fops_read,
2392+	.release	= seco_mu_fops_close,
2393+	.write		= seco_mu_fops_write,
2394+	.unlocked_ioctl = seco_mu_ioctl,
2395+};
2396+
2397+/* interface for managed res to free a mailbox channel */
2398+static void if_mbox_free_channel(void *mbox_chan)
2399+{
2400+	mbox_free_channel(mbox_chan);
2401+}
2402+
2403+/* interface for managed res to unregister a char device */
2404+static void if_misc_deregister(void *miscdevice)
2405+{
2406+	misc_deregister(miscdevice);
2407+}
2408+
2409+static int seco_mu_request_channel(struct device *dev,
2410+				   struct mbox_chan **chan,
2411+				   const char *name)
2412+{
2413+	struct seco_mu_priv *priv = dev_get_drvdata(dev);
2414+	struct mbox_chan *t_chan;
2415+	int ret = 0;
2416+
2417+	t_chan = mbox_request_channel_byname(&priv->cl, name);
2418+	if (IS_ERR(t_chan)) {
2419+		ret = PTR_ERR(t_chan);
2420+		if (ret != -EPROBE_DEFER)
2421+			dev_err(dev,
2422+				"Failed to request chan %s ret %d\n", name,
2423+				ret);
2424+		goto exit;
2425+	}
2426+
2427+	ret = devm_add_action(dev, if_mbox_free_channel, t_chan);
2428+	if (ret) {
2429+		dev_err(dev, "failed to add devm removal of mbox %s\n", name);
2430+		goto exit;
2431+	}
2432+
2433+	*chan = t_chan;
2434+
2435+exit:
2436+	return ret;
2437+}
2438+
2439+static int imx_sc_v2x_reset_notify(struct notifier_block *nb,
2440+                                      unsigned long event, void *group)
2441+{
2442+	struct seco_mu_device_ctx *dev_ctx = container_of(nb,
2443+					struct seco_mu_device_ctx, scu_notify);
2444+
2445+	if (!(event & SC_IRQ_V2X_RESET))
2446+		return 0;
2447+
2448+	dev_ctx->v2x_reset = true;
2449+
2450+	wake_up_interruptible(&dev_ctx->wq);
2451+	return 0;
2452+}
2453+/* Driver probe.*/
2454+static int seco_mu_probe(struct platform_device *pdev)
2455+{
2456+	struct seco_mu_device_ctx *dev_ctx;
2457+	struct device *dev = &pdev->dev;
2458+	struct seco_mu_priv *priv;
2459+	struct device_node *np;
2460+	int max_nb_users = 0;
2461+	char *devname;
2462+	int ret;
2463+	int i;
2464+
2465+	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
2466+	if (!priv) {
2467+		ret = -ENOMEM;
2468+		dev_err(dev, "Fail allocate mem for private data\n");
2469+		goto exit;
2470+	}
2471+	priv->dev = dev;
2472+	dev_set_drvdata(dev, priv);
2473+
2474+	/*
2475+	 * Get the address of MU to be used for communication with the SCU
2476+	 */
2477+	np = pdev->dev.of_node;
2478+	if (!np) {
2479+		dev_err(dev, "Cannot find MU User entry in device tree\n");
2480+		ret = -ENOTSUPP;
2481+		goto exit;
2482+	}
2483+
2484+	ret = imx_scu_get_handle(&priv->ipc_scu);
2485+	if (ret) {
2486+		dev_err(dev, "Fail to retrieve IPC handle\n");
2487+		goto exit;
2488+	}
2489+
2490+	ret = imx_sc_rm_get_resource_owner(priv->ipc_scu, IMX_SC_R_SECO,
2491+					   &priv->seco_part_owner);
2492+	if (ret) {
2493+		dev_err(dev, "Fail get owner of SECO resource\n");
2494+		goto exit;
2495+	}
2496+
2497+	ret = seco_mu_check_all_mu_supported(dev);
2498+	if (ret) {
2499+		dev_err(dev, "Fail seco_mu_check_all_mu_supported\n");
2500+		goto exit;
2501+	}
2502+
2503+	/* Initialize the mutex. */
2504+	mutex_init(&priv->mu_cmd_lock);
2505+	mutex_init(&priv->mu_lock);
2506+
2507+	priv->cmd_receiver_dev = NULL;
2508+	priv->waiting_rsp_dev = NULL;
2509+
2510+	ret = of_property_read_u32(np, "fsl,seco_mu_id", &priv->seco_mu_id);
2511+	if (ret) {
2512+		dev_warn(dev, "%s: Not able to read mu_id", __func__);
2513+		priv->seco_mu_id = SECO_DEFAULT_MU_INDEX;
2514+	}
2515+
2516+	ret = of_property_read_u32(np, "fsl,seco_max_users", &max_nb_users);
2517+	if (ret) {
2518+		dev_warn(dev, "%s: Not able to read mu_max_user", __func__);
2519+		max_nb_users = SECO_MU_DEFAULT_MAX_USERS;
2520+	}
2521+
2522+	ret = of_property_read_u8(np, "fsl,cmd_tag", &priv->cmd_tag);
2523+	if (ret)
2524+		priv->cmd_tag = DEFAULT_MESSAGING_TAG_COMMAND;
2525+
2526+	ret = of_property_read_u8(np, "fsl,rsp_tag", &priv->rsp_tag);
2527+	if (ret)
2528+		priv->rsp_tag = DEFAULT_MESSAGING_TAG_RESPONSE;
2529+
2530+	/* Mailbox client configuration */
2531+	priv->cl.dev = dev;
2532+	priv->cl.knows_txdone = true;
2533+	priv->cl.rx_callback = seco_mu_rx_callback;
2534+
2535+	ret = seco_mu_request_channel(dev, &priv->tx_chan, "txdb");
2536+	if (ret) {
2537+		if (ret != -EPROBE_DEFER)
2538+			dev_err(dev, "Failed to request txdb channel\n");
2539+
2540+		goto exit;
2541+	}
2542+
2543+	ret = seco_mu_request_channel(dev, &priv->rx_chan, "rxdb");
2544+	if (ret) {
2545+		if (ret != -EPROBE_DEFER)
2546+			dev_err(dev, "Failed to request rxdb channel\n");
2547+
2548+		goto exit;
2549+	}
2550+
2551+	/* Create users */
2552+	for (i = 0; i < max_nb_users; i++) {
2553+		dev_ctx = devm_kzalloc(dev, sizeof(*dev_ctx), GFP_KERNEL);
2554+		if (!dev_ctx) {
2555+			ret = -ENOMEM;
2556+			dev_err(dev,
2557+				"Fail to allocate memory for device context\n");
2558+			goto exit;
2559+		}
2560+
2561+		dev_ctx->dev = dev;
2562+		dev_ctx->status = MU_FREE;
2563+		dev_ctx->mu_priv = priv;
2564+		/* Default value invalid for an header. */
2565+		init_waitqueue_head(&dev_ctx->wq);
2566+
2567+		INIT_LIST_HEAD(&dev_ctx->pending_out);
2568+		sema_init(&dev_ctx->fops_lock, 1);
2569+
2570+		devname = devm_kasprintf(dev, GFP_KERNEL, "seco_mu%d_ch%d",
2571+					 priv->seco_mu_id, i);
2572+		if (!devname) {
2573+			ret = -ENOMEM;
2574+			dev_err(dev,
2575+				"Fail to allocate memory for misc dev name\n");
2576+			goto exit;
2577+		}
2578+
2579+		dev_ctx->miscdev.name = devname;
2580+		dev_ctx->miscdev.minor	= MISC_DYNAMIC_MINOR;
2581+		dev_ctx->miscdev.fops = &seco_mu_fops;
2582+		dev_ctx->miscdev.parent = dev;
2583+		ret = misc_register(&dev_ctx->miscdev);
2584+		if (ret) {
2585+			dev_err(dev, "failed to register misc device %d\n",
2586+				ret);
2587+			goto exit;
2588+		}
2589+
2590+		ret = devm_add_action(dev, if_misc_deregister,
2591+				      &dev_ctx->miscdev);
2592+
2593+		dev_ctx->scu_notify.notifier_call = imx_sc_v2x_reset_notify;
2594+
2595+		ret = imx_scu_irq_register_notifier(&dev_ctx->scu_notify);
2596+		if (ret) {
2597+			dev_err(&pdev->dev, "v2x register scu notifier failed.\n");
2598+			return ret;
2599+		}
2600+
2601+		if (ret)
2602+			dev_warn(dev,
2603+				 "failed to add managed removal of miscdev\n");
2604+	}
2605+
2606+	ret = imx_scu_irq_group_enable(IMX_SC_IRQ_GROUP_WAKE,
2607+					SC_IRQ_V2X_RESET, true);
2608+	if (ret) {
2609+		dev_warn(&pdev->dev, "v2x Enable irq failed.\n");
2610+		return ret;
2611+	}
2612+
2613+exit:
2614+	return ret;
2615+}
2616+
2617+static const struct of_device_id seco_mu_match[] = {
2618+	{
2619+		.compatible = "fsl,imx-seco-mu",
2620+	},
2621+	{},
2622+};
2623+MODULE_DEVICE_TABLE(of, seco_mu_match);
2624+
2625+static struct platform_driver seco_mu_driver = {
2626+	.driver = {
2627+		.name = "seco_mu",
2628+		.of_match_table = seco_mu_match,
2629+	},
2630+	.probe       = seco_mu_probe,
2631+};
2632+
2633+module_platform_driver(seco_mu_driver);
2634+
2635+MODULE_LICENSE("GPL");
2636+MODULE_DESCRIPTION("IMX Seco MU");
2637+MODULE_AUTHOR("NXP");
2638diff --git a/drivers/firmware/imx/sentnl_base_msg.c b/drivers/firmware/imx/sentnl_base_msg.c
2639new file mode 100644
2640index 000000000..6a1033685
2641--- /dev/null
2642+++ b/drivers/firmware/imx/sentnl_base_msg.c
2643@@ -0,0 +1,141 @@
2644+// SPDX-License-Identifier: GPL-2.0+
2645+/*
2646+ * Copyright 2021 NXP
2647+ * Author: Pankaj <pankaj.gupta@nxp.com>
2648+	   Alice Guo <alice.guo@nxp.com>
2649+ */
2650+
2651+#include <linux/types.h>
2652+#include <linux/completion.h>
2653+#include <linux/mailbox_client.h>
2654+
2655+#include <linux/firmware/imx/sentnl_base_msg.h>
2656+#include <linux/firmware/imx/sentnl_mu_ioctl.h>
2657+
2658+#include "sentnl_mu.h"
2659+
2660+/* Fill a command message header with a given command ID and length in bytes. */
2661+static int plat_fill_cmd_msg_hdr(struct mu_hdr *hdr, uint8_t cmd, uint32_t len)
2662+{
2663+	struct sentnl_mu_priv *priv = NULL;
2664+	int err = 0;
2665+
2666+	err = get_sentnl_mu_priv(&priv);
2667+	if (err) {
2668+		pr_err("Error: iMX Sentinel MU is not probed successfully.\n");
2669+		return err;
2670+	}
2671+	hdr->tag = priv->cmd_tag;
2672+	hdr->ver = MESSAGING_VERSION_6;
2673+	hdr->command = cmd;
2674+	hdr->size = (uint8_t)(len / sizeof(uint32_t));
2675+
2676+	return err;
2677+}
2678+
2679+static int imx_sentnl_msg_send_rcv(struct sentnl_mu_priv *priv)
2680+{
2681+	unsigned int wait;
2682+	int err = 0;
2683+
2684+	mutex_lock(&priv->mu_cmd_lock);
2685+	mutex_lock(&priv->mu_lock);
2686+
2687+	err = mbox_send_message(priv->tx_chan, &priv->tx_msg);
2688+	if (err < 0) {
2689+		pr_err("Error: mbox_send_message failure.\n");
2690+		mutex_unlock(&priv->mu_lock);
2691+		return err;
2692+	}
2693+	mutex_unlock(&priv->mu_lock);
2694+
2695+	wait = msecs_to_jiffies(1000);
2696+	if (!wait_for_completion_timeout(&priv->done, wait)) {
2697+		mutex_unlock(&priv->mu_cmd_lock);
2698+		pr_err("Error: wait_for_completion timed out.\n");
2699+		return -ETIMEDOUT;
2700+	}
2701+
2702+	/* As part of func sentnl_mu_rx_callback() execution,
2703+	 * response will be copied to sentnl_msg->rsp_msg.
2704+	 *
2705+	 * Lock: (mutex_unlock(&sentnl_mu_priv->mu_cmd_lock),
2706+	 * will be unlocked if it is a response.
2707+	 */
2708+	return err;
2709+}
2710+
2711+static int read_otp_uniq_id(struct sentnl_mu_priv *priv, u32 *value)
2712+{
2713+	unsigned int tag, command, size, ver, status;
2714+
2715+	tag = MSG_TAG(priv->rx_msg.header);
2716+	command = MSG_COMMAND(priv->rx_msg.header);
2717+	size = MSG_SIZE(priv->rx_msg.header);
2718+	ver = MSG_VER(priv->rx_msg.header);
2719+	status = RES_STATUS(priv->rx_msg.data[0]);
2720+
2721+	if (tag == 0xe1 && command == SENTNL_READ_FUSE_REQ &&
2722+	    size == 0x07 && ver == SENTNL_VERSION && status == SENTNL_SUCCESS_IND) {
2723+		value[0] = priv->rx_msg.data[1];
2724+		value[1] = priv->rx_msg.data[2];
2725+		value[2] = priv->rx_msg.data[3];
2726+		value[3] = priv->rx_msg.data[4];
2727+		return 0;
2728+	}
2729+
2730+	return -EINVAL;
2731+}
2732+
2733+static int read_fuse_word(struct sentnl_mu_priv *priv, u32 *value)
2734+{
2735+	unsigned int tag, command, size, ver, status;
2736+
2737+	tag = MSG_TAG(priv->rx_msg.header);
2738+	command = MSG_COMMAND(priv->rx_msg.header);
2739+	size = MSG_SIZE(priv->rx_msg.header);
2740+	ver = MSG_VER(priv->rx_msg.header);
2741+	status = RES_STATUS(priv->rx_msg.data[0]);
2742+
2743+	if (tag == 0xe1 && command == SENTNL_READ_FUSE_REQ &&
2744+	    size == 0x03 && ver == 0x06 && status == SENTNL_SUCCESS_IND) {
2745+		value[0] = priv->rx_msg.data[1];
2746+		return 0;
2747+	}
2748+
2749+	return -EINVAL;
2750+}
2751+
2752+int read_common_fuse(uint16_t fuse_id, u32 *value)
2753+{
2754+	struct sentnl_mu_priv *priv = NULL;
2755+	int err = 0;
2756+
2757+	err = get_sentnl_mu_priv(&priv);
2758+	if (err) {
2759+		pr_err("Error: iMX Sentinel MU is not probed successfully.\n");
2760+		return err;
2761+	}
2762+	err = plat_fill_cmd_msg_hdr((struct mu_hdr *)&priv->tx_msg.header, SENTNL_READ_FUSE_REQ, 8);
2763+	if (err) {
2764+		pr_err("Error: plat_fill_cmd_msg_hdr failed.\n");
2765+		return err;
2766+	}
2767+
2768+	priv->tx_msg.data[0] = fuse_id;
2769+	err = imx_sentnl_msg_send_rcv(priv);
2770+	if (err < 0)
2771+		return err;
2772+
2773+	switch (fuse_id) {
2774+	case OTP_UNIQ_ID:
2775+		err = read_otp_uniq_id(priv, value);
2776+		break;
2777+	default:
2778+		err = read_fuse_word(priv, value);
2779+		break;
2780+	}
2781+
2782+	return err;
2783+}
2784+EXPORT_SYMBOL_GPL(read_common_fuse);
2785diff --git a/drivers/firmware/imx/sentnl_mu.c b/drivers/firmware/imx/sentnl_mu.c
2786new file mode 100644
2787index 000000000..c96b2ab2b
2788--- /dev/null
2789+++ b/drivers/firmware/imx/sentnl_mu.c
2790@@ -0,0 +1,919 @@
2791+// SPDX-License-Identifier: GPL-2.0+
2792+/*
2793+ * Copyright 2021 NXP
2794+ * Author: Alice Guo <alice.guo@nxp.com>
2795+ */
2796+
2797+#include <asm/cacheflush.h>
2798+
2799+#include <linux/dma-mapping.h>
2800+#include <linux/completion.h>
2801+#include <linux/dev_printk.h>
2802+#include <linux/errno.h>
2803+#include <linux/export.h>
2804+#include <linux/firmware/imx/sentnl_base_msg.h>
2805+#include <linux/firmware/imx/sentnl_mu_ioctl.h>
2806+#include <linux/io.h>
2807+#include <linux/init.h>
2808+#include <linux/mailbox_client.h>
2809+#include <linux/miscdevice.h>
2810+#include <linux/mod_devicetable.h>
2811+#include <linux/module.h>
2812+#include <linux/of_platform.h>
2813+#include <linux/platform_device.h>
2814+#include <linux/slab.h>
2815+#include <linux/sys_soc.h>
2816+
2817+#include "sentnl_mu.h"
2818+
2819+struct sentnl_mu_priv *sentnl_priv_export;
2820+
2821+int get_sentnl_mu_priv(struct sentnl_mu_priv **export)
2822+{
2823+	if (!sentnl_priv_export)
2824+		return -EPROBE_DEFER;
2825+
2826+	*export = sentnl_priv_export;
2827+	return 0;
2828+}
2829+EXPORT_SYMBOL_GPL(get_sentnl_mu_priv);
2830+
2831+
2832+/*
2833+ * Callback called by mailbox FW when data are received
2834+ */
2835+static void sentnl_mu_rx_callback(struct mbox_client *c, void *msg)
2836+{
2837+	struct device *dev = c->dev;
2838+	struct sentnl_mu_priv *priv = dev_get_drvdata(dev);
2839+	struct sentnl_mu_device_ctx *dev_ctx;
2840+	bool is_response = false;
2841+	int msg_size;
2842+	struct mu_hdr header;
2843+
2844+	dev_dbg(dev, "Message received on mailbox\n");
2845+
2846+	/* The function can be called with NULL msg */
2847+	if (!msg) {
2848+		dev_err(dev, "Message is invalid\n");
2849+		return;
2850+	}
2851+
2852+	if (IS_ERR(msg)) {
2853+		dev_err(dev, "Error during reception of message: %ld\n",
2854+				PTR_ERR(msg));
2855+		return;
2856+	}
2857+
2858+	header.tag = ((u8 *)msg)[3];
2859+	header.command = ((u8 *)msg)[2];
2860+	header.size = ((u8 *)msg)[1];
2861+	header.ver = ((u8 *)msg)[0];
2862+
2863+	dev_dbg(dev, "Selecting device\n");
2864+
2865+	/* Incoming command: wake up the receiver if any. */
2866+	if (header.tag == priv->cmd_tag) {
2867+		dev_dbg(dev, "Selecting cmd receiver\n");
2868+		dev_ctx = priv->cmd_receiver_dev;
2869+	} else if (header.tag == priv->rsp_tag) {
2870+		if (priv->waiting_rsp_dev) {
2871+			dev_dbg(dev, "Selecting rsp waiter\n");
2872+			dev_ctx = priv->waiting_rsp_dev;
2873+			is_response = true;
2874+		} else {
2875+			/* Reading the Sentinel response
2876+			 * to the command sent by other
2877+			 * linux kernel services.
2878+			 */
2879+			spin_lock(&priv->lock);
2880+			priv->rx_msg = *(struct sentnl_api_msg *)msg;
2881+			complete(&priv->done);
2882+			spin_unlock(&priv->lock);
2883+			mutex_unlock(&priv->mu_cmd_lock);
2884+			return;
2885+		}
2886+	} else {
2887+		dev_err(dev, "Failed to select a device for message: %.8x\n",
2888+				*((u32 *) &header));
2889+		return;
2890+	}
2891+
2892+	if (!dev_ctx) {
2893+		dev_err(dev, "No device context selected for message: %.8x\n",
2894+				*((u32 *)&header));
2895+		return;
2896+	}
2897+	/* Init reception */
2898+	msg_size = header.size;
2899+	if (msg_size > MAX_RECV_SIZE) {
2900+		devctx_err(dev_ctx, "Message is too big (%d > %d)", msg_size,
2901+				MAX_RECV_SIZE);
2902+		return;
2903+	}
2904+
2905+	memcpy(dev_ctx->temp_resp, msg, msg_size * sizeof(u32));
2906+	dev_ctx->temp_resp_size = msg_size;
2907+
2908+	/* Allow user to read */
2909+	dev_ctx->pending_hdr = dev_ctx->temp_resp[0];
2910+	wake_up_interruptible(&dev_ctx->wq);
2911+
2912+	if (is_response) {
2913+		/* Allow user to send new command */
2914+		mutex_unlock(&priv->mu_cmd_lock);
2915+	}
2916+}
2917+
2918+struct device *imx_soc_device_register(void)
2919+{
2920+	struct soc_device_attribute *attr;
2921+	struct soc_device *dev;
2922+	u32 v[4];
2923+	int err;
2924+
2925+	err = read_common_fuse(OTP_UNIQ_ID, v);
2926+	if (err)
2927+		return NULL;
2928+
2929+	attr = kzalloc(sizeof(*attr), GFP_KERNEL);
2930+	if (!attr)
2931+		return NULL;
2932+
2933+	err = of_property_read_string(of_root, "model", &attr->machine);
2934+	if (err) {
2935+		kfree(attr);
2936+		return NULL;
2937+	}
2938+	attr->family = kasprintf(GFP_KERNEL, "Freescale i.MX");
2939+	attr->revision = kasprintf(GFP_KERNEL, "1.0");
2940+	attr->serial_number = kasprintf(GFP_KERNEL, "%016llX", (u64)v[3] << 32 | v[0]);
2941+	attr->soc_id = kasprintf(GFP_KERNEL, "i.MX8ULP");
2942+
2943+	dev = soc_device_register(attr);
2944+	if (IS_ERR(dev)) {
2945+		kfree(attr->soc_id);
2946+		kfree(attr->serial_number);
2947+		kfree(attr->revision);
2948+		kfree(attr->family);
2949+		kfree(attr->machine);
2950+		kfree(attr);
2951+		return ERR_CAST(dev);
2952+	}
2953+
2954+	return soc_device_to_device(dev);
2955+}
2956+
2957+/*
2958+ * File operations for user-space
2959+ */
2960+
2961+/* Write a message to the MU. */
2962+static ssize_t sentnl_mu_fops_write(struct file *fp, const char __user *buf,
2963+				  size_t size, loff_t *ppos)
2964+{
2965+	struct sentnl_mu_device_ctx *dev_ctx = container_of(fp->private_data,
2966+					   struct sentnl_mu_device_ctx, miscdev);
2967+	struct sentnl_mu_priv *sentnl_mu_priv = dev_ctx->priv;
2968+	u32 nb_words = 0;
2969+	struct mu_hdr header;
2970+	int err;
2971+
2972+	devctx_dbg(dev_ctx, "write from buf (%p)%ld, ppos=%lld\n", buf, size,
2973+		   ((ppos) ? *ppos : 0));
2974+
2975+	if (down_interruptible(&dev_ctx->fops_lock))
2976+		return -EBUSY;
2977+
2978+	if (dev_ctx->status != MU_OPENED) {
2979+		err = -EINVAL;
2980+		goto exit;
2981+	}
2982+
2983+	if (size < 4) {//sizeof(struct she_mu_hdr)) {
2984+		devctx_err(dev_ctx, "User buffer too small(%ld < %x)\n", size, 0x4);
2985+		//devctx_err(dev_ctx, "User buffer too small(%ld < %lu)\n", size, ()0x4);
2986+			  // sizeof(struct she_mu_hdr));
2987+		err = -ENOSPC;
2988+		goto exit;
2989+	}
2990+
2991+	if (size > MAX_MESSAGE_SIZE_BYTES) {
2992+		devctx_err(dev_ctx, "User buffer too big(%ld > %lu)\n", size,
2993+			   MAX_MESSAGE_SIZE_BYTES);
2994+		err = -ENOSPC;
2995+		goto exit;
2996+	}
2997+
2998+	/* Copy data to buffer */
2999+	err = (int)copy_from_user(dev_ctx->temp_cmd, buf, size);
3000+	if (err) {
3001+		err = -EFAULT;
3002+		devctx_err(dev_ctx, "Fail copy message from user\n");
3003+		goto exit;
3004+	}
3005+
3006+	print_hex_dump_debug("from user ", DUMP_PREFIX_OFFSET, 4, 4,
3007+			     dev_ctx->temp_cmd, size, false);
3008+
3009+	header = *((struct mu_hdr *) (&dev_ctx->temp_cmd[0]));
3010+
3011+	/* Check the message is valid according to tags */
3012+	if (header.tag == sentnl_mu_priv->cmd_tag) {
3013+		/*
3014+		 * unlocked in sentnl_mu_receive_work_handler when the
3015+		 * response to this command is received.
3016+		 */
3017+		mutex_lock(&sentnl_mu_priv->mu_cmd_lock);
3018+		sentnl_mu_priv->waiting_rsp_dev = dev_ctx;
3019+	} else if (header.tag == sentnl_mu_priv->rsp_tag) {
3020+		/* Check the device context can send the command */
3021+		if (dev_ctx != sentnl_mu_priv->cmd_receiver_dev) {
3022+			devctx_err(dev_ctx,
3023+				   "This channel is not configured to send response to Sentinel\n");
3024+			err = -EPERM;
3025+			goto exit;
3026+		}
3027+	} else {
3028+		devctx_err(dev_ctx, "The message does not have a valid TAG\n");
3029+		err = -EINVAL;
3030+		goto exit;
3031+	}
3032+
3033+	/*
3034+	 * Check that the size passed as argument matches the size
3035+	 * carried in the message.
3036+	 */
3037+	nb_words = header.size;
3038+	if (nb_words * sizeof(u32) != size) {
3039+		devctx_err(dev_ctx, "User buffer too small\n");
3040+		goto exit;
3041+	}
3042+
3043+	mutex_lock(&sentnl_mu_priv->mu_lock);
3044+
3045+	/* Send message */
3046+	devctx_dbg(dev_ctx, "sending message\n");
3047+	err = mbox_send_message(sentnl_mu_priv->tx_chan, dev_ctx->temp_cmd);
3048+	if (err < 0) {
3049+		devctx_err(dev_ctx, "Failed to send message\n");
3050+		goto unlock;
3051+	}
3052+
3053+	err = nb_words * (u32)sizeof(u32);
3054+
3055+unlock:
3056+	mutex_unlock(&sentnl_mu_priv->mu_lock);
3057+
3058+exit:
3059+	up(&dev_ctx->fops_lock);
3060+	return err;
3061+}
3062+
/*
 * Read a message from the MU.
 * Blocking until a message is available.
 *
 * Returns the number of bytes copied to @buf, or a negative errno.
 */
static ssize_t sentnl_mu_fops_read(struct file *fp, char __user *buf,
				 size_t size, loff_t *ppos)
{
	struct sentnl_mu_device_ctx *dev_ctx = container_of(fp->private_data,
					   struct sentnl_mu_device_ctx, miscdev);
	u32 data_size = 0, size_to_copy = 0;
	struct sentnl_obuf_desc *b_desc;
	int err;

	devctx_dbg(dev_ctx, "read to buf %p(%ld), ppos=%lld\n", buf, size,
		   ((ppos) ? *ppos : 0));

	/*
	 * Serialize against the other fops on this device context.
	 * NOTE(review): -ERESTARTSYS is the conventional return for an
	 * interrupted down_interruptible() — confirm callers expect -EBUSY.
	 */
	if (down_interruptible(&dev_ctx->fops_lock))
		return -EBUSY;

	if (dev_ctx->status != MU_OPENED) {
		err = -EINVAL;
		goto exit;
	}

	/* Wait until the complete message is received on the MU. */
	/* pending_hdr is set elsewhere once a full message has arrived. */
	err = wait_event_interruptible(dev_ctx->wq, dev_ctx->pending_hdr != 0);
	if (err) {
		devctx_err(dev_ctx, "Interrupted by signal\n");
		goto exit;
	}

	devctx_dbg(dev_ctx, "%s %s\n", __func__,
		   "message received, start transmit to user");

	/* Check that the size passed as argument is larger than
	 * the one carried in the message.
	 */
	data_size = dev_ctx->temp_resp_size * sizeof(u32);
	size_to_copy = data_size;
	if (size_to_copy > size) {
		/* Silently truncate to the user buffer size. */
		devctx_dbg(dev_ctx, "User buffer too small (%ld < %d)\n",
			   size, size_to_copy);
		size_to_copy = size;
	}

	/* We may need to copy the output data to user before
	 * delivering the completion message.
	 */
	while (!list_empty(&dev_ctx->pending_out)) {
		b_desc = list_first_entry_or_null(&dev_ctx->pending_out,
						  struct sentnl_obuf_desc,
						  link);
		if (b_desc->out_usr_ptr && b_desc->out_ptr) {
			devctx_dbg(dev_ctx, "Copy output data to user\n");
			/* copy_to_user() returns the number of bytes NOT copied. */
			err = (int)copy_to_user(b_desc->out_usr_ptr,
						b_desc->out_ptr,
						b_desc->out_size);
			if (err) {
				devctx_err(dev_ctx,
					   "Failed to copy output data to user\n");
				err = -EFAULT;
				goto exit;
			}
		}
		/* Descriptor consumed: unlink and release it. */
		__list_del_entry(&b_desc->link);
		devm_kfree(dev_ctx->dev, b_desc);
	}

	/* Copy data from the buffer */
	print_hex_dump_debug("to user ", DUMP_PREFIX_OFFSET, 4, 4,
			     dev_ctx->temp_resp, size_to_copy, false);
	err = (int)copy_to_user(buf, dev_ctx->temp_resp, size_to_copy);
	if (err) {
		devctx_err(dev_ctx, "Failed to copy to user\n");
		err = -EFAULT;
		goto exit;
	}

	/* Success: report the number of bytes delivered. */
	err = size_to_copy;

	/* free memory allocated on the shared buffers. */
	dev_ctx->secure_mem.pos = 0;
	dev_ctx->non_secure_mem.pos = 0;

	/* Re-arm: next read will block until a new message arrives. */
	dev_ctx->pending_hdr = 0;

exit:
	up(&dev_ctx->fops_lock);
	return err;
}
3153+
3154+/* Give access to Sentinel, to the memory we want to share */
3155+static int sentnl_mu_setup_sentnl_mem_access(struct sentnl_mu_device_ctx *dev_ctx,
3156+					     u64 addr, u32 len)
3157+{
3158+	/* Assuming Sentinel has access to all the memory regions */
3159+	int ret = 0;
3160+
3161+	if (ret) {
3162+		devctx_err(dev_ctx, "Fail find memreg\n");
3163+		goto exit;
3164+	}
3165+
3166+	if (ret) {
3167+		devctx_err(dev_ctx, "Fail set permission for resource\n");
3168+		goto exit;
3169+	}
3170+
3171+exit:
3172+	return ret;
3173+}
3174+
3175+static int sentnl_mu_ioctl_get_mu_info(struct sentnl_mu_device_ctx *dev_ctx,
3176+				  unsigned long arg)
3177+{
3178+	struct sentnl_mu_priv *priv = dev_get_drvdata(dev_ctx->dev);
3179+	struct sentnl_mu_ioctl_get_mu_info info;
3180+	int err = -EINVAL;
3181+
3182+	info.sentnl_mu_id = (u8)priv->sentnl_mu_id;
3183+	info.interrupt_idx = 0;
3184+	info.tz = 0;
3185+	info.did = 0x7;
3186+
3187+	devctx_dbg(dev_ctx,
3188+		   "info [mu_idx: %d, irq_idx: %d, tz: 0x%x, did: 0x%x]\n",
3189+		   info.sentnl_mu_id, info.interrupt_idx, info.tz, info.did);
3190+
3191+	err = (int)copy_to_user((u8 *)arg, &info,
3192+		sizeof(info));
3193+	if (err) {
3194+		devctx_err(dev_ctx, "Failed to copy mu info to user\n");
3195+		err = -EFAULT;
3196+		goto exit;
3197+	}
3198+
3199+exit:
3200+	return err;
3201+}
3202+
3203+/*
3204+ * Copy a buffer of daa to/from the user and return the address to use in
3205+ * messages
3206+ */
3207+static int sentnl_mu_ioctl_setup_iobuf_handler(struct sentnl_mu_device_ctx *dev_ctx,
3208+					       unsigned long arg)
3209+{
3210+	struct sentnl_obuf_desc *out_buf_desc;
3211+	struct sentnl_mu_ioctl_setup_iobuf io = {0};
3212+	struct sentnl_shared_mem *shared_mem;
3213+	int err = -EINVAL;
3214+	u32 pos;
3215+
3216+	err = (int)copy_from_user(&io,
3217+		(u8 *)arg,
3218+		sizeof(io));
3219+	if (err) {
3220+		devctx_err(dev_ctx, "Failed copy iobuf config from user\n");
3221+		err = -EFAULT;
3222+		goto exit;
3223+	}
3224+
3225+	devctx_dbg(dev_ctx, "io [buf: %p(%d) flag: %x]\n",
3226+		   io.user_buf, io.length, io.flags);
3227+
3228+	if (io.length == 0 || !io.user_buf) {
3229+		/*
3230+		 * Accept NULL pointers since some buffers are optional
3231+		 * in SECO commands. In this case we should return 0 as
3232+		 * pointer to be embedded into the message.
3233+		 * Skip all data copy part of code below.
3234+		 */
3235+		io.sentnl_addr = 0;
3236+		goto copy;
3237+	}
3238+
3239+	/* Select the shared memory to be used for this buffer. */
3240+	if (io.flags & SECO_MU_IO_FLAGS_USE_SEC_MEM) {
3241+		/* App requires to use secure memory for this buffer.*/
3242+		devctx_err(dev_ctx, "Failed allocate SEC MEM memory\n");
3243+		err = -EFAULT;
3244+		goto exit;
3245+	} else {
3246+		/* No specific requirement for this buffer. */
3247+		shared_mem = &dev_ctx->non_secure_mem;
3248+	}
3249+
3250+	/* Check there is enough space in the shared memory. */
3251+	if (io.length >= shared_mem->size - shared_mem->pos) {
3252+		devctx_err(dev_ctx, "Not enough space in shared memory\n");
3253+		err = -ENOMEM;
3254+		goto exit;
3255+	}
3256+
3257+	/* Allocate space in shared memory. 8 bytes aligned. */
3258+	pos = shared_mem->pos;
3259+	shared_mem->pos += round_up(io.length, 8u);
3260+	io.sentnl_addr = (u64)shared_mem->dma_addr + pos;
3261+
3262+	if ((io.flags & SECO_MU_IO_FLAGS_USE_SEC_MEM) &&
3263+	    !(io.flags & SECO_MU_IO_FLAGS_USE_SHORT_ADDR)) {
3264+		/*Add base address to get full address.*/
3265+		devctx_err(dev_ctx, "Failed allocate SEC MEM memory\n");
3266+		err = -EFAULT;
3267+		goto exit;
3268+	}
3269+
3270+	if (io.flags & SECO_MU_IO_FLAGS_IS_INPUT) {
3271+		/*
3272+		 * buffer is input:
3273+		 * copy data from user space to this allocated buffer.
3274+		 */
3275+		err = (int)copy_from_user(shared_mem->ptr + pos, io.user_buf,
3276+					  io.length);
3277+		if (err) {
3278+			devctx_err(dev_ctx,
3279+				   "Failed copy data to shared memory\n");
3280+			err = -EFAULT;
3281+			goto exit;
3282+		}
3283+	} else {
3284+		/*
3285+		 * buffer is output:
3286+		 * add an entry in the "pending buffers" list so data
3287+		 * can be copied to user space when receiving SECO
3288+		 * response.
3289+		 */
3290+		out_buf_desc = devm_kmalloc(dev_ctx->dev, sizeof(*out_buf_desc),
3291+					    GFP_KERNEL);
3292+		if (!out_buf_desc) {
3293+			err = -ENOMEM;
3294+			devctx_err(dev_ctx,
3295+				   "Failed allocating mem for pending buffer\n"
3296+				   );
3297+			goto exit;
3298+		}
3299+
3300+		out_buf_desc->out_ptr = shared_mem->ptr + pos;
3301+		out_buf_desc->out_usr_ptr = io.user_buf;
3302+		out_buf_desc->out_size = io.length;
3303+		list_add_tail(&out_buf_desc->link, &dev_ctx->pending_out);
3304+	}
3305+
3306+copy:
3307+	/* Provide the sentinel address to user space only if success. */
3308+	err = (int)copy_to_user((u8 *)arg, &io,
3309+		sizeof(io));
3310+	if (err) {
3311+		devctx_err(dev_ctx, "Failed to copy iobuff setup to user\n");
3312+		err = -EFAULT;
3313+		goto exit;
3314+	}
3315+exit:
3316+	return err;
3317+}
3318+
3319+
3320+
/* Open a char device. */
static int sentnl_mu_fops_open(struct inode *nd, struct file *fp)
{
	struct sentnl_mu_device_ctx *dev_ctx = container_of(fp->private_data,
							    struct sentnl_mu_device_ctx,
							    miscdev);
	int err;

	/* Avoid race if opened at the same time */
	if (down_trylock(&dev_ctx->fops_lock))
		return -EBUSY;

	/* Authorize only 1 instance. */
	if (dev_ctx->status != MU_FREE) {
		err = -EBUSY;
		goto exit;
	}

	/*
	 * Allocate some memory for data exchanges with S40x.
	 * This will be used for data not requiring secure memory.
	 */
	dev_ctx->non_secure_mem.ptr = dmam_alloc_coherent(dev_ctx->dev,
					MAX_DATA_SIZE_PER_USER,
					&dev_ctx->non_secure_mem.dma_addr,
					GFP_KERNEL);
	if (!dev_ctx->non_secure_mem.ptr) {
		err = -ENOMEM;
		devctx_err(dev_ctx, "Failed to map shared memory with S40x\n");
		goto exit;
	}

	/* Grant Sentinel access to the freshly allocated DMA region. */
	err = sentnl_mu_setup_sentnl_mem_access(dev_ctx,
						dev_ctx->non_secure_mem.dma_addr,
						MAX_DATA_SIZE_PER_USER);
	if (err) {
		err = -EPERM;
		devctx_err(dev_ctx,
			   "Failed to share access to shared memory\n");
		goto free_coherent;
	}

	dev_ctx->non_secure_mem.size = MAX_DATA_SIZE_PER_USER;
	dev_ctx->non_secure_mem.pos = 0;
	dev_ctx->status = MU_OPENED;

	dev_ctx->pending_hdr = 0;

	/* Success path: err is 0 here (set by the mem-access call above). */
	goto exit;

free_coherent:
	/*
	 * NOTE(review): the buffer was allocated against dev_ctx->dev but is
	 * freed against dev_ctx->priv->dev — confirm both refer to the same
	 * struct device, otherwise the devres lookup will not find it.
	 */
	dmam_free_coherent(dev_ctx->priv->dev, MAX_DATA_SIZE_PER_USER,
			   dev_ctx->non_secure_mem.ptr,
			   dev_ctx->non_secure_mem.dma_addr);

exit:
	up(&dev_ctx->fops_lock);
	return err;
}
3380+
/* Close a char device. */
static int sentnl_mu_fops_close(struct inode *nd, struct file *fp)
{
	struct sentnl_mu_device_ctx *dev_ctx = container_of(fp->private_data,
					struct sentnl_mu_device_ctx, miscdev);
	struct sentnl_mu_priv *priv = dev_ctx->priv;
	struct sentnl_obuf_desc *out_buf_desc;

	/* Avoid race if closed at the same time */
	if (down_trylock(&dev_ctx->fops_lock))
		return -EBUSY;

	/* The device context has not been opened */
	if (dev_ctx->status != MU_OPENED)
		goto exit;

	/* check if this device was registered as command receiver. */
	if (priv->cmd_receiver_dev == dev_ctx)
		priv->cmd_receiver_dev = NULL;

	/* check if this device was registered as waiting response. */
	if (priv->waiting_rsp_dev == dev_ctx) {
		priv->waiting_rsp_dev = NULL;
		/*
		 * NOTE(review): mu_cmd_lock appears to have been taken on the
		 * write path, possibly by another task — unlocking a mutex
		 * from a non-owner is undefined in Linux; confirm ownership.
		 */
		mutex_unlock(&priv->mu_cmd_lock);
	}

	/* Unmap secure memory shared buffer. */
	if (dev_ctx->secure_mem.ptr)
		devm_iounmap(dev_ctx->dev, dev_ctx->secure_mem.ptr);

	dev_ctx->secure_mem.ptr = NULL;
	dev_ctx->secure_mem.dma_addr = 0;
	dev_ctx->secure_mem.size = 0;
	dev_ctx->secure_mem.pos = 0;

	/* Free non-secure shared buffer. */
	dmam_free_coherent(dev_ctx->priv->dev, MAX_DATA_SIZE_PER_USER,
			   dev_ctx->non_secure_mem.ptr,
			   dev_ctx->non_secure_mem.dma_addr);

	dev_ctx->non_secure_mem.ptr = NULL;
	dev_ctx->non_secure_mem.dma_addr = 0;
	dev_ctx->non_secure_mem.size = 0;
	dev_ctx->non_secure_mem.pos = 0;

	/* Drop any output buffers still waiting for a response. */
	while (!list_empty(&dev_ctx->pending_out)) {
		out_buf_desc = list_first_entry_or_null(&dev_ctx->pending_out,
						struct sentnl_obuf_desc,
						link);
		__list_del_entry(&out_buf_desc->link);
		devm_kfree(dev_ctx->dev, out_buf_desc);
	}

	/* Device context can be reused by the next open(). */
	dev_ctx->status = MU_FREE;

exit:
	up(&dev_ctx->fops_lock);
	return 0;
}
3440+
3441+/* IOCTL entry point of a char device */
3442+static long sentnl_mu_ioctl(struct file *fp, unsigned int cmd, unsigned long arg)
3443+{
3444+	struct sentnl_mu_device_ctx *dev_ctx = container_of(fp->private_data,
3445+							    struct sentnl_mu_device_ctx,
3446+							    miscdev);
3447+	struct sentnl_mu_priv *sentnl_mu_priv = dev_ctx->priv;
3448+	int err = -EINVAL;
3449+
3450+	/* Prevent race during change of device context */
3451+	if (down_interruptible(&dev_ctx->fops_lock))
3452+		return -EBUSY;
3453+
3454+	switch (cmd) {
3455+	case SENTNL_MU_IOCTL_ENABLE_CMD_RCV:
3456+		if (!sentnl_mu_priv->cmd_receiver_dev) {
3457+			sentnl_mu_priv->cmd_receiver_dev = dev_ctx;
3458+			err = 0;
3459+		};
3460+		break;
3461+	case SENTNL_MU_IOCTL_GET_MU_INFO:
3462+		err = sentnl_mu_ioctl_get_mu_info(dev_ctx, arg);
3463+		break;
3464+	case SENTNL_MU_IOCTL_SHARED_BUF_CFG:
3465+		devctx_err(dev_ctx, "SENTNL_MU_IOCTL_SHARED_BUF_CFG not supported [0x%x].\n", err);
3466+		break;
3467+	case SENTNL_MU_IOCTL_SETUP_IOBUF:
3468+		err = sentnl_mu_ioctl_setup_iobuf_handler(dev_ctx, arg);
3469+		break;
3470+	case SENTNL_MU_IOCTL_SIGNED_MESSAGE:
3471+		devctx_err(dev_ctx, "SENTNL_MU_IOCTL_SIGNED_MESSAGE not supported [0x%x].\n", err);
3472+		break;
3473+	default:
3474+		err = -EINVAL;
3475+		devctx_dbg(dev_ctx, "IOCTL %.8x not supported\n", cmd);
3476+	}
3477+
3478+	up(&dev_ctx->fops_lock);
3479+	return (long)err;
3480+}
3481+
3482+/* Char driver setup */
3483+static const struct file_operations sentnl_mu_fops = {
3484+	.open		= sentnl_mu_fops_open,
3485+	.owner		= THIS_MODULE,
3486+	.release	= sentnl_mu_fops_close,
3487+	.unlocked_ioctl = sentnl_mu_ioctl,
3488+	.read		= sentnl_mu_fops_read,
3489+	.write		= sentnl_mu_fops_write,
3490+};
3491+
/* devm action: release a mailbox channel acquired during probe. */
static void if_mbox_free_channel(void *res)
{
	mbox_free_channel(res);
}
3497+
/*
 * devm action: unregister a misc char device created during probe.
 * Parameter renamed from "miscdevice", which shadowed the struct tag.
 */
static void if_misc_deregister(void *miscdev)
{
	misc_deregister(miscdev);
}
3503+
3504+static int sentnl_mu_request_channel(struct device *dev,
3505+				 struct mbox_chan **chan,
3506+				 struct mbox_client *cl,
3507+				 const char *name)
3508+{
3509+	struct mbox_chan *t_chan;
3510+	int ret = 0;
3511+
3512+	t_chan = mbox_request_channel_byname(cl, name);
3513+	if (IS_ERR(t_chan)) {
3514+		ret = PTR_ERR(t_chan);
3515+		if (ret != -EPROBE_DEFER)
3516+			dev_err(dev,
3517+				"Failed to request chan %s ret %d\n", name,
3518+				ret);
3519+		goto exit;
3520+	}
3521+
3522+	ret = devm_add_action(dev, if_mbox_free_channel, t_chan);
3523+	if (ret) {
3524+		dev_err(dev, "failed to add devm removal of mbox %s\n", name);
3525+		goto exit;
3526+	}
3527+
3528+	*chan = t_chan;
3529+
3530+exit:
3531+	return ret;
3532+}
3533+
3534+static int sentnl_mu_probe(struct platform_device *pdev)
3535+{
3536+	struct sentnl_mu_device_ctx *dev_ctx;
3537+	struct device *dev = &pdev->dev;
3538+	struct sentnl_mu_priv *priv;
3539+	struct device_node *np;
3540+	int max_nb_users = 0;
3541+	char *devname;
3542+	struct device *soc;
3543+	int ret;
3544+	int i;
3545+
3546+	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
3547+	if (!priv) {
3548+		ret = -ENOMEM;
3549+		dev_err(dev, "Fail allocate mem for private data\n");
3550+		goto exit;
3551+	}
3552+	priv->dev = dev;
3553+	dev_set_drvdata(dev, priv);
3554+
3555+	/*
3556+	 * Get the address of MU to be used for communication with the SCU
3557+	 */
3558+	np = pdev->dev.of_node;
3559+	if (!np) {
3560+		dev_err(dev, "Cannot find MU User entry in device tree\n");
3561+		ret = -ENOTSUPP;
3562+		goto exit;
3563+	}
3564+
3565+	/* Initialize the mutex. */
3566+	mutex_init(&priv->mu_cmd_lock);
3567+	mutex_init(&priv->mu_lock);
3568+
3569+	/* TBD */
3570+	priv->cmd_receiver_dev = NULL;
3571+	priv->waiting_rsp_dev = NULL;
3572+
3573+	ret = of_property_read_u32(np, "fsl,sentnl_mu_id", &priv->sentnl_mu_id);
3574+	if (ret) {
3575+		dev_warn(dev, "%s: Not able to read mu_id", __func__);
3576+		priv->sentnl_mu_id = S4_DEFAULT_MUAP_INDEX;
3577+	}
3578+
3579+	ret = of_property_read_u32(np, "fsl,sentnl_mu_max_users", &max_nb_users);
3580+	if (ret) {
3581+		dev_warn(dev, "%s: Not able to read mu_max_user", __func__);
3582+		max_nb_users = S4_MUAP_DEFAULT_MAX_USERS;
3583+	}
3584+
3585+	ret = of_property_read_u8(np, "fsl,cmd_tag", &priv->cmd_tag);
3586+	if (ret) {
3587+		dev_warn(dev, "%s: Not able to read cmd_tag", __func__);
3588+		priv->cmd_tag = DEFAULT_MESSAGING_TAG_COMMAND;
3589+	}
3590+
3591+	ret = of_property_read_u8(np, "fsl,rsp_tag", &priv->rsp_tag);
3592+	if (ret) {
3593+		dev_warn(dev, "%s: Not able to read rsp_tag", __func__);
3594+		priv->rsp_tag = DEFAULT_MESSAGING_TAG_RESPONSE;
3595+	}
3596+
3597+	/* Mailbox client configuration */
3598+	priv->sentnl_mb_cl.dev		= dev;
3599+	priv->sentnl_mb_cl.tx_block	= false;
3600+	priv->sentnl_mb_cl.knows_txdone	= true;
3601+	priv->sentnl_mb_cl.rx_callback	= sentnl_mu_rx_callback;
3602+
3603+	ret = sentnl_mu_request_channel(dev, &priv->tx_chan, &priv->sentnl_mb_cl, "tx");
3604+	if (ret) {
3605+		if (ret != -EPROBE_DEFER)
3606+			dev_err(dev, "Failed to request tx channel\n");
3607+
3608+		goto exit;
3609+	}
3610+
3611+	ret = sentnl_mu_request_channel(dev, &priv->rx_chan, &priv->sentnl_mb_cl, "rx");
3612+	if (ret) {
3613+		if (ret != -EPROBE_DEFER)
3614+			dev_err(dev, "Failed to request rx channel\n");
3615+
3616+		goto exit;
3617+	}
3618+
3619+	/* Create users */
3620+	for (i = 0; i < max_nb_users; i++) {
3621+		dev_ctx = devm_kzalloc(dev, sizeof(*dev_ctx), GFP_KERNEL);
3622+		if (!dev_ctx) {
3623+			ret = -ENOMEM;
3624+			dev_err(dev,
3625+				"Fail to allocate memory for device context\n");
3626+			goto exit;
3627+		}
3628+
3629+		dev_ctx->dev = dev;
3630+		dev_ctx->status = MU_FREE;
3631+		dev_ctx->priv = priv;
3632+		/* Default value invalid for an header. */
3633+		init_waitqueue_head(&dev_ctx->wq);
3634+
3635+		INIT_LIST_HEAD(&dev_ctx->pending_out);
3636+		sema_init(&dev_ctx->fops_lock, 1);
3637+
3638+		devname = devm_kasprintf(dev, GFP_KERNEL, "sentnl_mu%d_ch%d",
3639+					 priv->sentnl_mu_id, i);
3640+		if (!devname) {
3641+			ret = -ENOMEM;
3642+			dev_err(dev,
3643+				"Fail to allocate memory for misc dev name\n");
3644+			goto exit;
3645+		}
3646+
3647+		dev_ctx->miscdev.name = devname;
3648+		dev_ctx->miscdev.minor = MISC_DYNAMIC_MINOR;
3649+		dev_ctx->miscdev.fops = &sentnl_mu_fops;
3650+		dev_ctx->miscdev.parent = dev;
3651+		ret = misc_register(&dev_ctx->miscdev);
3652+		if (ret) {
3653+			dev_err(dev, "failed to register misc device %d\n",
3654+				ret);
3655+			goto exit;
3656+		}
3657+
3658+		ret = devm_add_action(dev, if_misc_deregister,
3659+				      &dev_ctx->miscdev);
3660+
3661+	}
3662+
3663+	init_completion(&priv->done);
3664+	spin_lock_init(&priv->lock);
3665+
3666+	sentnl_priv_export = priv;
3667+
3668+	soc = imx_soc_device_register();
3669+	if (IS_ERR(soc)) {
3670+		pr_err("failed to register SoC device: %ld\n", PTR_ERR(soc));
3671+		return PTR_ERR(soc);
3672+	}
3673+
3674+	dev_set_drvdata(dev, priv);
3675+	return devm_of_platform_populate(dev);
3676+
3677+exit:
3678+	return ret;
3679+}
3680+
/*
 * Remove: nothing to do explicitly.
 *
 * The tx/rx mailbox channels are released by the devm action registered
 * in sentnl_mu_request_channel(); freeing them here as well (as the
 * original code did) caused a double free when the devm actions ran
 * after remove.  All other resources are device-managed too.
 */
static int sentnl_mu_remove(struct platform_device *pdev)
{
	return 0;
}
3691+
3692+static const struct of_device_id sentnl_mu_match[] = {
3693+	{ .compatible = "fsl,imx-sentinel", },
3694+	{},
3695+};
3696+
/* Platform driver glue: binds on the device-tree compatible above. */
static struct platform_driver sentnl_mu_driver = {
	.driver = {
		.name = "fsl-sentinel-mu",
		.of_match_table = sentnl_mu_match,
	},
	.probe = sentnl_mu_probe,
	.remove = sentnl_mu_remove,
};
module_platform_driver(sentnl_mu_driver);

MODULE_AUTHOR("Pankaj Gupta <pankaj.gupta@nxp.com>");
MODULE_DESCRIPTION("Sentinel Baseline, HSM and SHE API(s)");
MODULE_LICENSE("GPL v2");
3710diff --git a/drivers/firmware/imx/sentnl_mu.h b/drivers/firmware/imx/sentnl_mu.h
3711new file mode 100644
3712index 000000000..3f5a4488e
3713--- /dev/null
3714+++ b/drivers/firmware/imx/sentnl_mu.h
3715@@ -0,0 +1,139 @@
3716+/* SPDX-License-Identifier: GPL-2.0+ */
3717+/*
3718+ * Copyright 2021 NXP
3719+ */
3720+
3721+#ifndef SENTNL_MU_H
3722+#define SENTNL_MU_H
3723+
3724+#include <linux/miscdevice.h>
3725+#include <linux/semaphore.h>
3726+
/* macro to log operation of a misc device */
/*
 * Each macro snapshots the miscdevice pointer into a local so the
 * argument is evaluated only once, then logs through the parent device
 * using the misc device name as prefix.
 */
#define miscdev_dbg(p_miscdev, fmt, va_args...)                                \
	({                                                                     \
		struct miscdevice *_p_miscdev = p_miscdev;                     \
		dev_dbg((_p_miscdev)->parent, "%s: " fmt, (_p_miscdev)->name,  \
		##va_args);                                                    \
	})

#define miscdev_info(p_miscdev, fmt, va_args...)                               \
	({                                                                     \
		struct miscdevice *_p_miscdev = p_miscdev;                     \
		dev_info((_p_miscdev)->parent, "%s: " fmt, (_p_miscdev)->name, \
		##va_args);                                                    \
	})

#define miscdev_err(p_miscdev, fmt, va_args...)                                \
	({                                                                     \
		struct miscdevice *_p_miscdev = p_miscdev;                     \
		dev_err((_p_miscdev)->parent, "%s: " fmt, (_p_miscdev)->name,  \
		##va_args);                                                    \
	})
/* macro to log operation of a device context */
/* Thin wrappers: forward to the miscdev_* macros above. */
#define devctx_dbg(p_devctx, fmt, va_args...) \
	miscdev_dbg(&((p_devctx)->miscdev), fmt, ##va_args)
#define devctx_info(p_devctx, fmt, va_args...) \
	miscdev_info(&((p_devctx)->miscdev), fmt, ##va_args)
#define devctx_err(p_devctx, fmt, va_args...) \
	miscdev_err((&(p_devctx)->miscdev), fmt, ##va_args)
3755+
/* Field extraction helpers for the 32-bit message header word. */
#define MSG_TAG(x)			(((x) & 0xff000000) >> 24)
#define MSG_COMMAND(x)			(((x) & 0x00ff0000) >> 16)
#define MSG_SIZE(x)			(((x) & 0x0000ff00) >> 8)
#define MSG_VER(x)			((x) & 0x000000ff)
/* Status byte carried in a response word. */
#define RES_STATUS(x)			((x) & 0x000000ff)
/* Size of the DMA-coherent exchange buffer allocated per opened device. */
#define MAX_DATA_SIZE_PER_USER		(65 * 1024)
/* Defaults used when the device tree omits the matching properties. */
#define S4_DEFAULT_MUAP_INDEX		(2)
#define S4_MUAP_DEFAULT_MAX_USERS	(4)

/* Default message tags (overridable via fsl,cmd_tag / fsl,rsp_tag). */
#define DEFAULT_MESSAGING_TAG_COMMAND           (0x17u)
#define DEFAULT_MESSAGING_TAG_RESPONSE          (0xe1u)

/* Flag bits for the setup_iobuf ioctl "flags" field. */
#define SECO_MU_IO_FLAGS_IS_INPUT	(0x01u)
#define SECO_MU_IO_FLAGS_USE_SEC_MEM	(0x02u)
#define SECO_MU_IO_FLAGS_USE_SHORT_ADDR	(0x04u)
3771+
/*
 * Descriptor for an output buffer pending copy back to user space once
 * the Sentinel response has been received.
 */
struct sentnl_obuf_desc {
	u8 *out_ptr;		/* kernel address inside the shared memory */
	u8 *out_usr_ptr;	/* user-space destination address */
	u32 out_size;		/* number of bytes to copy back */
	struct list_head link;	/* entry in sentnl_mu_device_ctx.pending_out */
};
3778+
/* Status of a char device */
enum mu_device_status_t {
	MU_FREE,	/* no user currently holds the device */
	MU_OPENED	/* device held by exactly one user */
};
3784+
/* One shared-memory window used for data exchange with Sentinel. */
struct sentnl_shared_mem {
	dma_addr_t dma_addr;	/* bus address of the window */
	u32 size;		/* total size in bytes */
	u32 pos;		/* bump-allocator offset of next free byte */
	u8 *ptr;		/* kernel virtual address of the window */
};
3791+
/* Private struct for each char device instance. */
struct sentnl_mu_device_ctx {
	struct device *dev;		/* parent platform device */
	struct sentnl_mu_priv *priv;	/* driver-wide private data */
	struct miscdevice miscdev;	/* the char device itself */

	enum mu_device_status_t status;	/* MU_FREE or MU_OPENED */
	wait_queue_head_t wq;		/* readers wait here for a response */
	struct semaphore fops_lock;	/* serializes open/read/write/ioctl */

	u32 pending_hdr;		/* non-zero once a full message arrived */
	struct list_head pending_out;	/* sentnl_obuf_desc list to copy back */

	struct sentnl_shared_mem secure_mem;		/* secure window (optional) */
	struct sentnl_shared_mem non_secure_mem;	/* DMA-coherent exchange buffer */

	u32 temp_cmd[MAX_MESSAGE_SIZE];	/* staging area for outgoing commands */
	u32 temp_resp[MAX_RECV_SIZE];	/* staging area for received responses */
	u32 temp_resp_size;		/* response length, in 32-bit words */
	struct notifier_block sentnl_notify;
};
3813+
/* Header of the messages exchange with the SENTINEL */
/*
 * NOTE(review): member order (ver in the lowest byte) matches the MSG_*
 * extraction macros above on a little-endian CPU — confirm if big-endian
 * support ever matters.
 */
struct mu_hdr {
	u8 ver;		/* protocol version */
	u8 size;	/* message size in 32-bit words, header included */
	u8 command;	/* command identifier */
	u8 tag;		/* cmd_tag or rsp_tag */
}  __packed;
3821+
/* One complete message as exchanged over the mailbox. */
struct sentnl_api_msg {
	u32 header; /* u8 Tag; u8 Command; u8 Size; u8 Ver; */
	u32 data[SENTNL_MSG_DATA_NUM];
};
3826+
/* Driver-wide private data, shared by all per-user device contexts. */
struct sentnl_mu_priv {
	struct sentnl_mu_device_ctx *cmd_receiver_dev;	/* ctx receiving async commands */
	struct sentnl_mu_device_ctx *waiting_rsp_dev;	/* ctx awaiting a response */
	/*
	 * prevent parallel access to the MU registers
	 * e.g. a user trying to send a command while the other one is
	 * sending a response.
	 */
	struct mutex mu_lock;
	/*
	 * prevent a command to be sent on the MU while another one is still
	 * processing. (response to a command is allowed)
	 */
	struct mutex mu_cmd_lock;
	struct device *dev;	/* platform device */
	u32 sentnl_mu_id;	/* MU index (fsl,sentnl_mu_id or default) */
	u8 cmd_tag;		/* tag marking outgoing commands */
	u8 rsp_tag;		/* tag marking incoming responses */

	struct mbox_client sentnl_mb_cl;	/* mailbox client config */
	struct mbox_chan *tx_chan, *rx_chan;	/* mailbox channels */
	struct sentnl_api_msg tx_msg, rx_msg;	/* in-flight message buffers */
	struct completion done;
	spinlock_t lock;
};
3852+
3853+int get_sentnl_mu_priv(struct sentnl_mu_priv **export);
3854+#endif
3855