1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * For transport using shared mem structure.
4  *
5  * Copyright (C) 2019-2024 ARM Ltd.
6  */
7 
8 #include <linux/ktime.h>
9 #include <linux/io.h>
10 #include <linux/of.h>
11 #include <linux/of_address.h>
12 #include <linux/processor.h>
13 #include <linux/types.h>
14 
15 #include <linux/bug.h>
16 
17 #include "common.h"
18 
19 /*
20  * SCMI specification requires all parameters, message headers, return
21  * arguments or any protocol data to be expressed in little endian
22  * format only.
23  */
struct scmi_shared_mem {
	__le32 reserved;
	/* Polled/written by both sides to hand the area back and forth */
	__le32 channel_status;
#define SCMI_SHMEM_CHAN_STAT_CHANNEL_ERROR	BIT(1)
#define SCMI_SHMEM_CHAN_STAT_CHANNEL_FREE	BIT(0)
	__le32 reserved1[2];
	/* Only the completion-interrupt enable bit is used by this driver */
	__le32 flags;
#define SCMI_SHMEM_FLAG_INTR_ENABLED	BIT(0)
	/* Byte length of msg_header plus msg_payload */
	__le32 length;
	__le32 msg_header;
	u8 msg_payload[];
};
36 
/*
 * Copy out of I/O shared memory using only aligned 32-bit reads, for
 * platforms whose shmem cannot tolerate narrower accesses.
 */
static inline void shmem_memcpy_fromio32(void *to,
					 const void __iomem *from,
					 size_t count)
{
	bool bad_args = !IS_ALIGNED((unsigned long)from, 4) ||
			!IS_ALIGNED((unsigned long)to, 4) ||
			(count % 4);

	/* Misaligned buffers or a ragged length would be silently truncated */
	WARN_ON(bad_args);

	__ioread32_copy(to, from, count / 4);
}
47 
/*
 * Copy into I/O shared memory using only aligned 32-bit writes, for
 * platforms whose shmem cannot tolerate narrower accesses.
 */
static inline void shmem_memcpy_toio32(void __iomem *to,
				       const void *from,
				       size_t count)
{
	bool bad_args = !IS_ALIGNED((unsigned long)to, 4) ||
			!IS_ALIGNED((unsigned long)from, 4) ||
			(count % 4);

	/* Misaligned buffers or a ragged length would be silently truncated */
	WARN_ON(bad_args);

	__iowrite32_copy(to, from, count / 4);
}
58 
59 static struct scmi_shmem_io_ops shmem_io_ops32 = {
60 	.fromio	= shmem_memcpy_fromio32,
61 	.toio	= shmem_memcpy_toio32,
62 };
63 
64 /* Wrappers are needed for proper memcpy_{from,to}_io expansion by the
65  * pre-processor.
66  */
/* Default fromio accessor: defers to the arch's memcpy_fromio(). */
static inline void shmem_memcpy_fromio(void *to,
				       const void __iomem *from,
				       size_t count)
{
	memcpy_fromio(to, from, count);
}
73 
/* Default toio accessor: defers to the arch's memcpy_toio(). */
static inline void shmem_memcpy_toio(void __iomem *to,
				     const void *from,
				     size_t count)
{
	memcpy_toio(to, from, count);
}
80 
81 static struct scmi_shmem_io_ops shmem_io_ops_default = {
82 	.fromio = shmem_memcpy_fromio,
83 	.toio	= shmem_memcpy_toio,
84 };
85 
/*
 * shmem_tx_prepare() - Acquire the shared memory area and lay out an
 * outbound SCMI message.
 * @shmem: Mapped shared memory area for this channel.
 * @xfer: Transfer whose header and TX payload are written out.
 * @cinfo: Channel info; provides the device and the rx_timeout_ms bound.
 * @copy_toio: I/O copy helper matching the platform's access-width needs.
 *
 * Busy-waits up to twice the channel timeout for the platform to mark the
 * area free before overwriting it; on timeout it warns and returns without
 * touching the area. Otherwise marks the channel busy and writes flags,
 * length, header and payload in that order.
 */
static void shmem_tx_prepare(struct scmi_shared_mem __iomem *shmem,
			     struct scmi_xfer *xfer,
			     struct scmi_chan_info *cinfo,
			     shmem_copy_toio_t copy_toio)
{
	ktime_t stop;

	/*
	 * Ideally channel must be free by now unless OS timeout last
	 * request and platform continued to process the same, wait
	 * until it releases the shared memory, otherwise we may endup
	 * overwriting its response with new message payload or vice-versa.
	 * Giving up anyway after twice the expected channel timeout so as
	 * not to bail-out on intermittent issues where the platform is
	 * occasionally a bit slower to answer.
	 *
	 * Note that after a timeout is detected we bail-out and carry on but
	 * the transport functionality is probably permanently compromised:
	 * this is just to ease debugging and avoid complete hangs on boot
	 * due to a misbehaving SCMI firmware.
	 */
	stop = ktime_add_ms(ktime_get(), 2 * cinfo->rx_timeout_ms);
	spin_until_cond((ioread32(&shmem->channel_status) &
			 SCMI_SHMEM_CHAN_STAT_CHANNEL_FREE) ||
			 ktime_after(ktime_get(), stop));
	if (!(ioread32(&shmem->channel_status) &
	      SCMI_SHMEM_CHAN_STAT_CHANNEL_FREE)) {
		WARN_ON_ONCE(1);
		dev_err(cinfo->dev,
			"Timeout waiting for a free TX channel !\n");
		return;
	}

	/* Mark channel busy + clear error */
	iowrite32(0x0, &shmem->channel_status);
	/* Interrupt-driven completion unless the xfer is to be polled */
	iowrite32(xfer->hdr.poll_completion ? 0 : SCMI_SHMEM_FLAG_INTR_ENABLED,
		  &shmem->flags);
	/* Total length covers the 4-byte header plus the payload */
	iowrite32(sizeof(shmem->msg_header) + xfer->tx.len, &shmem->length);
	iowrite32(pack_scmi_header(&xfer->hdr), &shmem->msg_header);
	if (xfer->tx.buf)
		copy_toio(shmem->msg_payload, xfer->tx.buf, xfer->tx.len);
}
128 
shmem_read_header(struct scmi_shared_mem __iomem * shmem)129 static u32 shmem_read_header(struct scmi_shared_mem __iomem *shmem)
130 {
131 	return ioread32(&shmem->msg_header);
132 }
133 
/* Pull status and return payload of a completed command into the xfer. */
static void shmem_fetch_response(struct scmi_shared_mem __iomem *shmem,
				 struct scmi_xfer *xfer,
				 shmem_copy_fromio_t copy_fromio)
{
	size_t msg_len = ioread32(&shmem->length);

	/* The first payload word carries the command return status */
	xfer->hdr.status = ioread32(shmem->msg_payload);
	/*
	 * msg_len accounts for the 4-byte header and the 4-byte status:
	 * clamp the remainder to the size of the prepared rx buffer.
	 */
	xfer->rx.len = min_t(size_t, xfer->rx.len,
			     msg_len > 8 ? msg_len - 8 : 0);

	/* Return values start right after the status word */
	copy_fromio(xfer->rx.buf, shmem->msg_payload + 4, xfer->rx.len);
}
147 
/* Pull a notification payload into the xfer; no status word is present. */
static void shmem_fetch_notification(struct scmi_shared_mem __iomem *shmem,
				     size_t max_len, struct scmi_xfer *xfer,
				     shmem_copy_fromio_t copy_fromio)
{
	size_t msg_len = ioread32(&shmem->length);

	/* msg_len accounts only for the 4-byte header; clamp to max_len */
	xfer->rx.len = min_t(size_t, max_len, msg_len > 4 ? msg_len - 4 : 0);

	/* Notification payload starts at the very top of the payload area */
	copy_fromio(xfer->rx.buf, shmem->msg_payload, xfer->rx.len);
}
160 
/* Hand the shared memory area back by flagging the channel as free. */
static void shmem_clear_channel(struct scmi_shared_mem __iomem *shmem)
{
	iowrite32(SCMI_SHMEM_CHAN_STAT_CHANNEL_FREE, &shmem->channel_status);
}
165 
shmem_poll_done(struct scmi_shared_mem __iomem * shmem,struct scmi_xfer * xfer)166 static bool shmem_poll_done(struct scmi_shared_mem __iomem *shmem,
167 			    struct scmi_xfer *xfer)
168 {
169 	u16 xfer_id;
170 
171 	xfer_id = MSG_XTRACT_TOKEN(ioread32(&shmem->msg_header));
172 
173 	if (xfer->hdr.seq != xfer_id)
174 		return false;
175 
176 	return ioread32(&shmem->channel_status) &
177 		(SCMI_SHMEM_CHAN_STAT_CHANNEL_ERROR |
178 		 SCMI_SHMEM_CHAN_STAT_CHANNEL_FREE);
179 }
180 
shmem_channel_free(struct scmi_shared_mem __iomem * shmem)181 static bool shmem_channel_free(struct scmi_shared_mem __iomem *shmem)
182 {
183 	return (ioread32(&shmem->channel_status) &
184 			SCMI_SHMEM_CHAN_STAT_CHANNEL_FREE);
185 }
186 
shmem_channel_intr_enabled(struct scmi_shared_mem __iomem * shmem)187 static bool shmem_channel_intr_enabled(struct scmi_shared_mem __iomem *shmem)
188 {
189 	return ioread32(&shmem->flags) & SCMI_SHMEM_FLAG_INTR_ENABLED;
190 }
191 
shmem_setup_iomap(struct scmi_chan_info * cinfo,struct device * dev,bool tx,struct resource * res,struct scmi_shmem_io_ops ** ops)192 static void __iomem *shmem_setup_iomap(struct scmi_chan_info *cinfo,
193 				       struct device *dev, bool tx,
194 				       struct resource *res,
195 				       struct scmi_shmem_io_ops **ops)
196 {
197 	struct device_node *shmem __free(device_node);
198 	const char *desc = tx ? "Tx" : "Rx";
199 	int ret, idx = tx ? 0 : 1;
200 	struct device *cdev = cinfo->dev;
201 	struct resource lres = {};
202 	resource_size_t size;
203 	void __iomem *addr;
204 	u32 reg_io_width;
205 
206 	shmem = of_parse_phandle(cdev->of_node, "shmem", idx);
207 	if (!shmem)
208 		return IOMEM_ERR_PTR(-ENODEV);
209 
210 	if (!of_device_is_compatible(shmem, "arm,scmi-shmem"))
211 		return IOMEM_ERR_PTR(-ENXIO);
212 
213 	/* Use a local on-stack as a working area when not provided */
214 	if (!res)
215 		res = &lres;
216 
217 	ret = of_address_to_resource(shmem, 0, res);
218 	if (ret) {
219 		dev_err(cdev, "failed to get SCMI %s shared memory\n", desc);
220 		return IOMEM_ERR_PTR(ret);
221 	}
222 
223 	size = resource_size(res);
224 	addr = devm_ioremap(dev, res->start, size);
225 	if (!addr) {
226 		dev_err(dev, "failed to ioremap SCMI %s shared memory\n", desc);
227 		return IOMEM_ERR_PTR(-EADDRNOTAVAIL);
228 	}
229 
230 	of_property_read_u32(shmem, "reg-io-width", ®_io_width);
231 	switch (reg_io_width) {
232 	case 4:
233 		*ops = &shmem_io_ops32;
234 		break;
235 	default:
236 		*ops = &shmem_io_ops_default;
237 		break;
238 	}
239 
240 	return addr;
241 }
242 
/* Transport-agnostic shared memory operations exported to SCMI transports */
static const struct scmi_shared_mem_operations scmi_shmem_ops = {
	.tx_prepare = shmem_tx_prepare,
	.read_header = shmem_read_header,
	.fetch_response = shmem_fetch_response,
	.fetch_notification = shmem_fetch_notification,
	.clear_channel = shmem_clear_channel,
	.poll_done = shmem_poll_done,
	.channel_free = shmem_channel_free,
	.channel_intr_enabled = shmem_channel_intr_enabled,
	.setup_iomap = shmem_setup_iomap,
};
254 
/*
 * scmi_shared_mem_operations_get() - Accessor for the shared memory ops.
 *
 * Return: pointer to the static, immutable scmi_shmem_ops table.
 */
const struct scmi_shared_mem_operations *scmi_shared_mem_operations_get(void)
{
	return &scmi_shmem_ops;
}
259