1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (C) 2018 Cadence Design Systems Inc.
4  *
5  * Author: Boris Brezillon <boris.brezillon@bootlin.com>
6  */
7 
8 #include <linux/bitops.h>
9 #include <linux/clk.h>
10 #include <linux/err.h>
11 #include <linux/errno.h>
12 #include <linux/i3c/master.h>
13 #include <linux/interrupt.h>
14 #include <linux/io.h>
15 #include <linux/iopoll.h>
16 #include <linux/ioport.h>
17 #include <linux/kernel.h>
18 #include <linux/list.h>
19 #include <linux/module.h>
20 #include <linux/of.h>
21 #include <linux/platform_device.h>
22 #include <linux/slab.h>
23 #include <linux/spinlock.h>
24 #include <linux/workqueue.h>
25 #include <linux/of_device.h>
26 
27 #define DEV_ID				0x0
28 #define DEV_ID_I3C_MASTER		0x5034
29 
30 #define CONF_STATUS0			0x4
31 #define CONF_STATUS0_CMDR_DEPTH(x)	(4 << (((x) & GENMASK(31, 29)) >> 29))
32 #define CONF_STATUS0_ECC_CHK		BIT(28)
33 #define CONF_STATUS0_INTEG_CHK		BIT(27)
34 #define CONF_STATUS0_CSR_DAP_CHK	BIT(26)
35 #define CONF_STATUS0_TRANS_TOUT_CHK	BIT(25)
36 #define CONF_STATUS0_PROT_FAULTS_CHK	BIT(24)
37 #define CONF_STATUS0_GPO_NUM(x)		(((x) & GENMASK(23, 16)) >> 16)
38 #define CONF_STATUS0_GPI_NUM(x)		(((x) & GENMASK(15, 8)) >> 8)
39 #define CONF_STATUS0_IBIR_DEPTH(x)	(4 << (((x) & GENMASK(7, 6)) >> 7))
40 #define CONF_STATUS0_SUPPORTS_DDR	BIT(5)
41 #define CONF_STATUS0_SEC_MASTER		BIT(4)
42 #define CONF_STATUS0_DEVS_NUM(x)	((x) & GENMASK(3, 0))
43 
44 #define CONF_STATUS1			0x8
45 #define CONF_STATUS1_IBI_HW_RES(x)	((((x) & GENMASK(31, 28)) >> 28) + 1)
46 #define CONF_STATUS1_CMD_DEPTH(x)	(4 << (((x) & GENMASK(27, 26)) >> 26))
47 #define CONF_STATUS1_SLVDDR_RX_DEPTH(x)	(8 << (((x) & GENMASK(25, 21)) >> 21))
48 #define CONF_STATUS1_SLVDDR_TX_DEPTH(x)	(8 << (((x) & GENMASK(20, 16)) >> 16))
49 #define CONF_STATUS1_IBI_DEPTH(x)	(2 << (((x) & GENMASK(12, 10)) >> 10))
50 #define CONF_STATUS1_RX_DEPTH(x)	(8 << (((x) & GENMASK(9, 5)) >> 5))
51 #define CONF_STATUS1_TX_DEPTH(x)	(8 << ((x) & GENMASK(4, 0)))
52 
53 #define REV_ID				0xc
54 #define REV_ID_VID(id)			(((id) & GENMASK(31, 20)) >> 20)
55 #define REV_ID_PID(id)			(((id) & GENMASK(19, 8)) >> 8)
56 #define REV_ID_REV_MAJOR(id)		(((id) & GENMASK(7, 4)) >> 4)
57 #define REV_ID_REV_MINOR(id)		((id) & GENMASK(3, 0))
58 
59 #define CTRL				0x10
60 #define CTRL_DEV_EN			BIT(31)
61 #define CTRL_HALT_EN			BIT(30)
62 #define CTRL_MCS			BIT(29)
63 #define CTRL_MCS_EN			BIT(28)
64 #define CTRL_THD_DELAY(x)		(((x) << 24) & GENMASK(25, 24))
65 #define CTRL_HJ_DISEC			BIT(8)
66 #define CTRL_MST_ACK			BIT(7)
67 #define CTRL_HJ_ACK			BIT(6)
68 #define CTRL_HJ_INIT			BIT(5)
69 #define CTRL_MST_INIT			BIT(4)
70 #define CTRL_AHDR_OPT			BIT(3)
71 #define CTRL_PURE_BUS_MODE		0
72 #define CTRL_MIXED_FAST_BUS_MODE	2
73 #define CTRL_MIXED_SLOW_BUS_MODE	3
74 #define CTRL_BUS_MODE_MASK		GENMASK(1, 0)
75 #define THD_DELAY_MAX			3
76 
77 #define PRESCL_CTRL0			0x14
78 #define PRESCL_CTRL0_I2C(x)		((x) << 16)
79 #define PRESCL_CTRL0_I3C(x)		(x)
80 #define PRESCL_CTRL0_I3C_MAX		GENMASK(9, 0)
81 #define PRESCL_CTRL0_I2C_MAX		GENMASK(15, 0)
82 
83 #define PRESCL_CTRL1			0x18
84 #define PRESCL_CTRL1_PP_LOW_MASK	GENMASK(15, 8)
85 #define PRESCL_CTRL1_PP_LOW(x)		((x) << 8)
86 #define PRESCL_CTRL1_OD_LOW_MASK	GENMASK(7, 0)
87 #define PRESCL_CTRL1_OD_LOW(x)		(x)
88 
89 #define MST_IER				0x20
90 #define MST_IDR				0x24
91 #define MST_IMR				0x28
92 #define MST_ICR				0x2c
93 #define MST_ISR				0x30
94 #define MST_INT_HALTED			BIT(18)
95 #define MST_INT_MR_DONE			BIT(17)
96 #define MST_INT_IMM_COMP		BIT(16)
97 #define MST_INT_TX_THR			BIT(15)
98 #define MST_INT_TX_OVF			BIT(14)
99 #define MST_INT_IBID_THR		BIT(12)
100 #define MST_INT_IBID_UNF		BIT(11)
101 #define MST_INT_IBIR_THR		BIT(10)
102 #define MST_INT_IBIR_UNF		BIT(9)
103 #define MST_INT_IBIR_OVF		BIT(8)
104 #define MST_INT_RX_THR			BIT(7)
105 #define MST_INT_RX_UNF			BIT(6)
106 #define MST_INT_CMDD_EMP		BIT(5)
107 #define MST_INT_CMDD_THR		BIT(4)
108 #define MST_INT_CMDD_OVF		BIT(3)
109 #define MST_INT_CMDR_THR		BIT(2)
110 #define MST_INT_CMDR_UNF		BIT(1)
111 #define MST_INT_CMDR_OVF		BIT(0)
112 
113 #define MST_STATUS0			0x34
114 #define MST_STATUS0_IDLE		BIT(18)
115 #define MST_STATUS0_HALTED		BIT(17)
116 #define MST_STATUS0_MASTER_MODE		BIT(16)
117 #define MST_STATUS0_TX_FULL		BIT(13)
118 #define MST_STATUS0_IBID_FULL		BIT(12)
119 #define MST_STATUS0_IBIR_FULL		BIT(11)
120 #define MST_STATUS0_RX_FULL		BIT(10)
121 #define MST_STATUS0_CMDD_FULL		BIT(9)
122 #define MST_STATUS0_CMDR_FULL		BIT(8)
123 #define MST_STATUS0_TX_EMP		BIT(5)
124 #define MST_STATUS0_IBID_EMP		BIT(4)
125 #define MST_STATUS0_IBIR_EMP		BIT(3)
126 #define MST_STATUS0_RX_EMP		BIT(2)
127 #define MST_STATUS0_CMDD_EMP		BIT(1)
128 #define MST_STATUS0_CMDR_EMP		BIT(0)
129 
130 #define CMDR				0x38
131 #define CMDR_NO_ERROR			0
132 #define CMDR_DDR_PREAMBLE_ERROR		1
133 #define CMDR_DDR_PARITY_ERROR		2
134 #define CMDR_DDR_RX_FIFO_OVF		3
135 #define CMDR_DDR_TX_FIFO_UNF		4
136 #define CMDR_M0_ERROR			5
137 #define CMDR_M1_ERROR			6
138 #define CMDR_M2_ERROR			7
139 #define CMDR_MST_ABORT			8
140 #define CMDR_NACK_RESP			9
141 #define CMDR_INVALID_DA			10
142 #define CMDR_DDR_DROPPED		11
143 #define CMDR_ERROR(x)			(((x) & GENMASK(27, 24)) >> 24)
144 #define CMDR_XFER_BYTES(x)		(((x) & GENMASK(19, 8)) >> 8)
145 #define CMDR_CMDID_HJACK_DISEC		0xfe
146 #define CMDR_CMDID_HJACK_ENTDAA		0xff
147 #define CMDR_CMDID(x)			((x) & GENMASK(7, 0))
148 
149 #define IBIR				0x3c
150 #define IBIR_ACKED			BIT(12)
151 #define IBIR_SLVID(x)			(((x) & GENMASK(11, 8)) >> 8)
152 #define IBIR_ERROR			BIT(7)
153 #define IBIR_XFER_BYTES(x)		(((x) & GENMASK(6, 2)) >> 2)
154 #define IBIR_TYPE_IBI			0
155 #define IBIR_TYPE_HJ			1
156 #define IBIR_TYPE_MR			2
157 #define IBIR_TYPE(x)			((x) & GENMASK(1, 0))
158 
159 #define SLV_IER				0x40
160 #define SLV_IDR				0x44
161 #define SLV_IMR				0x48
162 #define SLV_ICR				0x4c
163 #define SLV_ISR				0x50
164 #define SLV_INT_TM			BIT(20)
165 #define SLV_INT_ERROR			BIT(19)
166 #define SLV_INT_EVENT_UP		BIT(18)
167 #define SLV_INT_HJ_DONE			BIT(17)
168 #define SLV_INT_MR_DONE			BIT(16)
169 #define SLV_INT_DA_UPD			BIT(15)
170 #define SLV_INT_SDR_FAIL		BIT(14)
171 #define SLV_INT_DDR_FAIL		BIT(13)
172 #define SLV_INT_M_RD_ABORT		BIT(12)
173 #define SLV_INT_DDR_RX_THR		BIT(11)
174 #define SLV_INT_DDR_TX_THR		BIT(10)
175 #define SLV_INT_SDR_RX_THR		BIT(9)
176 #define SLV_INT_SDR_TX_THR		BIT(8)
177 #define SLV_INT_DDR_RX_UNF		BIT(7)
178 #define SLV_INT_DDR_TX_OVF		BIT(6)
179 #define SLV_INT_SDR_RX_UNF		BIT(5)
180 #define SLV_INT_SDR_TX_OVF		BIT(4)
181 #define SLV_INT_DDR_RD_COMP		BIT(3)
182 #define SLV_INT_DDR_WR_COMP		BIT(2)
183 #define SLV_INT_SDR_RD_COMP		BIT(1)
184 #define SLV_INT_SDR_WR_COMP		BIT(0)
185 
186 #define SLV_STATUS0			0x54
187 #define SLV_STATUS0_REG_ADDR(s)		(((s) & GENMASK(23, 16)) >> 16)
188 #define SLV_STATUS0_XFRD_BYTES(s)	((s) & GENMASK(15, 0))
189 
190 #define SLV_STATUS1			0x58
191 #define SLV_STATUS1_AS(s)		(((s) & GENMASK(21, 20)) >> 20)
192 #define SLV_STATUS1_VEN_TM		BIT(19)
193 #define SLV_STATUS1_HJ_DIS		BIT(18)
194 #define SLV_STATUS1_MR_DIS		BIT(17)
195 #define SLV_STATUS1_PROT_ERR		BIT(16)
196 #define SLV_STATUS1_DA(s)		(((s) & GENMASK(15, 9)) >> 9)
197 #define SLV_STATUS1_HAS_DA		BIT(8)
198 #define SLV_STATUS1_DDR_RX_FULL		BIT(7)
199 #define SLV_STATUS1_DDR_TX_FULL		BIT(6)
200 #define SLV_STATUS1_DDR_RX_EMPTY	BIT(5)
201 #define SLV_STATUS1_DDR_TX_EMPTY	BIT(4)
202 #define SLV_STATUS1_SDR_RX_FULL		BIT(3)
203 #define SLV_STATUS1_SDR_TX_FULL		BIT(2)
204 #define SLV_STATUS1_SDR_RX_EMPTY	BIT(1)
205 #define SLV_STATUS1_SDR_TX_EMPTY	BIT(0)
206 
207 #define CMD0_FIFO			0x60
208 #define CMD0_FIFO_IS_DDR		BIT(31)
209 #define CMD0_FIFO_IS_CCC		BIT(30)
210 #define CMD0_FIFO_BCH			BIT(29)
211 #define XMIT_BURST_STATIC_SUBADDR	0
212 #define XMIT_SINGLE_INC_SUBADDR		1
213 #define XMIT_SINGLE_STATIC_SUBADDR	2
214 #define XMIT_BURST_WITHOUT_SUBADDR	3
215 #define CMD0_FIFO_PRIV_XMIT_MODE(m)	((m) << 27)
216 #define CMD0_FIFO_SBCA			BIT(26)
217 #define CMD0_FIFO_RSBC			BIT(25)
218 #define CMD0_FIFO_IS_10B		BIT(24)
219 #define CMD0_FIFO_PL_LEN(l)		((l) << 12)
220 #define CMD0_FIFO_PL_LEN_MAX		4095
221 #define CMD0_FIFO_DEV_ADDR(a)		((a) << 1)
222 #define CMD0_FIFO_RNW			BIT(0)
223 
224 #define CMD1_FIFO			0x64
225 #define CMD1_FIFO_CMDID(id)		((id) << 24)
226 #define CMD1_FIFO_CSRADDR(a)		(a)
227 #define CMD1_FIFO_CCC(id)		(id)
228 
229 #define TX_FIFO				0x68
230 
231 #define IMD_CMD0			0x70
232 #define IMD_CMD0_PL_LEN(l)		((l) << 12)
233 #define IMD_CMD0_DEV_ADDR(a)		((a) << 1)
234 #define IMD_CMD0_RNW			BIT(0)
235 
236 #define IMD_CMD1			0x74
237 #define IMD_CMD1_CCC(id)		(id)
238 
239 #define IMD_DATA			0x78
240 #define RX_FIFO				0x80
241 #define IBI_DATA_FIFO			0x84
242 #define SLV_DDR_TX_FIFO			0x88
243 #define SLV_DDR_RX_FIFO			0x8c
244 
245 #define CMD_IBI_THR_CTRL		0x90
246 #define IBIR_THR(t)			((t) << 24)
247 #define CMDR_THR(t)			((t) << 16)
248 #define IBI_THR(t)			((t) << 8)
249 #define CMD_THR(t)			(t)
250 
251 #define TX_RX_THR_CTRL			0x94
252 #define RX_THR(t)			((t) << 16)
253 #define TX_THR(t)			(t)
254 
255 #define SLV_DDR_TX_RX_THR_CTRL		0x98
256 #define SLV_DDR_RX_THR(t)		((t) << 16)
257 #define SLV_DDR_TX_THR(t)		(t)
258 
259 #define FLUSH_CTRL			0x9c
260 #define FLUSH_IBI_RESP			BIT(23)
261 #define FLUSH_CMD_RESP			BIT(22)
262 #define FLUSH_SLV_DDR_RX_FIFO		BIT(22)
263 #define FLUSH_SLV_DDR_TX_FIFO		BIT(21)
264 #define FLUSH_IMM_FIFO			BIT(20)
265 #define FLUSH_IBI_FIFO			BIT(19)
266 #define FLUSH_RX_FIFO			BIT(18)
267 #define FLUSH_TX_FIFO			BIT(17)
268 #define FLUSH_CMD_FIFO			BIT(16)
269 
270 #define TTO_PRESCL_CTRL0		0xb0
271 #define TTO_PRESCL_CTRL0_DIVB(x)	((x) << 16)
272 #define TTO_PRESCL_CTRL0_DIVA(x)	(x)
273 
274 #define TTO_PRESCL_CTRL1		0xb4
275 #define TTO_PRESCL_CTRL1_DIVB(x)	((x) << 16)
276 #define TTO_PRESCL_CTRL1_DIVA(x)	(x)
277 
278 #define DEVS_CTRL			0xb8
279 #define DEVS_CTRL_DEV_CLR_SHIFT		16
280 #define DEVS_CTRL_DEV_CLR_ALL		GENMASK(31, 16)
281 #define DEVS_CTRL_DEV_CLR(dev)		BIT(16 + (dev))
282 #define DEVS_CTRL_DEV_ACTIVE(dev)	BIT(dev)
283 #define DEVS_CTRL_DEVS_ACTIVE_MASK	GENMASK(15, 0)
284 #define MAX_DEVS			16
285 
286 #define DEV_ID_RR0(d)			(0xc0 + ((d) * 0x10))
287 #define DEV_ID_RR0_LVR_EXT_ADDR		BIT(11)
288 #define DEV_ID_RR0_HDR_CAP		BIT(10)
289 #define DEV_ID_RR0_IS_I3C		BIT(9)
290 #define DEV_ID_RR0_DEV_ADDR_MASK	(GENMASK(6, 0) | GENMASK(15, 13))
291 #define DEV_ID_RR0_SET_DEV_ADDR(a)	(((a) & GENMASK(6, 0)) |	\
292 					 (((a) & GENMASK(9, 7)) << 6))
293 #define DEV_ID_RR0_GET_DEV_ADDR(x)	((((x) >> 1) & GENMASK(6, 0)) |	\
294 					 (((x) >> 6) & GENMASK(9, 7)))
295 
296 #define DEV_ID_RR1(d)			(0xc4 + ((d) * 0x10))
297 #define DEV_ID_RR1_PID_MSB(pid)		(pid)
298 
299 #define DEV_ID_RR2(d)			(0xc8 + ((d) * 0x10))
300 #define DEV_ID_RR2_PID_LSB(pid)		((pid) << 16)
301 #define DEV_ID_RR2_BCR(bcr)		((bcr) << 8)
302 #define DEV_ID_RR2_DCR(dcr)		(dcr)
303 #define DEV_ID_RR2_LVR(lvr)		(lvr)
304 
305 #define SIR_MAP(x)			(0x180 + ((x) * 4))
306 #define SIR_MAP_DEV_REG(d)		SIR_MAP((d) / 2)
307 #define SIR_MAP_DEV_SHIFT(d, fs)	((fs) + (((d) % 2) ? 16 : 0))
308 #define SIR_MAP_DEV_CONF_MASK(d)	(GENMASK(15, 0) << (((d) % 2) ? 16 : 0))
309 #define SIR_MAP_DEV_CONF(d, c)		((c) << (((d) % 2) ? 16 : 0))
310 #define DEV_ROLE_SLAVE			0
311 #define DEV_ROLE_MASTER			1
312 #define SIR_MAP_DEV_ROLE(role)		((role) << 14)
313 #define SIR_MAP_DEV_SLOW		BIT(13)
314 #define SIR_MAP_DEV_PL(l)		((l) << 8)
315 #define SIR_MAP_PL_MAX			GENMASK(4, 0)
316 #define SIR_MAP_DEV_DA(a)		((a) << 1)
317 #define SIR_MAP_DEV_ACK			BIT(0)
318 
319 #define GPIR_WORD(x)			(0x200 + ((x) * 4))
320 #define GPI_REG(val, id)		\
321 	(((val) >> (((id) % 4) * 8)) & GENMASK(7, 0))
322 
323 #define GPOR_WORD(x)			(0x220 + ((x) * 4))
324 #define GPO_REG(val, id)		\
325 	(((val) >> (((id) % 4) * 8)) & GENMASK(7, 0))
326 
327 #define ASF_INT_STATUS			0x300
328 #define ASF_INT_RAW_STATUS		0x304
329 #define ASF_INT_MASK			0x308
330 #define ASF_INT_TEST			0x30c
331 #define ASF_INT_FATAL_SELECT		0x310
332 #define ASF_INTEGRITY_ERR		BIT(6)
333 #define ASF_PROTOCOL_ERR		BIT(5)
334 #define ASF_TRANS_TIMEOUT_ERR		BIT(4)
335 #define ASF_CSR_ERR			BIT(3)
336 #define ASF_DAP_ERR			BIT(2)
337 #define ASF_SRAM_UNCORR_ERR		BIT(1)
338 #define ASF_SRAM_CORR_ERR		BIT(0)
339 
340 #define ASF_SRAM_CORR_FAULT_STATUS	0x320
341 #define ASF_SRAM_UNCORR_FAULT_STATUS	0x324
342 #define ASF_SRAM_CORR_FAULT_INSTANCE(x)	((x) >> 24)
343 #define ASF_SRAM_CORR_FAULT_ADDR(x)	((x) & GENMASK(23, 0))
344 
345 #define ASF_SRAM_FAULT_STATS		0x328
346 #define ASF_SRAM_FAULT_UNCORR_STATS(x)	((x) >> 16)
347 #define ASF_SRAM_FAULT_CORR_STATS(x)	((x) & GENMASK(15, 0))
348 
349 #define ASF_TRANS_TOUT_CTRL		0x330
350 #define ASF_TRANS_TOUT_EN		BIT(31)
351 #define ASF_TRANS_TOUT_VAL(x)	(x)
352 
353 #define ASF_TRANS_TOUT_FAULT_MASK	0x334
354 #define ASF_TRANS_TOUT_FAULT_STATUS	0x338
355 #define ASF_TRANS_TOUT_FAULT_APB	BIT(3)
356 #define ASF_TRANS_TOUT_FAULT_SCL_LOW	BIT(2)
357 #define ASF_TRANS_TOUT_FAULT_SCL_HIGH	BIT(1)
358 #define ASF_TRANS_TOUT_FAULT_FSCL_HIGH	BIT(0)
359 
360 #define ASF_PROTO_FAULT_MASK		0x340
361 #define ASF_PROTO_FAULT_STATUS		0x344
362 #define ASF_PROTO_FAULT_SLVSDR_RD_ABORT	BIT(31)
363 #define ASF_PROTO_FAULT_SLVDDR_FAIL	BIT(30)
364 #define ASF_PROTO_FAULT_S(x)		BIT(16 + (x))
365 #define ASF_PROTO_FAULT_MSTSDR_RD_ABORT	BIT(15)
366 #define ASF_PROTO_FAULT_MSTDDR_FAIL	BIT(14)
367 #define ASF_PROTO_FAULT_M(x)		BIT(x)
368 
369 struct cdns_i3c_master_caps {
370 	u32 cmdfifodepth;
371 	u32 cmdrfifodepth;
372 	u32 txfifodepth;
373 	u32 rxfifodepth;
374 	u32 ibirfifodepth;
375 };
376 
377 struct cdns_i3c_cmd {
378 	u32 cmd0;
379 	u32 cmd1;
380 	u32 tx_len;
381 	const void *tx_buf;
382 	u32 rx_len;
383 	void *rx_buf;
384 	u32 error;
385 };
386 
387 struct cdns_i3c_xfer {
388 	struct list_head node;
389 	struct completion comp;
390 	int ret;
391 	unsigned int ncmds;
392 	struct cdns_i3c_cmd cmds[];
393 };
394 
395 struct cdns_i3c_data {
396 	u8 thd_delay_ns;
397 };
398 
399 struct cdns_i3c_master {
400 	struct work_struct hj_work;
401 	struct i3c_master_controller base;
402 	u32 free_rr_slots;
403 	unsigned int maxdevs;
404 	struct {
405 		unsigned int num_slots;
406 		struct i3c_dev_desc **slots;
407 		spinlock_t lock;
408 	} ibi;
409 	struct {
410 		struct list_head list;
411 		struct cdns_i3c_xfer *cur;
412 		spinlock_t lock;
413 	} xferqueue;
414 	void __iomem *regs;
415 	struct clk *sysclk;
416 	struct clk *pclk;
417 	struct cdns_i3c_master_caps caps;
418 	unsigned long i3c_scl_lim;
419 	const struct cdns_i3c_data *devdata;
420 };
421 
422 static inline struct cdns_i3c_master *
423 to_cdns_i3c_master(struct i3c_master_controller *master)
424 {
425 	return container_of(master, struct cdns_i3c_master, base);
426 }
427 
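/*
 * Push a payload to the TX FIFO: full 32-bit words first, then one extra
 * word carrying the 1-3 trailing bytes, if any.
 */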
428 static void cdns_i3c_master_wr_to_tx_fifo(struct cdns_i3c_master *master,
429 					  const u8 *bytes, int nbytes)
430 {
431 	writesl(master->regs + TX_FIFO, bytes, nbytes / 4);
432 	if (nbytes & 3) {
433 		u32 tmp = 0;
434 
435 		memcpy(&tmp, bytes + (nbytes & ~3), nbytes & 3);
436 		writesl(master->regs + TX_FIFO, &tmp, 1);
437 	}
438 }
439 
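/*
 * Pop a payload from the RX FIFO: full 32-bit words first, then the 1-3
 * trailing bytes extracted from one extra word, if any.
 */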
440 static void cdns_i3c_master_rd_from_rx_fifo(struct cdns_i3c_master *master,
441 					    u8 *bytes, int nbytes)
442 {
443 	readsl(master->regs + RX_FIFO, bytes, nbytes / 4);
444 	if (nbytes & 3) {
445 		u32 tmp;
446 
447 		readsl(master->regs + RX_FIFO, &tmp, 1);
448 		memcpy(bytes + (nbytes & ~3), &tmp, nbytes & 3);
449 	}
450 }
451 
452 static bool cdns_i3c_master_supports_ccc_cmd(struct i3c_master_controller *m,
453 					     const struct i3c_ccc_cmd *cmd)
454 {
455 	if (cmd->ndests > 1)
456 		return false;
457 
458 	switch (cmd->id) {
459 	case I3C_CCC_ENEC(true):
460 	case I3C_CCC_ENEC(false):
461 	case I3C_CCC_DISEC(true):
462 	case I3C_CCC_DISEC(false):
463 	case I3C_CCC_ENTAS(0, true):
464 	case I3C_CCC_ENTAS(0, false):
465 	case I3C_CCC_RSTDAA(true):
466 	case I3C_CCC_RSTDAA(false):
467 	case I3C_CCC_ENTDAA:
468 	case I3C_CCC_SETMWL(true):
469 	case I3C_CCC_SETMWL(false):
470 	case I3C_CCC_SETMRL(true):
471 	case I3C_CCC_SETMRL(false):
472 	case I3C_CCC_DEFSLVS:
473 	case I3C_CCC_ENTHDR(0):
474 	case I3C_CCC_SETDASA:
475 	case I3C_CCC_SETNEWDA:
476 	case I3C_CCC_GETMWL:
477 	case I3C_CCC_GETMRL:
478 	case I3C_CCC_GETPID:
479 	case I3C_CCC_GETBCR:
480 	case I3C_CCC_GETDCR:
481 	case I3C_CCC_GETSTATUS:
482 	case I3C_CCC_GETACCMST:
483 	case I3C_CCC_GETMXDS:
484 	case I3C_CCC_GETHDRCAP:
485 		return true;
486 	default:
487 		break;
488 	}
489 
490 	return false;
491 }
492 
493 static int cdns_i3c_master_disable(struct cdns_i3c_master *master)
494 {
495 	u32 status;
496 
497 	writel(readl(master->regs + CTRL) & ~CTRL_DEV_EN, master->regs + CTRL);
498 
499 	return readl_poll_timeout(master->regs + MST_STATUS0, status,
500 				  status & MST_STATUS0_IDLE, 10, 1000000);
501 }
502 
503 static void cdns_i3c_master_enable(struct cdns_i3c_master *master)
504 {
505 	writel(readl(master->regs + CTRL) | CTRL_DEV_EN, master->regs + CTRL);
506 }
507 
508 static struct cdns_i3c_xfer *
509 cdns_i3c_master_alloc_xfer(struct cdns_i3c_master *master, unsigned int ncmds)
510 {
511 	struct cdns_i3c_xfer *xfer;
512 
513 	xfer = kzalloc(struct_size(xfer, cmds, ncmds), GFP_KERNEL);
514 	if (!xfer)
515 		return NULL;
516 
517 	INIT_LIST_HEAD(&xfer->node);
518 	xfer->ncmds = ncmds;
519 	xfer->ret = -ETIMEDOUT;
520 
521 	return xfer;
522 }
523 
524 static void cdns_i3c_master_free_xfer(struct cdns_i3c_xfer *xfer)
525 {
526 	kfree(xfer);
527 }
528 
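/*
 * Start the transfer at the head of the queue: push all TX payloads to the
 * TX FIFO, queue one descriptor per command in the CMD FIFOs, trigger the
 * command sequence (CTRL_MCS) and unmask the "command descriptor FIFO
 * empty" interrupt. Must be called with xferqueue.lock held.
 */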
529 static void cdns_i3c_master_start_xfer_locked(struct cdns_i3c_master *master)
530 {
531 	struct cdns_i3c_xfer *xfer = master->xferqueue.cur;
532 	unsigned int i;
533 
534 	if (!xfer)
535 		return;
536 
537 	writel(MST_INT_CMDD_EMP, master->regs + MST_ICR);
538 	for (i = 0; i < xfer->ncmds; i++) {
539 		struct cdns_i3c_cmd *cmd = &xfer->cmds[i];
540 
541 		cdns_i3c_master_wr_to_tx_fifo(master, cmd->tx_buf,
542 					      cmd->tx_len);
543 	}
544 
545 	for (i = 0; i < xfer->ncmds; i++) {
546 		struct cdns_i3c_cmd *cmd = &xfer->cmds[i];
547 
548 		writel(cmd->cmd1 | CMD1_FIFO_CMDID(i),
549 		       master->regs + CMD1_FIFO);
550 		writel(cmd->cmd0, master->regs + CMD0_FIFO);
551 	}
552 
553 	writel(readl(master->regs + CTRL) | CTRL_MCS,
554 	       master->regs + CTRL);
555 	writel(MST_INT_CMDD_EMP, master->regs + MST_IER);
556 }
557 
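/*
 * Transfer completion path, called from the interrupt handler with
 * xferqueue.lock held: drain the command response (CMDR) FIFO, copy RX data
 * and error codes back into each command, convert them to an errno,
 * complete the active transfer and start the next queued one, if any.
 */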
558 static void cdns_i3c_master_end_xfer_locked(struct cdns_i3c_master *master,
559 					    u32 isr)
560 {
561 	struct cdns_i3c_xfer *xfer = master->xferqueue.cur;
562 	int i, ret = 0;
563 	u32 status0;
564 
565 	if (!xfer)
566 		return;
567 
568 	if (!(isr & MST_INT_CMDD_EMP))
569 		return;
570 
571 	writel(MST_INT_CMDD_EMP, master->regs + MST_IDR);
572 
573 	for (status0 = readl(master->regs + MST_STATUS0);
574 	     !(status0 & MST_STATUS0_CMDR_EMP);
575 	     status0 = readl(master->regs + MST_STATUS0)) {
576 		struct cdns_i3c_cmd *cmd;
577 		u32 cmdr, rx_len, id;
578 
579 		cmdr = readl(master->regs + CMDR);
580 		id = CMDR_CMDID(cmdr);
581 		if (id == CMDR_CMDID_HJACK_DISEC ||
582 		    id == CMDR_CMDID_HJACK_ENTDAA ||
583 		    WARN_ON(id >= xfer->ncmds))
584 			continue;
585 
586 		cmd = &xfer->cmds[CMDR_CMDID(cmdr)];
587 		rx_len = min_t(u32, CMDR_XFER_BYTES(cmdr), cmd->rx_len);
588 		cdns_i3c_master_rd_from_rx_fifo(master, cmd->rx_buf, rx_len);
589 		cmd->error = CMDR_ERROR(cmdr);
590 	}
591 
592 	for (i = 0; i < xfer->ncmds; i++) {
593 		switch (xfer->cmds[i].error) {
594 		case CMDR_NO_ERROR:
595 			break;
596 
597 		case CMDR_DDR_PREAMBLE_ERROR:
598 		case CMDR_DDR_PARITY_ERROR:
599 		case CMDR_M0_ERROR:
600 		case CMDR_M1_ERROR:
601 		case CMDR_M2_ERROR:
602 		case CMDR_MST_ABORT:
603 		case CMDR_NACK_RESP:
604 		case CMDR_DDR_DROPPED:
605 			ret = -EIO;
606 			break;
607 
608 		case CMDR_DDR_RX_FIFO_OVF:
609 		case CMDR_DDR_TX_FIFO_UNF:
610 			ret = -ENOSPC;
611 			break;
612 
613 		case CMDR_INVALID_DA:
614 		default:
615 			ret = -EINVAL;
616 			break;
617 		}
618 	}
619 
620 	xfer->ret = ret;
621 	complete(&xfer->comp);
622 
623 	xfer = list_first_entry_or_null(&master->xferqueue.list,
624 					struct cdns_i3c_xfer, node);
625 	if (xfer)
626 		list_del_init(&xfer->node);
627 
628 	master->xferqueue.cur = xfer;
629 	cdns_i3c_master_start_xfer_locked(master);
630 }
631 
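/*
 * Queue a transfer: start it right away if nothing is in flight, otherwise
 * append it to the wait list.
 */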
632 static void cdns_i3c_master_queue_xfer(struct cdns_i3c_master *master,
633 				       struct cdns_i3c_xfer *xfer)
634 {
635 	unsigned long flags;
636 
637 	init_completion(&xfer->comp);
638 	spin_lock_irqsave(&master->xferqueue.lock, flags);
639 	if (master->xferqueue.cur) {
640 		list_add_tail(&xfer->node, &master->xferqueue.list);
641 	} else {
642 		master->xferqueue.cur = xfer;
643 		cdns_i3c_master_start_xfer_locked(master);
644 	}
645 	spin_unlock_irqrestore(&master->xferqueue.lock, flags);
646 }
647 
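/*
 * Abort a transfer that timed out: if it is the one currently in flight,
 * disable the controller, wait for it to go idle, flush the FIFOs and
 * re-enable it; otherwise simply remove the transfer from the wait list.
 */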
648 static void cdns_i3c_master_unqueue_xfer(struct cdns_i3c_master *master,
649 					 struct cdns_i3c_xfer *xfer)
650 {
651 	unsigned long flags;
652 
653 	spin_lock_irqsave(&master->xferqueue.lock, flags);
654 	if (master->xferqueue.cur == xfer) {
655 		u32 status;
656 
657 		writel(readl(master->regs + CTRL) & ~CTRL_DEV_EN,
658 		       master->regs + CTRL);
659 		readl_poll_timeout_atomic(master->regs + MST_STATUS0, status,
660 					  status & MST_STATUS0_IDLE, 10,
661 					  1000000);
662 		master->xferqueue.cur = NULL;
663 		writel(FLUSH_RX_FIFO | FLUSH_TX_FIFO | FLUSH_CMD_FIFO |
664 		       FLUSH_CMD_RESP,
665 		       master->regs + FLUSH_CTRL);
666 		writel(MST_INT_CMDD_EMP, master->regs + MST_IDR);
667 		writel(readl(master->regs + CTRL) | CTRL_DEV_EN,
668 		       master->regs + CTRL);
669 	} else {
670 		list_del_init(&xfer->node);
671 	}
672 	spin_unlock_irqrestore(&master->xferqueue.lock, flags);
673 }
674 
675 static enum i3c_error_code cdns_i3c_cmd_get_err(struct cdns_i3c_cmd *cmd)
676 {
677 	switch (cmd->error) {
678 	case CMDR_M0_ERROR:
679 		return I3C_ERROR_M0;
680 
681 	case CMDR_M1_ERROR:
682 		return I3C_ERROR_M1;
683 
684 	case CMDR_M2_ERROR:
685 	case CMDR_NACK_RESP:
686 		return I3C_ERROR_M2;
687 
688 	default:
689 		break;
690 	}
691 
692 	return I3C_ERROR_UNKNOWN;
693 }
694 
695 static int cdns_i3c_master_send_ccc_cmd(struct i3c_master_controller *m,
696 					struct i3c_ccc_cmd *cmd)
697 {
698 	struct cdns_i3c_master *master = to_cdns_i3c_master(m);
699 	struct cdns_i3c_xfer *xfer;
700 	struct cdns_i3c_cmd *ccmd;
701 	int ret;
702 
703 	xfer = cdns_i3c_master_alloc_xfer(master, 1);
704 	if (!xfer)
705 		return -ENOMEM;
706 
707 	ccmd = xfer->cmds;
708 	ccmd->cmd1 = CMD1_FIFO_CCC(cmd->id);
709 	ccmd->cmd0 = CMD0_FIFO_IS_CCC |
710 		     CMD0_FIFO_PL_LEN(cmd->dests[0].payload.len);
711 
712 	if (cmd->id & I3C_CCC_DIRECT)
713 		ccmd->cmd0 |= CMD0_FIFO_DEV_ADDR(cmd->dests[0].addr);
714 
715 	if (cmd->rnw) {
716 		ccmd->cmd0 |= CMD0_FIFO_RNW;
717 		ccmd->rx_buf = cmd->dests[0].payload.data;
718 		ccmd->rx_len = cmd->dests[0].payload.len;
719 	} else {
720 		ccmd->tx_buf = cmd->dests[0].payload.data;
721 		ccmd->tx_len = cmd->dests[0].payload.len;
722 	}
723 
724 	cdns_i3c_master_queue_xfer(master, xfer);
725 	if (!wait_for_completion_timeout(&xfer->comp, msecs_to_jiffies(1000)))
726 		cdns_i3c_master_unqueue_xfer(master, xfer);
727 
728 	ret = xfer->ret;
729 	cmd->err = cdns_i3c_cmd_get_err(&xfer->cmds[0]);
730 	cdns_i3c_master_free_xfer(xfer);
731 
732 	return ret;
733 }
734 
735 static int cdns_i3c_master_priv_xfers(struct i3c_dev_desc *dev,
736 				      struct i3c_priv_xfer *xfers,
737 				      int nxfers)
738 {
739 	struct i3c_master_controller *m = i3c_dev_get_master(dev);
740 	struct cdns_i3c_master *master = to_cdns_i3c_master(m);
741 	int txslots = 0, rxslots = 0, i, ret;
742 	struct cdns_i3c_xfer *cdns_xfer;
743 
744 	for (i = 0; i < nxfers; i++) {
745 		if (xfers[i].len > CMD0_FIFO_PL_LEN_MAX)
746 			return -ENOTSUPP;
747 	}
748 
749 	if (!nxfers)
750 		return 0;
751 
752 	if (nxfers > master->caps.cmdfifodepth ||
753 	    nxfers > master->caps.cmdrfifodepth)
754 		return -ENOTSUPP;
755 
756 	/*
757 	 * First make sure that all transactions (block of transfers separated
758 	 * by a STOP marker) fit in the FIFOs.
759 	 */
760 	for (i = 0; i < nxfers; i++) {
761 		if (xfers[i].rnw)
762 			rxslots += DIV_ROUND_UP(xfers[i].len, 4);
763 		else
764 			txslots += DIV_ROUND_UP(xfers[i].len, 4);
765 	}
766 
767 	if (rxslots > master->caps.rxfifodepth ||
768 	    txslots > master->caps.txfifodepth)
769 		return -ENOTSUPP;
770 
771 	cdns_xfer = cdns_i3c_master_alloc_xfer(master, nxfers);
772 	if (!cdns_xfer)
773 		return -ENOMEM;
774 
775 	for (i = 0; i < nxfers; i++) {
776 		struct cdns_i3c_cmd *ccmd = &cdns_xfer->cmds[i];
777 		u32 pl_len = xfers[i].len;
778 
779 		ccmd->cmd0 = CMD0_FIFO_DEV_ADDR(dev->info.dyn_addr) |
780 			CMD0_FIFO_PRIV_XMIT_MODE(XMIT_BURST_WITHOUT_SUBADDR);
781 
782 		if (xfers[i].rnw) {
783 			ccmd->cmd0 |= CMD0_FIFO_RNW;
784 			ccmd->rx_buf = xfers[i].data.in;
785 			ccmd->rx_len = xfers[i].len;
786 			pl_len++;
787 		} else {
788 			ccmd->tx_buf = xfers[i].data.out;
789 			ccmd->tx_len = xfers[i].len;
790 		}
791 
792 		ccmd->cmd0 |= CMD0_FIFO_PL_LEN(pl_len);
793 
794 		if (i < nxfers - 1)
795 			ccmd->cmd0 |= CMD0_FIFO_RSBC;
796 
797 		if (!i)
798 			ccmd->cmd0 |= CMD0_FIFO_BCH;
799 	}
800 
801 	cdns_i3c_master_queue_xfer(master, cdns_xfer);
802 	if (!wait_for_completion_timeout(&cdns_xfer->comp,
803 					 msecs_to_jiffies(1000)))
804 		cdns_i3c_master_unqueue_xfer(master, cdns_xfer);
805 
806 	ret = cdns_xfer->ret;
807 
808 	for (i = 0; i < nxfers; i++)
809 		xfers[i].err = cdns_i3c_cmd_get_err(&cdns_xfer->cmds[i]);
810 
811 	cdns_i3c_master_free_xfer(cdns_xfer);
812 
813 	return ret;
814 }
815 
816 static int cdns_i3c_master_i2c_xfers(struct i2c_dev_desc *dev,
817 				     const struct i2c_msg *xfers, int nxfers)
818 {
819 	struct i3c_master_controller *m = i2c_dev_get_master(dev);
820 	struct cdns_i3c_master *master = to_cdns_i3c_master(m);
821 	unsigned int nrxwords = 0, ntxwords = 0;
822 	struct cdns_i3c_xfer *xfer;
823 	int i, ret = 0;
824 
825 	if (nxfers > master->caps.cmdfifodepth)
826 		return -ENOTSUPP;
827 
828 	for (i = 0; i < nxfers; i++) {
829 		if (xfers[i].len > CMD0_FIFO_PL_LEN_MAX)
830 			return -ENOTSUPP;
831 
832 		if (xfers[i].flags & I2C_M_RD)
833 			nrxwords += DIV_ROUND_UP(xfers[i].len, 4);
834 		else
835 			ntxwords += DIV_ROUND_UP(xfers[i].len, 4);
836 	}
837 
838 	if (ntxwords > master->caps.txfifodepth ||
839 	    nrxwords > master->caps.rxfifodepth)
840 		return -ENOTSUPP;
841 
842 	xfer = cdns_i3c_master_alloc_xfer(master, nxfers);
843 	if (!xfer)
844 		return -ENOMEM;
845 
846 	for (i = 0; i < nxfers; i++) {
847 		struct cdns_i3c_cmd *ccmd = &xfer->cmds[i];
848 
849 		ccmd->cmd0 = CMD0_FIFO_DEV_ADDR(xfers[i].addr) |
850 			CMD0_FIFO_PL_LEN(xfers[i].len) |
851 			CMD0_FIFO_PRIV_XMIT_MODE(XMIT_BURST_WITHOUT_SUBADDR);
852 
853 		if (xfers[i].flags & I2C_M_TEN)
854 			ccmd->cmd0 |= CMD0_FIFO_IS_10B;
855 
856 		if (xfers[i].flags & I2C_M_RD) {
857 			ccmd->cmd0 |= CMD0_FIFO_RNW;
858 			ccmd->rx_buf = xfers[i].buf;
859 			ccmd->rx_len = xfers[i].len;
860 		} else {
861 			ccmd->tx_buf = xfers[i].buf;
862 			ccmd->tx_len = xfers[i].len;
863 		}
864 	}
865 
866 	cdns_i3c_master_queue_xfer(master, xfer);
867 	if (!wait_for_completion_timeout(&xfer->comp, msecs_to_jiffies(1000)))
868 		cdns_i3c_master_unqueue_xfer(master, xfer);
869 
870 	ret = xfer->ret;
871 	cdns_i3c_master_free_xfer(xfer);
872 
873 	return ret;
874 }
875 
876 struct cdns_i3c_i2c_dev_data {
877 	u16 id;
878 	s16 ibi;
879 	struct i3c_generic_ibi_pool *ibi_pool;
880 };
881 
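/*
 * Build the DEV_ID_RR0 address field: addr[6:0] goes in RR0[7:1],
 * addr[9:7] in RR0[15:13], and the parity bit (~XOR(addr[6:0])) in RR0[0].
 */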
882 static u32 prepare_rr0_dev_address(u32 addr)
883 {
884 	u32 ret = (addr << 1) & 0xff;
885 
886 	/* RR0[7:1] = addr[6:0] */
887 	ret |= (addr & GENMASK(6, 0)) << 1;
888 
889 	/* RR0[15:13] = addr[9:7] */
890 	ret |= (addr & GENMASK(9, 7)) << 6;
891 
892 	/* RR0[0] = ~XOR(addr[6:0]) */
893 	if (!(hweight8(addr & 0x7f) & 1))
894 		ret |= 1;
895 
896 	return ret;
897 }
898 
899 static void cdns_i3c_master_upd_i3c_addr(struct i3c_dev_desc *dev)
900 {
901 	struct i3c_master_controller *m = i3c_dev_get_master(dev);
902 	struct cdns_i3c_master *master = to_cdns_i3c_master(m);
903 	struct cdns_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
904 	u32 rr;
905 
906 	rr = prepare_rr0_dev_address(dev->info.dyn_addr ?
907 				     dev->info.dyn_addr :
908 				     dev->info.static_addr);
909 	writel(DEV_ID_RR0_IS_I3C | rr, master->regs + DEV_ID_RR0(data->id));
910 }
911 
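/*
 * Find a retaining register slot for a device: with no dynamic address,
 * return any free slot; otherwise return the active slot that already holds
 * this dynamic address.
 */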
912 static int cdns_i3c_master_get_rr_slot(struct cdns_i3c_master *master,
913 				       u8 dyn_addr)
914 {
915 	unsigned long activedevs;
916 	u32 rr;
917 	int i;
918 
919 	if (!dyn_addr) {
920 		if (!master->free_rr_slots)
921 			return -ENOSPC;
922 
923 		return ffs(master->free_rr_slots) - 1;
924 	}
925 
926 	activedevs = readl(master->regs + DEVS_CTRL) & DEVS_CTRL_DEVS_ACTIVE_MASK;
927 	activedevs &= ~BIT(0);
928 
929 	for_each_set_bit(i, &activedevs, master->maxdevs + 1) {
930 		rr = readl(master->regs + DEV_ID_RR0(i));
931 		if (!(rr & DEV_ID_RR0_IS_I3C) ||
932 		    DEV_ID_RR0_GET_DEV_ADDR(rr) != dyn_addr)
933 			continue;
934 
935 		return i;
936 	}
937 
938 	return -EINVAL;
939 }
940 
941 static int cdns_i3c_master_reattach_i3c_dev(struct i3c_dev_desc *dev,
942 					    u8 old_dyn_addr)
943 {
944 	cdns_i3c_master_upd_i3c_addr(dev);
945 
946 	return 0;
947 }
948 
949 static int cdns_i3c_master_attach_i3c_dev(struct i3c_dev_desc *dev)
950 {
951 	struct i3c_master_controller *m = i3c_dev_get_master(dev);
952 	struct cdns_i3c_master *master = to_cdns_i3c_master(m);
953 	struct cdns_i3c_i2c_dev_data *data;
954 	int slot;
955 
956 	data = kzalloc(sizeof(*data), GFP_KERNEL);
957 	if (!data)
958 		return -ENOMEM;
959 
960 	slot = cdns_i3c_master_get_rr_slot(master, dev->info.dyn_addr);
961 	if (slot < 0) {
962 		kfree(data);
963 		return slot;
964 	}
965 
966 	data->ibi = -1;
967 	data->id = slot;
968 	i3c_dev_set_master_data(dev, data);
969 	master->free_rr_slots &= ~BIT(slot);
970 
971 	if (!dev->info.dyn_addr) {
972 		cdns_i3c_master_upd_i3c_addr(dev);
973 		writel(readl(master->regs + DEVS_CTRL) |
974 		       DEVS_CTRL_DEV_ACTIVE(data->id),
975 		       master->regs + DEVS_CTRL);
976 	}
977 
978 	return 0;
979 }
980 
981 static void cdns_i3c_master_detach_i3c_dev(struct i3c_dev_desc *dev)
982 {
983 	struct i3c_master_controller *m = i3c_dev_get_master(dev);
984 	struct cdns_i3c_master *master = to_cdns_i3c_master(m);
985 	struct cdns_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
986 
987 	writel(readl(master->regs + DEVS_CTRL) |
988 	       DEVS_CTRL_DEV_CLR(data->id),
989 	       master->regs + DEVS_CTRL);
990 
991 	i3c_dev_set_master_data(dev, NULL);
992 	master->free_rr_slots |= BIT(data->id);
993 	kfree(data);
994 }
995 
996 static int cdns_i3c_master_attach_i2c_dev(struct i2c_dev_desc *dev)
997 {
998 	struct i3c_master_controller *m = i2c_dev_get_master(dev);
999 	struct cdns_i3c_master *master = to_cdns_i3c_master(m);
1000 	struct cdns_i3c_i2c_dev_data *data;
1001 	int slot;
1002 
1003 	slot = cdns_i3c_master_get_rr_slot(master, 0);
1004 	if (slot < 0)
1005 		return slot;
1006 
1007 	data = kzalloc(sizeof(*data), GFP_KERNEL);
1008 	if (!data)
1009 		return -ENOMEM;
1010 
1011 	data->id = slot;
1012 	master->free_rr_slots &= ~BIT(slot);
1013 	i2c_dev_set_master_data(dev, data);
1014 
1015 	writel(prepare_rr0_dev_address(dev->addr),
1016 	       master->regs + DEV_ID_RR0(data->id));
1017 	writel(dev->lvr, master->regs + DEV_ID_RR2(data->id));
1018 	writel(readl(master->regs + DEVS_CTRL) |
1019 	       DEVS_CTRL_DEV_ACTIVE(data->id),
1020 	       master->regs + DEVS_CTRL);
1021 
1022 	return 0;
1023 }
1024 
1025 static void cdns_i3c_master_detach_i2c_dev(struct i2c_dev_desc *dev)
1026 {
1027 	struct i3c_master_controller *m = i2c_dev_get_master(dev);
1028 	struct cdns_i3c_master *master = to_cdns_i3c_master(m);
1029 	struct cdns_i3c_i2c_dev_data *data = i2c_dev_get_master_data(dev);
1030 
1031 	writel(readl(master->regs + DEVS_CTRL) |
1032 	       DEVS_CTRL_DEV_CLR(data->id),
1033 	       master->regs + DEVS_CTRL);
1034 	master->free_rr_slots |= BIT(data->id);
1035 
1036 	i2c_dev_set_master_data(dev, NULL);
1037 	kfree(data);
1038 }
1039 
1040 static void cdns_i3c_master_bus_cleanup(struct i3c_master_controller *m)
1041 {
1042 	struct cdns_i3c_master *master = to_cdns_i3c_master(m);
1043 
1044 	cdns_i3c_master_disable(master);
1045 }
1046 
1047 static void cdns_i3c_master_dev_rr_to_info(struct cdns_i3c_master *master,
1048 					   unsigned int slot,
1049 					   struct i3c_device_info *info)
1050 {
1051 	u32 rr;
1052 
1053 	memset(info, 0, sizeof(*info));
1054 	rr = readl(master->regs + DEV_ID_RR0(slot));
1055 	info->dyn_addr = DEV_ID_RR0_GET_DEV_ADDR(rr);
1056 	rr = readl(master->regs + DEV_ID_RR2(slot));
1057 	info->dcr = rr;
1058 	info->bcr = rr >> 8;
1059 	info->pid = rr >> 16;
1060 	info->pid |= (u64)readl(master->regs + DEV_ID_RR1(slot)) << 16;
1061 }
1062 
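/*
 * Adjust the push-pull low period (PRESCL_CTRL1.PP_LOW) to honor the most
 * constraining maximum SDR frequency advertised by the I3C devices on the
 * bus. The controller is temporarily disabled while the prescaler register
 * is updated.
 */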
1063 static void cdns_i3c_master_upd_i3c_scl_lim(struct cdns_i3c_master *master)
1064 {
1065 	struct i3c_master_controller *m = &master->base;
1066 	unsigned long i3c_lim_period, pres_step, ncycles;
1067 	struct i3c_bus *bus = i3c_master_get_bus(m);
1068 	unsigned long new_i3c_scl_lim = 0;
1069 	struct i3c_dev_desc *dev;
1070 	u32 prescl1, ctrl;
1071 
1072 	i3c_bus_for_each_i3cdev(bus, dev) {
1073 		unsigned long max_fscl;
1074 
1075 		max_fscl = max(I3C_CCC_MAX_SDR_FSCL(dev->info.max_read_ds),
1076 			       I3C_CCC_MAX_SDR_FSCL(dev->info.max_write_ds));
1077 		switch (max_fscl) {
1078 		case I3C_SDR1_FSCL_8MHZ:
1079 			max_fscl = 8000000;
1080 			break;
1081 		case I3C_SDR2_FSCL_6MHZ:
1082 			max_fscl = 6000000;
1083 			break;
1084 		case I3C_SDR3_FSCL_4MHZ:
1085 			max_fscl = 4000000;
1086 			break;
1087 		case I3C_SDR4_FSCL_2MHZ:
1088 			max_fscl = 2000000;
1089 			break;
1090 		case I3C_SDR0_FSCL_MAX:
1091 		default:
1092 			max_fscl = 0;
1093 			break;
1094 		}
1095 
1096 		if (max_fscl &&
1097 		    (new_i3c_scl_lim > max_fscl || !new_i3c_scl_lim))
1098 			new_i3c_scl_lim = max_fscl;
1099 	}
1100 
1101 	/* Only update PRESCL_CTRL1 if the I3C SCL limitation has changed. */
1102 	if (new_i3c_scl_lim == master->i3c_scl_lim)
1103 		return;
1104 	master->i3c_scl_lim = new_i3c_scl_lim;
1105 	if (!new_i3c_scl_lim)
1106 		return;
1107 	pres_step = 1000000000UL / (bus->scl_rate.i3c * 4);
1108 
1109 	/* Configure PP_LOW to meet I3C slave limitations. */
1110 	prescl1 = readl(master->regs + PRESCL_CTRL1) &
1111 		  ~PRESCL_CTRL1_PP_LOW_MASK;
1112 	ctrl = readl(master->regs + CTRL);
1113 
1114 	i3c_lim_period = DIV_ROUND_UP(1000000000, master->i3c_scl_lim);
1115 	ncycles = DIV_ROUND_UP(i3c_lim_period, pres_step);
1116 	if (ncycles < 4)
1117 		ncycles = 0;
1118 	else
1119 		ncycles -= 4;
1120 
1121 	prescl1 |= PRESCL_CTRL1_PP_LOW(ncycles);
1122 
1123 	/* Disable I3C master before updating PRESCL_CTRL1. */
1124 	if (ctrl & CTRL_DEV_EN)
1125 		cdns_i3c_master_disable(master);
1126 
1127 	writel(prescl1, master->regs + PRESCL_CTRL1);
1128 
1129 	if (ctrl & CTRL_DEV_EN)
1130 		cdns_i3c_master_enable(master);
1131 }
1132 
1133 static int cdns_i3c_master_do_daa(struct i3c_master_controller *m)
1134 {
1135 	struct cdns_i3c_master *master = to_cdns_i3c_master(m);
1136 	unsigned long olddevs, newdevs;
1137 	int ret, slot;
1138 	u8 addrs[MAX_DEVS] = { };
1139 	u8 last_addr = 0;
1140 
1141 	olddevs = readl(master->regs + DEVS_CTRL) & DEVS_CTRL_DEVS_ACTIVE_MASK;
1142 	olddevs |= BIT(0);
1143 
1144 	/* Prepare RR slots before launching DAA. */
1145 	for_each_clear_bit(slot, &olddevs, master->maxdevs + 1) {
1146 		ret = i3c_master_get_free_addr(m, last_addr + 1);
1147 		if (ret < 0)
1148 			return -ENOSPC;
1149 
1150 		last_addr = ret;
1151 		addrs[slot] = last_addr;
1152 		writel(prepare_rr0_dev_address(last_addr) | DEV_ID_RR0_IS_I3C,
1153 		       master->regs + DEV_ID_RR0(slot));
1154 		writel(0, master->regs + DEV_ID_RR1(slot));
1155 		writel(0, master->regs + DEV_ID_RR2(slot));
1156 	}
1157 
1158 	ret = i3c_master_entdaa_locked(&master->base);
1159 	if (ret && ret != I3C_ERROR_M2)
1160 		return ret;
1161 
1162 	newdevs = readl(master->regs + DEVS_CTRL) & DEVS_CTRL_DEVS_ACTIVE_MASK;
1163 	newdevs &= ~olddevs;
1164 
1165 	/*
1166 	 * Clear all retaining registers filled during DAA. We already
1167 	 * have the addresses assigned to them in the addrs array.
1168 	 */
1169 	for_each_set_bit(slot, &newdevs, master->maxdevs + 1)
1170 		i3c_master_add_i3c_dev_locked(m, addrs[slot]);
1171 
1172 	/*
1173 	 * Clear slots that ended up not being used. Can be caused by I3C
1174 	 * device creation failure or when the I3C device was already known
1175 	 * by the system but with a different address (in this case the device
1176 	 * already has a slot and does not need a new one).
1177 	 */
1178 	writel(readl(master->regs + DEVS_CTRL) |
1179 	       master->free_rr_slots << DEVS_CTRL_DEV_CLR_SHIFT,
1180 	       master->regs + DEVS_CTRL);
1181 
1182 	i3c_master_defslvs_locked(&master->base);
1183 
1184 	cdns_i3c_master_upd_i3c_scl_lim(master);
1185 
1186 	/* Unmask Hot-Join and Mastership request interrupts. */
1187 	i3c_master_enec_locked(m, I3C_BROADCAST_ADDR,
1188 			       I3C_CCC_EVENT_HJ | I3C_CCC_EVENT_MR);
1189 
1190 	return 0;
1191 }
1192 
1193 static u8 cdns_i3c_master_calculate_thd_delay(struct cdns_i3c_master *master)
1194 {
1195 	unsigned long sysclk_rate = clk_get_rate(master->sysclk);
1196 	u8 thd_delay = DIV_ROUND_UP(master->devdata->thd_delay_ns,
1197 				    (NSEC_PER_SEC / sysclk_rate));
1198 
1199 	/* Values greater than 3 are not valid. */
1200 	if (thd_delay > THD_DELAY_MAX)
1201 		thd_delay = THD_DELAY_MAX;
1202 
1203 	/* CTLR_THD_DEL value is encoded. */
1204 	return (THD_DELAY_MAX - thd_delay);
1205 }
1206 
1207 static int cdns_i3c_master_bus_init(struct i3c_master_controller *m)
1208 {
1209 	struct cdns_i3c_master *master = to_cdns_i3c_master(m);
1210 	unsigned long pres_step, sysclk_rate, max_i2cfreq;
1211 	struct i3c_bus *bus = i3c_master_get_bus(m);
1212 	u32 ctrl, prescl0, prescl1, pres, low;
1213 	struct i3c_device_info info = { };
1214 	int ret, ncycles;
1215 
1216 	switch (bus->mode) {
1217 	case I3C_BUS_MODE_PURE:
1218 		ctrl = CTRL_PURE_BUS_MODE;
1219 		break;
1220 
1221 	case I3C_BUS_MODE_MIXED_FAST:
1222 		ctrl = CTRL_MIXED_FAST_BUS_MODE;
1223 		break;
1224 
1225 	case I3C_BUS_MODE_MIXED_SLOW:
1226 		ctrl = CTRL_MIXED_SLOW_BUS_MODE;
1227 		break;
1228 
1229 	default:
1230 		return -EINVAL;
1231 	}
1232 
1233 	sysclk_rate = clk_get_rate(master->sysclk);
1234 	if (!sysclk_rate)
1235 		return -EINVAL;
1236 
1237 	pres = DIV_ROUND_UP(sysclk_rate, (bus->scl_rate.i3c * 4)) - 1;
1238 	if (pres > PRESCL_CTRL0_I3C_MAX)
1239 		return -ERANGE;
1240 
1241 	bus->scl_rate.i3c = sysclk_rate / ((pres + 1) * 4);
1242 
1243 	prescl0 = PRESCL_CTRL0_I3C(pres);
1244 
1245 	low = ((I3C_BUS_TLOW_OD_MIN_NS * sysclk_rate) / (pres + 1)) - 2;
1246 	prescl1 = PRESCL_CTRL1_OD_LOW(low);
1247 
1248 	max_i2cfreq = bus->scl_rate.i2c;
1249 
1250 	pres = (sysclk_rate / (max_i2cfreq * 5)) - 1;
1251 	if (pres > PRESCL_CTRL0_I2C_MAX)
1252 		return -ERANGE;
1253 
1254 	bus->scl_rate.i2c = sysclk_rate / ((pres + 1) * 5);
1255 
1256 	prescl0 |= PRESCL_CTRL0_I2C(pres);
1257 	writel(prescl0, master->regs + PRESCL_CTRL0);
1258 
1259 	/* Calculate OD and PP low. */
1260 	pres_step = 1000000000 / (bus->scl_rate.i3c * 4);
1261 	ncycles = DIV_ROUND_UP(I3C_BUS_TLOW_OD_MIN_NS, pres_step) - 2;
1262 	if (ncycles < 0)
1263 		ncycles = 0;
1264 	prescl1 = PRESCL_CTRL1_OD_LOW(ncycles);
1265 	writel(prescl1, master->regs + PRESCL_CTRL1);
1266 
1267 	/* Get an address for the master. */
1268 	ret = i3c_master_get_free_addr(m, 0);
1269 	if (ret < 0)
1270 		return ret;
1271 
1272 	writel(prepare_rr0_dev_address(ret) | DEV_ID_RR0_IS_I3C,
1273 	       master->regs + DEV_ID_RR0(0));
1274 
1275 	cdns_i3c_master_dev_rr_to_info(master, 0, &info);
1276 	if (info.bcr & I3C_BCR_HDR_CAP)
1277 		info.hdr_cap = I3C_CCC_HDR_MODE(I3C_HDR_DDR);
1278 
1279 	ret = i3c_master_set_info(&master->base, &info);
1280 	if (ret)
1281 		return ret;
1282 
1283 	/*
1284 	 * Enable Hot-Join, and, when a Hot-Join request happens, disable all
1285 	 * events coming from this device.
1286 	 *
1287 	 * We will issue ENTDAA afterwards from the threaded IRQ handler.
1288 	 */
1289 	ctrl |= CTRL_HJ_ACK | CTRL_HJ_DISEC | CTRL_HALT_EN | CTRL_MCS_EN;
1290 
1291 	/*
1292 	 * Configure data hold delay based on device-specific data.
1293 	 *
1294 	 * MIPI I3C Specification 1.0 defines non-zero minimal tHD_PP timing on
1295 	 * master output. This setting makes it possible to meet this timing
1296 	 * on the master's SoC outputs, regardless of PCB balancing.
1297 	 */
1298 	ctrl |= CTRL_THD_DELAY(cdns_i3c_master_calculate_thd_delay(master));
1299 	writel(ctrl, master->regs + CTRL);
1300 
1301 	cdns_i3c_master_enable(master);
1302 
1303 	return 0;
1304 }
1305 
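/*
 * Handle a single IBI: copy the payload from the IBI data FIFO into a free
 * slot taken from the device's IBI pool and queue it to the core. If no
 * slot is available or the IBI is flagged in error, the payload is simply
 * drained from the FIFO.
 */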
1306 static void cdns_i3c_master_handle_ibi(struct cdns_i3c_master *master,
1307 				       u32 ibir)
1308 {
1309 	struct cdns_i3c_i2c_dev_data *data;
1310 	bool data_consumed = false;
1311 	struct i3c_ibi_slot *slot;
1312 	u32 id = IBIR_SLVID(ibir);
1313 	struct i3c_dev_desc *dev;
1314 	size_t nbytes;
1315 	u8 *buf;
1316 
1317 	/*
1318 	 * FIXME: maybe we should report the FIFO OVF errors to the upper
1319 	 * layer.
1320 	 */
1321 	if (id >= master->ibi.num_slots || (ibir & IBIR_ERROR))
1322 		goto out;
1323 
1324 	dev = master->ibi.slots[id];
1325 	spin_lock(&master->ibi.lock);
1326 
1327 	data = i3c_dev_get_master_data(dev);
1328 	slot = i3c_generic_ibi_get_free_slot(data->ibi_pool);
1329 	if (!slot)
1330 		goto out_unlock;
1331 
1332 	buf = slot->data;
1333 
1334 	nbytes = IBIR_XFER_BYTES(ibir);
1335 	readsl(master->regs + IBI_DATA_FIFO, buf, nbytes / 4);
1336 	if (nbytes & 3) {
1337 		u32 tmp = __raw_readl(master->regs + IBI_DATA_FIFO);
1338 
1339 		memcpy(buf + (nbytes & ~3), &tmp, nbytes & 3);
1340 	}
1341 
1342 	slot->len = min_t(unsigned int, IBIR_XFER_BYTES(ibir),
1343 			  dev->ibi->max_payload_len);
1344 	i3c_master_queue_ibi(dev, slot);
1345 	data_consumed = true;
1346 
1347 out_unlock:
1348 	spin_unlock(&master->ibi.lock);
1349 
1350 out:
1351 	/* Consume data from the FIFO if it's not been done already. */
1352 	if (!data_consumed) {
1353 		int i;
1354 
1355 		for (i = 0; i < IBIR_XFER_BYTES(ibir); i += 4)
1356 			readl(master->regs + IBI_DATA_FIFO);
1357 	}
1358 }
1359 
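/*
 * Drain the IBI response (IBIR) FIFO and dispatch each entry: slave IBIs
 * are handled directly, Hot-Join requests schedule a DAA from the hj_work
 * handler, and Mastership requests are currently ignored.
 */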
1360 static void cnds_i3c_master_demux_ibis(struct cdns_i3c_master *master)
1361 {
1362 	u32 status0;
1363 
1364 	writel(MST_INT_IBIR_THR, master->regs + MST_ICR);
1365 
1366 	for (status0 = readl(master->regs + MST_STATUS0);
1367 	     !(status0 & MST_STATUS0_IBIR_EMP);
1368 	     status0 = readl(master->regs + MST_STATUS0)) {
1369 		u32 ibir = readl(master->regs + IBIR);
1370 
1371 		switch (IBIR_TYPE(ibir)) {
1372 		case IBIR_TYPE_IBI:
1373 			cdns_i3c_master_handle_ibi(master, ibir);
1374 			break;
1375 
1376 		case IBIR_TYPE_HJ:
1377 			WARN_ON(IBIR_XFER_BYTES(ibir) || (ibir & IBIR_ERROR));
1378 			queue_work(master->base.wq, &master->hj_work);
1379 			break;
1380 
1381 		case IBIR_TYPE_MR:
1382 			WARN_ON(IBIR_XFER_BYTES(ibir) || (ibir & IBIR_ERROR));
1383 			break;
1384 
1385 		default:
1386 			break;
1387 		}
1388 	}
1389 }
1390 
1391 static irqreturn_t cdns_i3c_master_interrupt(int irq, void *data)
1392 {
1393 	struct cdns_i3c_master *master = data;
1394 	u32 status;
1395 
1396 	status = readl(master->regs + MST_ISR);
1397 	if (!(status & readl(master->regs + MST_IMR)))
1398 		return IRQ_NONE;
1399 
1400 	spin_lock(&master->xferqueue.lock);
1401 	cdns_i3c_master_end_xfer_locked(master, status);
1402 	spin_unlock(&master->xferqueue.lock);
1403 
1404 	if (status & MST_INT_IBIR_THR)
1405 		cnds_i3c_master_demux_ibis(master);
1406 
1407 	return IRQ_HANDLED;
1408 }
1409 
1410 static int cdns_i3c_master_disable_ibi(struct i3c_dev_desc *dev)
1411 {
1412 	struct i3c_master_controller *m = i3c_dev_get_master(dev);
1413 	struct cdns_i3c_master *master = to_cdns_i3c_master(m);
1414 	struct cdns_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
1415 	unsigned long flags;
1416 	u32 sirmap;
1417 	int ret;
1418 
1419 	ret = i3c_master_disec_locked(m, dev->info.dyn_addr,
1420 				      I3C_CCC_EVENT_SIR);
1421 	if (ret)
1422 		return ret;
1423 
1424 	spin_lock_irqsave(&master->ibi.lock, flags);
1425 	sirmap = readl(master->regs + SIR_MAP_DEV_REG(data->ibi));
1426 	sirmap &= ~SIR_MAP_DEV_CONF_MASK(data->ibi);
1427 	sirmap |= SIR_MAP_DEV_CONF(data->ibi,
1428 				   SIR_MAP_DEV_DA(I3C_BROADCAST_ADDR));
1429 	writel(sirmap, master->regs + SIR_MAP_DEV_REG(data->ibi));
1430 	spin_unlock_irqrestore(&master->ibi.lock, flags);
1431 
1432 	return ret;
1433 }
1434 
1435 static int cdns_i3c_master_enable_ibi(struct i3c_dev_desc *dev)
1436 {
1437 	struct i3c_master_controller *m = i3c_dev_get_master(dev);
1438 	struct cdns_i3c_master *master = to_cdns_i3c_master(m);
1439 	struct cdns_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
1440 	unsigned long flags;
1441 	u32 sircfg, sirmap;
1442 	int ret;
1443 
1444 	spin_lock_irqsave(&master->ibi.lock, flags);
1445 	sirmap = readl(master->regs + SIR_MAP_DEV_REG(data->ibi));
1446 	sirmap &= ~SIR_MAP_DEV_CONF_MASK(data->ibi);
1447 	sircfg = SIR_MAP_DEV_ROLE(dev->info.bcr >> 6) |
1448 		 SIR_MAP_DEV_DA(dev->info.dyn_addr) |
1449 		 SIR_MAP_DEV_PL(dev->info.max_ibi_len) |
1450 		 SIR_MAP_DEV_ACK;
1451 
1452 	if (dev->info.bcr & I3C_BCR_MAX_DATA_SPEED_LIM)
1453 		sircfg |= SIR_MAP_DEV_SLOW;
1454 
1455 	sirmap |= SIR_MAP_DEV_CONF(data->ibi, sircfg);
1456 	writel(sirmap, master->regs + SIR_MAP_DEV_REG(data->ibi));
1457 	spin_unlock_irqrestore(&master->ibi.lock, flags);
1458 
1459 	ret = i3c_master_enec_locked(m, dev->info.dyn_addr,
1460 				     I3C_CCC_EVENT_SIR);
1461 	if (ret) {
1462 		spin_lock_irqsave(&master->ibi.lock, flags);
1463 		sirmap = readl(master->regs + SIR_MAP_DEV_REG(data->ibi));
1464 		sirmap &= ~SIR_MAP_DEV_CONF_MASK(data->ibi);
1465 		sirmap |= SIR_MAP_DEV_CONF(data->ibi,
1466 					   SIR_MAP_DEV_DA(I3C_BROADCAST_ADDR));
1467 		writel(sirmap, master->regs + SIR_MAP_DEV_REG(data->ibi));
1468 		spin_unlock_irqrestore(&master->ibi.lock, flags);
1469 	}
1470 
1471 	return ret;
1472 }
1473 
1474 static int cdns_i3c_master_request_ibi(struct i3c_dev_desc *dev,
1475 				       const struct i3c_ibi_setup *req)
1476 {
1477 	struct i3c_master_controller *m = i3c_dev_get_master(dev);
1478 	struct cdns_i3c_master *master = to_cdns_i3c_master(m);
1479 	struct cdns_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
1480 	unsigned long flags;
1481 	unsigned int i;
1482 
1483 	data->ibi_pool = i3c_generic_ibi_alloc_pool(dev, req);
1484 	if (IS_ERR(data->ibi_pool))
1485 		return PTR_ERR(data->ibi_pool);
1486 
1487 	spin_lock_irqsave(&master->ibi.lock, flags);
1488 	for (i = 0; i < master->ibi.num_slots; i++) {
1489 		if (!master->ibi.slots[i]) {
1490 			data->ibi = i;
1491 			master->ibi.slots[i] = dev;
1492 			break;
1493 		}
1494 	}
1495 	spin_unlock_irqrestore(&master->ibi.lock, flags);
1496 
1497 	if (i < master->ibi.num_slots)
1498 		return 0;
1499 
1500 	i3c_generic_ibi_free_pool(data->ibi_pool);
1501 	data->ibi_pool = NULL;
1502 
1503 	return -ENOSPC;
1504 }
1505 
1506 static void cdns_i3c_master_free_ibi(struct i3c_dev_desc *dev)
1507 {
1508 	struct i3c_master_controller *m = i3c_dev_get_master(dev);
1509 	struct cdns_i3c_master *master = to_cdns_i3c_master(m);
1510 	struct cdns_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
1511 	unsigned long flags;
1512 
1513 	spin_lock_irqsave(&master->ibi.lock, flags);
1514 	master->ibi.slots[data->ibi] = NULL;
1515 	data->ibi = -1;
1516 	spin_unlock_irqrestore(&master->ibi.lock, flags);
1517 
1518 	i3c_generic_ibi_free_pool(data->ibi_pool);
1519 }
1520 
1521 static void cdns_i3c_master_recycle_ibi_slot(struct i3c_dev_desc *dev,
1522 					     struct i3c_ibi_slot *slot)
1523 {
1524 	struct cdns_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
1525 
1526 	i3c_generic_ibi_recycle_slot(data->ibi_pool, slot);
1527 }
1528 
1529 static const struct i3c_master_controller_ops cdns_i3c_master_ops = {
1530 	.bus_init = cdns_i3c_master_bus_init,
1531 	.bus_cleanup = cdns_i3c_master_bus_cleanup,
1532 	.do_daa = cdns_i3c_master_do_daa,
1533 	.attach_i3c_dev = cdns_i3c_master_attach_i3c_dev,
1534 	.reattach_i3c_dev = cdns_i3c_master_reattach_i3c_dev,
1535 	.detach_i3c_dev = cdns_i3c_master_detach_i3c_dev,
1536 	.attach_i2c_dev = cdns_i3c_master_attach_i2c_dev,
1537 	.detach_i2c_dev = cdns_i3c_master_detach_i2c_dev,
1538 	.supports_ccc_cmd = cdns_i3c_master_supports_ccc_cmd,
1539 	.send_ccc_cmd = cdns_i3c_master_send_ccc_cmd,
1540 	.priv_xfers = cdns_i3c_master_priv_xfers,
1541 	.i2c_xfers = cdns_i3c_master_i2c_xfers,
1542 	.enable_ibi = cdns_i3c_master_enable_ibi,
1543 	.disable_ibi = cdns_i3c_master_disable_ibi,
1544 	.request_ibi = cdns_i3c_master_request_ibi,
1545 	.free_ibi = cdns_i3c_master_free_ibi,
1546 	.recycle_ibi_slot = cdns_i3c_master_recycle_ibi_slot,
1547 };
1548 
1549 static void cdns_i3c_master_hj(struct work_struct *work)
1550 {
1551 	struct cdns_i3c_master *master = container_of(work,
1552 						      struct cdns_i3c_master,
1553 						      hj_work);
1554 
1555 	i3c_master_do_daa(&master->base);
1556 }
1557 
1558 static struct cdns_i3c_data cdns_i3c_devdata = {
1559 	.thd_delay_ns = 10,
1560 };
1561 
1562 static const struct of_device_id cdns_i3c_master_of_ids[] = {
1563 	{ .compatible = "cdns,i3c-master", .data = &cdns_i3c_devdata },
1564 	{ /* sentinel */ },
1565 };
1566 
1567 static int cdns_i3c_master_probe(struct platform_device *pdev)
1568 {
1569 	struct cdns_i3c_master *master;
1570 	int ret, irq;
1571 	u32 val;
1572 
1573 	master = devm_kzalloc(&pdev->dev, sizeof(*master), GFP_KERNEL);
1574 	if (!master)
1575 		return -ENOMEM;
1576 
1577 	master->devdata = of_device_get_match_data(&pdev->dev);
1578 	if (!master->devdata)
1579 		return -EINVAL;
1580 
1581 	master->regs = devm_platform_ioremap_resource(pdev, 0);
1582 	if (IS_ERR(master->regs))
1583 		return PTR_ERR(master->regs);
1584 
1585 	master->pclk = devm_clk_get(&pdev->dev, "pclk");
1586 	if (IS_ERR(master->pclk))
1587 		return PTR_ERR(master->pclk);
1588 
1589 	master->sysclk = devm_clk_get(&pdev->dev, "sysclk");
1590 	if (IS_ERR(master->sysclk))
1591 		return PTR_ERR(master->sysclk);
1592 
1593 	irq = platform_get_irq(pdev, 0);
1594 	if (irq < 0)
1595 		return irq;
1596 
1597 	ret = clk_prepare_enable(master->pclk);
1598 	if (ret)
1599 		return ret;
1600 
1601 	ret = clk_prepare_enable(master->sysclk);
1602 	if (ret)
1603 		goto err_disable_pclk;
1604 
1605 	if (readl(master->regs + DEV_ID) != DEV_ID_I3C_MASTER) {
1606 		ret = -EINVAL;
1607 		goto err_disable_sysclk;
1608 	}
1609 
1610 	spin_lock_init(&master->xferqueue.lock);
1611 	INIT_LIST_HEAD(&master->xferqueue.list);
1612 
1613 	INIT_WORK(&master->hj_work, cdns_i3c_master_hj);
1614 	writel(0xffffffff, master->regs + MST_IDR);
1615 	writel(0xffffffff, master->regs + SLV_IDR);
1616 	ret = devm_request_irq(&pdev->dev, irq, cdns_i3c_master_interrupt, 0,
1617 			       dev_name(&pdev->dev), master);
1618 	if (ret)
1619 		goto err_disable_sysclk;
1620 
1621 	platform_set_drvdata(pdev, master);
1622 
1623 	val = readl(master->regs + CONF_STATUS0);
1624 
1625 	/* Device ID0 is reserved to describe this master. */
1626 	master->maxdevs = CONF_STATUS0_DEVS_NUM(val);
1627 	master->free_rr_slots = GENMASK(master->maxdevs, 1);
1628 	master->caps.ibirfifodepth = CONF_STATUS0_IBIR_DEPTH(val);
1629 	master->caps.cmdrfifodepth = CONF_STATUS0_CMDR_DEPTH(val);
1630 
1631 	val = readl(master->regs + CONF_STATUS1);
1632 	master->caps.cmdfifodepth = CONF_STATUS1_CMD_DEPTH(val);
1633 	master->caps.rxfifodepth = CONF_STATUS1_RX_DEPTH(val);
1634 	master->caps.txfifodepth = CONF_STATUS1_TX_DEPTH(val);
1635 
1636 	spin_lock_init(&master->ibi.lock);
1637 	master->ibi.num_slots = CONF_STATUS1_IBI_HW_RES(val);
1638 	master->ibi.slots = devm_kcalloc(&pdev->dev, master->ibi.num_slots,
1639 					 sizeof(*master->ibi.slots),
1640 					 GFP_KERNEL);
1641 	if (!master->ibi.slots) {
1642 		ret = -ENOMEM;
1643 		goto err_disable_sysclk;
1644 	}
1645 
1646 	writel(IBIR_THR(1), master->regs + CMD_IBI_THR_CTRL);
1647 	writel(MST_INT_IBIR_THR, master->regs + MST_IER);
1648 	writel(DEVS_CTRL_DEV_CLR_ALL, master->regs + DEVS_CTRL);
1649 
1650 	ret = i3c_master_register(&master->base, &pdev->dev,
1651 				  &cdns_i3c_master_ops, false);
1652 	if (ret)
1653 		goto err_disable_sysclk;
1654 
1655 	return 0;
1656 
1657 err_disable_sysclk:
1658 	clk_disable_unprepare(master->sysclk);
1659 
1660 err_disable_pclk:
1661 	clk_disable_unprepare(master->pclk);
1662 
1663 	return ret;
1664 }
1665 
1666 static int cdns_i3c_master_remove(struct platform_device *pdev)
1667 {
1668 	struct cdns_i3c_master *master = platform_get_drvdata(pdev);
1669 	int ret;
1670 
1671 	ret = i3c_master_unregister(&master->base);
1672 	if (ret)
1673 		return ret;
1674 
1675 	clk_disable_unprepare(master->sysclk);
1676 	clk_disable_unprepare(master->pclk);
1677 
1678 	return 0;
1679 }
1680 
1681 static struct platform_driver cdns_i3c_master = {
1682 	.probe = cdns_i3c_master_probe,
1683 	.remove = cdns_i3c_master_remove,
1684 	.driver = {
1685 		.name = "cdns-i3c-master",
1686 		.of_match_table = cdns_i3c_master_of_ids,
1687 	},
1688 };
1689 module_platform_driver(cdns_i3c_master);
1690 
1691 MODULE_AUTHOR("Boris Brezillon <boris.brezillon@bootlin.com>");
1692 MODULE_DESCRIPTION("Cadence I3C master driver");
1693 MODULE_LICENSE("GPL v2");
1694 MODULE_ALIAS("platform:cdns-i3c-master");
1695