1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Silvaco dual-role I3C master driver
4 *
5 * Copyright (C) 2020 Silvaco
6 * Author: Miquel RAYNAL <miquel.raynal@bootlin.com>
7 * Based on a work from: Conor Culhane <conor.culhane@silvaco.com>
8 */
9
10 #include <linux/bitfield.h>
11 #include <linux/clk.h>
12 #include <linux/completion.h>
13 #include <linux/errno.h>
14 #include <linux/i3c/master.h>
15 #include <linux/interrupt.h>
16 #include <linux/iopoll.h>
17 #include <linux/list.h>
18 #include <linux/module.h>
19 #include <linux/of.h>
20 #include <linux/platform_device.h>
21
22 /* Master Mode Registers */
23 #define SVC_I3C_MCONFIG 0x000
24 #define SVC_I3C_MCONFIG_MASTER_EN BIT(0)
25 #define SVC_I3C_MCONFIG_DISTO(x) FIELD_PREP(BIT(3), (x))
26 #define SVC_I3C_MCONFIG_HKEEP(x) FIELD_PREP(GENMASK(5, 4), (x))
27 #define SVC_I3C_MCONFIG_ODSTOP(x) FIELD_PREP(BIT(6), (x))
28 #define SVC_I3C_MCONFIG_PPBAUD(x) FIELD_PREP(GENMASK(11, 8), (x))
29 #define SVC_I3C_MCONFIG_PPLOW(x) FIELD_PREP(GENMASK(15, 12), (x))
30 #define SVC_I3C_MCONFIG_ODBAUD(x) FIELD_PREP(GENMASK(23, 16), (x))
31 #define SVC_I3C_MCONFIG_ODHPP(x) FIELD_PREP(BIT(24), (x))
32 #define SVC_I3C_MCONFIG_SKEW(x) FIELD_PREP(GENMASK(27, 25), (x))
33 #define SVC_I3C_MCONFIG_I2CBAUD(x) FIELD_PREP(GENMASK(31, 28), (x))
34
35 #define SVC_I3C_MCTRL 0x084
36 #define SVC_I3C_MCTRL_REQUEST_MASK GENMASK(2, 0)
37 #define SVC_I3C_MCTRL_REQUEST_NONE 0
38 #define SVC_I3C_MCTRL_REQUEST_START_ADDR 1
39 #define SVC_I3C_MCTRL_REQUEST_STOP 2
40 #define SVC_I3C_MCTRL_REQUEST_IBI_ACKNACK 3
41 #define SVC_I3C_MCTRL_REQUEST_PROC_DAA 4
42 #define SVC_I3C_MCTRL_REQUEST_AUTO_IBI 7
43 #define SVC_I3C_MCTRL_TYPE_I3C 0
44 #define SVC_I3C_MCTRL_TYPE_I2C BIT(4)
45 #define SVC_I3C_MCTRL_IBIRESP_AUTO 0
46 #define SVC_I3C_MCTRL_IBIRESP_ACK_WITHOUT_BYTE 0
47 #define SVC_I3C_MCTRL_IBIRESP_ACK_WITH_BYTE BIT(7)
48 #define SVC_I3C_MCTRL_IBIRESP_NACK BIT(6)
49 #define SVC_I3C_MCTRL_IBIRESP_MANUAL GENMASK(7, 6)
50 #define SVC_I3C_MCTRL_DIR(x) FIELD_PREP(BIT(8), (x))
51 #define SVC_I3C_MCTRL_DIR_WRITE 0
52 #define SVC_I3C_MCTRL_DIR_READ 1
53 #define SVC_I3C_MCTRL_ADDR(x) FIELD_PREP(GENMASK(15, 9), (x))
54 #define SVC_I3C_MCTRL_RDTERM(x) FIELD_PREP(GENMASK(23, 16), (x))
55
56 #define SVC_I3C_MSTATUS 0x088
57 #define SVC_I3C_MSTATUS_STATE(x) FIELD_GET(GENMASK(2, 0), (x))
58 #define SVC_I3C_MSTATUS_STATE_DAA(x) (SVC_I3C_MSTATUS_STATE(x) == 5)
59 #define SVC_I3C_MSTATUS_STATE_IDLE(x) (SVC_I3C_MSTATUS_STATE(x) == 0)
60 #define SVC_I3C_MSTATUS_BETWEEN(x) FIELD_GET(BIT(4), (x))
61 #define SVC_I3C_MSTATUS_NACKED(x) FIELD_GET(BIT(5), (x))
62 #define SVC_I3C_MSTATUS_IBITYPE(x) FIELD_GET(GENMASK(7, 6), (x))
63 #define SVC_I3C_MSTATUS_IBITYPE_IBI 1
64 #define SVC_I3C_MSTATUS_IBITYPE_MASTER_REQUEST 2
65 #define SVC_I3C_MSTATUS_IBITYPE_HOT_JOIN 3
66 #define SVC_I3C_MINT_SLVSTART BIT(8)
67 #define SVC_I3C_MINT_MCTRLDONE BIT(9)
68 #define SVC_I3C_MINT_COMPLETE BIT(10)
69 #define SVC_I3C_MINT_RXPEND BIT(11)
70 #define SVC_I3C_MINT_TXNOTFULL BIT(12)
71 #define SVC_I3C_MINT_IBIWON BIT(13)
72 #define SVC_I3C_MINT_ERRWARN BIT(15)
73 #define SVC_I3C_MSTATUS_SLVSTART(x) FIELD_GET(SVC_I3C_MINT_SLVSTART, (x))
74 #define SVC_I3C_MSTATUS_MCTRLDONE(x) FIELD_GET(SVC_I3C_MINT_MCTRLDONE, (x))
75 #define SVC_I3C_MSTATUS_COMPLETE(x) FIELD_GET(SVC_I3C_MINT_COMPLETE, (x))
76 #define SVC_I3C_MSTATUS_RXPEND(x) FIELD_GET(SVC_I3C_MINT_RXPEND, (x))
77 #define SVC_I3C_MSTATUS_TXNOTFULL(x) FIELD_GET(SVC_I3C_MINT_TXNOTFULL, (x))
78 #define SVC_I3C_MSTATUS_IBIWON(x) FIELD_GET(SVC_I3C_MINT_IBIWON, (x))
79 #define SVC_I3C_MSTATUS_ERRWARN(x) FIELD_GET(SVC_I3C_MINT_ERRWARN, (x))
80 #define SVC_I3C_MSTATUS_IBIADDR(x) FIELD_GET(GENMASK(30, 24), (x))
81
82 #define SVC_I3C_IBIRULES 0x08C
83 #define SVC_I3C_IBIRULES_ADDR(slot, addr) FIELD_PREP(GENMASK(29, 0), \
84 ((addr) & 0x3F) << ((slot) * 6))
85 #define SVC_I3C_IBIRULES_ADDRS 5
86 #define SVC_I3C_IBIRULES_MSB0 BIT(30)
87 #define SVC_I3C_IBIRULES_NOBYTE BIT(31)
88 #define SVC_I3C_IBIRULES_MANDBYTE 0
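/*
 * Illustration of the encoding above: each of the five IBIRULES slots holds
 * one 6-bit address, so slot 0 occupies bits 5:0, slot 1 bits 11:6, ... and
 * slot 4 bits 29:24. E.g. SVC_I3C_IBIRULES_ADDR(1, 0x0a) evaluates to 0x280.
 */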
89 #define SVC_I3C_MINTSET 0x090
90 #define SVC_I3C_MINTCLR 0x094
91 #define SVC_I3C_MINTMASKED 0x098
92 #define SVC_I3C_MERRWARN 0x09C
93 #define SVC_I3C_MDMACTRL 0x0A0
94 #define SVC_I3C_MDATACTRL 0x0AC
95 #define SVC_I3C_MDATACTRL_FLUSHTB BIT(0)
96 #define SVC_I3C_MDATACTRL_FLUSHRB BIT(1)
97 #define SVC_I3C_MDATACTRL_UNLOCK_TRIG BIT(3)
98 #define SVC_I3C_MDATACTRL_TXTRIG_FIFO_NOT_FULL GENMASK(5, 4)
99 #define SVC_I3C_MDATACTRL_RXTRIG_FIFO_NOT_EMPTY 0
100 #define SVC_I3C_MDATACTRL_RXCOUNT(x) FIELD_GET(GENMASK(28, 24), (x))
101 #define SVC_I3C_MDATACTRL_TXFULL BIT(30)
102 #define SVC_I3C_MDATACTRL_RXEMPTY BIT(31)
103
104 #define SVC_I3C_MWDATAB 0x0B0
105 #define SVC_I3C_MWDATAB_END BIT(8)
106
107 #define SVC_I3C_MWDATABE 0x0B4
108 #define SVC_I3C_MWDATAH 0x0B8
109 #define SVC_I3C_MWDATAHE 0x0BC
110 #define SVC_I3C_MRDATAB 0x0C0
111 #define SVC_I3C_MRDATAH 0x0C8
112 #define SVC_I3C_MWMSG_SDR 0x0D0
113 #define SVC_I3C_MRMSG_SDR 0x0D4
114 #define SVC_I3C_MWMSG_DDR 0x0D8
115 #define SVC_I3C_MRMSG_DDR 0x0DC
116
117 #define SVC_I3C_MDYNADDR 0x0E4
118 #define SVC_MDYNADDR_VALID BIT(0)
119 #define SVC_MDYNADDR_ADDR(x) FIELD_PREP(GENMASK(7, 1), (x))
120
121 #define SVC_I3C_MAX_DEVS 32
122
123 /* This parameter depends on the implementation and may be tuned */
124 #define SVC_I3C_FIFO_SIZE 16
125
126 struct svc_i3c_cmd {
127 u8 addr;
128 bool rnw;
129 u8 *in;
130 const void *out;
131 unsigned int len;
132 unsigned int read_len;
133 bool continued;
134 };
135
136 struct svc_i3c_xfer {
137 struct list_head node;
138 struct completion comp;
139 int ret;
140 unsigned int type;
141 unsigned int ncmds;
142 struct svc_i3c_cmd cmds[];
143 };
144
145 /**
146 * struct svc_i3c_master - Silvaco I3C Master structure
147 * @base: I3C master controller
148 * @dev: Corresponding device
149 * @regs: Memory mapping
150 * @free_slots: Bit array of available slots
151 * @addrs: Array containing the dynamic addresses of each attached device
152 * @descs: Array of descriptors, one per attached device
153 * @hj_work: Hot-join work
154 * @ibi_work: IBI work
155 * @irq: Main interrupt
156 * @pclk: System clock
157 * @fclk: Fast clock (bus)
158 * @sclk: Slow clock (other events)
159 * @xferqueue: Transfer queue structure
160 * @xferqueue.list: List member
161 * @xferqueue.cur: Current ongoing transfer
162 * @xferqueue.lock: Queue lock
163 * @ibi: IBI structure
164 * @ibi.num_slots: Number of slots available in @ibi.slots
165 * @ibi.slots: Available IBI slots
166 * @ibi.tbq_slot: To be queued IBI slot
167 * @ibi.lock: IBI lock
* @lock: Transfer lock, protects against races between the IBI work thread and master callbacks
169 */
170 struct svc_i3c_master {
171 struct i3c_master_controller base;
172 struct device *dev;
173 void __iomem *regs;
174 u32 free_slots;
175 u8 addrs[SVC_I3C_MAX_DEVS];
176 struct i3c_dev_desc *descs[SVC_I3C_MAX_DEVS];
177 struct work_struct hj_work;
178 struct work_struct ibi_work;
179 int irq;
180 struct clk *pclk;
181 struct clk *fclk;
182 struct clk *sclk;
183 struct {
184 struct list_head list;
185 struct svc_i3c_xfer *cur;
186 /* Prevent races between transfers */
187 spinlock_t lock;
188 } xferqueue;
189 struct {
190 unsigned int num_slots;
191 struct i3c_dev_desc **slots;
192 struct i3c_ibi_slot *tbq_slot;
193 /* Prevent races within IBI handlers */
194 spinlock_t lock;
195 } ibi;
196 struct mutex lock;
197 };
198
199 /**
200 * struct svc_i3c_i2c_dev_data - Device specific data
201 * @index: Index in the master tables corresponding to this device
202 * @ibi: IBI slot index in the master structure
203 * @ibi_pool: IBI pool associated to this device
204 */
205 struct svc_i3c_i2c_dev_data {
206 u8 index;
207 int ibi;
208 struct i3c_generic_ibi_pool *ibi_pool;
209 };
210
static bool svc_i3c_master_error(struct svc_i3c_master *master)
212 {
213 u32 mstatus, merrwarn;
214
215 mstatus = readl(master->regs + SVC_I3C_MSTATUS);
216 if (SVC_I3C_MSTATUS_ERRWARN(mstatus)) {
217 merrwarn = readl(master->regs + SVC_I3C_MERRWARN);
218 writel(merrwarn, master->regs + SVC_I3C_MERRWARN);
219 dev_err(master->dev,
220 "Error condition: MSTATUS 0x%08x, MERRWARN 0x%08x\n",
221 mstatus, merrwarn);
222
223 return true;
224 }
225
226 return false;
227 }
228
static void svc_i3c_master_enable_interrupts(struct svc_i3c_master *master, u32 mask)
230 {
231 writel(mask, master->regs + SVC_I3C_MINTSET);
232 }
233
static void svc_i3c_master_disable_interrupts(struct svc_i3c_master *master)
235 {
236 u32 mask = readl(master->regs + SVC_I3C_MINTSET);
237
238 writel(mask, master->regs + SVC_I3C_MINTCLR);
239 }
240
static inline struct svc_i3c_master *
to_svc_i3c_master(struct i3c_master_controller *master)
243 {
244 return container_of(master, struct svc_i3c_master, base);
245 }
246
static void svc_i3c_master_hj_work(struct work_struct *work)
248 {
249 struct svc_i3c_master *master;
250
251 master = container_of(work, struct svc_i3c_master, hj_work);
252 i3c_master_do_daa(&master->base);
253 }
254
static struct i3c_dev_desc *
svc_i3c_master_dev_from_addr(struct svc_i3c_master *master,
        unsigned int ibiaddr)
258 {
259 int i;
260
261 for (i = 0; i < SVC_I3C_MAX_DEVS; i++)
262 if (master->addrs[i] == ibiaddr)
263 break;
264
265 if (i == SVC_I3C_MAX_DEVS)
266 return NULL;
267
268 return master->descs[i];
269 }
270
static void svc_i3c_master_emit_stop(struct svc_i3c_master *master)
272 {
273 writel(SVC_I3C_MCTRL_REQUEST_STOP, master->regs + SVC_I3C_MCTRL);
274
275 /*
* This delay is necessary after the emission of a stop, otherwise, e.g.,
* repeating IBIs do not get detected. There is a note in the manual
* about it, stating that the stop condition might not be settled
* correctly if a start condition follows too rapidly.
280 */
281 udelay(1);
282 }
283
static void svc_i3c_master_clear_merrwarn(struct svc_i3c_master *master)
285 {
286 writel(readl(master->regs + SVC_I3C_MERRWARN),
287 master->regs + SVC_I3C_MERRWARN);
288 }
289
static int svc_i3c_master_handle_ibi(struct svc_i3c_master *master,
        struct i3c_dev_desc *dev)
292 {
293 struct svc_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
294 struct i3c_ibi_slot *slot;
295 unsigned int count;
296 u32 mdatactrl;
297 int ret, val;
298 u8 *buf;
299
300 slot = i3c_generic_ibi_get_free_slot(data->ibi_pool);
301 if (!slot)
302 return -ENOSPC;
303
304 slot->len = 0;
305 buf = slot->data;
306
307 ret = readl_relaxed_poll_timeout(master->regs + SVC_I3C_MSTATUS, val,
308 SVC_I3C_MSTATUS_COMPLETE(val), 0, 1000);
309 if (ret) {
310 dev_err(master->dev, "Timeout when polling for COMPLETE\n");
311 return ret;
312 }
313
314 while (SVC_I3C_MSTATUS_RXPEND(readl(master->regs + SVC_I3C_MSTATUS)) &&
315 slot->len < SVC_I3C_FIFO_SIZE) {
316 mdatactrl = readl(master->regs + SVC_I3C_MDATACTRL);
317 count = SVC_I3C_MDATACTRL_RXCOUNT(mdatactrl);
318 readsl(master->regs + SVC_I3C_MRDATAB, buf, count);
319 slot->len += count;
320 buf += count;
321 }
322
323 master->ibi.tbq_slot = slot;
324
325 return 0;
326 }
327
static void svc_i3c_master_ack_ibi(struct svc_i3c_master *master,
        bool mandatory_byte)
330 {
331 unsigned int ibi_ack_nack;
332
333 ibi_ack_nack = SVC_I3C_MCTRL_REQUEST_IBI_ACKNACK;
334 if (mandatory_byte)
335 ibi_ack_nack |= SVC_I3C_MCTRL_IBIRESP_ACK_WITH_BYTE;
336 else
337 ibi_ack_nack |= SVC_I3C_MCTRL_IBIRESP_ACK_WITHOUT_BYTE;
338
339 writel(ibi_ack_nack, master->regs + SVC_I3C_MCTRL);
340 }
341
static void svc_i3c_master_nack_ibi(struct svc_i3c_master *master)
343 {
344 writel(SVC_I3C_MCTRL_REQUEST_IBI_ACKNACK |
345 SVC_I3C_MCTRL_IBIRESP_NACK,
346 master->regs + SVC_I3C_MCTRL);
347 }
348
static void svc_i3c_master_ibi_work(struct work_struct *work)
350 {
351 struct svc_i3c_master *master = container_of(work, struct svc_i3c_master, ibi_work);
352 struct svc_i3c_i2c_dev_data *data;
353 unsigned int ibitype, ibiaddr;
354 struct i3c_dev_desc *dev;
355 u32 status, val;
356 int ret;
357
358 mutex_lock(&master->lock);
359 /* Acknowledge the incoming interrupt with the AUTOIBI mechanism */
360 writel(SVC_I3C_MCTRL_REQUEST_AUTO_IBI |
361 SVC_I3C_MCTRL_IBIRESP_AUTO,
362 master->regs + SVC_I3C_MCTRL);
363
364 /* Wait for IBIWON, should take approximately 100us */
365 ret = readl_relaxed_poll_timeout(master->regs + SVC_I3C_MSTATUS, val,
366 SVC_I3C_MSTATUS_IBIWON(val), 0, 1000);
367 if (ret) {
368 dev_err(master->dev, "Timeout when polling for IBIWON\n");
369 svc_i3c_master_emit_stop(master);
370 goto reenable_ibis;
371 }
372
373 /* Clear the interrupt status */
374 writel(SVC_I3C_MINT_IBIWON, master->regs + SVC_I3C_MSTATUS);
375
376 status = readl(master->regs + SVC_I3C_MSTATUS);
377 ibitype = SVC_I3C_MSTATUS_IBITYPE(status);
378 ibiaddr = SVC_I3C_MSTATUS_IBIADDR(status);
379
/* Handle the critical responses to IBIs */
381 switch (ibitype) {
382 case SVC_I3C_MSTATUS_IBITYPE_IBI:
383 dev = svc_i3c_master_dev_from_addr(master, ibiaddr);
384 if (!dev)
385 svc_i3c_master_nack_ibi(master);
386 else
387 svc_i3c_master_handle_ibi(master, dev);
388 break;
389 case SVC_I3C_MSTATUS_IBITYPE_HOT_JOIN:
390 svc_i3c_master_ack_ibi(master, false);
391 break;
392 case SVC_I3C_MSTATUS_IBITYPE_MASTER_REQUEST:
393 svc_i3c_master_nack_ibi(master);
394 break;
395 default:
396 break;
397 }
398
399 /*
400 * If an error happened, we probably got interrupted and the exchange
* timed out. In this case we just drop everything, emit a stop and wait
402 * for the slave to interrupt again.
403 */
404 if (svc_i3c_master_error(master)) {
405 if (master->ibi.tbq_slot) {
406 data = i3c_dev_get_master_data(dev);
407 i3c_generic_ibi_recycle_slot(data->ibi_pool,
408 master->ibi.tbq_slot);
409 master->ibi.tbq_slot = NULL;
410 }
411
412 svc_i3c_master_emit_stop(master);
413
414 goto reenable_ibis;
415 }
416
/* Handle the non-critical tasks */
418 switch (ibitype) {
419 case SVC_I3C_MSTATUS_IBITYPE_IBI:
420 if (dev) {
421 i3c_master_queue_ibi(dev, master->ibi.tbq_slot);
422 master->ibi.tbq_slot = NULL;
423 }
424 svc_i3c_master_emit_stop(master);
425 break;
426 case SVC_I3C_MSTATUS_IBITYPE_HOT_JOIN:
427 queue_work(master->base.wq, &master->hj_work);
428 break;
429 case SVC_I3C_MSTATUS_IBITYPE_MASTER_REQUEST:
430 default:
431 break;
432 }
433
434 reenable_ibis:
435 svc_i3c_master_enable_interrupts(master, SVC_I3C_MINT_SLVSTART);
436 mutex_unlock(&master->lock);
437 }
438
static irqreturn_t svc_i3c_master_irq_handler(int irq, void *dev_id)
440 {
441 struct svc_i3c_master *master = (struct svc_i3c_master *)dev_id;
442 u32 active = readl(master->regs + SVC_I3C_MSTATUS);
443
444 if (!SVC_I3C_MSTATUS_SLVSTART(active))
445 return IRQ_NONE;
446
447 /* Clear the interrupt status */
448 writel(SVC_I3C_MINT_SLVSTART, master->regs + SVC_I3C_MSTATUS);
449
450 svc_i3c_master_disable_interrupts(master);
451
452 /* Handle the interrupt in a non atomic context */
453 queue_work(master->base.wq, &master->ibi_work);
454
455 return IRQ_HANDLED;
456 }
457
static int svc_i3c_master_bus_init(struct i3c_master_controller *m)
459 {
460 struct svc_i3c_master *master = to_svc_i3c_master(m);
461 struct i3c_bus *bus = i3c_master_get_bus(m);
462 struct i3c_device_info info = {};
463 unsigned long fclk_rate, fclk_period_ns;
464 unsigned int high_period_ns, od_low_period_ns;
465 u32 ppbaud, pplow, odhpp, odbaud, i2cbaud, reg;
466 int ret;
467
468 /* Timings derivation */
469 fclk_rate = clk_get_rate(master->fclk);
470 if (!fclk_rate)
471 return -EINVAL;
472
473 fclk_period_ns = DIV_ROUND_UP(1000000000, fclk_rate);
474
475 /*
476 * Using I3C Push-Pull mode, target is 12.5MHz/80ns period.
477 * Simplest configuration is using a 50% duty-cycle of 40ns.
478 */
479 ppbaud = DIV_ROUND_UP(40, fclk_period_ns) - 1;
480 pplow = 0;
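/*
 * Worked example (illustrative numbers only, assuming fclk = 100 MHz):
 * fclk_period_ns = 10, so ppbaud = DIV_ROUND_UP(40, 10) - 1 = 3 and both
 * push-pull phases last (ppbaud + 1) * 10 ns = 40 ns, i.e. a 12.5 MHz SCL.
 */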
481
482 /*
483 * Using I3C Open-Drain mode, target is 4.17MHz/240ns with a
* duty-cycle tuned so that high levels are filtered out by
485 * the 50ns filter (target being 40ns).
486 */
487 odhpp = 1;
488 high_period_ns = (ppbaud + 1) * fclk_period_ns;
489 odbaud = DIV_ROUND_UP(240 - high_period_ns, high_period_ns) - 1;
490 od_low_period_ns = (odbaud + 1) * high_period_ns;
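/*
 * Continuing the illustrative 100 MHz fclk example: high_period_ns = 40,
 * odbaud = DIV_ROUND_UP(240 - 40, 40) - 1 = 4, od_low_period_ns = 200,
 * which yields the targeted 40 ns + 200 ns = 240 ns open-drain period.
 */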
491
492 switch (bus->mode) {
493 case I3C_BUS_MODE_PURE:
494 i2cbaud = 0;
495 break;
496 case I3C_BUS_MODE_MIXED_FAST:
497 case I3C_BUS_MODE_MIXED_LIMITED:
498 /*
499 * Using I2C Fm+ mode, target is 1MHz/1000ns, the difference
500 * between the high and low period does not really matter.
501 */
502 i2cbaud = DIV_ROUND_UP(1000, od_low_period_ns) - 2;
503 break;
504 case I3C_BUS_MODE_MIXED_SLOW:
505 /*
506 * Using I2C Fm mode, target is 0.4MHz/2500ns, with the same
507 * constraints as the FM+ mode.
508 */
509 i2cbaud = DIV_ROUND_UP(2500, od_low_period_ns) - 2;
510 break;
511 default:
512 return -EINVAL;
513 }
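/*
 * With the same illustrative numbers (od_low_period_ns = 200 ns), Fm+ gives
 * i2cbaud = DIV_ROUND_UP(1000, 200) - 2 = 3 and Fm gives
 * i2cbaud = DIV_ROUND_UP(2500, 200) - 2 = 11.
 */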
514
515 reg = SVC_I3C_MCONFIG_MASTER_EN |
516 SVC_I3C_MCONFIG_DISTO(0) |
517 SVC_I3C_MCONFIG_HKEEP(0) |
518 SVC_I3C_MCONFIG_ODSTOP(0) |
519 SVC_I3C_MCONFIG_PPBAUD(ppbaud) |
520 SVC_I3C_MCONFIG_PPLOW(pplow) |
521 SVC_I3C_MCONFIG_ODBAUD(odbaud) |
522 SVC_I3C_MCONFIG_ODHPP(odhpp) |
523 SVC_I3C_MCONFIG_SKEW(0) |
524 SVC_I3C_MCONFIG_I2CBAUD(i2cbaud);
525 writel(reg, master->regs + SVC_I3C_MCONFIG);
526
527 /* Master core's registration */
528 ret = i3c_master_get_free_addr(m, 0);
529 if (ret < 0)
530 return ret;
531
532 info.dyn_addr = ret;
533
534 writel(SVC_MDYNADDR_VALID | SVC_MDYNADDR_ADDR(info.dyn_addr),
535 master->regs + SVC_I3C_MDYNADDR);
536
537 ret = i3c_master_set_info(&master->base, &info);
538 if (ret)
539 return ret;
540
541 svc_i3c_master_enable_interrupts(master, SVC_I3C_MINT_SLVSTART);
542
543 return 0;
544 }
545
static void svc_i3c_master_bus_cleanup(struct i3c_master_controller *m)
547 {
548 struct svc_i3c_master *master = to_svc_i3c_master(m);
549
550 svc_i3c_master_disable_interrupts(master);
551
552 /* Disable master */
553 writel(0, master->regs + SVC_I3C_MCONFIG);
554 }
555
static int svc_i3c_master_reserve_slot(struct svc_i3c_master *master)
557 {
558 unsigned int slot;
559
560 if (!(master->free_slots & GENMASK(SVC_I3C_MAX_DEVS - 1, 0)))
561 return -ENOSPC;
562
563 slot = ffs(master->free_slots) - 1;
564
565 master->free_slots &= ~BIT(slot);
566
567 return slot;
568 }
569
static void svc_i3c_master_release_slot(struct svc_i3c_master *master,
        unsigned int slot)
572 {
573 master->free_slots |= BIT(slot);
574 }
575
static int svc_i3c_master_attach_i3c_dev(struct i3c_dev_desc *dev)
577 {
578 struct i3c_master_controller *m = i3c_dev_get_master(dev);
579 struct svc_i3c_master *master = to_svc_i3c_master(m);
580 struct svc_i3c_i2c_dev_data *data;
581 int slot;
582
583 slot = svc_i3c_master_reserve_slot(master);
584 if (slot < 0)
585 return slot;
586
587 data = kzalloc(sizeof(*data), GFP_KERNEL);
588 if (!data) {
589 svc_i3c_master_release_slot(master, slot);
590 return -ENOMEM;
591 }
592
593 data->ibi = -1;
594 data->index = slot;
595 master->addrs[slot] = dev->info.dyn_addr ? dev->info.dyn_addr :
596 dev->info.static_addr;
597 master->descs[slot] = dev;
598
599 i3c_dev_set_master_data(dev, data);
600
601 return 0;
602 }
603
static int svc_i3c_master_reattach_i3c_dev(struct i3c_dev_desc *dev,
        u8 old_dyn_addr)
606 {
607 struct i3c_master_controller *m = i3c_dev_get_master(dev);
608 struct svc_i3c_master *master = to_svc_i3c_master(m);
609 struct svc_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
610
611 master->addrs[data->index] = dev->info.dyn_addr ? dev->info.dyn_addr :
612 dev->info.static_addr;
613
614 return 0;
615 }
616
static void svc_i3c_master_detach_i3c_dev(struct i3c_dev_desc *dev)
618 {
619 struct svc_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
620 struct i3c_master_controller *m = i3c_dev_get_master(dev);
621 struct svc_i3c_master *master = to_svc_i3c_master(m);
622
623 master->addrs[data->index] = 0;
624 svc_i3c_master_release_slot(master, data->index);
625
626 kfree(data);
627 }
628
static int svc_i3c_master_attach_i2c_dev(struct i2c_dev_desc *dev)
630 {
631 struct i3c_master_controller *m = i2c_dev_get_master(dev);
632 struct svc_i3c_master *master = to_svc_i3c_master(m);
633 struct svc_i3c_i2c_dev_data *data;
634 int slot;
635
636 slot = svc_i3c_master_reserve_slot(master);
637 if (slot < 0)
638 return slot;
639
640 data = kzalloc(sizeof(*data), GFP_KERNEL);
641 if (!data) {
642 svc_i3c_master_release_slot(master, slot);
643 return -ENOMEM;
644 }
645
646 data->index = slot;
647 master->addrs[slot] = dev->addr;
648
649 i2c_dev_set_master_data(dev, data);
650
651 return 0;
652 }
653
static void svc_i3c_master_detach_i2c_dev(struct i2c_dev_desc *dev)
655 {
656 struct svc_i3c_i2c_dev_data *data = i2c_dev_get_master_data(dev);
657 struct i3c_master_controller *m = i2c_dev_get_master(dev);
658 struct svc_i3c_master *master = to_svc_i3c_master(m);
659
660 svc_i3c_master_release_slot(master, data->index);
661
662 kfree(data);
663 }
664
static int svc_i3c_master_readb(struct svc_i3c_master *master, u8 *dst,
        unsigned int len)
667 {
668 int ret, i;
669 u32 reg;
670
671 for (i = 0; i < len; i++) {
672 ret = readl_poll_timeout(master->regs + SVC_I3C_MSTATUS, reg,
673 SVC_I3C_MSTATUS_RXPEND(reg), 0, 1000);
674 if (ret)
675 return ret;
676
677 dst[i] = readl(master->regs + SVC_I3C_MRDATAB);
678 }
679
680 return 0;
681 }
682
static int svc_i3c_master_do_daa_locked(struct svc_i3c_master *master,
        u8 *addrs, unsigned int *count)
685 {
686 u64 prov_id[SVC_I3C_MAX_DEVS] = {}, nacking_prov_id = 0;
687 unsigned int dev_nb = 0, last_addr = 0;
688 u32 reg;
689 int ret, i;
690
691 while (true) {
692 /* Enter/proceed with DAA */
693 writel(SVC_I3C_MCTRL_REQUEST_PROC_DAA |
694 SVC_I3C_MCTRL_TYPE_I3C |
695 SVC_I3C_MCTRL_IBIRESP_NACK |
696 SVC_I3C_MCTRL_DIR(SVC_I3C_MCTRL_DIR_WRITE),
697 master->regs + SVC_I3C_MCTRL);
698
699 /*
700 * Either one slave will send its ID, or the assignment process
701 * is done.
702 */
703 ret = readl_poll_timeout(master->regs + SVC_I3C_MSTATUS, reg,
704 SVC_I3C_MSTATUS_RXPEND(reg) |
705 SVC_I3C_MSTATUS_MCTRLDONE(reg),
706 1, 1000);
707 if (ret)
708 return ret;
709
710 if (SVC_I3C_MSTATUS_RXPEND(reg)) {
711 u8 data[6];
712
713 /*
* For now we only care about the 48-bit provisional ID, so that
* we can tell whether a device nacks an address twice.
* Otherwise, we would just need to flush the RX FIFO.
717 */
718 ret = svc_i3c_master_readb(master, data, 6);
719 if (ret)
720 return ret;
721
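/*
 * The first byte received (data[0]) is the most significant one and
 * ends up in bits 47:40 of the 48-bit provisional ID.
 */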
722 for (i = 0; i < 6; i++)
723 prov_id[dev_nb] |= (u64)(data[i]) << (8 * (5 - i));
724
725 /* We do not care about the BCR and DCR yet */
726 ret = svc_i3c_master_readb(master, data, 2);
727 if (ret)
728 return ret;
729 } else if (SVC_I3C_MSTATUS_MCTRLDONE(reg)) {
730 if (SVC_I3C_MSTATUS_STATE_IDLE(reg) &&
731 SVC_I3C_MSTATUS_COMPLETE(reg)) {
732 /*
* All devices received and acked their dynamic
* address; this is the natural end of the DAA
735 * procedure.
736 */
737 break;
738 } else if (SVC_I3C_MSTATUS_NACKED(reg)) {
739 /* No I3C devices attached */
740 if (dev_nb == 0)
741 break;
742
743 /*
744 * A slave device nacked the address, this is
745 * allowed only once, DAA will be stopped and
746 * then resumed. The same device is supposed to
747 * answer again immediately and shall ack the
748 * address this time.
749 */
750 if (prov_id[dev_nb] == nacking_prov_id)
751 return -EIO;
752
753 dev_nb--;
754 nacking_prov_id = prov_id[dev_nb];
755 svc_i3c_master_emit_stop(master);
756
757 continue;
758 } else {
759 return -EIO;
760 }
761 }
762
763 /* Wait for the slave to be ready to receive its address */
764 ret = readl_poll_timeout(master->regs + SVC_I3C_MSTATUS, reg,
765 SVC_I3C_MSTATUS_MCTRLDONE(reg) &&
766 SVC_I3C_MSTATUS_STATE_DAA(reg) &&
767 SVC_I3C_MSTATUS_BETWEEN(reg),
768 0, 1000);
769 if (ret)
770 return ret;
771
772 /* Give the slave device a suitable dynamic address */
773 ret = i3c_master_get_free_addr(&master->base, last_addr + 1);
774 if (ret < 0)
775 return ret;
776
777 addrs[dev_nb] = ret;
778 dev_dbg(master->dev, "DAA: device %d assigned to 0x%02x\n",
779 dev_nb, addrs[dev_nb]);
780
781 writel(addrs[dev_nb], master->regs + SVC_I3C_MWDATAB);
782 last_addr = addrs[dev_nb++];
783 }
784
785 *count = dev_nb;
786
787 return 0;
788 }
789
static int svc_i3c_update_ibirules(struct svc_i3c_master *master)
791 {
792 struct i3c_dev_desc *dev;
793 u32 reg_mbyte = 0, reg_nobyte = SVC_I3C_IBIRULES_NOBYTE;
794 unsigned int mbyte_addr_ok = 0, mbyte_addr_ko = 0, nobyte_addr_ok = 0,
795 nobyte_addr_ko = 0;
796 bool list_mbyte = false, list_nobyte = false;
797
798 /* Create the IBIRULES register for both cases */
799 i3c_bus_for_each_i3cdev(&master->base.bus, dev) {
800 if (I3C_BCR_DEVICE_ROLE(dev->info.bcr) == I3C_BCR_I3C_MASTER)
801 continue;
802
803 if (dev->info.bcr & I3C_BCR_IBI_PAYLOAD) {
804 reg_mbyte |= SVC_I3C_IBIRULES_ADDR(mbyte_addr_ok,
805 dev->info.dyn_addr);
806
807 /* IBI rules cannot be applied to devices with MSb=1 */
808 if (dev->info.dyn_addr & BIT(7))
809 mbyte_addr_ko++;
810 else
811 mbyte_addr_ok++;
812 } else {
813 reg_nobyte |= SVC_I3C_IBIRULES_ADDR(nobyte_addr_ok,
814 dev->info.dyn_addr);
815
816 /* IBI rules cannot be applied to devices with MSb=1 */
817 if (dev->info.dyn_addr & BIT(7))
818 nobyte_addr_ko++;
819 else
820 nobyte_addr_ok++;
821 }
822 }
823
/* Check whether each device list can be handled by the hardware */
825 if (!mbyte_addr_ko && mbyte_addr_ok <= SVC_I3C_IBIRULES_ADDRS)
826 list_mbyte = true;
827
828 if (!nobyte_addr_ko && nobyte_addr_ok <= SVC_I3C_IBIRULES_ADDRS)
829 list_nobyte = true;
830
831 /* No list can be properly handled, return an error */
832 if (!list_mbyte && !list_nobyte)
833 return -ERANGE;
834
/* Pick a list the hardware can handle, preferring the mandatory-byte one */
836 if (list_mbyte)
837 writel(reg_mbyte, master->regs + SVC_I3C_IBIRULES);
838 else
839 writel(reg_nobyte, master->regs + SVC_I3C_IBIRULES);
840
841 return 0;
842 }
843
static int svc_i3c_master_do_daa(struct i3c_master_controller *m)
845 {
846 struct svc_i3c_master *master = to_svc_i3c_master(m);
847 u8 addrs[SVC_I3C_MAX_DEVS];
848 unsigned long flags;
849 unsigned int dev_nb;
850 int ret, i;
851
852 spin_lock_irqsave(&master->xferqueue.lock, flags);
853 ret = svc_i3c_master_do_daa_locked(master, addrs, &dev_nb);
854 spin_unlock_irqrestore(&master->xferqueue.lock, flags);
855 if (ret)
856 goto emit_stop;
857
/* Register all devices that participated in the DAA with the core */
859 for (i = 0; i < dev_nb; i++) {
860 ret = i3c_master_add_i3c_dev_locked(m, addrs[i]);
861 if (ret)
862 return ret;
863 }
864
865 /* Configure IBI auto-rules */
866 ret = svc_i3c_update_ibirules(master);
867 if (ret) {
868 dev_err(master->dev, "Cannot handle such a list of devices");
869 return ret;
870 }
871
872 return 0;
873
874 emit_stop:
875 svc_i3c_master_emit_stop(master);
876 svc_i3c_master_clear_merrwarn(master);
877
878 return ret;
879 }
880
static int svc_i3c_master_read(struct svc_i3c_master *master,
        u8 *in, unsigned int len)
883 {
884 int offset = 0, i, ret;
885 u32 mdctrl;
886
887 while (offset < len) {
888 unsigned int count;
889
890 ret = readl_poll_timeout(master->regs + SVC_I3C_MDATACTRL,
891 mdctrl,
892 !(mdctrl & SVC_I3C_MDATACTRL_RXEMPTY),
893 0, 1000);
894 if (ret)
895 return ret;
896
897 count = SVC_I3C_MDATACTRL_RXCOUNT(mdctrl);
898 for (i = 0; i < count; i++)
899 in[offset + i] = readl(master->regs + SVC_I3C_MRDATAB);
900
901 offset += count;
902 }
903
904 return 0;
905 }
906
static int svc_i3c_master_write(struct svc_i3c_master *master,
        const u8 *out, unsigned int len)
909 {
910 int offset = 0, ret;
911 u32 mdctrl;
912
913 while (offset < len) {
914 ret = readl_poll_timeout(master->regs + SVC_I3C_MDATACTRL,
915 mdctrl,
916 !(mdctrl & SVC_I3C_MDATACTRL_TXFULL),
917 0, 1000);
918 if (ret)
919 return ret;
920
921 /*
922 * The last byte to be sent over the bus must either have the
923 * "end" bit set or be written in MWDATABE.
924 */
925 if (likely(offset < (len - 1)))
926 writel(out[offset++], master->regs + SVC_I3C_MWDATAB);
927 else
928 writel(out[offset++], master->regs + SVC_I3C_MWDATABE);
929 }
930
931 return 0;
932 }
933
static int svc_i3c_master_xfer(struct svc_i3c_master *master,
        bool rnw, unsigned int xfer_type, u8 addr,
        u8 *in, const u8 *out, unsigned int xfer_len,
        unsigned int read_len, bool continued)
938 {
939 u32 reg;
940 int ret;
941
/* Clear the write-1-to-clear SVC_I3C_MINT_IBIWON bit */
943 writel(SVC_I3C_MINT_IBIWON, master->regs + SVC_I3C_MSTATUS);
944
945 writel(SVC_I3C_MCTRL_REQUEST_START_ADDR |
946 xfer_type |
947 SVC_I3C_MCTRL_IBIRESP_NACK |
948 SVC_I3C_MCTRL_DIR(rnw) |
949 SVC_I3C_MCTRL_ADDR(addr) |
950 SVC_I3C_MCTRL_RDTERM(read_len),
951 master->regs + SVC_I3C_MCTRL);
952
953 ret = readl_poll_timeout(master->regs + SVC_I3C_MSTATUS, reg,
954 SVC_I3C_MSTATUS_MCTRLDONE(reg), 0, 1000);
955 if (ret)
956 goto emit_stop;
957
958 /*
959 * According to I3C spec ver 1.1.1, 5.1.2.2.3 Consequence of Controller Starting a Frame
960 * with I3C Target Address.
961 *
962 * The I3C Controller normally should start a Frame, the Address may be arbitrated, and so
963 * the Controller shall monitor to see whether an In-Band Interrupt request, a Controller
964 * Role Request (i.e., Secondary Controller requests to become the Active Controller), or
965 * a Hot-Join Request has been made.
966 *
* If the IBIWON check is missed, wrong data will be returned. When IBIWON happens, return a
* failure and let the event handlers above run.
969 */
970 if (SVC_I3C_MSTATUS_IBIWON(reg)) {
971 ret = -ENXIO;
972 goto emit_stop;
973 }
974
975 if (rnw)
976 ret = svc_i3c_master_read(master, in, xfer_len);
977 else
978 ret = svc_i3c_master_write(master, out, xfer_len);
979 if (ret)
980 goto emit_stop;
981
982 ret = readl_poll_timeout(master->regs + SVC_I3C_MSTATUS, reg,
983 SVC_I3C_MSTATUS_COMPLETE(reg), 0, 1000);
984 if (ret)
985 goto emit_stop;
986
987 if (!continued)
988 svc_i3c_master_emit_stop(master);
989
990 return 0;
991
992 emit_stop:
993 svc_i3c_master_emit_stop(master);
994 svc_i3c_master_clear_merrwarn(master);
995
996 return ret;
997 }
998
static struct svc_i3c_xfer *
svc_i3c_master_alloc_xfer(struct svc_i3c_master *master, unsigned int ncmds)
1001 {
1002 struct svc_i3c_xfer *xfer;
1003
1004 xfer = kzalloc(struct_size(xfer, cmds, ncmds), GFP_KERNEL);
1005 if (!xfer)
1006 return NULL;
1007
1008 INIT_LIST_HEAD(&xfer->node);
1009 xfer->ncmds = ncmds;
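/*
 * Default to -ETIMEDOUT: this is the value reported if the caller times
 * out waiting for the completion and dequeues the transfer.
 */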
1010 xfer->ret = -ETIMEDOUT;
1011
1012 return xfer;
1013 }
1014
static void svc_i3c_master_free_xfer(struct svc_i3c_xfer *xfer)
1016 {
1017 kfree(xfer);
1018 }
1019
static void svc_i3c_master_dequeue_xfer_locked(struct svc_i3c_master *master,
        struct svc_i3c_xfer *xfer)
1022 {
1023 if (master->xferqueue.cur == xfer)
1024 master->xferqueue.cur = NULL;
1025 else
1026 list_del_init(&xfer->node);
1027 }
1028
static void svc_i3c_master_dequeue_xfer(struct svc_i3c_master *master,
        struct svc_i3c_xfer *xfer)
1031 {
1032 unsigned long flags;
1033
1034 spin_lock_irqsave(&master->xferqueue.lock, flags);
1035 svc_i3c_master_dequeue_xfer_locked(master, xfer);
1036 spin_unlock_irqrestore(&master->xferqueue.lock, flags);
1037 }
1038
static void svc_i3c_master_start_xfer_locked(struct svc_i3c_master *master)
1040 {
1041 struct svc_i3c_xfer *xfer = master->xferqueue.cur;
1042 int ret, i;
1043
1044 if (!xfer)
1045 return;
1046
1047 for (i = 0; i < xfer->ncmds; i++) {
1048 struct svc_i3c_cmd *cmd = &xfer->cmds[i];
1049
1050 ret = svc_i3c_master_xfer(master, cmd->rnw, xfer->type,
1051 cmd->addr, cmd->in, cmd->out,
1052 cmd->len, cmd->read_len,
1053 cmd->continued);
1054 if (ret)
1055 break;
1056 }
1057
1058 xfer->ret = ret;
1059 complete(&xfer->comp);
1060
1061 if (ret < 0)
1062 svc_i3c_master_dequeue_xfer_locked(master, xfer);
1063
1064 xfer = list_first_entry_or_null(&master->xferqueue.list,
1065 struct svc_i3c_xfer,
1066 node);
1067 if (xfer)
1068 list_del_init(&xfer->node);
1069
1070 master->xferqueue.cur = xfer;
1071 svc_i3c_master_start_xfer_locked(master);
1072 }
1073
static void svc_i3c_master_enqueue_xfer(struct svc_i3c_master *master,
        struct svc_i3c_xfer *xfer)
1076 {
1077 unsigned long flags;
1078
1079 init_completion(&xfer->comp);
1080 spin_lock_irqsave(&master->xferqueue.lock, flags);
1081 if (master->xferqueue.cur) {
1082 list_add_tail(&xfer->node, &master->xferqueue.list);
1083 } else {
1084 master->xferqueue.cur = xfer;
1085 svc_i3c_master_start_xfer_locked(master);
1086 }
1087 spin_unlock_irqrestore(&master->xferqueue.lock, flags);
1088 }
1089
static bool
svc_i3c_master_supports_ccc_cmd(struct i3c_master_controller *master,
        const struct i3c_ccc_cmd *cmd)
1093 {
1094 /* No software support for CCC commands targeting more than one slave */
1095 return (cmd->ndests == 1);
1096 }
1097
static int svc_i3c_master_send_bdcast_ccc_cmd(struct svc_i3c_master *master,
        struct i3c_ccc_cmd *ccc)
1100 {
1101 unsigned int xfer_len = ccc->dests[0].payload.len + 1;
1102 struct svc_i3c_xfer *xfer;
1103 struct svc_i3c_cmd *cmd;
1104 u8 *buf;
1105 int ret;
1106
1107 xfer = svc_i3c_master_alloc_xfer(master, 1);
1108 if (!xfer)
1109 return -ENOMEM;
1110
1111 buf = kmalloc(xfer_len, GFP_KERNEL);
1112 if (!buf) {
1113 svc_i3c_master_free_xfer(xfer);
1114 return -ENOMEM;
1115 }
1116
1117 buf[0] = ccc->id;
1118 memcpy(&buf[1], ccc->dests[0].payload.data, ccc->dests[0].payload.len);
1119
1120 xfer->type = SVC_I3C_MCTRL_TYPE_I3C;
1121
1122 cmd = &xfer->cmds[0];
1123 cmd->addr = ccc->dests[0].addr;
1124 cmd->rnw = ccc->rnw;
1125 cmd->in = NULL;
1126 cmd->out = buf;
1127 cmd->len = xfer_len;
1128 cmd->read_len = 0;
1129 cmd->continued = false;
1130
1131 mutex_lock(&master->lock);
1132 svc_i3c_master_enqueue_xfer(master, xfer);
1133 if (!wait_for_completion_timeout(&xfer->comp, msecs_to_jiffies(1000)))
1134 svc_i3c_master_dequeue_xfer(master, xfer);
1135 mutex_unlock(&master->lock);
1136
1137 ret = xfer->ret;
1138 kfree(buf);
1139 svc_i3c_master_free_xfer(xfer);
1140
1141 return ret;
1142 }
1143
static int svc_i3c_master_send_direct_ccc_cmd(struct svc_i3c_master *master,
        struct i3c_ccc_cmd *ccc)
1146 {
1147 unsigned int xfer_len = ccc->dests[0].payload.len;
1148 unsigned int read_len = ccc->rnw ? xfer_len : 0;
1149 struct svc_i3c_xfer *xfer;
1150 struct svc_i3c_cmd *cmd;
1151 int ret;
1152
1153 xfer = svc_i3c_master_alloc_xfer(master, 2);
1154 if (!xfer)
1155 return -ENOMEM;
1156
1157 xfer->type = SVC_I3C_MCTRL_TYPE_I3C;
1158
/* Broadcast message */
1160 cmd = &xfer->cmds[0];
1161 cmd->addr = I3C_BROADCAST_ADDR;
1162 cmd->rnw = 0;
1163 cmd->in = NULL;
1164 cmd->out = &ccc->id;
1165 cmd->len = 1;
1166 cmd->read_len = 0;
1167 cmd->continued = true;
1168
1169 /* Directed message */
1170 cmd = &xfer->cmds[1];
1171 cmd->addr = ccc->dests[0].addr;
1172 cmd->rnw = ccc->rnw;
1173 cmd->in = ccc->rnw ? ccc->dests[0].payload.data : NULL;
cmd->out = ccc->rnw ? NULL : ccc->dests[0].payload.data;
1175 cmd->len = xfer_len;
1176 cmd->read_len = read_len;
1177 cmd->continued = false;
1178
1179 mutex_lock(&master->lock);
1180 svc_i3c_master_enqueue_xfer(master, xfer);
1181 if (!wait_for_completion_timeout(&xfer->comp, msecs_to_jiffies(1000)))
1182 svc_i3c_master_dequeue_xfer(master, xfer);
1183 mutex_unlock(&master->lock);
1184
1185 ret = xfer->ret;
1186 svc_i3c_master_free_xfer(xfer);
1187
1188 return ret;
1189 }
1190
static int svc_i3c_master_send_ccc_cmd(struct i3c_master_controller *m,
        struct i3c_ccc_cmd *cmd)
1193 {
1194 struct svc_i3c_master *master = to_svc_i3c_master(m);
1195 bool broadcast = cmd->id < 0x80;
1196 int ret;
1197
1198 if (broadcast)
1199 ret = svc_i3c_master_send_bdcast_ccc_cmd(master, cmd);
1200 else
1201 ret = svc_i3c_master_send_direct_ccc_cmd(master, cmd);
1202
1203 if (ret)
1204 cmd->err = I3C_ERROR_M2;
1205
1206 return ret;
1207 }
1208
static int svc_i3c_master_priv_xfers(struct i3c_dev_desc *dev,
        struct i3c_priv_xfer *xfers,
        int nxfers)
1212 {
1213 struct i3c_master_controller *m = i3c_dev_get_master(dev);
1214 struct svc_i3c_master *master = to_svc_i3c_master(m);
1215 struct svc_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
1216 struct svc_i3c_xfer *xfer;
1217 int ret, i;
1218
1219 xfer = svc_i3c_master_alloc_xfer(master, nxfers);
1220 if (!xfer)
1221 return -ENOMEM;
1222
1223 xfer->type = SVC_I3C_MCTRL_TYPE_I3C;
1224
1225 for (i = 0; i < nxfers; i++) {
1226 struct svc_i3c_cmd *cmd = &xfer->cmds[i];
1227
1228 cmd->addr = master->addrs[data->index];
1229 cmd->rnw = xfers[i].rnw;
1230 cmd->in = xfers[i].rnw ? xfers[i].data.in : NULL;
1231 cmd->out = xfers[i].rnw ? NULL : xfers[i].data.out;
1232 cmd->len = xfers[i].len;
1233 cmd->read_len = xfers[i].rnw ? xfers[i].len : 0;
1234 cmd->continued = (i + 1) < nxfers;
1235 }
1236
1237 mutex_lock(&master->lock);
1238 svc_i3c_master_enqueue_xfer(master, xfer);
1239 if (!wait_for_completion_timeout(&xfer->comp, msecs_to_jiffies(1000)))
1240 svc_i3c_master_dequeue_xfer(master, xfer);
1241 mutex_unlock(&master->lock);
1242
1243 ret = xfer->ret;
1244 svc_i3c_master_free_xfer(xfer);
1245
1246 return ret;
1247 }
1248
static int svc_i3c_master_i2c_xfers(struct i2c_dev_desc *dev,
        const struct i2c_msg *xfers,
        int nxfers)
1252 {
1253 struct i3c_master_controller *m = i2c_dev_get_master(dev);
1254 struct svc_i3c_master *master = to_svc_i3c_master(m);
1255 struct svc_i3c_i2c_dev_data *data = i2c_dev_get_master_data(dev);
1256 struct svc_i3c_xfer *xfer;
1257 int ret, i;
1258
1259 xfer = svc_i3c_master_alloc_xfer(master, nxfers);
1260 if (!xfer)
1261 return -ENOMEM;
1262
1263 xfer->type = SVC_I3C_MCTRL_TYPE_I2C;
1264
1265 for (i = 0; i < nxfers; i++) {
1266 struct svc_i3c_cmd *cmd = &xfer->cmds[i];
1267
1268 cmd->addr = master->addrs[data->index];
1269 cmd->rnw = xfers[i].flags & I2C_M_RD;
1270 cmd->in = cmd->rnw ? xfers[i].buf : NULL;
1271 cmd->out = cmd->rnw ? NULL : xfers[i].buf;
1272 cmd->len = xfers[i].len;
1273 cmd->read_len = cmd->rnw ? xfers[i].len : 0;
1274 cmd->continued = (i + 1 < nxfers);
1275 }
1276
1277 mutex_lock(&master->lock);
1278 svc_i3c_master_enqueue_xfer(master, xfer);
1279 if (!wait_for_completion_timeout(&xfer->comp, msecs_to_jiffies(1000)))
1280 svc_i3c_master_dequeue_xfer(master, xfer);
1281 mutex_unlock(&master->lock);
1282
1283 ret = xfer->ret;
1284 svc_i3c_master_free_xfer(xfer);
1285
1286 return ret;
1287 }
1288
static int svc_i3c_master_request_ibi(struct i3c_dev_desc *dev,
        const struct i3c_ibi_setup *req)
1291 {
1292 struct i3c_master_controller *m = i3c_dev_get_master(dev);
1293 struct svc_i3c_master *master = to_svc_i3c_master(m);
1294 struct svc_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
1295 unsigned long flags;
1296 unsigned int i;
1297
1298 if (dev->ibi->max_payload_len > SVC_I3C_FIFO_SIZE) {
dev_err(master->dev, "IBI max payload %d should be <= %d\n",
1300 dev->ibi->max_payload_len, SVC_I3C_FIFO_SIZE);
1301 return -ERANGE;
1302 }
1303
1304 data->ibi_pool = i3c_generic_ibi_alloc_pool(dev, req);
1305 if (IS_ERR(data->ibi_pool))
1306 return PTR_ERR(data->ibi_pool);
1307
1308 spin_lock_irqsave(&master->ibi.lock, flags);
1309 for (i = 0; i < master->ibi.num_slots; i++) {
1310 if (!master->ibi.slots[i]) {
1311 data->ibi = i;
1312 master->ibi.slots[i] = dev;
1313 break;
1314 }
1315 }
1316 spin_unlock_irqrestore(&master->ibi.lock, flags);
1317
1318 if (i < master->ibi.num_slots)
1319 return 0;
1320
1321 i3c_generic_ibi_free_pool(data->ibi_pool);
1322 data->ibi_pool = NULL;
1323
1324 return -ENOSPC;
1325 }
1326
static void svc_i3c_master_free_ibi(struct i3c_dev_desc *dev)
1328 {
1329 struct i3c_master_controller *m = i3c_dev_get_master(dev);
1330 struct svc_i3c_master *master = to_svc_i3c_master(m);
1331 struct svc_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
1332 unsigned long flags;
1333
1334 spin_lock_irqsave(&master->ibi.lock, flags);
1335 master->ibi.slots[data->ibi] = NULL;
1336 data->ibi = -1;
1337 spin_unlock_irqrestore(&master->ibi.lock, flags);
1338
1339 i3c_generic_ibi_free_pool(data->ibi_pool);
1340 }
1341
static int svc_i3c_master_enable_ibi(struct i3c_dev_desc *dev)
1343 {
1344 struct i3c_master_controller *m = i3c_dev_get_master(dev);
1345
1346 return i3c_master_enec_locked(m, dev->info.dyn_addr, I3C_CCC_EVENT_SIR);
1347 }
1348
static int svc_i3c_master_disable_ibi(struct i3c_dev_desc *dev)
1350 {
1351 struct i3c_master_controller *m = i3c_dev_get_master(dev);
1352
1353 return i3c_master_disec_locked(m, dev->info.dyn_addr, I3C_CCC_EVENT_SIR);
1354 }
1355
static void svc_i3c_master_recycle_ibi_slot(struct i3c_dev_desc *dev,
        struct i3c_ibi_slot *slot)
1358 {
1359 struct svc_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
1360
1361 i3c_generic_ibi_recycle_slot(data->ibi_pool, slot);
1362 }
1363
1364 static const struct i3c_master_controller_ops svc_i3c_master_ops = {
1365 .bus_init = svc_i3c_master_bus_init,
1366 .bus_cleanup = svc_i3c_master_bus_cleanup,
1367 .attach_i3c_dev = svc_i3c_master_attach_i3c_dev,
1368 .detach_i3c_dev = svc_i3c_master_detach_i3c_dev,
1369 .reattach_i3c_dev = svc_i3c_master_reattach_i3c_dev,
1370 .attach_i2c_dev = svc_i3c_master_attach_i2c_dev,
1371 .detach_i2c_dev = svc_i3c_master_detach_i2c_dev,
1372 .do_daa = svc_i3c_master_do_daa,
1373 .supports_ccc_cmd = svc_i3c_master_supports_ccc_cmd,
1374 .send_ccc_cmd = svc_i3c_master_send_ccc_cmd,
1375 .priv_xfers = svc_i3c_master_priv_xfers,
1376 .i2c_xfers = svc_i3c_master_i2c_xfers,
1377 .request_ibi = svc_i3c_master_request_ibi,
1378 .free_ibi = svc_i3c_master_free_ibi,
1379 .recycle_ibi_slot = svc_i3c_master_recycle_ibi_slot,
1380 .enable_ibi = svc_i3c_master_enable_ibi,
1381 .disable_ibi = svc_i3c_master_disable_ibi,
1382 };
1383
static void svc_i3c_master_reset(struct svc_i3c_master *master)
1385 {
1386 u32 reg;
1387
1388 /* Clear pending warnings */
1389 writel(readl(master->regs + SVC_I3C_MERRWARN),
1390 master->regs + SVC_I3C_MERRWARN);
1391
/* Set RX and TX trigger levels, flush FIFOs */
1393 reg = SVC_I3C_MDATACTRL_FLUSHTB |
1394 SVC_I3C_MDATACTRL_FLUSHRB |
1395 SVC_I3C_MDATACTRL_UNLOCK_TRIG |
1396 SVC_I3C_MDATACTRL_TXTRIG_FIFO_NOT_FULL |
1397 SVC_I3C_MDATACTRL_RXTRIG_FIFO_NOT_EMPTY;
1398 writel(reg, master->regs + SVC_I3C_MDATACTRL);
1399
1400 svc_i3c_master_disable_interrupts(master);
1401 }
1402
static int svc_i3c_master_probe(struct platform_device *pdev)
1404 {
1405 struct device *dev = &pdev->dev;
1406 struct svc_i3c_master *master;
1407 int ret;
1408
1409 master = devm_kzalloc(dev, sizeof(*master), GFP_KERNEL);
1410 if (!master)
1411 return -ENOMEM;
1412
1413 master->regs = devm_platform_ioremap_resource(pdev, 0);
1414 if (IS_ERR(master->regs))
1415 return PTR_ERR(master->regs);
1416
1417 master->pclk = devm_clk_get(dev, "pclk");
1418 if (IS_ERR(master->pclk))
1419 return PTR_ERR(master->pclk);
1420
1421 master->fclk = devm_clk_get(dev, "fast_clk");
1422 if (IS_ERR(master->fclk))
1423 return PTR_ERR(master->fclk);
1424
1425 master->sclk = devm_clk_get(dev, "slow_clk");
1426 if (IS_ERR(master->sclk))
1427 return PTR_ERR(master->sclk);
1428
1429 master->irq = platform_get_irq(pdev, 0);
1430 if (master->irq <= 0)
1431 return -ENOENT;
1432
1433 master->dev = dev;
1434
1435 svc_i3c_master_reset(master);
1436
1437 ret = clk_prepare_enable(master->pclk);
1438 if (ret)
1439 return ret;
1440
1441 ret = clk_prepare_enable(master->fclk);
1442 if (ret)
1443 goto err_disable_pclk;
1444
1445 ret = clk_prepare_enable(master->sclk);
1446 if (ret)
1447 goto err_disable_fclk;
1448
1449 INIT_WORK(&master->hj_work, svc_i3c_master_hj_work);
1450 INIT_WORK(&master->ibi_work, svc_i3c_master_ibi_work);
1451 mutex_init(&master->lock);
1452
1453 ret = devm_request_irq(dev, master->irq, svc_i3c_master_irq_handler,
1454 IRQF_NO_SUSPEND, "svc-i3c-irq", master);
1455 if (ret)
1456 goto err_disable_sclk;
1457
1458 master->free_slots = GENMASK(SVC_I3C_MAX_DEVS - 1, 0);
1459
1460 spin_lock_init(&master->xferqueue.lock);
1461 INIT_LIST_HEAD(&master->xferqueue.list);
1462
1463 spin_lock_init(&master->ibi.lock);
1464 master->ibi.num_slots = SVC_I3C_MAX_DEVS;
1465 master->ibi.slots = devm_kcalloc(&pdev->dev, master->ibi.num_slots,
1466 sizeof(*master->ibi.slots),
1467 GFP_KERNEL);
1468 if (!master->ibi.slots) {
1469 ret = -ENOMEM;
1470 goto err_disable_sclk;
1471 }
1472
1473 platform_set_drvdata(pdev, master);
1474
1475 /* Register the master */
1476 ret = i3c_master_register(&master->base, &pdev->dev,
1477 &svc_i3c_master_ops, false);
1478 if (ret)
1479 goto err_disable_sclk;
1480
1481 return 0;
1482
1483 err_disable_sclk:
1484 clk_disable_unprepare(master->sclk);
1485
1486 err_disable_fclk:
1487 clk_disable_unprepare(master->fclk);
1488
1489 err_disable_pclk:
1490 clk_disable_unprepare(master->pclk);
1491
1492 return ret;
1493 }
1494
static int svc_i3c_master_remove(struct platform_device *pdev)
1496 {
1497 struct svc_i3c_master *master = platform_get_drvdata(pdev);
1498 int ret;
1499
1500 ret = i3c_master_unregister(&master->base);
1501 if (ret)
1502 return ret;
1503
1504 clk_disable_unprepare(master->pclk);
1505 clk_disable_unprepare(master->fclk);
1506 clk_disable_unprepare(master->sclk);
1507
1508 return 0;
1509 }
1510
1511 static const struct of_device_id svc_i3c_master_of_match_tbl[] = {
1512 { .compatible = "silvaco,i3c-master" },
1513 { /* sentinel */ },
1514 };
1515
1516 static struct platform_driver svc_i3c_master = {
1517 .probe = svc_i3c_master_probe,
1518 .remove = svc_i3c_master_remove,
1519 .driver = {
1520 .name = "silvaco-i3c-master",
1521 .of_match_table = svc_i3c_master_of_match_tbl,
1522 },
1523 };
1524 module_platform_driver(svc_i3c_master);
1525
1526 MODULE_AUTHOR("Conor Culhane <conor.culhane@silvaco.com>");
1527 MODULE_AUTHOR("Miquel Raynal <miquel.raynal@bootlin.com>");
1528 MODULE_DESCRIPTION("Silvaco dual-role I3C master driver");
1529 MODULE_LICENSE("GPL v2");
1530