// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2012-2015, 2017, 2021, The Linux Foundation. All rights reserved.
 */
#include <linux/bitmap.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spmi.h>

/* PMIC Arbiter configuration registers */
#define PMIC_ARB_VERSION 0x0000
#define PMIC_ARB_VERSION_V2_MIN 0x20010000
#define PMIC_ARB_VERSION_V3_MIN 0x30000000
#define PMIC_ARB_VERSION_V5_MIN 0x50000000
#define PMIC_ARB_INT_EN 0x0004

/* PMIC Arbiter channel registers offsets */
#define PMIC_ARB_CMD 0x00
#define PMIC_ARB_CONFIG 0x04
#define PMIC_ARB_STATUS 0x08
#define PMIC_ARB_WDATA0 0x10
#define PMIC_ARB_WDATA1 0x14
#define PMIC_ARB_RDATA0 0x18
#define PMIC_ARB_RDATA1 0x1C

/* Mapping Table */
#define SPMI_MAPPING_TABLE_REG(N) (0x0B00 + (4 * (N)))
#define SPMI_MAPPING_BIT_INDEX(X) (((X) >> 18) & 0xF)
#define SPMI_MAPPING_BIT_IS_0_FLAG(X) (((X) >> 17) & 0x1)
#define SPMI_MAPPING_BIT_IS_0_RESULT(X) (((X) >> 9) & 0xFF)
#define SPMI_MAPPING_BIT_IS_1_FLAG(X) (((X) >> 8) & 0x1)
#define SPMI_MAPPING_BIT_IS_1_RESULT(X) (((X) >> 0) & 0xFF)

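/*
 * A PPID is the 12-bit peripheral address formed by the 4-bit slave ID (SID)
 * and the 8-bit peripheral ID, so at most 4096 PPIDs exist on the bus. An
 * APID is the arbiter's internal channel index assigned to a PPID.
 */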
#define SPMI_MAPPING_TABLE_TREE_DEPTH 16 /* Maximum of 16-bits */
#define PMIC_ARB_MAX_PPID BIT(12) /* PPID is 12bit */
#define PMIC_ARB_APID_VALID BIT(15)
#define PMIC_ARB_CHAN_IS_IRQ_OWNER(reg) ((reg) & BIT(24))
#define INVALID_EE 0xFF

/* Ownership Table */
#define SPMI_OWNERSHIP_TABLE_REG(N) (0x0700 + (4 * (N)))
#define SPMI_OWNERSHIP_PERIPH2OWNER(X) ((X) & 0x7)

/* Channel Status fields */
enum pmic_arb_chnl_status {
	PMIC_ARB_STATUS_DONE = BIT(0),
	PMIC_ARB_STATUS_FAILURE = BIT(1),
	PMIC_ARB_STATUS_DENIED = BIT(2),
	PMIC_ARB_STATUS_DROPPED = BIT(3),
};

/* Command register fields */
#define PMIC_ARB_CMD_MAX_BYTE_COUNT 8

/* Command Opcodes */
enum pmic_arb_cmd_op_code {
	PMIC_ARB_OP_EXT_WRITEL = 0,
	PMIC_ARB_OP_EXT_READL = 1,
	PMIC_ARB_OP_EXT_WRITE = 2,
	PMIC_ARB_OP_RESET = 3,
	PMIC_ARB_OP_SLEEP = 4,
	PMIC_ARB_OP_SHUTDOWN = 5,
	PMIC_ARB_OP_WAKEUP = 6,
	PMIC_ARB_OP_AUTHENTICATE = 7,
	PMIC_ARB_OP_MSTR_READ = 8,
	PMIC_ARB_OP_MSTR_WRITE = 9,
	PMIC_ARB_OP_EXT_READ = 13,
	PMIC_ARB_OP_WRITE = 14,
	PMIC_ARB_OP_READ = 15,
	PMIC_ARB_OP_ZERO_WRITE = 16,
};

/*
 * PMIC arbiter version 5 uses different register offsets for read/write vs
 * observer channels.
 */
enum pmic_arb_channel {
	PMIC_ARB_CHANNEL_RW,
	PMIC_ARB_CHANNEL_OBS,
};

/* Maximum number of supported PMIC peripherals */
#define PMIC_ARB_MAX_PERIPHS 512
#define PMIC_ARB_TIMEOUT_US 100
#define PMIC_ARB_MAX_TRANS_BYTES (8)

#define PMIC_ARB_APID_MASK 0xFF
#define PMIC_ARB_PPID_MASK 0xFFF

/* interrupt enable bit */
#define SPMI_PIC_ACC_ENABLE_BIT BIT(0)

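/*
 * A Linux hwirq packs the SPMI interrupt coordinates into one 32-bit value:
 * bits [31:28] slave ID, [27:20] peripheral ID, [18:16] IRQ number within the
 * peripheral, [8:0] APID. For example, sid 0x2, periph 0x08, irq 3 on apid
 * 0x15 encodes as 0x20830015.
 */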
#define spec_to_hwirq(slave_id, periph_id, irq_id, apid) \
	((((slave_id) & 0xF) << 28) | \
	(((periph_id) & 0xFF) << 20) | \
	(((irq_id) & 0x7) << 16) | \
	(((apid) & 0x1FF) << 0))

#define hwirq_to_sid(hwirq) (((hwirq) >> 28) & 0xF)
#define hwirq_to_per(hwirq) (((hwirq) >> 20) & 0xFF)
#define hwirq_to_irq(hwirq) (((hwirq) >> 16) & 0x7)
#define hwirq_to_apid(hwirq) (((hwirq) >> 0) & 0x1FF)

struct pmic_arb_ver_ops;

struct apid_data {
	u16 ppid;
	u8 write_ee;
	u8 irq_ee;
};

/**
 * struct spmi_pmic_arb - SPMI PMIC Arbiter object
 *
 * @rd_base: on v1 "core", on v2 "observer" register base off DT.
 * @wr_base: on v1 "core", on v2 "chnls" register base off DT.
 * @intr: address of the SPMI interrupt control registers.
 * @cnfg: address of the PMIC Arbiter configuration registers.
 * @core: "core" register base; used on v2 and above to read the APID map.
 * @core_size: size of the "core" register region.
 * @lock: lock to synchronize accesses.
 * @channel: execution environment channel to use for accesses.
 * @irq: PMIC ARB interrupt.
 * @ee: the current Execution Environment
 * @min_apid: minimum APID (used for bounding IRQ search)
 * @max_apid: maximum APID
 * @mapping_table: in-memory copy of PPID -> APID mapping table.
 * @mapping_table_valid: bitmap of @mapping_table entries already read from HW.
 * @domain: irq domain object for PMIC IRQ domain
 * @spmic: SPMI controller object
 * @ver_ops: version dependent operations.
 * @ppid_to_apid: in-memory copy of PPID -> APID mapping table.
 * @last_apid: last APID visited while scanning the APID map.
 * @apid_data: per-APID owning PPID, write owner EE and IRQ owner EE.
 */
struct spmi_pmic_arb {
	void __iomem *rd_base;
	void __iomem *wr_base;
	void __iomem *intr;
	void __iomem *cnfg;
	void __iomem *core;
	resource_size_t core_size;
	raw_spinlock_t lock;
	u8 channel;
	int irq;
	u8 ee;
	u16 min_apid;
	u16 max_apid;
	u32 *mapping_table;
	DECLARE_BITMAP(mapping_table_valid, PMIC_ARB_MAX_PERIPHS);
	struct irq_domain *domain;
	struct spmi_controller *spmic;
	const struct pmic_arb_ver_ops *ver_ops;
	u16 *ppid_to_apid;
	u16 last_apid;
	struct apid_data apid_data[PMIC_ARB_MAX_PERIPHS];
};

/**
 * struct pmic_arb_ver_ops - version dependent functionality.
 *
 * @ver_str: version string.
 * @ppid_to_apid: finds the apid for a given ppid.
 * @non_data_cmd: on v1 issues an spmi non-data command.
 *		on v2 no HW support, returns -EOPNOTSUPP.
 * @offset: on v1 offset of per-ee channel.
 *		on v2 offset of per-ee and per-ppid channel.
 * @fmt_cmd: formats a GENI/SPMI command.
 * @owner_acc_status: on v1 address of PMIC_ARB_SPMI_PIC_OWNERm_ACC_STATUSn
 *		on v2 address of SPMI_PIC_OWNERm_ACC_STATUSn.
 * @acc_enable: on v1 address of PMIC_ARB_SPMI_PIC_ACC_ENABLEn
 *		on v2 address of SPMI_PIC_ACC_ENABLEn.
 * @irq_status: on v1 address of PMIC_ARB_SPMI_PIC_IRQ_STATUSn
 *		on v2 address of SPMI_PIC_IRQ_STATUSn.
 * @irq_clear: on v1 address of PMIC_ARB_SPMI_PIC_IRQ_CLEARn
 *		on v2 address of SPMI_PIC_IRQ_CLEARn.
 * @apid_map_offset: offset of PMIC_ARB_REG_CHNLn
 */
struct pmic_arb_ver_ops {
	const char *ver_str;
	int (*ppid_to_apid)(struct spmi_pmic_arb *pmic_arb, u16 ppid);
	/* spmi commands (read_cmd, write_cmd, cmd) functionality */
	int (*offset)(struct spmi_pmic_arb *pmic_arb, u8 sid, u16 addr,
		      enum pmic_arb_channel ch_type);
	u32 (*fmt_cmd)(u8 opc, u8 sid, u16 addr, u8 bc);
	int (*non_data_cmd)(struct spmi_controller *ctrl, u8 opc, u8 sid);
	/* Interrupts controller functionality (offset of PIC registers) */
	void __iomem *(*owner_acc_status)(struct spmi_pmic_arb *pmic_arb, u8 m,
					  u16 n);
	void __iomem *(*acc_enable)(struct spmi_pmic_arb *pmic_arb, u16 n);
	void __iomem *(*irq_status)(struct spmi_pmic_arb *pmic_arb, u16 n);
	void __iomem *(*irq_clear)(struct spmi_pmic_arb *pmic_arb, u16 n);
	u32 (*apid_map_offset)(u16 n);
};

static inline void pmic_arb_base_write(struct spmi_pmic_arb *pmic_arb,
				       u32 offset, u32 val)
{
	writel_relaxed(val, pmic_arb->wr_base + offset);
}

static inline void pmic_arb_set_rd_cmd(struct spmi_pmic_arb *pmic_arb,
				       u32 offset, u32 val)
{
	writel_relaxed(val, pmic_arb->rd_base + offset);
}

/**
 * pmic_arb_read_data: reads pmic-arb's register and copies 1..4 bytes to buf
 * @bc: byte count -1. range: 0..3
 * @reg: register's address
 * @buf: output parameter, length must be bc + 1
 */
static void
pmic_arb_read_data(struct spmi_pmic_arb *pmic_arb, u8 *buf, u32 reg, u8 bc)
{
	u32 data = __raw_readl(pmic_arb->rd_base + reg);

	memcpy(buf, &data, (bc & 3) + 1);
}

/**
 * pmic_arb_write_data: writes 1..4 bytes from buf to pmic-arb's register
 * @bc: byte-count -1. range: 0..3.
 * @reg: register's address.
 * @buf: buffer to write. length must be bc + 1.
 */
static void pmic_arb_write_data(struct spmi_pmic_arb *pmic_arb, const u8 *buf,
				u32 reg, u8 bc)
{
	u32 data = 0;

	memcpy(&data, buf, (bc & 3) + 1);
	__raw_writel(data, pmic_arb->wr_base + reg);
}

static int pmic_arb_wait_for_done(struct spmi_controller *ctrl,
				  void __iomem *base, u8 sid, u16 addr,
				  enum pmic_arb_channel ch_type)
{
	struct spmi_pmic_arb *pmic_arb = spmi_controller_get_drvdata(ctrl);
	u32 status = 0;
	u32 timeout = PMIC_ARB_TIMEOUT_US;
	u32 offset;
	int rc;

	rc = pmic_arb->ver_ops->offset(pmic_arb, sid, addr, ch_type);
	if (rc < 0)
		return rc;

	offset = rc;
	offset += PMIC_ARB_STATUS;

	while (timeout--) {
		status = readl_relaxed(base + offset);

		if (status & PMIC_ARB_STATUS_DONE) {
			if (status & PMIC_ARB_STATUS_DENIED) {
				dev_err(&ctrl->dev, "%s: transaction denied (0x%x)\n",
					__func__, status);
				return -EPERM;
			}

			if (status & PMIC_ARB_STATUS_FAILURE) {
				dev_err(&ctrl->dev, "%s: transaction failed (0x%x)\n",
					__func__, status);
				return -EIO;
			}

			if (status & PMIC_ARB_STATUS_DROPPED) {
				dev_err(&ctrl->dev, "%s: transaction dropped (0x%x)\n",
					__func__, status);
				return -EIO;
			}

			return 0;
		}
		udelay(1);
	}

	dev_err(&ctrl->dev, "%s: timeout, status 0x%x\n",
		__func__, status);
	return -ETIMEDOUT;
}

static int
pmic_arb_non_data_cmd_v1(struct spmi_controller *ctrl, u8 opc, u8 sid)
{
	struct spmi_pmic_arb *pmic_arb = spmi_controller_get_drvdata(ctrl);
	unsigned long flags;
	u32 cmd;
	int rc;
	u32 offset;

	rc = pmic_arb->ver_ops->offset(pmic_arb, sid, 0, PMIC_ARB_CHANNEL_RW);
	if (rc < 0)
		return rc;

	offset = rc;
	cmd = ((opc | 0x40) << 27) | ((sid & 0xf) << 20);

	raw_spin_lock_irqsave(&pmic_arb->lock, flags);
	pmic_arb_base_write(pmic_arb, offset + PMIC_ARB_CMD, cmd);
	rc = pmic_arb_wait_for_done(ctrl, pmic_arb->wr_base, sid, 0,
				    PMIC_ARB_CHANNEL_RW);
	raw_spin_unlock_irqrestore(&pmic_arb->lock, flags);

	return rc;
}

static int
pmic_arb_non_data_cmd_v2(struct spmi_controller *ctrl, u8 opc, u8 sid)
{
	return -EOPNOTSUPP;
}

/* Non-data command */
static int pmic_arb_cmd(struct spmi_controller *ctrl, u8 opc, u8 sid)
{
	struct spmi_pmic_arb *pmic_arb = spmi_controller_get_drvdata(ctrl);

	dev_dbg(&ctrl->dev, "cmd op:0x%x sid:%d\n", opc, sid);

	/* Check for valid non-data command */
	if (opc < SPMI_CMD_RESET || opc > SPMI_CMD_WAKEUP)
		return -EINVAL;

	return pmic_arb->ver_ops->non_data_cmd(ctrl, opc, sid);
}

static int pmic_arb_read_cmd(struct spmi_controller *ctrl, u8 opc, u8 sid,
			     u16 addr, u8 *buf, size_t len)
{
	struct spmi_pmic_arb *pmic_arb = spmi_controller_get_drvdata(ctrl);
	unsigned long flags;
	u8 bc = len - 1;
	u32 cmd;
	int rc;
	u32 offset;

	rc = pmic_arb->ver_ops->offset(pmic_arb, sid, addr,
				       PMIC_ARB_CHANNEL_OBS);
	if (rc < 0)
		return rc;

	offset = rc;
	if (bc >= PMIC_ARB_MAX_TRANS_BYTES) {
		dev_err(&ctrl->dev, "pmic-arb supports 1..%d bytes per trans, but:%zu requested",
			PMIC_ARB_MAX_TRANS_BYTES, len);
		return -EINVAL;
	}

	/* Check the opcode */
	if (opc >= 0x60 && opc <= 0x7F)
		opc = PMIC_ARB_OP_READ;
	else if (opc >= 0x20 && opc <= 0x2F)
		opc = PMIC_ARB_OP_EXT_READ;
	else if (opc >= 0x38 && opc <= 0x3F)
		opc = PMIC_ARB_OP_EXT_READL;
	else
		return -EINVAL;

	cmd = pmic_arb->ver_ops->fmt_cmd(opc, sid, addr, bc);

	raw_spin_lock_irqsave(&pmic_arb->lock, flags);
	pmic_arb_set_rd_cmd(pmic_arb, offset + PMIC_ARB_CMD, cmd);
	rc = pmic_arb_wait_for_done(ctrl, pmic_arb->rd_base, sid, addr,
				    PMIC_ARB_CHANNEL_OBS);
	if (rc)
		goto done;

	pmic_arb_read_data(pmic_arb, buf, offset + PMIC_ARB_RDATA0,
			   min_t(u8, bc, 3));

	if (bc > 3)
		pmic_arb_read_data(pmic_arb, buf + 4, offset + PMIC_ARB_RDATA1,
				   bc - 4);

done:
	raw_spin_unlock_irqrestore(&pmic_arb->lock, flags);
	return rc;
}

static int pmic_arb_write_cmd(struct spmi_controller *ctrl, u8 opc, u8 sid,
			      u16 addr, const u8 *buf, size_t len)
{
	struct spmi_pmic_arb *pmic_arb = spmi_controller_get_drvdata(ctrl);
	unsigned long flags;
	u8 bc = len - 1;
	u32 cmd;
	int rc;
	u32 offset;

	rc = pmic_arb->ver_ops->offset(pmic_arb, sid, addr,
				       PMIC_ARB_CHANNEL_RW);
	if (rc < 0)
		return rc;

	offset = rc;
	if (bc >= PMIC_ARB_MAX_TRANS_BYTES) {
		dev_err(&ctrl->dev, "pmic-arb supports 1..%d bytes per trans, but:%zu requested",
			PMIC_ARB_MAX_TRANS_BYTES, len);
		return -EINVAL;
	}

	/* Check the opcode */
	if (opc >= 0x40 && opc <= 0x5F)
		opc = PMIC_ARB_OP_WRITE;
	else if (opc <= 0x0F)
		opc = PMIC_ARB_OP_EXT_WRITE;
	else if (opc >= 0x30 && opc <= 0x37)
		opc = PMIC_ARB_OP_EXT_WRITEL;
	else if (opc >= 0x80)
		opc = PMIC_ARB_OP_ZERO_WRITE;
	else
		return -EINVAL;

	cmd = pmic_arb->ver_ops->fmt_cmd(opc, sid, addr, bc);

	/* Write data to FIFOs */
	raw_spin_lock_irqsave(&pmic_arb->lock, flags);
	pmic_arb_write_data(pmic_arb, buf, offset + PMIC_ARB_WDATA0,
			    min_t(u8, bc, 3));
	if (bc > 3)
		pmic_arb_write_data(pmic_arb, buf + 4, offset + PMIC_ARB_WDATA1,
				    bc - 4);

	/* Start the transaction */
	pmic_arb_base_write(pmic_arb, offset + PMIC_ARB_CMD, cmd);
	rc = pmic_arb_wait_for_done(ctrl, pmic_arb->wr_base, sid, addr,
				    PMIC_ARB_CHANNEL_RW);
	raw_spin_unlock_irqrestore(&pmic_arb->lock, flags);

	return rc;
}

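/*
 * Per-peripheral QPNP interrupt control registers. Each SPMI peripheral
 * occupies a 256-byte register window, so these offsets are added to
 * (peripheral ID << 8) when accessed through the arbiter.
 */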
enum qpnpint_regs {
	QPNPINT_REG_RT_STS = 0x10,
	QPNPINT_REG_SET_TYPE = 0x11,
	QPNPINT_REG_POLARITY_HIGH = 0x12,
	QPNPINT_REG_POLARITY_LOW = 0x13,
	QPNPINT_REG_LATCHED_CLR = 0x14,
	QPNPINT_REG_EN_SET = 0x15,
	QPNPINT_REG_EN_CLR = 0x16,
	QPNPINT_REG_LATCHED_STS = 0x18,
};

struct spmi_pmic_arb_qpnpint_type {
	u8 type; /* 1 -> edge */
	u8 polarity_high;
	u8 polarity_low;
} __packed;

/* Simplified accessor functions for irqchip callbacks */
static void qpnpint_spmi_write(struct irq_data *d, u8 reg, void *buf,
			       size_t len)
{
	struct spmi_pmic_arb *pmic_arb = irq_data_get_irq_chip_data(d);
	u8 sid = hwirq_to_sid(d->hwirq);
	u8 per = hwirq_to_per(d->hwirq);

	if (pmic_arb_write_cmd(pmic_arb->spmic, SPMI_CMD_EXT_WRITEL, sid,
			       (per << 8) + reg, buf, len))
		dev_err_ratelimited(&pmic_arb->spmic->dev, "failed irqchip transaction on %x\n",
				    d->irq);
}

static void qpnpint_spmi_read(struct irq_data *d, u8 reg, void *buf, size_t len)
{
	struct spmi_pmic_arb *pmic_arb = irq_data_get_irq_chip_data(d);
	u8 sid = hwirq_to_sid(d->hwirq);
	u8 per = hwirq_to_per(d->hwirq);

	if (pmic_arb_read_cmd(pmic_arb->spmic, SPMI_CMD_EXT_READL, sid,
			      (per << 8) + reg, buf, len))
		dev_err_ratelimited(&pmic_arb->spmic->dev, "failed irqchip transaction on %x\n",
				    d->irq);
}

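/*
 * An interrupt fired for which no Linux IRQ mapping exists: silence it by
 * clearing and disabling it at both the arbiter and the peripheral.
 */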
static void cleanup_irq(struct spmi_pmic_arb *pmic_arb, u16 apid, int id)
{
	u16 ppid = pmic_arb->apid_data[apid].ppid;
	u8 sid = ppid >> 8;
	u8 per = ppid & 0xFF;
	u8 irq_mask = BIT(id);

	writel_relaxed(irq_mask, pmic_arb->ver_ops->irq_clear(pmic_arb, apid));

	if (pmic_arb_write_cmd(pmic_arb->spmic, SPMI_CMD_EXT_WRITEL, sid,
			       (per << 8) + QPNPINT_REG_LATCHED_CLR, &irq_mask, 1))
		dev_err_ratelimited(&pmic_arb->spmic->dev, "failed to ack irq_mask = 0x%x for ppid = %x\n",
				    irq_mask, ppid);

	if (pmic_arb_write_cmd(pmic_arb->spmic, SPMI_CMD_EXT_WRITEL, sid,
			       (per << 8) + QPNPINT_REG_EN_CLR, &irq_mask, 1))
		dev_err_ratelimited(&pmic_arb->spmic->dev, "failed to ack irq_mask = 0x%x for ppid = %x\n",
				    irq_mask, ppid);
}

static void periph_interrupt(struct spmi_pmic_arb *pmic_arb, u16 apid)
{
	unsigned int irq;
	u32 status, id;
	u8 sid = (pmic_arb->apid_data[apid].ppid >> 8) & 0xF;
	u8 per = pmic_arb->apid_data[apid].ppid & 0xFF;

	status = readl_relaxed(pmic_arb->ver_ops->irq_status(pmic_arb, apid));
	while (status) {
		id = ffs(status) - 1;
		status &= ~BIT(id);
		irq = irq_find_mapping(pmic_arb->domain,
				       spec_to_hwirq(sid, per, id, apid));
		if (irq == 0) {
			cleanup_irq(pmic_arb, apid, id);
			continue;
		}
		generic_handle_irq(irq);
	}
}

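/*
 * Each OWNERm_ACC_STATUSn word reports pending interrupts for a block of 32
 * APIDs, so the scan is bounded to the words covering min_apid..max_apid as
 * recorded during IRQ domain translation.
 */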
static void pmic_arb_chained_irq(struct irq_desc *desc)
{
	struct spmi_pmic_arb *pmic_arb = irq_desc_get_handler_data(desc);
	const struct pmic_arb_ver_ops *ver_ops = pmic_arb->ver_ops;
	struct irq_chip *chip = irq_desc_get_chip(desc);
	int first = pmic_arb->min_apid >> 5;
	int last = pmic_arb->max_apid >> 5;
	u8 ee = pmic_arb->ee;
	u32 status, enable;
	int i, id, apid;

	chained_irq_enter(chip, desc);

	for (i = first; i <= last; ++i) {
		status = readl_relaxed(
			ver_ops->owner_acc_status(pmic_arb, ee, i));
		while (status) {
			id = ffs(status) - 1;
			status &= ~BIT(id);
			apid = id + i * 32;
			enable = readl_relaxed(
				ver_ops->acc_enable(pmic_arb, apid));
			if (enable & SPMI_PIC_ACC_ENABLE_BIT)
				periph_interrupt(pmic_arb, apid);
		}
	}

	chained_irq_exit(chip, desc);
}

static void qpnpint_irq_ack(struct irq_data *d)
{
	struct spmi_pmic_arb *pmic_arb = irq_data_get_irq_chip_data(d);
	u8 irq = hwirq_to_irq(d->hwirq);
	u16 apid = hwirq_to_apid(d->hwirq);
	u8 data;

	writel_relaxed(BIT(irq), pmic_arb->ver_ops->irq_clear(pmic_arb, apid));

	data = BIT(irq);
	qpnpint_spmi_write(d, QPNPINT_REG_LATCHED_CLR, &data, 1);
}

static void qpnpint_irq_mask(struct irq_data *d)
{
	u8 irq = hwirq_to_irq(d->hwirq);
	u8 data = BIT(irq);

	qpnpint_spmi_write(d, QPNPINT_REG_EN_CLR, &data, 1);
}

static void qpnpint_irq_unmask(struct irq_data *d)
{
	struct spmi_pmic_arb *pmic_arb = irq_data_get_irq_chip_data(d);
	const struct pmic_arb_ver_ops *ver_ops = pmic_arb->ver_ops;
	u8 irq = hwirq_to_irq(d->hwirq);
	u16 apid = hwirq_to_apid(d->hwirq);
	u8 buf[2];

	writel_relaxed(SPMI_PIC_ACC_ENABLE_BIT,
		       ver_ops->acc_enable(pmic_arb, apid));

	qpnpint_spmi_read(d, QPNPINT_REG_EN_SET, &buf[0], 1);
	if (!(buf[0] & BIT(irq))) {
		/*
		 * Since the interrupt is currently disabled, write to both the
		 * LATCHED_CLR and EN_SET registers so that a spurious interrupt
		 * cannot be triggered when the interrupt is enabled
		 */
		buf[0] = BIT(irq);
		buf[1] = BIT(irq);
		qpnpint_spmi_write(d, QPNPINT_REG_LATCHED_CLR, &buf, 2);
	}
}

static int qpnpint_irq_set_type(struct irq_data *d, unsigned int flow_type)
{
	struct spmi_pmic_arb_qpnpint_type type;
	irq_flow_handler_t flow_handler;
	u8 irq = hwirq_to_irq(d->hwirq);

	qpnpint_spmi_read(d, QPNPINT_REG_SET_TYPE, &type, sizeof(type));

	if (flow_type & (IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING)) {
		type.type |= BIT(irq);
		if (flow_type & IRQF_TRIGGER_RISING)
			type.polarity_high |= BIT(irq);
		if (flow_type & IRQF_TRIGGER_FALLING)
			type.polarity_low |= BIT(irq);

		flow_handler = handle_edge_irq;
	} else {
		if ((flow_type & (IRQF_TRIGGER_HIGH)) &&
		    (flow_type & (IRQF_TRIGGER_LOW)))
			return -EINVAL;

		type.type &= ~BIT(irq); /* level trig */
		if (flow_type & IRQF_TRIGGER_HIGH)
			type.polarity_high |= BIT(irq);
		else
			type.polarity_low |= BIT(irq);

		flow_handler = handle_level_irq;
	}

	qpnpint_spmi_write(d, QPNPINT_REG_SET_TYPE, &type, sizeof(type));
	irq_set_handler_locked(d, flow_handler);

	return 0;
}

static int qpnpint_irq_set_wake(struct irq_data *d, unsigned int on)
{
	struct spmi_pmic_arb *pmic_arb = irq_data_get_irq_chip_data(d);

	return irq_set_irq_wake(pmic_arb->irq, on);
}

static int qpnpint_get_irqchip_state(struct irq_data *d,
				     enum irqchip_irq_state which,
				     bool *state)
{
	u8 irq = hwirq_to_irq(d->hwirq);
	u8 status = 0;

	if (which != IRQCHIP_STATE_LINE_LEVEL)
		return -EINVAL;

	qpnpint_spmi_read(d, QPNPINT_REG_RT_STS, &status, 1);
	*state = !!(status & BIT(irq));

	return 0;
}

static int qpnpint_irq_domain_activate(struct irq_domain *domain,
				       struct irq_data *d, bool reserve)
{
	struct spmi_pmic_arb *pmic_arb = irq_data_get_irq_chip_data(d);
	u16 periph = hwirq_to_per(d->hwirq);
	u16 apid = hwirq_to_apid(d->hwirq);
	u16 sid = hwirq_to_sid(d->hwirq);
	u16 irq = hwirq_to_irq(d->hwirq);

	if (pmic_arb->apid_data[apid].irq_ee != pmic_arb->ee) {
		dev_err(&pmic_arb->spmic->dev, "failed to xlate sid = %#x, periph = %#x, irq = %u: ee=%u but owner=%u\n",
			sid, periph, irq, pmic_arb->ee,
			pmic_arb->apid_data[apid].irq_ee);
		return -ENODEV;
	}

	return 0;
}

static struct irq_chip pmic_arb_irqchip = {
	.name = "pmic_arb",
	.irq_ack = qpnpint_irq_ack,
	.irq_mask = qpnpint_irq_mask,
	.irq_unmask = qpnpint_irq_unmask,
	.irq_set_type = qpnpint_irq_set_type,
	.irq_set_wake = qpnpint_irq_set_wake,
	.irq_get_irqchip_state = qpnpint_get_irqchip_state,
	.flags = IRQCHIP_MASK_ON_SUSPEND,
};

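/*
 * The devicetree interrupt specifier consists of four cells:
 * <slave-ID peripheral-ID IRQ-number IRQ-flags>, which translate() validates
 * and packs into a hwirq after resolving the peripheral's APID.
 */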
static int qpnpint_irq_domain_translate(struct irq_domain *d,
					struct irq_fwspec *fwspec,
					unsigned long *out_hwirq,
					unsigned int *out_type)
{
	struct spmi_pmic_arb *pmic_arb = d->host_data;
	u32 *intspec = fwspec->param;
	u16 apid, ppid;
	int rc;

	dev_dbg(&pmic_arb->spmic->dev, "intspec[0] 0x%1x intspec[1] 0x%02x intspec[2] 0x%02x\n",
		intspec[0], intspec[1], intspec[2]);

	if (irq_domain_get_of_node(d) != pmic_arb->spmic->dev.of_node)
		return -EINVAL;
	if (fwspec->param_count != 4)
		return -EINVAL;
	if (intspec[0] > 0xF || intspec[1] > 0xFF || intspec[2] > 0x7)
		return -EINVAL;

	ppid = intspec[0] << 8 | intspec[1];
	rc = pmic_arb->ver_ops->ppid_to_apid(pmic_arb, ppid);
	if (rc < 0) {
		dev_err(&pmic_arb->spmic->dev, "failed to xlate sid = %#x, periph = %#x, irq = %u rc = %d\n",
			intspec[0], intspec[1], intspec[2], rc);
		return rc;
	}

	apid = rc;
	/* Keep track of {max,min}_apid for bounding search during interrupt */
	if (apid > pmic_arb->max_apid)
		pmic_arb->max_apid = apid;
	if (apid < pmic_arb->min_apid)
		pmic_arb->min_apid = apid;

	*out_hwirq = spec_to_hwirq(intspec[0], intspec[1], intspec[2], apid);
	*out_type = intspec[3] & IRQ_TYPE_SENSE_MASK;

	dev_dbg(&pmic_arb->spmic->dev, "out_hwirq = %lu\n", *out_hwirq);

	return 0;
}

static struct lock_class_key qpnpint_irq_lock_class, qpnpint_irq_request_class;

static void qpnpint_irq_domain_map(struct spmi_pmic_arb *pmic_arb,
				   struct irq_domain *domain, unsigned int virq,
				   irq_hw_number_t hwirq, unsigned int type)
{
	irq_flow_handler_t handler;

	dev_dbg(&pmic_arb->spmic->dev, "virq = %u, hwirq = %lu, type = %u\n",
		virq, hwirq, type);

	if (type & IRQ_TYPE_EDGE_BOTH)
		handler = handle_edge_irq;
	else
		handler = handle_level_irq;

	irq_set_lockdep_class(virq, &qpnpint_irq_lock_class,
			      &qpnpint_irq_request_class);
	irq_domain_set_info(domain, virq, hwirq, &pmic_arb_irqchip, pmic_arb,
			    handler, NULL, NULL);
}

static int qpnpint_irq_domain_alloc(struct irq_domain *domain,
				    unsigned int virq, unsigned int nr_irqs,
				    void *data)
{
	struct spmi_pmic_arb *pmic_arb = domain->host_data;
	struct irq_fwspec *fwspec = data;
	irq_hw_number_t hwirq;
	unsigned int type;
	int ret, i;

	ret = qpnpint_irq_domain_translate(domain, fwspec, &hwirq, &type);
	if (ret)
		return ret;

	for (i = 0; i < nr_irqs; i++)
		qpnpint_irq_domain_map(pmic_arb, domain, virq + i, hwirq + i,
				       type);

	return 0;
}

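/*
 * The v1 arbiter exposes the PPID -> APID mapping as a binary search tree in
 * hardware: each table entry tests one bit of the PPID and either points to
 * the next entry to visit or terminates with the resulting APID. Entries are
 * cached in mapping_table[] the first time they are read.
 */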
static int pmic_arb_ppid_to_apid_v1(struct spmi_pmic_arb *pmic_arb, u16 ppid)
{
	u32 *mapping_table = pmic_arb->mapping_table;
	int index = 0, i;
	u16 apid_valid;
	u16 apid;
	u32 data;

	apid_valid = pmic_arb->ppid_to_apid[ppid];
	if (apid_valid & PMIC_ARB_APID_VALID) {
		apid = apid_valid & ~PMIC_ARB_APID_VALID;
		return apid;
	}

	for (i = 0; i < SPMI_MAPPING_TABLE_TREE_DEPTH; ++i) {
		if (!test_and_set_bit(index, pmic_arb->mapping_table_valid))
			mapping_table[index] = readl_relaxed(pmic_arb->cnfg +
						SPMI_MAPPING_TABLE_REG(index));

		data = mapping_table[index];

		if (ppid & BIT(SPMI_MAPPING_BIT_INDEX(data))) {
			if (SPMI_MAPPING_BIT_IS_1_FLAG(data)) {
				index = SPMI_MAPPING_BIT_IS_1_RESULT(data);
			} else {
				apid = SPMI_MAPPING_BIT_IS_1_RESULT(data);
				pmic_arb->ppid_to_apid[ppid]
					= apid | PMIC_ARB_APID_VALID;
				pmic_arb->apid_data[apid].ppid = ppid;
				return apid;
			}
		} else {
			if (SPMI_MAPPING_BIT_IS_0_FLAG(data)) {
				index = SPMI_MAPPING_BIT_IS_0_RESULT(data);
			} else {
				apid = SPMI_MAPPING_BIT_IS_0_RESULT(data);
				pmic_arb->ppid_to_apid[ppid]
					= apid | PMIC_ARB_APID_VALID;
				pmic_arb->apid_data[apid].ppid = ppid;
				return apid;
			}
		}
	}

	return -ENODEV;
}

/* v1 offset per ee */
static int pmic_arb_offset_v1(struct spmi_pmic_arb *pmic_arb, u8 sid, u16 addr,
			      enum pmic_arb_channel ch_type)
{
	return 0x800 + 0x80 * pmic_arb->channel;
}

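/*
 * Scan the arbiter's APID map (PMIC_ARB_REG_CHNLn) starting from the last
 * APID visited, caching every discovered PPID -> APID entry, until the
 * requested PPID is found or the map is exhausted.
 */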
static u16 pmic_arb_find_apid(struct spmi_pmic_arb *pmic_arb, u16 ppid)
{
	struct apid_data *apidd = &pmic_arb->apid_data[pmic_arb->last_apid];
	u32 regval, offset;
	u16 id, apid;

	for (apid = pmic_arb->last_apid; ; apid++, apidd++) {
		offset = pmic_arb->ver_ops->apid_map_offset(apid);
		if (offset >= pmic_arb->core_size)
			break;

		regval = readl_relaxed(pmic_arb->cnfg +
				       SPMI_OWNERSHIP_TABLE_REG(apid));
		apidd->irq_ee = SPMI_OWNERSHIP_PERIPH2OWNER(regval);
		apidd->write_ee = apidd->irq_ee;

		regval = readl_relaxed(pmic_arb->core + offset);
		if (!regval)
			continue;

		id = (regval >> 8) & PMIC_ARB_PPID_MASK;
		pmic_arb->ppid_to_apid[id] = apid | PMIC_ARB_APID_VALID;
		apidd->ppid = id;
		if (id == ppid) {
			apid |= PMIC_ARB_APID_VALID;
			break;
		}
	}
	pmic_arb->last_apid = apid & ~PMIC_ARB_APID_VALID;

	return apid;
}

static int pmic_arb_ppid_to_apid_v2(struct spmi_pmic_arb *pmic_arb, u16 ppid)
{
	u16 apid_valid;

	apid_valid = pmic_arb->ppid_to_apid[ppid];
	if (!(apid_valid & PMIC_ARB_APID_VALID))
		apid_valid = pmic_arb_find_apid(pmic_arb, ppid);
	if (!(apid_valid & PMIC_ARB_APID_VALID))
		return -ENODEV;

	return apid_valid & ~PMIC_ARB_APID_VALID;
}

static int pmic_arb_read_apid_map_v5(struct spmi_pmic_arb *pmic_arb)
{
	struct apid_data *apidd = pmic_arb->apid_data;
	struct apid_data *prev_apidd;
	u16 i, apid, ppid;
	bool valid, is_irq_ee;
	u32 regval, offset;

	/*
	 * In order to allow multiple EEs to write to a single PPID in arbiter
	 * version 5, there is more than one APID mapped to each PPID.
	 * The owner field for each of these mappings specifies the EE which is
	 * allowed to write to the APID. The owner of the last (highest) APID
	 * which has the IRQ owner bit set for a given PPID will receive
	 * interrupts from the PPID.
	 */
	for (i = 0; ; i++, apidd++) {
		offset = pmic_arb->ver_ops->apid_map_offset(i);
		if (offset >= pmic_arb->core_size)
			break;

		regval = readl_relaxed(pmic_arb->core + offset);
		if (!regval)
			continue;
		ppid = (regval >> 8) & PMIC_ARB_PPID_MASK;
		is_irq_ee = PMIC_ARB_CHAN_IS_IRQ_OWNER(regval);

		regval = readl_relaxed(pmic_arb->cnfg +
				       SPMI_OWNERSHIP_TABLE_REG(i));
		apidd->write_ee = SPMI_OWNERSHIP_PERIPH2OWNER(regval);

		apidd->irq_ee = is_irq_ee ? apidd->write_ee : INVALID_EE;

		valid = pmic_arb->ppid_to_apid[ppid] & PMIC_ARB_APID_VALID;
		apid = pmic_arb->ppid_to_apid[ppid] & ~PMIC_ARB_APID_VALID;
		prev_apidd = &pmic_arb->apid_data[apid];

		if (!valid || apidd->write_ee == pmic_arb->ee) {
			/* First PPID mapping or one for this EE */
			pmic_arb->ppid_to_apid[ppid] = i | PMIC_ARB_APID_VALID;
		} else if (valid && is_irq_ee &&
			   prev_apidd->write_ee == pmic_arb->ee) {
			/*
			 * Duplicate PPID mapping after the one for this EE;
			 * override the irq owner
			 */
			prev_apidd->irq_ee = apidd->irq_ee;
		}

		apidd->ppid = ppid;
		pmic_arb->last_apid = i;
	}

	/* Dump the mapping table for debug purposes. */
	dev_dbg(&pmic_arb->spmic->dev, "PPID APID Write-EE IRQ-EE\n");
	for (ppid = 0; ppid < PMIC_ARB_MAX_PPID; ppid++) {
		apid = pmic_arb->ppid_to_apid[ppid];
		if (apid & PMIC_ARB_APID_VALID) {
			apid &= ~PMIC_ARB_APID_VALID;
			apidd = &pmic_arb->apid_data[apid];
			dev_dbg(&pmic_arb->spmic->dev, "%#03X %3u %2u %2u\n",
				ppid, apid, apidd->write_ee, apidd->irq_ee);
		}
	}

	return 0;
}

static int pmic_arb_ppid_to_apid_v5(struct spmi_pmic_arb *pmic_arb, u16 ppid)
{
	if (!(pmic_arb->ppid_to_apid[ppid] & PMIC_ARB_APID_VALID))
		return -ENODEV;

	return pmic_arb->ppid_to_apid[ppid] & ~PMIC_ARB_APID_VALID;
}

/* v2 offset per ppid and per ee */
static int pmic_arb_offset_v2(struct spmi_pmic_arb *pmic_arb, u8 sid, u16 addr,
			      enum pmic_arb_channel ch_type)
{
	u16 apid;
	u16 ppid;
	int rc;

	ppid = sid << 8 | ((addr >> 8) & 0xFF);
	rc = pmic_arb_ppid_to_apid_v2(pmic_arb, ppid);
	if (rc < 0)
		return rc;

	apid = rc;
	return 0x1000 * pmic_arb->ee + 0x8000 * apid;
}

/*
 * v5 offset per ee and per apid for observer channels and per apid for
 * read/write channels.
 */
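/*
 * For example, with ee = 0 and apid = 5 this yields an observer channel
 * offset of 0x10000 * 0 + 0x80 * 5 = 0x280 and a read/write channel offset
 * of 0x10000 * 5 = 0x50000.
 */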
static int pmic_arb_offset_v5(struct spmi_pmic_arb *pmic_arb, u8 sid, u16 addr,
			      enum pmic_arb_channel ch_type)
{
	u16 apid;
	int rc;
	u32 offset = 0;
	u16 ppid = (sid << 8) | (addr >> 8);

	rc = pmic_arb_ppid_to_apid_v5(pmic_arb, ppid);
	if (rc < 0)
		return rc;

	apid = rc;
	switch (ch_type) {
	case PMIC_ARB_CHANNEL_OBS:
		offset = 0x10000 * pmic_arb->ee + 0x80 * apid;
		break;
	case PMIC_ARB_CHANNEL_RW:
		offset = 0x10000 * apid;
		break;
	}

	return offset;
}

static u32 pmic_arb_fmt_cmd_v1(u8 opc, u8 sid, u16 addr, u8 bc)
{
	return (opc << 27) | ((sid & 0xf) << 20) | (addr << 4) | (bc & 0x7);
}

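/*
 * On v2 and later the command is written to a per-APID channel that already
 * identifies the slave and peripheral, so only the low address byte and the
 * byte count are encoded in the command word.
 */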
static u32 pmic_arb_fmt_cmd_v2(u8 opc, u8 sid, u16 addr, u8 bc)
{
	return (opc << 27) | ((addr & 0xff) << 4) | (bc & 0x7);
}

static void __iomem *
pmic_arb_owner_acc_status_v1(struct spmi_pmic_arb *pmic_arb, u8 m, u16 n)
{
	return pmic_arb->intr + 0x20 * m + 0x4 * n;
}

static void __iomem *
pmic_arb_owner_acc_status_v2(struct spmi_pmic_arb *pmic_arb, u8 m, u16 n)
{
	return pmic_arb->intr + 0x100000 + 0x1000 * m + 0x4 * n;
}

static void __iomem *
pmic_arb_owner_acc_status_v3(struct spmi_pmic_arb *pmic_arb, u8 m, u16 n)
{
	return pmic_arb->intr + 0x200000 + 0x1000 * m + 0x4 * n;
}

static void __iomem *
pmic_arb_owner_acc_status_v5(struct spmi_pmic_arb *pmic_arb, u8 m, u16 n)
{
	return pmic_arb->intr + 0x10000 * m + 0x4 * n;
}

static void __iomem *
pmic_arb_acc_enable_v1(struct spmi_pmic_arb *pmic_arb, u16 n)
{
	return pmic_arb->intr + 0x200 + 0x4 * n;
}

static void __iomem *
pmic_arb_acc_enable_v2(struct spmi_pmic_arb *pmic_arb, u16 n)
{
	return pmic_arb->intr + 0x1000 * n;
}

static void __iomem *
pmic_arb_acc_enable_v5(struct spmi_pmic_arb *pmic_arb, u16 n)
{
	return pmic_arb->wr_base + 0x100 + 0x10000 * n;
}

static void __iomem *
pmic_arb_irq_status_v1(struct spmi_pmic_arb *pmic_arb, u16 n)
{
	return pmic_arb->intr + 0x600 + 0x4 * n;
}

static void __iomem *
pmic_arb_irq_status_v2(struct spmi_pmic_arb *pmic_arb, u16 n)
{
	return pmic_arb->intr + 0x4 + 0x1000 * n;
}

static void __iomem *
pmic_arb_irq_status_v5(struct spmi_pmic_arb *pmic_arb, u16 n)
{
	return pmic_arb->wr_base + 0x104 + 0x10000 * n;
}

static void __iomem *
pmic_arb_irq_clear_v1(struct spmi_pmic_arb *pmic_arb, u16 n)
{
	return pmic_arb->intr + 0xA00 + 0x4 * n;
}

static void __iomem *
pmic_arb_irq_clear_v2(struct spmi_pmic_arb *pmic_arb, u16 n)
{
	return pmic_arb->intr + 0x8 + 0x1000 * n;
}

static void __iomem *
pmic_arb_irq_clear_v5(struct spmi_pmic_arb *pmic_arb, u16 n)
{
	return pmic_arb->wr_base + 0x108 + 0x10000 * n;
}

static u32 pmic_arb_apid_map_offset_v2(u16 n)
{
	return 0x800 + 0x4 * n;
}

static u32 pmic_arb_apid_map_offset_v5(u16 n)
{
	return 0x900 + 0x4 * n;
}

static const struct pmic_arb_ver_ops pmic_arb_v1 = {
	.ver_str = "v1",
	.ppid_to_apid = pmic_arb_ppid_to_apid_v1,
	.non_data_cmd = pmic_arb_non_data_cmd_v1,
	.offset = pmic_arb_offset_v1,
	.fmt_cmd = pmic_arb_fmt_cmd_v1,
	.owner_acc_status = pmic_arb_owner_acc_status_v1,
	.acc_enable = pmic_arb_acc_enable_v1,
	.irq_status = pmic_arb_irq_status_v1,
	.irq_clear = pmic_arb_irq_clear_v1,
	.apid_map_offset = pmic_arb_apid_map_offset_v2,
};

static const struct pmic_arb_ver_ops pmic_arb_v2 = {
	.ver_str = "v2",
	.ppid_to_apid = pmic_arb_ppid_to_apid_v2,
	.non_data_cmd = pmic_arb_non_data_cmd_v2,
	.offset = pmic_arb_offset_v2,
	.fmt_cmd = pmic_arb_fmt_cmd_v2,
	.owner_acc_status = pmic_arb_owner_acc_status_v2,
	.acc_enable = pmic_arb_acc_enable_v2,
	.irq_status = pmic_arb_irq_status_v2,
	.irq_clear = pmic_arb_irq_clear_v2,
	.apid_map_offset = pmic_arb_apid_map_offset_v2,
};

static const struct pmic_arb_ver_ops pmic_arb_v3 = {
	.ver_str = "v3",
	.ppid_to_apid = pmic_arb_ppid_to_apid_v2,
	.non_data_cmd = pmic_arb_non_data_cmd_v2,
	.offset = pmic_arb_offset_v2,
	.fmt_cmd = pmic_arb_fmt_cmd_v2,
	.owner_acc_status = pmic_arb_owner_acc_status_v3,
	.acc_enable = pmic_arb_acc_enable_v2,
	.irq_status = pmic_arb_irq_status_v2,
	.irq_clear = pmic_arb_irq_clear_v2,
	.apid_map_offset = pmic_arb_apid_map_offset_v2,
};

static const struct pmic_arb_ver_ops pmic_arb_v5 = {
	.ver_str = "v5",
	.ppid_to_apid = pmic_arb_ppid_to_apid_v5,
	.non_data_cmd = pmic_arb_non_data_cmd_v2,
	.offset = pmic_arb_offset_v5,
	.fmt_cmd = pmic_arb_fmt_cmd_v2,
	.owner_acc_status = pmic_arb_owner_acc_status_v5,
	.acc_enable = pmic_arb_acc_enable_v5,
	.irq_status = pmic_arb_irq_status_v5,
	.irq_clear = pmic_arb_irq_clear_v5,
	.apid_map_offset = pmic_arb_apid_map_offset_v5,
};

static const struct irq_domain_ops pmic_arb_irq_domain_ops = {
	.activate = qpnpint_irq_domain_activate,
	.alloc = qpnpint_irq_domain_alloc,
	.free = irq_domain_free_irqs_common,
	.translate = qpnpint_irq_domain_translate,
};

static int spmi_pmic_arb_probe(struct platform_device *pdev)
{
	struct spmi_pmic_arb *pmic_arb;
	struct spmi_controller *ctrl;
	struct resource *res;
	void __iomem *core;
	u32 *mapping_table;
	u32 channel, ee, hw_ver;
	int err;

	ctrl = spmi_controller_alloc(&pdev->dev, sizeof(*pmic_arb));
	if (!ctrl)
		return -ENOMEM;

	pmic_arb = spmi_controller_get_drvdata(ctrl);
	pmic_arb->spmic = ctrl;

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "core");
	core = devm_ioremap_resource(&ctrl->dev, res);
	if (IS_ERR(core)) {
		err = PTR_ERR(core);
		goto err_put_ctrl;
	}

	pmic_arb->core_size = resource_size(res);

	pmic_arb->ppid_to_apid = devm_kcalloc(&ctrl->dev, PMIC_ARB_MAX_PPID,
					      sizeof(*pmic_arb->ppid_to_apid),
					      GFP_KERNEL);
	if (!pmic_arb->ppid_to_apid) {
		err = -ENOMEM;
		goto err_put_ctrl;
	}

	hw_ver = readl_relaxed(core + PMIC_ARB_VERSION);

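	/*
	 * On v1 the "core" region contains both the command channels and the
	 * observer registers; v2 and later map separate "chnls" and "obsrvr"
	 * regions and keep "core" only for the version and APID map registers.
	 */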
	if (hw_ver < PMIC_ARB_VERSION_V2_MIN) {
		pmic_arb->ver_ops = &pmic_arb_v1;
		pmic_arb->wr_base = core;
		pmic_arb->rd_base = core;
	} else {
		pmic_arb->core = core;

		if (hw_ver < PMIC_ARB_VERSION_V3_MIN)
			pmic_arb->ver_ops = &pmic_arb_v2;
		else if (hw_ver < PMIC_ARB_VERSION_V5_MIN)
			pmic_arb->ver_ops = &pmic_arb_v3;
		else
			pmic_arb->ver_ops = &pmic_arb_v5;

		res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
						   "obsrvr");
		pmic_arb->rd_base = devm_ioremap_resource(&ctrl->dev, res);
		if (IS_ERR(pmic_arb->rd_base)) {
			err = PTR_ERR(pmic_arb->rd_base);
			goto err_put_ctrl;
		}

		res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
						   "chnls");
		pmic_arb->wr_base = devm_ioremap_resource(&ctrl->dev, res);
		if (IS_ERR(pmic_arb->wr_base)) {
			err = PTR_ERR(pmic_arb->wr_base);
			goto err_put_ctrl;
		}
	}

	dev_info(&ctrl->dev, "PMIC arbiter version %s (0x%x)\n",
		 pmic_arb->ver_ops->ver_str, hw_ver);

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "intr");
	pmic_arb->intr = devm_ioremap_resource(&ctrl->dev, res);
	if (IS_ERR(pmic_arb->intr)) {
		err = PTR_ERR(pmic_arb->intr);
		goto err_put_ctrl;
	}

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cnfg");
	pmic_arb->cnfg = devm_ioremap_resource(&ctrl->dev, res);
	if (IS_ERR(pmic_arb->cnfg)) {
		err = PTR_ERR(pmic_arb->cnfg);
		goto err_put_ctrl;
	}

	pmic_arb->irq = platform_get_irq_byname(pdev, "periph_irq");
	if (pmic_arb->irq < 0) {
		err = pmic_arb->irq;
		goto err_put_ctrl;
	}

	err = of_property_read_u32(pdev->dev.of_node, "qcom,channel", &channel);
	if (err) {
		dev_err(&pdev->dev, "channel unspecified.\n");
		goto err_put_ctrl;
	}

	if (channel > 5) {
		dev_err(&pdev->dev, "invalid channel (%u) specified.\n",
			channel);
		err = -EINVAL;
		goto err_put_ctrl;
	}

	pmic_arb->channel = channel;

	err = of_property_read_u32(pdev->dev.of_node, "qcom,ee", &ee);
	if (err) {
		dev_err(&pdev->dev, "EE unspecified.\n");
		goto err_put_ctrl;
	}

	if (ee > 5) {
		dev_err(&pdev->dev, "invalid EE (%u) specified\n", ee);
		err = -EINVAL;
		goto err_put_ctrl;
	}

	pmic_arb->ee = ee;
	mapping_table = devm_kcalloc(&ctrl->dev, PMIC_ARB_MAX_PERIPHS,
				     sizeof(*mapping_table), GFP_KERNEL);
	if (!mapping_table) {
		err = -ENOMEM;
		goto err_put_ctrl;
	}

	pmic_arb->mapping_table = mapping_table;
	/*
	 * Initialize max_apid/min_apid to the opposite bounds; they are
	 * guaranteed to be updated during IRQ domain translation.
	 */
	pmic_arb->max_apid = 0;
	pmic_arb->min_apid = PMIC_ARB_MAX_PERIPHS - 1;

	platform_set_drvdata(pdev, ctrl);
	raw_spin_lock_init(&pmic_arb->lock);

	ctrl->cmd = pmic_arb_cmd;
	ctrl->read_cmd = pmic_arb_read_cmd;
	ctrl->write_cmd = pmic_arb_write_cmd;

	if (hw_ver >= PMIC_ARB_VERSION_V5_MIN) {
		err = pmic_arb_read_apid_map_v5(pmic_arb);
		if (err) {
			dev_err(&pdev->dev, "could not read APID->PPID mapping table, rc= %d\n",
				err);
			goto err_put_ctrl;
		}
	}

	dev_dbg(&pdev->dev, "adding irq domain\n");
	pmic_arb->domain = irq_domain_add_tree(pdev->dev.of_node,
					       &pmic_arb_irq_domain_ops, pmic_arb);
	if (!pmic_arb->domain) {
		dev_err(&pdev->dev, "unable to create irq_domain\n");
		err = -ENOMEM;
		goto err_put_ctrl;
	}

	irq_set_chained_handler_and_data(pmic_arb->irq, pmic_arb_chained_irq,
					 pmic_arb);
	err = spmi_controller_add(ctrl);
	if (err)
		goto err_domain_remove;

	return 0;

err_domain_remove:
	irq_set_chained_handler_and_data(pmic_arb->irq, NULL, NULL);
	irq_domain_remove(pmic_arb->domain);
err_put_ctrl:
	spmi_controller_put(ctrl);
	return err;
}

static int spmi_pmic_arb_remove(struct platform_device *pdev)
{
	struct spmi_controller *ctrl = platform_get_drvdata(pdev);
	struct spmi_pmic_arb *pmic_arb = spmi_controller_get_drvdata(ctrl);

	spmi_controller_remove(ctrl);
	irq_set_chained_handler_and_data(pmic_arb->irq, NULL, NULL);
	irq_domain_remove(pmic_arb->domain);
	spmi_controller_put(ctrl);
	return 0;
}

static const struct of_device_id spmi_pmic_arb_match_table[] = {
	{ .compatible = "qcom,spmi-pmic-arb", },
	{},
};
MODULE_DEVICE_TABLE(of, spmi_pmic_arb_match_table);

static struct platform_driver spmi_pmic_arb_driver = {
	.probe = spmi_pmic_arb_probe,
	.remove = spmi_pmic_arb_remove,
	.driver = {
		.name = "spmi_pmic_arb",
		.of_match_table = spmi_pmic_arb_match_table,
	},
};
module_platform_driver(spmi_pmic_arb_driver);

MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:spmi_pmic_arb");