// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
 *
 */

#include <linux/bitfield.h>
#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/dma-direction.h>
#include <linux/dma-mapping.h>
#include <linux/idr.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/mhi.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/wait.h>
#include "internal.h"

static DEFINE_IDA(mhi_controller_ida);

const char * const mhi_ee_str[MHI_EE_MAX] = {
	[MHI_EE_PBL] = "PRIMARY BOOTLOADER",
	[MHI_EE_SBL] = "SECONDARY BOOTLOADER",
	[MHI_EE_AMSS] = "MISSION MODE",
	[MHI_EE_RDDM] = "RAMDUMP DOWNLOAD MODE",
	[MHI_EE_WFW] = "WLAN FIRMWARE",
	[MHI_EE_PTHRU] = "PASS THROUGH",
	[MHI_EE_EDL] = "EMERGENCY DOWNLOAD",
	[MHI_EE_FP] = "FLASH PROGRAMMER",
	[MHI_EE_DISABLE_TRANSITION] = "DISABLE",
	[MHI_EE_NOT_SUPPORTED] = "NOT SUPPORTED",
};

const char * const dev_state_tran_str[DEV_ST_TRANSITION_MAX] = {
	[DEV_ST_TRANSITION_PBL] = "PBL",
	[DEV_ST_TRANSITION_READY] = "READY",
	[DEV_ST_TRANSITION_SBL] = "SBL",
	[DEV_ST_TRANSITION_MISSION_MODE] = "MISSION MODE",
	[DEV_ST_TRANSITION_FP] = "FLASH PROGRAMMER",
	[DEV_ST_TRANSITION_SYS_ERR] = "SYS ERROR",
	[DEV_ST_TRANSITION_DISABLE] = "DISABLE",
};

const char * const mhi_ch_state_type_str[MHI_CH_STATE_TYPE_MAX] = {
	[MHI_CH_STATE_TYPE_RESET] = "RESET",
	[MHI_CH_STATE_TYPE_STOP] = "STOP",
	[MHI_CH_STATE_TYPE_START] = "START",
};

static const char * const mhi_pm_state_str[] = {
	[MHI_PM_STATE_DISABLE] = "DISABLE",
	[MHI_PM_STATE_POR] = "POWER ON RESET",
	[MHI_PM_STATE_M0] = "M0",
	[MHI_PM_STATE_M2] = "M2",
	[MHI_PM_STATE_M3_ENTER] = "M?->M3",
	[MHI_PM_STATE_M3] = "M3",
	[MHI_PM_STATE_M3_EXIT] = "M3->M0",
	[MHI_PM_STATE_FW_DL_ERR] = "Firmware Download Error",
	[MHI_PM_STATE_SYS_ERR_DETECT] = "SYS ERROR Detect",
	[MHI_PM_STATE_SYS_ERR_PROCESS] = "SYS ERROR Process",
	[MHI_PM_STATE_SYS_ERR_FAIL] = "SYS ERROR Failure",
	[MHI_PM_STATE_SHUTDOWN_PROCESS] = "SHUTDOWN Process",
	[MHI_PM_STATE_LD_ERR_FATAL_DETECT] = "Linkdown or Error Fatal Detect",
};

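/*
 * The PM state passed in here is a bit mask, while mhi_pm_state_str above is
 * indexed by bit position: __fls() maps the highest set bit back to its
 * table index.
 */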
const char *to_mhi_pm_state_str(u32 state)
{
	int index;

	if (state)
		index = __fls(state);

	if (!state || index >= ARRAY_SIZE(mhi_pm_state_str))
		return "Invalid State";

	return mhi_pm_state_str[index];
}

static ssize_t serial_number_show(struct device *dev,
				  struct device_attribute *attr,
				  char *buf)
{
	struct mhi_device *mhi_dev = to_mhi_device(dev);
	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;

	return sysfs_emit(buf, "Serial Number: %u\n",
			mhi_cntrl->serial_number);
}
static DEVICE_ATTR_RO(serial_number);

static ssize_t oem_pk_hash_show(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	struct mhi_device *mhi_dev = to_mhi_device(dev);
	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
	int i, cnt = 0;

	for (i = 0; i < ARRAY_SIZE(mhi_cntrl->oem_pk_hash); i++)
		cnt += sysfs_emit_at(buf, cnt, "OEMPKHASH[%d]: 0x%x\n",
				i, mhi_cntrl->oem_pk_hash[i]);

	return cnt;
}
static DEVICE_ATTR_RO(oem_pk_hash);

static ssize_t soc_reset_store(struct device *dev,
			       struct device_attribute *attr,
			       const char *buf,
			       size_t count)
{
	struct mhi_device *mhi_dev = to_mhi_device(dev);
	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;

	mhi_soc_reset(mhi_cntrl);
	return count;
}
static DEVICE_ATTR_WO(soc_reset);

static struct attribute *mhi_dev_attrs[] = {
	&dev_attr_serial_number.attr,
	&dev_attr_oem_pk_hash.attr,
	&dev_attr_soc_reset.attr,
	NULL,
};
ATTRIBUTE_GROUPS(mhi_dev);

/* MHI protocol requires the transfer ring to be aligned with ring length */
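/*
 * To get that alignment without a dedicated allocator, the ring is
 * over-allocated by (len - 1) bytes and the base rounded up to the next
 * multiple of len: e.g. a 4 KB ring allocates 8 KB - 1 bytes, which always
 * contains a 4 KB-aligned window. Note the mask arithmetic below only
 * produces len-alignment when len is a power of two.
 */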
static int mhi_alloc_aligned_ring(struct mhi_controller *mhi_cntrl,
				  struct mhi_ring *ring,
				  u64 len)
{
	ring->alloc_size = len + (len - 1);
	ring->pre_aligned = dma_alloc_coherent(mhi_cntrl->cntrl_dev, ring->alloc_size,
					       &ring->dma_handle, GFP_KERNEL);
	if (!ring->pre_aligned)
		return -ENOMEM;

	ring->iommu_base = (ring->dma_handle + (len - 1)) & ~(len - 1);
	ring->base = ring->pre_aligned + (ring->iommu_base - ring->dma_handle);

	return 0;
}

void mhi_deinit_free_irq(struct mhi_controller *mhi_cntrl)
{
	int i;
	struct mhi_event *mhi_event = mhi_cntrl->mhi_event;

	for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
		if (mhi_event->offload_ev)
			continue;

		free_irq(mhi_cntrl->irq[mhi_event->irq], mhi_event);
	}

	free_irq(mhi_cntrl->irq[0], mhi_cntrl);
}

int mhi_init_irq_setup(struct mhi_controller *mhi_cntrl)
{
	struct mhi_event *mhi_event = mhi_cntrl->mhi_event;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	unsigned long irq_flags = IRQF_SHARED | IRQF_NO_SUSPEND;
	int i, ret;

	/* if controller driver has set irq_flags, use it */
	if (mhi_cntrl->irq_flags)
		irq_flags = mhi_cntrl->irq_flags;

	/* Setup BHI_INTVEC IRQ */
	ret = request_threaded_irq(mhi_cntrl->irq[0], mhi_intvec_handler,
				   mhi_intvec_threaded_handler,
				   irq_flags,
				   "bhi", mhi_cntrl);
	if (ret)
		return ret;
	/*
	 * IRQs should be enabled during mhi_async_power_up(), so disable them explicitly here.
	 * Due to the use of IRQF_SHARED flag as default while requesting IRQs, we assume that
	 * IRQ_NOAUTOEN is not applicable.
	 */
	disable_irq(mhi_cntrl->irq[0]);

	for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
		if (mhi_event->offload_ev)
			continue;

		if (mhi_event->irq >= mhi_cntrl->nr_irqs) {
			dev_err(dev, "irq %d not available for event ring\n",
				mhi_event->irq);
			ret = -EINVAL;
			goto error_request;
		}

		ret = request_irq(mhi_cntrl->irq[mhi_event->irq],
				  mhi_irq_handler,
				  irq_flags,
				  "mhi", mhi_event);
		if (ret) {
			dev_err(dev, "Error requesting irq:%d for ev:%d\n",
				mhi_cntrl->irq[mhi_event->irq], i);
			goto error_request;
		}

		disable_irq(mhi_cntrl->irq[mhi_event->irq]);
	}

	return 0;

error_request:
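	/*
	 * Walk back over the event rings whose IRQ was already requested;
	 * offload rings never requested one, so they are skipped here too.
	 */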
	for (--i, --mhi_event; i >= 0; i--, mhi_event--) {
		if (mhi_event->offload_ev)
			continue;

		free_irq(mhi_cntrl->irq[mhi_event->irq], mhi_event);
	}
	free_irq(mhi_cntrl->irq[0], mhi_cntrl);

	return ret;
}

void mhi_deinit_dev_ctxt(struct mhi_controller *mhi_cntrl)
{
	int i;
	struct mhi_ctxt *mhi_ctxt = mhi_cntrl->mhi_ctxt;
	struct mhi_cmd *mhi_cmd;
	struct mhi_event *mhi_event;
	struct mhi_ring *ring;

	mhi_cmd = mhi_cntrl->mhi_cmd;
	for (i = 0; i < NR_OF_CMD_RINGS; i++, mhi_cmd++) {
		ring = &mhi_cmd->ring;
		dma_free_coherent(mhi_cntrl->cntrl_dev, ring->alloc_size,
				  ring->pre_aligned, ring->dma_handle);
		ring->base = NULL;
		ring->iommu_base = 0;
	}

	dma_free_coherent(mhi_cntrl->cntrl_dev,
			  sizeof(*mhi_ctxt->cmd_ctxt) * NR_OF_CMD_RINGS,
			  mhi_ctxt->cmd_ctxt, mhi_ctxt->cmd_ctxt_addr);

	mhi_event = mhi_cntrl->mhi_event;
	for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
		if (mhi_event->offload_ev)
			continue;

		ring = &mhi_event->ring;
		dma_free_coherent(mhi_cntrl->cntrl_dev, ring->alloc_size,
				  ring->pre_aligned, ring->dma_handle);
		ring->base = NULL;
		ring->iommu_base = 0;
	}

	dma_free_coherent(mhi_cntrl->cntrl_dev, sizeof(*mhi_ctxt->er_ctxt) *
			  mhi_cntrl->total_ev_rings, mhi_ctxt->er_ctxt,
			  mhi_ctxt->er_ctxt_addr);

	dma_free_coherent(mhi_cntrl->cntrl_dev, sizeof(*mhi_ctxt->chan_ctxt) *
			  mhi_cntrl->max_chan, mhi_ctxt->chan_ctxt,
			  mhi_ctxt->chan_ctxt_addr);

	kfree(mhi_ctxt);
	mhi_cntrl->mhi_ctxt = NULL;
}

int mhi_init_dev_ctxt(struct mhi_controller *mhi_cntrl)
{
	struct mhi_ctxt *mhi_ctxt;
	struct mhi_chan_ctxt *chan_ctxt;
	struct mhi_event_ctxt *er_ctxt;
	struct mhi_cmd_ctxt *cmd_ctxt;
	struct mhi_chan *mhi_chan;
	struct mhi_event *mhi_event;
	struct mhi_cmd *mhi_cmd;
	u32 tmp;
	int ret = -ENOMEM, i;

	atomic_set(&mhi_cntrl->dev_wake, 0);
	atomic_set(&mhi_cntrl->pending_pkts, 0);

	mhi_ctxt = kzalloc(sizeof(*mhi_ctxt), GFP_KERNEL);
	if (!mhi_ctxt)
		return -ENOMEM;

	/* Setup channel ctxt */
	mhi_ctxt->chan_ctxt = dma_alloc_coherent(mhi_cntrl->cntrl_dev,
						 sizeof(*mhi_ctxt->chan_ctxt) *
						 mhi_cntrl->max_chan,
						 &mhi_ctxt->chan_ctxt_addr,
						 GFP_KERNEL);
	if (!mhi_ctxt->chan_ctxt)
		goto error_alloc_chan_ctxt;

	mhi_chan = mhi_cntrl->mhi_chan;
	chan_ctxt = mhi_ctxt->chan_ctxt;
	for (i = 0; i < mhi_cntrl->max_chan; i++, chan_ctxt++, mhi_chan++) {
		/* Skip if it is an offload channel */
		if (mhi_chan->offload_ch)
			continue;

		tmp = le32_to_cpu(chan_ctxt->chcfg);
		tmp &= ~CHAN_CTX_CHSTATE_MASK;
		tmp |= FIELD_PREP(CHAN_CTX_CHSTATE_MASK, MHI_CH_STATE_DISABLED);
		tmp &= ~CHAN_CTX_BRSTMODE_MASK;
		tmp |= FIELD_PREP(CHAN_CTX_BRSTMODE_MASK, mhi_chan->db_cfg.brstmode);
		tmp &= ~CHAN_CTX_POLLCFG_MASK;
		tmp |= FIELD_PREP(CHAN_CTX_POLLCFG_MASK, mhi_chan->db_cfg.pollcfg);
		chan_ctxt->chcfg = cpu_to_le32(tmp);

		chan_ctxt->chtype = cpu_to_le32(mhi_chan->type);
		chan_ctxt->erindex = cpu_to_le32(mhi_chan->er_index);

		mhi_chan->ch_state = MHI_CH_STATE_DISABLED;
		mhi_chan->tre_ring.db_addr = (void __iomem *)&chan_ctxt->wp;
	}

	/* Setup event context */
	mhi_ctxt->er_ctxt = dma_alloc_coherent(mhi_cntrl->cntrl_dev,
					       sizeof(*mhi_ctxt->er_ctxt) *
					       mhi_cntrl->total_ev_rings,
					       &mhi_ctxt->er_ctxt_addr,
					       GFP_KERNEL);
	if (!mhi_ctxt->er_ctxt)
		goto error_alloc_er_ctxt;

	er_ctxt = mhi_ctxt->er_ctxt;
	mhi_event = mhi_cntrl->mhi_event;
	for (i = 0; i < mhi_cntrl->total_ev_rings; i++, er_ctxt++,
		     mhi_event++) {
		struct mhi_ring *ring = &mhi_event->ring;

		/* Skip if it is an offload event */
		if (mhi_event->offload_ev)
			continue;

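		/*
		 * Clear the moderation counter (INTMODC) and program the
		 * moderation timer (INTMODT) with the configured value in ms.
		 */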
		tmp = le32_to_cpu(er_ctxt->intmod);
		tmp &= ~EV_CTX_INTMODC_MASK;
		tmp &= ~EV_CTX_INTMODT_MASK;
		tmp |= FIELD_PREP(EV_CTX_INTMODT_MASK, mhi_event->intmod);
		er_ctxt->intmod = cpu_to_le32(tmp);

		er_ctxt->ertype = cpu_to_le32(MHI_ER_TYPE_VALID);
		er_ctxt->msivec = cpu_to_le32(mhi_event->irq);
		mhi_event->db_cfg.db_mode = true;

		ring->el_size = sizeof(struct mhi_ring_element);
		ring->len = ring->el_size * ring->elements;
		ret = mhi_alloc_aligned_ring(mhi_cntrl, ring, ring->len);
		if (ret)
			goto error_alloc_er;

		/*
		 * If the read pointer equals the write pointer, then the
		 * ring is empty
		 */
		ring->rp = ring->wp = ring->base;
		er_ctxt->rbase = cpu_to_le64(ring->iommu_base);
		er_ctxt->rp = er_ctxt->wp = er_ctxt->rbase;
		er_ctxt->rlen = cpu_to_le64(ring->len);
		ring->ctxt_wp = &er_ctxt->wp;
	}

	/* Setup cmd context */
	ret = -ENOMEM;
	mhi_ctxt->cmd_ctxt = dma_alloc_coherent(mhi_cntrl->cntrl_dev,
						sizeof(*mhi_ctxt->cmd_ctxt) *
						NR_OF_CMD_RINGS,
						&mhi_ctxt->cmd_ctxt_addr,
						GFP_KERNEL);
	if (!mhi_ctxt->cmd_ctxt)
		goto error_alloc_er;

	mhi_cmd = mhi_cntrl->mhi_cmd;
	cmd_ctxt = mhi_ctxt->cmd_ctxt;
	for (i = 0; i < NR_OF_CMD_RINGS; i++, mhi_cmd++, cmd_ctxt++) {
		struct mhi_ring *ring = &mhi_cmd->ring;

		ring->el_size = sizeof(struct mhi_ring_element);
		ring->elements = CMD_EL_PER_RING;
		ring->len = ring->el_size * ring->elements;
		ret = mhi_alloc_aligned_ring(mhi_cntrl, ring, ring->len);
		if (ret)
			goto error_alloc_cmd;

		ring->rp = ring->wp = ring->base;
		cmd_ctxt->rbase = cpu_to_le64(ring->iommu_base);
		cmd_ctxt->rp = cmd_ctxt->wp = cmd_ctxt->rbase;
		cmd_ctxt->rlen = cpu_to_le64(ring->len);
		ring->ctxt_wp = &cmd_ctxt->wp;
	}

	mhi_cntrl->mhi_ctxt = mhi_ctxt;

	return 0;

error_alloc_cmd:
	for (--i, --mhi_cmd; i >= 0; i--, mhi_cmd--) {
		struct mhi_ring *ring = &mhi_cmd->ring;

		dma_free_coherent(mhi_cntrl->cntrl_dev, ring->alloc_size,
				  ring->pre_aligned, ring->dma_handle);
	}
	dma_free_coherent(mhi_cntrl->cntrl_dev,
			  sizeof(*mhi_ctxt->cmd_ctxt) * NR_OF_CMD_RINGS,
			  mhi_ctxt->cmd_ctxt, mhi_ctxt->cmd_ctxt_addr);
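	/*
	 * Falling through into error_alloc_er must free every event ring, so
	 * rewind the cursor to one past the last ring before entering the loop.
	 */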
	i = mhi_cntrl->total_ev_rings;
	mhi_event = mhi_cntrl->mhi_event + i;

error_alloc_er:
	for (--i, --mhi_event; i >= 0; i--, mhi_event--) {
		struct mhi_ring *ring = &mhi_event->ring;

		if (mhi_event->offload_ev)
			continue;

		dma_free_coherent(mhi_cntrl->cntrl_dev, ring->alloc_size,
				  ring->pre_aligned, ring->dma_handle);
	}
	dma_free_coherent(mhi_cntrl->cntrl_dev, sizeof(*mhi_ctxt->er_ctxt) *
			  mhi_cntrl->total_ev_rings, mhi_ctxt->er_ctxt,
			  mhi_ctxt->er_ctxt_addr);

error_alloc_er_ctxt:
	dma_free_coherent(mhi_cntrl->cntrl_dev, sizeof(*mhi_ctxt->chan_ctxt) *
			  mhi_cntrl->max_chan, mhi_ctxt->chan_ctxt,
			  mhi_ctxt->chan_ctxt_addr);

error_alloc_chan_ctxt:
	kfree(mhi_ctxt);

	return ret;
}

int mhi_init_mmio(struct mhi_controller *mhi_cntrl)
{
	u32 val;
	int i, ret;
	struct mhi_chan *mhi_chan;
	struct mhi_event *mhi_event;
	void __iomem *base = mhi_cntrl->regs;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	struct {
		u32 offset;
		u32 val;
	} reg_info[] = {
		{
			CCABAP_HIGHER,
			upper_32_bits(mhi_cntrl->mhi_ctxt->chan_ctxt_addr),
		},
		{
			CCABAP_LOWER,
			lower_32_bits(mhi_cntrl->mhi_ctxt->chan_ctxt_addr),
		},
		{
			ECABAP_HIGHER,
			upper_32_bits(mhi_cntrl->mhi_ctxt->er_ctxt_addr),
		},
		{
			ECABAP_LOWER,
			lower_32_bits(mhi_cntrl->mhi_ctxt->er_ctxt_addr),
		},
		{
			CRCBAP_HIGHER,
			upper_32_bits(mhi_cntrl->mhi_ctxt->cmd_ctxt_addr),
		},
		{
			CRCBAP_LOWER,
			lower_32_bits(mhi_cntrl->mhi_ctxt->cmd_ctxt_addr),
		},
		{
			MHICTRLBASE_HIGHER,
			upper_32_bits(mhi_cntrl->iova_start),
		},
		{
			MHICTRLBASE_LOWER,
			lower_32_bits(mhi_cntrl->iova_start),
		},
		{
			MHIDATABASE_HIGHER,
			upper_32_bits(mhi_cntrl->iova_start),
		},
		{
			MHIDATABASE_LOWER,
			lower_32_bits(mhi_cntrl->iova_start),
		},
		{
			MHICTRLLIMIT_HIGHER,
			upper_32_bits(mhi_cntrl->iova_stop),
		},
		{
			MHICTRLLIMIT_LOWER,
			lower_32_bits(mhi_cntrl->iova_stop),
		},
		{
			MHIDATALIMIT_HIGHER,
			upper_32_bits(mhi_cntrl->iova_stop),
		},
		{
			MHIDATALIMIT_LOWER,
			lower_32_bits(mhi_cntrl->iova_stop),
		},
		{0, 0}
	};

	dev_dbg(dev, "Initializing MHI registers\n");

	/* Read channel db offset */
	ret = mhi_read_reg(mhi_cntrl, base, CHDBOFF, &val);
	if (ret) {
		dev_err(dev, "Unable to read CHDBOFF register\n");
		return -EIO;
	}

	if (val >= mhi_cntrl->reg_len - (8 * MHI_DEV_WAKE_DB)) {
		dev_err(dev, "CHDB offset: 0x%x is out of range: 0x%zx\n",
			val, mhi_cntrl->reg_len - (8 * MHI_DEV_WAKE_DB));
		return -ERANGE;
	}

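	/*
	 * Doorbell registers are 64 bits (8 bytes) wide, so channel N's
	 * doorbell lives at CHDBOFF + 8 * N; MHI_DEV_WAKE_DB is the channel
	 * number reserved for the wake doorbell.
	 */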
	/* Setup wake db */
	mhi_cntrl->wake_db = base + val + (8 * MHI_DEV_WAKE_DB);
	mhi_cntrl->wake_set = false;

	/* Setup channel db address for each channel in tre_ring */
	mhi_chan = mhi_cntrl->mhi_chan;
	for (i = 0; i < mhi_cntrl->max_chan; i++, val += 8, mhi_chan++)
		mhi_chan->tre_ring.db_addr = base + val;

	/* Read event ring db offset */
	ret = mhi_read_reg(mhi_cntrl, base, ERDBOFF, &val);
	if (ret) {
		dev_err(dev, "Unable to read ERDBOFF register\n");
		return -EIO;
	}

	if (val >= mhi_cntrl->reg_len - (8 * mhi_cntrl->total_ev_rings)) {
		dev_err(dev, "ERDB offset: 0x%x is out of range: 0x%zx\n",
			val, mhi_cntrl->reg_len - (8 * mhi_cntrl->total_ev_rings));
		return -ERANGE;
	}

	/* Setup event db address for each ev_ring */
	mhi_event = mhi_cntrl->mhi_event;
	for (i = 0; i < mhi_cntrl->total_ev_rings; i++, val += 8, mhi_event++) {
		if (mhi_event->offload_ev)
			continue;

		mhi_event->ring.db_addr = base + val;
	}

	/* Setup DB register for primary CMD rings */
	mhi_cntrl->mhi_cmd[PRIMARY_CMD_RING].ring.db_addr = base + CRDB_LOWER;

	/* Write to MMIO registers */
	for (i = 0; reg_info[i].offset; i++)
		mhi_write_reg(mhi_cntrl, base, reg_info[i].offset,
			      reg_info[i].val);

	ret = mhi_write_reg_field(mhi_cntrl, base, MHICFG, MHICFG_NER_MASK,
				  mhi_cntrl->total_ev_rings);
	if (ret) {
		dev_err(dev, "Unable to write MHICFG register\n");
		return ret;
	}

	ret = mhi_write_reg_field(mhi_cntrl, base, MHICFG, MHICFG_NHWER_MASK,
				  mhi_cntrl->hw_ev_rings);
	if (ret) {
		dev_err(dev, "Unable to write MHICFG register\n");
		return ret;
	}

	return 0;
}

void mhi_deinit_chan_ctxt(struct mhi_controller *mhi_cntrl,
			  struct mhi_chan *mhi_chan)
{
	struct mhi_ring *buf_ring;
	struct mhi_ring *tre_ring;
	struct mhi_chan_ctxt *chan_ctxt;
	u32 tmp;

	buf_ring = &mhi_chan->buf_ring;
	tre_ring = &mhi_chan->tre_ring;
	chan_ctxt = &mhi_cntrl->mhi_ctxt->chan_ctxt[mhi_chan->chan];

	if (!chan_ctxt->rbase) /* Already uninitialized */
		return;

	dma_free_coherent(mhi_cntrl->cntrl_dev, tre_ring->alloc_size,
			  tre_ring->pre_aligned, tre_ring->dma_handle);
	vfree(buf_ring->base);

	buf_ring->base = tre_ring->base = NULL;
	tre_ring->ctxt_wp = NULL;
	chan_ctxt->rbase = 0;
	chan_ctxt->rlen = 0;
	chan_ctxt->rp = 0;
	chan_ctxt->wp = 0;

	tmp = le32_to_cpu(chan_ctxt->chcfg);
	tmp &= ~CHAN_CTX_CHSTATE_MASK;
	tmp |= FIELD_PREP(CHAN_CTX_CHSTATE_MASK, MHI_CH_STATE_DISABLED);
	chan_ctxt->chcfg = cpu_to_le32(tmp);

	/* Update to all cores */
	smp_wmb();
}

int mhi_init_chan_ctxt(struct mhi_controller *mhi_cntrl,
		       struct mhi_chan *mhi_chan)
{
	struct mhi_ring *buf_ring;
	struct mhi_ring *tre_ring;
	struct mhi_chan_ctxt *chan_ctxt;
	u32 tmp;
	int ret;

	buf_ring = &mhi_chan->buf_ring;
	tre_ring = &mhi_chan->tre_ring;
	tre_ring->el_size = sizeof(struct mhi_ring_element);
	tre_ring->len = tre_ring->el_size * tre_ring->elements;
	chan_ctxt = &mhi_cntrl->mhi_ctxt->chan_ctxt[mhi_chan->chan];
	ret = mhi_alloc_aligned_ring(mhi_cntrl, tre_ring, tre_ring->len);
	if (ret)
		return -ENOMEM;

	buf_ring->el_size = sizeof(struct mhi_buf_info);
	buf_ring->len = buf_ring->el_size * buf_ring->elements;
	buf_ring->base = vzalloc(buf_ring->len);

	if (!buf_ring->base) {
		dma_free_coherent(mhi_cntrl->cntrl_dev, tre_ring->alloc_size,
				  tre_ring->pre_aligned, tre_ring->dma_handle);
		return -ENOMEM;
	}

	tmp = le32_to_cpu(chan_ctxt->chcfg);
	tmp &= ~CHAN_CTX_CHSTATE_MASK;
	tmp |= FIELD_PREP(CHAN_CTX_CHSTATE_MASK, MHI_CH_STATE_ENABLED);
	chan_ctxt->chcfg = cpu_to_le32(tmp);

	chan_ctxt->rbase = cpu_to_le64(tre_ring->iommu_base);
	chan_ctxt->rp = chan_ctxt->wp = chan_ctxt->rbase;
	chan_ctxt->rlen = cpu_to_le64(tre_ring->len);
	tre_ring->ctxt_wp = &chan_ctxt->wp;

	tre_ring->rp = tre_ring->wp = tre_ring->base;
	buf_ring->rp = buf_ring->wp = buf_ring->base;
	mhi_chan->db_cfg.db_mode = 1;

	/* Update to all cores */
	smp_wmb();

	return 0;
}

static int parse_ev_cfg(struct mhi_controller *mhi_cntrl,
			const struct mhi_controller_config *config)
{
	struct mhi_event *mhi_event;
	const struct mhi_event_config *event_cfg;
	struct device *dev = mhi_cntrl->cntrl_dev;
	int i, num;

	num = config->num_events;
	mhi_cntrl->total_ev_rings = num;
	mhi_cntrl->mhi_event = kcalloc(num, sizeof(*mhi_cntrl->mhi_event),
				       GFP_KERNEL);
	if (!mhi_cntrl->mhi_event)
		return -ENOMEM;

	/* Populate event ring */
	mhi_event = mhi_cntrl->mhi_event;
	for (i = 0; i < num; i++) {
		event_cfg = &config->event_cfg[i];

		mhi_event->er_index = i;
		mhi_event->ring.elements = event_cfg->num_elements;
		mhi_event->intmod = event_cfg->irq_moderation_ms;
		mhi_event->irq = event_cfg->irq;

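		/* A channel value of U32_MAX marks a ring with no dedicated channel */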
		if (event_cfg->channel != U32_MAX) {
			/* This event ring has a dedicated channel */
			mhi_event->chan = event_cfg->channel;
			if (mhi_event->chan >= mhi_cntrl->max_chan) {
				dev_err(dev,
					"Event Ring channel not available\n");
				goto error_ev_cfg;
			}

			mhi_event->mhi_chan =
				&mhi_cntrl->mhi_chan[mhi_event->chan];
		}

		/* Priority is fixed to 1 for now */
		mhi_event->priority = 1;

		mhi_event->db_cfg.brstmode = event_cfg->mode;
		if (MHI_INVALID_BRSTMODE(mhi_event->db_cfg.brstmode))
			goto error_ev_cfg;

		if (mhi_event->db_cfg.brstmode == MHI_DB_BRST_ENABLE)
			mhi_event->db_cfg.process_db = mhi_db_brstmode;
		else
			mhi_event->db_cfg.process_db = mhi_db_brstmode_disable;

		mhi_event->data_type = event_cfg->data_type;

		switch (mhi_event->data_type) {
		case MHI_ER_DATA:
			mhi_event->process_event = mhi_process_data_event_ring;
			break;
		case MHI_ER_CTRL:
			mhi_event->process_event = mhi_process_ctrl_ev_ring;
			break;
		default:
			dev_err(dev, "Event Ring type not supported\n");
			goto error_ev_cfg;
		}

		mhi_event->hw_ring = event_cfg->hardware_event;
		if (mhi_event->hw_ring)
			mhi_cntrl->hw_ev_rings++;
		else
			mhi_cntrl->sw_ev_rings++;

		mhi_event->cl_manage = event_cfg->client_managed;
		mhi_event->offload_ev = event_cfg->offload_channel;
		mhi_event++;
	}

	return 0;

error_ev_cfg:

	kfree(mhi_cntrl->mhi_event);
	return -EINVAL;
}

static int parse_ch_cfg(struct mhi_controller *mhi_cntrl,
			const struct mhi_controller_config *config)
{
	const struct mhi_channel_config *ch_cfg;
	struct device *dev = mhi_cntrl->cntrl_dev;
	int i;
	u32 chan;

	mhi_cntrl->max_chan = config->max_channels;

	/*
	 * The allocation of MHI channels can exceed 32KB in some scenarios,
	 * so to avoid any possible memory allocation failures, vzalloc is
	 * used here
	 */
	mhi_cntrl->mhi_chan = vcalloc(mhi_cntrl->max_chan,
				      sizeof(*mhi_cntrl->mhi_chan));
	if (!mhi_cntrl->mhi_chan)
		return -ENOMEM;

	INIT_LIST_HEAD(&mhi_cntrl->lpm_chans);

	/* Populate channel configurations */
	for (i = 0; i < config->num_channels; i++) {
		struct mhi_chan *mhi_chan;

		ch_cfg = &config->ch_cfg[i];

		chan = ch_cfg->num;
		if (chan >= mhi_cntrl->max_chan) {
			dev_err(dev, "Channel %d not available\n", chan);
			goto error_chan_cfg;
		}

		mhi_chan = &mhi_cntrl->mhi_chan[chan];
		mhi_chan->name = ch_cfg->name;
		mhi_chan->chan = chan;

		mhi_chan->tre_ring.elements = ch_cfg->num_elements;
		if (!mhi_chan->tre_ring.elements)
			goto error_chan_cfg;

		/*
		 * For some channels, the local ring length should be bigger
		 * than the transfer ring length due to internal logical
		 * channels in the device, so that the host can queue more
		 * buffers than the transfer ring length allows. RSC channels,
		 * for example, need a larger local ring than transfer ring.
		 */
		mhi_chan->buf_ring.elements = ch_cfg->local_elements;
		if (!mhi_chan->buf_ring.elements)
			mhi_chan->buf_ring.elements = mhi_chan->tre_ring.elements;
		mhi_chan->er_index = ch_cfg->event_ring;
		mhi_chan->dir = ch_cfg->dir;

		/*
		 * For most channels, chtype is identical to the channel
		 * direction (the enum values mirror dma_data_direction), so
		 * if it is not defined, assign the channel direction to
		 * chtype
		 */
		mhi_chan->type = ch_cfg->type;
		if (!mhi_chan->type)
			mhi_chan->type = (enum mhi_ch_type)mhi_chan->dir;

		mhi_chan->ee_mask = ch_cfg->ee_mask;
		mhi_chan->db_cfg.pollcfg = ch_cfg->pollcfg;
		mhi_chan->lpm_notify = ch_cfg->lpm_notify;
		mhi_chan->offload_ch = ch_cfg->offload_channel;
		mhi_chan->db_cfg.reset_req = ch_cfg->doorbell_mode_switch;
		mhi_chan->pre_alloc = ch_cfg->auto_queue;
		mhi_chan->wake_capable = ch_cfg->wake_capable;

		/*
		 * If MHI host allocates buffers, then the channel direction
		 * should be DMA_FROM_DEVICE
		 */
		if (mhi_chan->pre_alloc && mhi_chan->dir != DMA_FROM_DEVICE) {
			dev_err(dev, "Invalid channel configuration\n");
			goto error_chan_cfg;
		}

		/*
		 * Bi-directional and directionless channels must be
		 * offload channels
		 */
		if ((mhi_chan->dir == DMA_BIDIRECTIONAL ||
		     mhi_chan->dir == DMA_NONE) && !mhi_chan->offload_ch) {
			dev_err(dev, "Invalid channel configuration\n");
			goto error_chan_cfg;
		}

		if (!mhi_chan->offload_ch) {
			mhi_chan->db_cfg.brstmode = ch_cfg->doorbell;
			if (MHI_INVALID_BRSTMODE(mhi_chan->db_cfg.brstmode)) {
				dev_err(dev, "Invalid Door bell mode\n");
				goto error_chan_cfg;
			}
		}

		if (mhi_chan->db_cfg.brstmode == MHI_DB_BRST_ENABLE)
			mhi_chan->db_cfg.process_db = mhi_db_brstmode;
		else
			mhi_chan->db_cfg.process_db = mhi_db_brstmode_disable;

		mhi_chan->configured = true;

		if (mhi_chan->lpm_notify)
			list_add_tail(&mhi_chan->node, &mhi_cntrl->lpm_chans);
	}

	return 0;

error_chan_cfg:
	vfree(mhi_cntrl->mhi_chan);

	return -EINVAL;
}

static int parse_config(struct mhi_controller *mhi_cntrl,
			const struct mhi_controller_config *config)
{
	int ret;

	/* Parse MHI channel configuration */
	ret = parse_ch_cfg(mhi_cntrl, config);
	if (ret)
		return ret;

	/* Parse MHI event configuration */
	ret = parse_ev_cfg(mhi_cntrl, config);
	if (ret)
		goto error_ev_cfg;

	mhi_cntrl->timeout_ms = config->timeout_ms;
	if (!mhi_cntrl->timeout_ms)
		mhi_cntrl->timeout_ms = MHI_TIMEOUT_MS;

	mhi_cntrl->bounce_buf = config->use_bounce_buf;
	mhi_cntrl->buffer_len = config->buf_len;
	if (!mhi_cntrl->buffer_len)
		mhi_cntrl->buffer_len = MHI_MAX_MTU;

	/* By default, host is allowed to ring DB in both M0 and M2 states */
	mhi_cntrl->db_access = MHI_PM_M0 | MHI_PM_M2;
	if (config->m2_no_db)
		mhi_cntrl->db_access &= ~MHI_PM_M2;

	return 0;

error_ev_cfg:
	vfree(mhi_cntrl->mhi_chan);

	return ret;
}

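/*
 * A minimal bring-up sketch for a controller driver. The "my_*" names and
 * the bus device shown are illustrative placeholders, not a complete or
 * mandatory set; the fields match the validation check below:
 *
 *	mhi_cntrl = mhi_alloc_controller();
 *	mhi_cntrl->cntrl_dev = &pdev->dev;
 *	mhi_cntrl->regs = mmio_base;
 *	mhi_cntrl->reg_len = mmio_len;
 *	mhi_cntrl->irq = irq_table;
 *	mhi_cntrl->nr_irqs = nr_irqs;
 *	mhi_cntrl->read_reg = my_read_reg;
 *	mhi_cntrl->write_reg = my_write_reg;
 *	mhi_cntrl->runtime_get = my_runtime_get;
 *	mhi_cntrl->runtime_put = my_runtime_put;
 *	mhi_cntrl->status_cb = my_status_cb;
 *	ret = mhi_register_controller(mhi_cntrl, &my_controller_config);
 */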
int mhi_register_controller(struct mhi_controller *mhi_cntrl,
			    const struct mhi_controller_config *config)
{
	struct mhi_event *mhi_event;
	struct mhi_chan *mhi_chan;
	struct mhi_cmd *mhi_cmd;
	struct mhi_device *mhi_dev;
	u32 soc_info;
	int ret, i;

	if (!mhi_cntrl || !mhi_cntrl->cntrl_dev || !mhi_cntrl->regs ||
	    !mhi_cntrl->runtime_get || !mhi_cntrl->runtime_put ||
	    !mhi_cntrl->status_cb || !mhi_cntrl->read_reg ||
	    !mhi_cntrl->write_reg || !mhi_cntrl->nr_irqs ||
	    !mhi_cntrl->irq || !mhi_cntrl->reg_len)
		return -EINVAL;

	ret = parse_config(mhi_cntrl, config);
	if (ret)
		return -EINVAL;

	mhi_cntrl->mhi_cmd = kcalloc(NR_OF_CMD_RINGS,
				     sizeof(*mhi_cntrl->mhi_cmd), GFP_KERNEL);
	if (!mhi_cntrl->mhi_cmd) {
		ret = -ENOMEM;
		goto err_free_event;
	}

	INIT_LIST_HEAD(&mhi_cntrl->transition_list);
	mutex_init(&mhi_cntrl->pm_mutex);
	rwlock_init(&mhi_cntrl->pm_lock);
	spin_lock_init(&mhi_cntrl->transition_lock);
	spin_lock_init(&mhi_cntrl->wlock);
	INIT_WORK(&mhi_cntrl->st_worker, mhi_pm_st_worker);
	init_waitqueue_head(&mhi_cntrl->state_event);

	mhi_cntrl->hiprio_wq = alloc_ordered_workqueue("mhi_hiprio_wq", WQ_HIGHPRI);
	if (!mhi_cntrl->hiprio_wq) {
		dev_err(mhi_cntrl->cntrl_dev, "Failed to allocate workqueue\n");
		ret = -ENOMEM;
		goto err_free_cmd;
	}

	mhi_cmd = mhi_cntrl->mhi_cmd;
	for (i = 0; i < NR_OF_CMD_RINGS; i++, mhi_cmd++)
		spin_lock_init(&mhi_cmd->lock);

	mhi_event = mhi_cntrl->mhi_event;
	for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
		/* Skip for offload events */
		if (mhi_event->offload_ev)
			continue;

		mhi_event->mhi_cntrl = mhi_cntrl;
		spin_lock_init(&mhi_event->lock);
		if (mhi_event->data_type == MHI_ER_CTRL)
			tasklet_init(&mhi_event->task, mhi_ctrl_ev_task,
				     (ulong)mhi_event);
		else
			tasklet_init(&mhi_event->task, mhi_ev_task,
				     (ulong)mhi_event);
	}

	mhi_chan = mhi_cntrl->mhi_chan;
	for (i = 0; i < mhi_cntrl->max_chan; i++, mhi_chan++) {
		mutex_init(&mhi_chan->mutex);
		init_completion(&mhi_chan->completion);
		rwlock_init(&mhi_chan->lock);

		/* used in setting bei field of TRE */
		mhi_event = &mhi_cntrl->mhi_event[mhi_chan->er_index];
		mhi_chan->intmod = mhi_event->intmod;
	}

	if (mhi_cntrl->bounce_buf) {
		mhi_cntrl->map_single = mhi_map_single_use_bb;
		mhi_cntrl->unmap_single = mhi_unmap_single_use_bb;
	} else {
		mhi_cntrl->map_single = mhi_map_single_no_bb;
		mhi_cntrl->unmap_single = mhi_unmap_single_no_bb;
	}

	/* Read the MHI device info */
	ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->regs,
			   SOC_HW_VERSION_OFFS, &soc_info);
	if (ret)
		goto err_destroy_wq;

	mhi_cntrl->family_number = FIELD_GET(SOC_HW_VERSION_FAM_NUM_BMSK, soc_info);
	mhi_cntrl->device_number = FIELD_GET(SOC_HW_VERSION_DEV_NUM_BMSK, soc_info);
	mhi_cntrl->major_version = FIELD_GET(SOC_HW_VERSION_MAJOR_VER_BMSK, soc_info);
	mhi_cntrl->minor_version = FIELD_GET(SOC_HW_VERSION_MINOR_VER_BMSK, soc_info);

	mhi_cntrl->index = ida_alloc(&mhi_controller_ida, GFP_KERNEL);
	if (mhi_cntrl->index < 0) {
		ret = mhi_cntrl->index;
		goto err_destroy_wq;
	}

	ret = mhi_init_irq_setup(mhi_cntrl);
	if (ret)
		goto err_ida_free;

	/* Register controller with MHI bus */
	mhi_dev = mhi_alloc_device(mhi_cntrl);
	if (IS_ERR(mhi_dev)) {
		dev_err(mhi_cntrl->cntrl_dev, "Failed to allocate MHI device\n");
		ret = PTR_ERR(mhi_dev);
		goto error_setup_irq;
	}

	mhi_dev->dev_type = MHI_DEVICE_CONTROLLER;
	mhi_dev->mhi_cntrl = mhi_cntrl;
	dev_set_name(&mhi_dev->dev, "mhi%d", mhi_cntrl->index);
	mhi_dev->name = dev_name(&mhi_dev->dev);

	/* Init wakeup source */
	device_init_wakeup(&mhi_dev->dev, true);

	ret = device_add(&mhi_dev->dev);
	if (ret)
		goto err_release_dev;

	mhi_cntrl->mhi_dev = mhi_dev;

	mhi_create_debugfs(mhi_cntrl);

	return 0;

err_release_dev:
	put_device(&mhi_dev->dev);
error_setup_irq:
	mhi_deinit_free_irq(mhi_cntrl);
err_ida_free:
	ida_free(&mhi_controller_ida, mhi_cntrl->index);
err_destroy_wq:
	destroy_workqueue(mhi_cntrl->hiprio_wq);
err_free_cmd:
	kfree(mhi_cntrl->mhi_cmd);
err_free_event:
	kfree(mhi_cntrl->mhi_event);
	vfree(mhi_cntrl->mhi_chan);

	return ret;
}
EXPORT_SYMBOL_GPL(mhi_register_controller);

void mhi_unregister_controller(struct mhi_controller *mhi_cntrl)
{
	struct mhi_device *mhi_dev = mhi_cntrl->mhi_dev;
	struct mhi_chan *mhi_chan = mhi_cntrl->mhi_chan;
	unsigned int i;

	mhi_deinit_free_irq(mhi_cntrl);
	mhi_destroy_debugfs(mhi_cntrl);

	destroy_workqueue(mhi_cntrl->hiprio_wq);
	kfree(mhi_cntrl->mhi_cmd);
	kfree(mhi_cntrl->mhi_event);

	/* Drop the references to MHI devices created for channels */
	for (i = 0; i < mhi_cntrl->max_chan; i++, mhi_chan++) {
		if (!mhi_chan->mhi_dev)
			continue;

		put_device(&mhi_chan->mhi_dev->dev);
	}
	vfree(mhi_cntrl->mhi_chan);

	device_del(&mhi_dev->dev);
	put_device(&mhi_dev->dev);

	ida_free(&mhi_controller_ida, mhi_cntrl->index);
}
EXPORT_SYMBOL_GPL(mhi_unregister_controller);

struct mhi_controller *mhi_alloc_controller(void)
{
	struct mhi_controller *mhi_cntrl;

	mhi_cntrl = kzalloc(sizeof(*mhi_cntrl), GFP_KERNEL);

	return mhi_cntrl;
}
EXPORT_SYMBOL_GPL(mhi_alloc_controller);

void mhi_free_controller(struct mhi_controller *mhi_cntrl)
{
	kfree(mhi_cntrl);
}
EXPORT_SYMBOL_GPL(mhi_free_controller);

int mhi_prepare_for_power_up(struct mhi_controller *mhi_cntrl)
{
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	u32 bhi_off, bhie_off;
	int ret;

	mutex_lock(&mhi_cntrl->pm_mutex);

	ret = mhi_init_dev_ctxt(mhi_cntrl);
	if (ret)
		goto error_dev_ctxt;

	ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->regs, BHIOFF, &bhi_off);
	if (ret) {
		dev_err(dev, "Error getting BHI offset\n");
		goto error_reg_offset;
	}

	if (bhi_off >= mhi_cntrl->reg_len) {
		dev_err(dev, "BHI offset: 0x%x is out of range: 0x%zx\n",
			bhi_off, mhi_cntrl->reg_len);
		ret = -ERANGE;
		goto error_reg_offset;
	}
	mhi_cntrl->bhi = mhi_cntrl->regs + bhi_off;

	if (mhi_cntrl->fbc_download || mhi_cntrl->rddm_size) {
		ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->regs, BHIEOFF,
				   &bhie_off);
		if (ret) {
			dev_err(dev, "Error getting BHIE offset\n");
			goto error_reg_offset;
		}

		if (bhie_off >= mhi_cntrl->reg_len) {
			dev_err(dev,
				"BHIe offset: 0x%x is out of range: 0x%zx\n",
				bhie_off, mhi_cntrl->reg_len);
			ret = -ERANGE;
			goto error_reg_offset;
		}
		mhi_cntrl->bhie = mhi_cntrl->regs + bhie_off;
	}

	if (mhi_cntrl->rddm_size) {
		/*
		 * This controller supports RDDM, so we need to manually clear
		 * BHIE RX registers since POR values are undefined.
		 */
		memset_io(mhi_cntrl->bhie + BHIE_RXVECADDR_LOW_OFFS,
			  0, BHIE_RXVECSTATUS_OFFS - BHIE_RXVECADDR_LOW_OFFS +
			  4);
		/*
		 * Allocate RDDM table for debugging purpose if specified
		 */
		mhi_alloc_bhie_table(mhi_cntrl, &mhi_cntrl->rddm_image,
				     mhi_cntrl->rddm_size);
		if (mhi_cntrl->rddm_image) {
			ret = mhi_rddm_prepare(mhi_cntrl,
					       mhi_cntrl->rddm_image);
			if (ret) {
				mhi_free_bhie_table(mhi_cntrl,
						    mhi_cntrl->rddm_image);
				goto error_reg_offset;
			}
		}
	}

	mutex_unlock(&mhi_cntrl->pm_mutex);

	return 0;

error_reg_offset:
	mhi_deinit_dev_ctxt(mhi_cntrl);

error_dev_ctxt:
	mutex_unlock(&mhi_cntrl->pm_mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(mhi_prepare_for_power_up);

void mhi_unprepare_after_power_down(struct mhi_controller *mhi_cntrl)
{
	if (mhi_cntrl->fbc_image) {
		mhi_free_bhie_table(mhi_cntrl, mhi_cntrl->fbc_image);
		mhi_cntrl->fbc_image = NULL;
	}

	if (mhi_cntrl->rddm_image) {
		mhi_free_bhie_table(mhi_cntrl, mhi_cntrl->rddm_image);
		mhi_cntrl->rddm_image = NULL;
	}

	mhi_cntrl->bhi = NULL;
	mhi_cntrl->bhie = NULL;

	mhi_deinit_dev_ctxt(mhi_cntrl);
}
EXPORT_SYMBOL_GPL(mhi_unprepare_after_power_down);

static void mhi_release_device(struct device *dev)
{
	struct mhi_device *mhi_dev = to_mhi_device(dev);

	/*
	 * We need to set the mhi_chan->mhi_dev to NULL here since the MHI
	 * devices for the channels will only get created if the mhi_dev
	 * associated with it is NULL. This scenario will happen during the
	 * controller suspend and resume.
	 */
	if (mhi_dev->ul_chan)
		mhi_dev->ul_chan->mhi_dev = NULL;

	if (mhi_dev->dl_chan)
		mhi_dev->dl_chan->mhi_dev = NULL;

	kfree(mhi_dev);
}

struct mhi_device *mhi_alloc_device(struct mhi_controller *mhi_cntrl)
{
	struct mhi_device *mhi_dev;
	struct device *dev;

	mhi_dev = kzalloc(sizeof(*mhi_dev), GFP_KERNEL);
	if (!mhi_dev)
		return ERR_PTR(-ENOMEM);

	dev = &mhi_dev->dev;
	device_initialize(dev);
	dev->bus = &mhi_bus_type;
	dev->release = mhi_release_device;

	if (mhi_cntrl->mhi_dev) {
		/* for MHI client devices, parent is the MHI controller device */
		dev->parent = &mhi_cntrl->mhi_dev->dev;
	} else {
		/* for MHI controller device, parent is the bus device (e.g. pci device) */
		dev->parent = mhi_cntrl->cntrl_dev;
	}

	mhi_dev->mhi_cntrl = mhi_cntrl;
	mhi_dev->dev_wake = 0;

	return mhi_dev;
}

static int mhi_driver_probe(struct device *dev)
{
	struct mhi_device *mhi_dev = to_mhi_device(dev);
	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
	struct device_driver *drv = dev->driver;
	struct mhi_driver *mhi_drv = to_mhi_driver(drv);
	struct mhi_event *mhi_event;
	struct mhi_chan *ul_chan = mhi_dev->ul_chan;
	struct mhi_chan *dl_chan = mhi_dev->dl_chan;
	int ret;

	/* Bring device out of LPM */
	ret = mhi_device_get_sync(mhi_dev);
	if (ret)
		return ret;

	ret = -EINVAL;

	if (ul_chan) {
		/*
		 * If channel supports LPM notifications then status_cb should
		 * be provided
		 */
		if (ul_chan->lpm_notify && !mhi_drv->status_cb)
			goto exit_probe;

		/* For non-offload channels, xfer_cb should be provided */
		if (!ul_chan->offload_ch && !mhi_drv->ul_xfer_cb)
			goto exit_probe;

		ul_chan->xfer_cb = mhi_drv->ul_xfer_cb;
	}

	ret = -EINVAL;
	if (dl_chan) {
		/*
		 * If channel supports LPM notifications then status_cb should
		 * be provided
		 */
		if (dl_chan->lpm_notify && !mhi_drv->status_cb)
			goto exit_probe;

		/* For non-offload channels, xfer_cb should be provided */
		if (!dl_chan->offload_ch && !mhi_drv->dl_xfer_cb)
			goto exit_probe;

		mhi_event = &mhi_cntrl->mhi_event[dl_chan->er_index];

		/*
		 * If the channel event ring is managed by client, then
		 * status_cb must be provided so that the framework can
		 * notify pending data
		 */
		if (mhi_event->cl_manage && !mhi_drv->status_cb)
			goto exit_probe;

		dl_chan->xfer_cb = mhi_drv->dl_xfer_cb;
	}

	/* Call the user provided probe function */
	ret = mhi_drv->probe(mhi_dev, mhi_dev->id);
	if (ret)
		goto exit_probe;

	mhi_device_put(mhi_dev);

	return ret;

exit_probe:
	mhi_unprepare_from_transfer(mhi_dev);

	mhi_device_put(mhi_dev);

	return ret;
}

static int mhi_driver_remove(struct device *dev)
{
	struct mhi_device *mhi_dev = to_mhi_device(dev);
	struct mhi_driver *mhi_drv = to_mhi_driver(dev->driver);
	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
	struct mhi_chan *mhi_chan;
	enum mhi_ch_state ch_state[] = {
		MHI_CH_STATE_DISABLED,
		MHI_CH_STATE_DISABLED
	};
	int dir;

	/* Skip if it is a controller device */
	if (mhi_dev->dev_type == MHI_DEVICE_CONTROLLER)
		return 0;

	/* Reset both channels */
	for (dir = 0; dir < 2; dir++) {
		mhi_chan = dir ? mhi_dev->ul_chan : mhi_dev->dl_chan;

		if (!mhi_chan)
			continue;

		/* Wake all threads waiting for completion */
		write_lock_irq(&mhi_chan->lock);
		mhi_chan->ccs = MHI_EV_CC_INVALID;
		complete_all(&mhi_chan->completion);
		write_unlock_irq(&mhi_chan->lock);

		/* Set the channel state to disabled */
		mutex_lock(&mhi_chan->mutex);
		write_lock_irq(&mhi_chan->lock);
		ch_state[dir] = mhi_chan->ch_state;
		mhi_chan->ch_state = MHI_CH_STATE_SUSPENDED;
		write_unlock_irq(&mhi_chan->lock);

		/* Reset the non-offload channel */
		if (!mhi_chan->offload_ch)
			mhi_reset_chan(mhi_cntrl, mhi_chan);

		mutex_unlock(&mhi_chan->mutex);
	}

	mhi_drv->remove(mhi_dev);

	/* De-init channel if it was enabled */
	for (dir = 0; dir < 2; dir++) {
		mhi_chan = dir ? mhi_dev->ul_chan : mhi_dev->dl_chan;

		if (!mhi_chan)
			continue;

		mutex_lock(&mhi_chan->mutex);

		if ((ch_state[dir] == MHI_CH_STATE_ENABLED ||
		     ch_state[dir] == MHI_CH_STATE_STOP) &&
		    !mhi_chan->offload_ch)
			mhi_deinit_chan_ctxt(mhi_cntrl, mhi_chan);

		mhi_chan->ch_state = MHI_CH_STATE_DISABLED;

		mutex_unlock(&mhi_chan->mutex);
	}

	while (mhi_dev->dev_wake)
		mhi_device_put(mhi_dev);

	return 0;
}

int __mhi_driver_register(struct mhi_driver *mhi_drv, struct module *owner)
{
	struct device_driver *driver = &mhi_drv->driver;

	if (!mhi_drv->probe || !mhi_drv->remove)
		return -EINVAL;

	driver->bus = &mhi_bus_type;
	driver->owner = owner;
	driver->probe = mhi_driver_probe;
	driver->remove = mhi_driver_remove;

	return driver_register(driver);
}
EXPORT_SYMBOL_GPL(__mhi_driver_register);

void mhi_driver_unregister(struct mhi_driver *mhi_drv)
{
	driver_unregister(&mhi_drv->driver);
}
EXPORT_SYMBOL_GPL(mhi_driver_unregister);

static int mhi_uevent(const struct device *dev, struct kobj_uevent_env *env)
{
	const struct mhi_device *mhi_dev = to_mhi_device(dev);

	return add_uevent_var(env, "MODALIAS=" MHI_DEVICE_MODALIAS_FMT,
			      mhi_dev->name);
}

static int mhi_match(struct device *dev, struct device_driver *drv)
{
	struct mhi_device *mhi_dev = to_mhi_device(dev);
	struct mhi_driver *mhi_drv = to_mhi_driver(drv);
	const struct mhi_device_id *id;

	/*
	 * If the device is a controller type then there is no client driver
	 * associated with it
	 */
	if (mhi_dev->dev_type == MHI_DEVICE_CONTROLLER)
		return 0;

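	/* The id_table is terminated by an entry with an empty channel name */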
	for (id = mhi_drv->id_table; id->chan[0]; id++)
		if (!strcmp(mhi_dev->name, id->chan)) {
			mhi_dev->id = id;
			return 1;
		}

	return 0;
}

struct bus_type mhi_bus_type = {
	.name = "mhi",
	.dev_name = "mhi",
	.match = mhi_match,
	.uevent = mhi_uevent,
	.dev_groups = mhi_dev_groups,
};

static int __init mhi_init(void)
{
	mhi_debugfs_init();
	return bus_register(&mhi_bus_type);
}

static void __exit mhi_exit(void)
{
	mhi_debugfs_exit();
	bus_unregister(&mhi_bus_type);
}

postcore_initcall(mhi_init);
module_exit(mhi_exit);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Modem Host Interface");
