// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
 *
 */

#include <linux/device.h>
#include <linux/dma-direction.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/mhi.h>
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include "internal.h"

int __must_check mhi_read_reg(struct mhi_controller *mhi_cntrl,
			      void __iomem *base, u32 offset, u32 *out)
{
	return mhi_cntrl->read_reg(mhi_cntrl, base + offset, out);
}

int __must_check mhi_read_reg_field(struct mhi_controller *mhi_cntrl,
				    void __iomem *base, u32 offset,
				    u32 mask, u32 shift, u32 *out)
{
	u32 tmp;
	int ret;

	ret = mhi_read_reg(mhi_cntrl, base, offset, &tmp);
	if (ret)
		return ret;

	*out = (tmp & mask) >> shift;

	return 0;
}

void mhi_write_reg(struct mhi_controller *mhi_cntrl, void __iomem *base,
		   u32 offset, u32 val)
{
	mhi_cntrl->write_reg(mhi_cntrl, base + offset, val);
}

void mhi_write_reg_field(struct mhi_controller *mhi_cntrl, void __iomem *base,
			 u32 offset, u32 mask, u32 shift, u32 val)
{
	int ret;
	u32 tmp;

	ret = mhi_read_reg(mhi_cntrl, base, offset, &tmp);
	if (ret)
		return;

	tmp &= ~mask;
	tmp |= (val << shift);
	mhi_write_reg(mhi_cntrl, base, offset, tmp);
}

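/*
 * Ring a doorbell by splitting the 64-bit value into two 32-bit register
 * writes: the upper half goes to db_addr + 4 first, then the lower half
 * to db_addr + 0.
 */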
void mhi_write_db(struct mhi_controller *mhi_cntrl, void __iomem *db_addr,
		  dma_addr_t db_val)
{
	mhi_write_reg(mhi_cntrl, db_addr, 4, upper_32_bits(db_val));
	mhi_write_reg(mhi_cntrl, db_addr, 0, lower_32_bits(db_val));
}

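/*
 * Burst mode: only ring the doorbell while db_mode is set and clear the flag
 * afterwards, so subsequent updates skip the register write until the device
 * re-enables doorbell mode (see the OOB/DB_MODE handling in parse_xfer_event()).
 */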
void mhi_db_brstmode(struct mhi_controller *mhi_cntrl,
		     struct db_cfg *db_cfg,
		     void __iomem *db_addr,
		     dma_addr_t db_val)
{
	if (db_cfg->db_mode) {
		db_cfg->db_val = db_val;
		mhi_write_db(mhi_cntrl, db_addr, db_val);
		db_cfg->db_mode = 0;
	}
}

void mhi_db_brstmode_disable(struct mhi_controller *mhi_cntrl,
			     struct db_cfg *db_cfg,
			     void __iomem *db_addr,
			     dma_addr_t db_val)
{
	db_cfg->db_val = db_val;
	mhi_write_db(mhi_cntrl, db_addr, db_val);
}

void mhi_ring_er_db(struct mhi_event *mhi_event)
{
	struct mhi_ring *ring = &mhi_event->ring;

	mhi_event->db_cfg.process_db(mhi_event->mhi_cntrl, &mhi_event->db_cfg,
				     ring->db_addr, *ring->ctxt_wp);
}

void mhi_ring_cmd_db(struct mhi_controller *mhi_cntrl, struct mhi_cmd *mhi_cmd)
{
	dma_addr_t db;
	struct mhi_ring *ring = &mhi_cmd->ring;

	db = ring->iommu_base + (ring->wp - ring->base);
	*ring->ctxt_wp = db;
	mhi_write_db(mhi_cntrl, ring->db_addr, db);
}

void mhi_ring_chan_db(struct mhi_controller *mhi_cntrl,
		      struct mhi_chan *mhi_chan)
{
	struct mhi_ring *ring = &mhi_chan->tre_ring;
	dma_addr_t db;

	db = ring->iommu_base + (ring->wp - ring->base);
	*ring->ctxt_wp = db;
	mhi_chan->db_cfg.process_db(mhi_cntrl, &mhi_chan->db_cfg,
				    ring->db_addr, db);
}

enum mhi_ee_type mhi_get_exec_env(struct mhi_controller *mhi_cntrl)
{
	u32 exec;
	int ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_EXECENV, &exec);

	return (ret) ? MHI_EE_MAX : exec;
}

enum mhi_state mhi_get_mhi_state(struct mhi_controller *mhi_cntrl)
{
	u32 state;
	int ret = mhi_read_reg_field(mhi_cntrl, mhi_cntrl->regs, MHISTATUS,
				     MHISTATUS_MHISTATE_MASK,
				     MHISTATUS_MHISTATE_SHIFT, &state);
	return ret ? MHI_STATE_MAX : state;
}

int mhi_map_single_no_bb(struct mhi_controller *mhi_cntrl,
			 struct mhi_buf_info *buf_info)
{
	buf_info->p_addr = dma_map_single(mhi_cntrl->cntrl_dev,
					  buf_info->v_addr, buf_info->len,
					  buf_info->dir);
	if (dma_mapping_error(mhi_cntrl->cntrl_dev, buf_info->p_addr))
		return -ENOMEM;

	return 0;
}

int mhi_map_single_use_bb(struct mhi_controller *mhi_cntrl,
			  struct mhi_buf_info *buf_info)
{
	void *buf = mhi_alloc_coherent(mhi_cntrl, buf_info->len,
				       &buf_info->p_addr, GFP_ATOMIC);

	if (!buf)
		return -ENOMEM;

	if (buf_info->dir == DMA_TO_DEVICE)
		memcpy(buf, buf_info->v_addr, buf_info->len);

	buf_info->bb_addr = buf;

	return 0;
}

void mhi_unmap_single_no_bb(struct mhi_controller *mhi_cntrl,
			    struct mhi_buf_info *buf_info)
{
	dma_unmap_single(mhi_cntrl->cntrl_dev, buf_info->p_addr, buf_info->len,
			 buf_info->dir);
}

void mhi_unmap_single_use_bb(struct mhi_controller *mhi_cntrl,
			     struct mhi_buf_info *buf_info)
{
	if (buf_info->dir == DMA_FROM_DEVICE)
		memcpy(buf_info->v_addr, buf_info->bb_addr, buf_info->len);

	mhi_free_coherent(mhi_cntrl, buf_info->len, buf_info->bb_addr,
			  buf_info->p_addr);
}

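/*
 * Number of free elements between the write and read pointers. One element
 * is always kept unused so that a full ring can be distinguished from an
 * empty one (hence the "- 1" below).
 */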
static int get_nr_avail_ring_elements(struct mhi_controller *mhi_cntrl,
				      struct mhi_ring *ring)
{
	int nr_el;

	if (ring->wp < ring->rp) {
		nr_el = ((ring->rp - ring->wp) / ring->el_size) - 1;
	} else {
		nr_el = (ring->rp - ring->base) / ring->el_size;
		nr_el += ((ring->base + ring->len - ring->wp) /
			  ring->el_size) - 1;
	}

	return nr_el;
}

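/* Translate a device-visible (IOMMU) ring address into a host virtual address */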
static void *mhi_to_virtual(struct mhi_ring *ring, dma_addr_t addr)
{
	return (addr - ring->iommu_base) + ring->base;
}

static void mhi_add_ring_element(struct mhi_controller *mhi_cntrl,
				 struct mhi_ring *ring)
{
	ring->wp += ring->el_size;
	if (ring->wp >= (ring->base + ring->len))
		ring->wp = ring->base;
	/* smp update */
	smp_wmb();
}

static void mhi_del_ring_element(struct mhi_controller *mhi_cntrl,
				 struct mhi_ring *ring)
{
	ring->rp += ring->el_size;
	if (ring->rp >= (ring->base + ring->len))
		ring->rp = ring->base;
	/* smp update */
	smp_wmb();
}

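/* Check that a device-supplied ring pointer falls within the ring's memory */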
static bool is_valid_ring_ptr(struct mhi_ring *ring, dma_addr_t addr)
{
	return addr >= ring->iommu_base && addr < ring->iommu_base + ring->len;
}

int mhi_destroy_device(struct device *dev, void *data)
{
	struct mhi_chan *ul_chan, *dl_chan;
	struct mhi_device *mhi_dev;
	struct mhi_controller *mhi_cntrl;
	enum mhi_ee_type ee = MHI_EE_MAX;

	if (dev->bus != &mhi_bus_type)
		return 0;

	mhi_dev = to_mhi_device(dev);
	mhi_cntrl = mhi_dev->mhi_cntrl;

	/* Only destroy virtual devices that are attached to the bus */
	if (mhi_dev->dev_type == MHI_DEVICE_CONTROLLER)
		return 0;

	ul_chan = mhi_dev->ul_chan;
	dl_chan = mhi_dev->dl_chan;

	/*
	 * If an execution environment is specified, remove only those devices
	 * that were started in it, based on the channels' ee_mask, as we move
	 * on to a different execution environment.
	 */
	if (data)
		ee = *(enum mhi_ee_type *)data;

	/*
	 * For the suspend and resume case, this function will get called
	 * without mhi_unregister_controller(). Hence, we need to drop the
	 * references to mhi_dev created for ul and dl channels. We can
	 * be sure that there will be no instances of mhi_dev left after
	 * this.
	 */
	if (ul_chan) {
		if (ee != MHI_EE_MAX && !(ul_chan->ee_mask & BIT(ee)))
			return 0;

		put_device(&ul_chan->mhi_dev->dev);
	}

	if (dl_chan) {
		if (ee != MHI_EE_MAX && !(dl_chan->ee_mask & BIT(ee)))
			return 0;

		put_device(&dl_chan->mhi_dev->dev);
	}

	dev_dbg(&mhi_cntrl->mhi_dev->dev, "destroy device for chan:%s\n",
		mhi_dev->name);

	/* Notify the client and remove the device from MHI bus */
	device_del(dev);
	put_device(dev);

	return 0;
}

void mhi_notify(struct mhi_device *mhi_dev, enum mhi_callback cb_reason)
{
	struct mhi_driver *mhi_drv;

	if (!mhi_dev->dev.driver)
		return;

	mhi_drv = to_mhi_driver(mhi_dev->dev.driver);

	if (mhi_drv->status_cb)
		mhi_drv->status_cb(mhi_dev, cb_reason);
}
EXPORT_SYMBOL_GPL(mhi_notify);

/* Bind MHI channels to MHI devices */
void mhi_create_devices(struct mhi_controller *mhi_cntrl)
{
	struct mhi_chan *mhi_chan;
	struct mhi_device *mhi_dev;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	int i, ret;

	mhi_chan = mhi_cntrl->mhi_chan;
	for (i = 0; i < mhi_cntrl->max_chan; i++, mhi_chan++) {
		if (!mhi_chan->configured || mhi_chan->mhi_dev ||
		    !(mhi_chan->ee_mask & BIT(mhi_cntrl->ee)))
			continue;
		mhi_dev = mhi_alloc_device(mhi_cntrl);
		if (IS_ERR(mhi_dev))
			return;

		mhi_dev->dev_type = MHI_DEVICE_XFER;
		switch (mhi_chan->dir) {
		case DMA_TO_DEVICE:
			mhi_dev->ul_chan = mhi_chan;
			mhi_dev->ul_chan_id = mhi_chan->chan;
			break;
		case DMA_FROM_DEVICE:
			/* We use dl_chan as offload channels */
			mhi_dev->dl_chan = mhi_chan;
			mhi_dev->dl_chan_id = mhi_chan->chan;
			break;
		default:
			dev_err(dev, "Direction not supported\n");
			put_device(&mhi_dev->dev);
			return;
		}

		get_device(&mhi_dev->dev);
		mhi_chan->mhi_dev = mhi_dev;

		/* Check next channel if it matches */
		if ((i + 1) < mhi_cntrl->max_chan && mhi_chan[1].configured) {
			if (!strcmp(mhi_chan[1].name, mhi_chan->name)) {
				i++;
				mhi_chan++;
				if (mhi_chan->dir == DMA_TO_DEVICE) {
					mhi_dev->ul_chan = mhi_chan;
					mhi_dev->ul_chan_id = mhi_chan->chan;
				} else {
					mhi_dev->dl_chan = mhi_chan;
					mhi_dev->dl_chan_id = mhi_chan->chan;
				}
				get_device(&mhi_dev->dev);
				mhi_chan->mhi_dev = mhi_dev;
			}
		}

		/* Channel name is the same for both UL and DL */
		mhi_dev->name = mhi_chan->name;
		dev_set_name(&mhi_dev->dev, "%s_%s",
			     dev_name(mhi_cntrl->cntrl_dev),
			     mhi_dev->name);

		/* Init wakeup source if available */
		if (mhi_dev->dl_chan && mhi_dev->dl_chan->wake_capable)
			device_init_wakeup(&mhi_dev->dev, true);

		ret = device_add(&mhi_dev->dev);
		if (ret)
			put_device(&mhi_dev->dev);
	}
}

irqreturn_t mhi_irq_handler(int irq_number, void *dev)
{
	struct mhi_event *mhi_event = dev;
	struct mhi_controller *mhi_cntrl = mhi_event->mhi_cntrl;
	struct mhi_event_ctxt *er_ctxt =
		&mhi_cntrl->mhi_ctxt->er_ctxt[mhi_event->er_index];
	struct mhi_ring *ev_ring = &mhi_event->ring;
	dma_addr_t ptr = er_ctxt->rp;
	void *dev_rp;

	if (!is_valid_ring_ptr(ev_ring, ptr)) {
		dev_err(&mhi_cntrl->mhi_dev->dev,
			"Event ring rp points outside of the event ring\n");
		return IRQ_HANDLED;
	}

	dev_rp = mhi_to_virtual(ev_ring, ptr);

	/* Only proceed if event ring has pending events */
	if (ev_ring->rp == dev_rp)
		return IRQ_HANDLED;

	/* For client managed event ring, notify pending data */
	if (mhi_event->cl_manage) {
		struct mhi_chan *mhi_chan = mhi_event->mhi_chan;
		struct mhi_device *mhi_dev = mhi_chan->mhi_dev;

		if (mhi_dev)
			mhi_notify(mhi_dev, MHI_CB_PENDING_DATA);
	} else {
		tasklet_schedule(&mhi_event->task);
	}

	return IRQ_HANDLED;
}

irqreturn_t mhi_intvec_threaded_handler(int irq_number, void *priv)
{
	struct mhi_controller *mhi_cntrl = priv;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	enum mhi_state state = MHI_STATE_MAX;
	enum mhi_pm_state pm_state = 0;
	enum mhi_ee_type ee = 0;

	write_lock_irq(&mhi_cntrl->pm_lock);
	if (!MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) {
		write_unlock_irq(&mhi_cntrl->pm_lock);
		goto exit_intvec;
	}

	state = mhi_get_mhi_state(mhi_cntrl);
	ee = mhi_cntrl->ee;
	mhi_cntrl->ee = mhi_get_exec_env(mhi_cntrl);
	dev_dbg(dev, "local ee:%s device ee:%s dev_state:%s\n",
		TO_MHI_EXEC_STR(mhi_cntrl->ee), TO_MHI_EXEC_STR(ee),
		TO_MHI_STATE_STR(state));

	if (state == MHI_STATE_SYS_ERR) {
		dev_dbg(dev, "System error detected\n");
		pm_state = mhi_tryset_pm_state(mhi_cntrl,
					       MHI_PM_SYS_ERR_DETECT);
	}
	write_unlock_irq(&mhi_cntrl->pm_lock);

	/* If the device supports RDDM, don't bother processing SYS error */
	if (mhi_cntrl->rddm_image) {
		if (mhi_cntrl->ee == MHI_EE_RDDM && mhi_cntrl->ee != ee) {
			mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_EE_RDDM);
			wake_up_all(&mhi_cntrl->state_event);
		}
		goto exit_intvec;
	}

	if (pm_state == MHI_PM_SYS_ERR_DETECT) {
		wake_up_all(&mhi_cntrl->state_event);

		/* For fatal errors, we let the controller decide the next step */
		if (MHI_IN_PBL(ee))
			mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_FATAL_ERROR);
		else
			mhi_pm_sys_err_handler(mhi_cntrl);
	}

exit_intvec:

	return IRQ_HANDLED;
}

irqreturn_t mhi_intvec_handler(int irq_number, void *dev)
{
	struct mhi_controller *mhi_cntrl = dev;

	/* Wake up events waiting for state change */
	wake_up_all(&mhi_cntrl->state_event);

	return IRQ_WAKE_THREAD;
}

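/*
 * Return a consumed event ring element to the device: advance both the local
 * read/write pointers and the write pointer in the shared event ring context,
 * wrapping back to the ring base when the end is reached.
 */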
static void mhi_recycle_ev_ring_element(struct mhi_controller *mhi_cntrl,
					struct mhi_ring *ring)
{
	dma_addr_t ctxt_wp;

	/* Update the WP */
	ring->wp += ring->el_size;
	ctxt_wp = *ring->ctxt_wp + ring->el_size;

	if (ring->wp >= (ring->base + ring->len)) {
		ring->wp = ring->base;
		ctxt_wp = ring->iommu_base;
	}

	*ring->ctxt_wp = ctxt_wp;

	/* Update the RP */
	ring->rp += ring->el_size;
	if (ring->rp >= (ring->base + ring->len))
		ring->rp = ring->base;

	/* Update to all cores */
	smp_wmb();
}

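/*
 * Process a single transfer completion event for a channel: walk the TRE ring
 * up to the element the event points at, unmap buffers, invoke the client's
 * xfer_cb() and, for OOB/DB_MODE completions, re-enable burst mode and ring
 * the channel doorbell if work is still pending.
 */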
static int parse_xfer_event(struct mhi_controller *mhi_cntrl,
			    struct mhi_tre *event,
			    struct mhi_chan *mhi_chan)
{
	struct mhi_ring *buf_ring, *tre_ring;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	struct mhi_result result;
	unsigned long flags = 0;
	u32 ev_code;

	ev_code = MHI_TRE_GET_EV_CODE(event);
	buf_ring = &mhi_chan->buf_ring;
	tre_ring = &mhi_chan->tre_ring;

	result.transaction_status = (ev_code == MHI_EV_CC_OVERFLOW) ?
		-EOVERFLOW : 0;

	/*
	 * If it's a DB event, we need to grab the lock with preemption
	 * disabled and as a writer because we have to update the DB register
	 * and there is a chance that another thread is doing the same.
	 */
	if (ev_code >= MHI_EV_CC_OOB)
		write_lock_irqsave(&mhi_chan->lock, flags);
	else
		read_lock_bh(&mhi_chan->lock);

	if (mhi_chan->ch_state != MHI_CH_STATE_ENABLED)
		goto end_process_tx_event;

	switch (ev_code) {
	case MHI_EV_CC_OVERFLOW:
	case MHI_EV_CC_EOB:
	case MHI_EV_CC_EOT:
	{
		dma_addr_t ptr = MHI_TRE_GET_EV_PTR(event);
		struct mhi_tre *local_rp, *ev_tre;
		void *dev_rp;
		struct mhi_buf_info *buf_info;
		u16 xfer_len;

		if (!is_valid_ring_ptr(tre_ring, ptr)) {
			dev_err(&mhi_cntrl->mhi_dev->dev,
				"Event element points outside of the tre ring\n");
			break;
		}
		/* Get the TRB this event points to */
		ev_tre = mhi_to_virtual(tre_ring, ptr);

		dev_rp = ev_tre + 1;
		if (dev_rp >= (tre_ring->base + tre_ring->len))
			dev_rp = tre_ring->base;

		result.dir = mhi_chan->dir;

		local_rp = tre_ring->rp;
		while (local_rp != dev_rp) {
			buf_info = buf_ring->rp;
			/* If it's the last TRE, get length from the event */
			if (local_rp == ev_tre)
				xfer_len = MHI_TRE_GET_EV_LEN(event);
			else
				xfer_len = buf_info->len;

			/* Unmap if it's not pre-mapped by client */
			if (likely(!buf_info->pre_mapped))
				mhi_cntrl->unmap_single(mhi_cntrl, buf_info);

			result.buf_addr = buf_info->cb_buf;

			/* truncate to buf len if xfer_len is larger */
			result.bytes_xferd =
				min_t(u16, xfer_len, buf_info->len);
			mhi_del_ring_element(mhi_cntrl, buf_ring);
			mhi_del_ring_element(mhi_cntrl, tre_ring);
			local_rp = tre_ring->rp;

			read_unlock_bh(&mhi_chan->lock);

			/* notify client */
			mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result);

			if (mhi_chan->dir == DMA_TO_DEVICE)
				atomic_dec(&mhi_cntrl->pending_pkts);

			/*
			 * Recycle the buffer if it is pre-allocated; if there
			 * is an error, not much we can do apart from dropping
			 * the packet
			 */
			if (mhi_chan->pre_alloc) {
				if (mhi_queue_buf(mhi_chan->mhi_dev,
						  mhi_chan->dir,
						  buf_info->cb_buf,
						  buf_info->len, MHI_EOT)) {
					dev_err(dev,
						"Error recycling buffer for chan:%d\n",
						mhi_chan->chan);
					kfree(buf_info->cb_buf);
				}
			}

			read_lock_bh(&mhi_chan->lock);
		}
		break;
	} /* CC_EOT */
	case MHI_EV_CC_OOB:
	case MHI_EV_CC_DB_MODE:
	{
		unsigned long flags;

		mhi_chan->db_cfg.db_mode = 1;
		read_lock_irqsave(&mhi_cntrl->pm_lock, flags);
		if (tre_ring->wp != tre_ring->rp &&
		    MHI_DB_ACCESS_VALID(mhi_cntrl)) {
			mhi_ring_chan_db(mhi_cntrl, mhi_chan);
		}
		read_unlock_irqrestore(&mhi_cntrl->pm_lock, flags);
		break;
	}
	case MHI_EV_CC_BAD_TRE:
	default:
		dev_err(dev, "Unknown event 0x%x\n", ev_code);
		break;
	} /* switch(MHI_EV_READ_CODE(EV_TRB_CODE,event)) */

end_process_tx_event:
	if (ev_code >= MHI_EV_CC_OOB)
		write_unlock_irqrestore(&mhi_chan->lock, flags);
	else
		read_unlock_bh(&mhi_chan->lock);

	return 0;
}

static int parse_rsc_event(struct mhi_controller *mhi_cntrl,
			   struct mhi_tre *event,
			   struct mhi_chan *mhi_chan)
{
	struct mhi_ring *buf_ring, *tre_ring;
	struct mhi_buf_info *buf_info;
	struct mhi_result result;
	int ev_code;
	u32 cookie; /* offset to local descriptor */
	u16 xfer_len;

	buf_ring = &mhi_chan->buf_ring;
	tre_ring = &mhi_chan->tre_ring;

	ev_code = MHI_TRE_GET_EV_CODE(event);
	cookie = MHI_TRE_GET_EV_COOKIE(event);
	xfer_len = MHI_TRE_GET_EV_LEN(event);

	/* Received out of bound cookie */
	WARN_ON(cookie >= buf_ring->len);

	buf_info = buf_ring->base + cookie;

	result.transaction_status = (ev_code == MHI_EV_CC_OVERFLOW) ?
		-EOVERFLOW : 0;

	/* truncate to buf len if xfer_len is larger */
	result.bytes_xferd = min_t(u16, xfer_len, buf_info->len);
	result.buf_addr = buf_info->cb_buf;
	result.dir = mhi_chan->dir;

	read_lock_bh(&mhi_chan->lock);

	if (mhi_chan->ch_state != MHI_CH_STATE_ENABLED)
		goto end_process_rsc_event;

	WARN_ON(!buf_info->used);

	/* notify the client */
	mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result);

	/*
	 * Note: We're arbitrarily incrementing RP even though the completion
	 * packet we processed might not be for the same descriptor. We can do
	 * this because the device is guaranteed to cache descriptors in the
	 * order it receives them, so even though the completion event is for
	 * a different descriptor we can re-use all descriptors in between.
	 * Example:
	 * Transfer Ring has descriptors: A, B, C, D
	 * The last descriptor the host queued is D (WP) and the first
	 * descriptor the host queued is A (RP).
	 * The completion event we just serviced is for descriptor C.
	 * Then we can safely queue descriptors to replace A, B, and C
	 * even though the host did not receive completions for them.
	 */
	mhi_del_ring_element(mhi_cntrl, tre_ring);
	buf_info->used = false;

end_process_rsc_event:
	read_unlock_bh(&mhi_chan->lock);

	return 0;
}

static void mhi_process_cmd_completion(struct mhi_controller *mhi_cntrl,
				       struct mhi_tre *tre)
{
	dma_addr_t ptr = MHI_TRE_GET_EV_PTR(tre);
	struct mhi_cmd *cmd_ring = &mhi_cntrl->mhi_cmd[PRIMARY_CMD_RING];
	struct mhi_ring *mhi_ring = &cmd_ring->ring;
	struct mhi_tre *cmd_pkt;
	struct mhi_chan *mhi_chan;
	u32 chan;

	if (!is_valid_ring_ptr(mhi_ring, ptr)) {
		dev_err(&mhi_cntrl->mhi_dev->dev,
			"Event element points outside of the cmd ring\n");
		return;
	}

	cmd_pkt = mhi_to_virtual(mhi_ring, ptr);

	chan = MHI_TRE_GET_CMD_CHID(cmd_pkt);

	if (chan < mhi_cntrl->max_chan &&
	    mhi_cntrl->mhi_chan[chan].configured) {
		mhi_chan = &mhi_cntrl->mhi_chan[chan];
		write_lock_bh(&mhi_chan->lock);
		mhi_chan->ccs = MHI_TRE_GET_EV_CODE(tre);
		complete(&mhi_chan->completion);
		write_unlock_bh(&mhi_chan->lock);
	} else {
		dev_err(&mhi_cntrl->mhi_dev->dev,
			"Completion packet for invalid channel ID: %d\n", chan);
	}

	mhi_del_ring_element(mhi_cntrl, mhi_ring);
}

int mhi_process_ctrl_ev_ring(struct mhi_controller *mhi_cntrl,
			     struct mhi_event *mhi_event,
			     u32 event_quota)
{
	struct mhi_tre *dev_rp, *local_rp;
	struct mhi_ring *ev_ring = &mhi_event->ring;
	struct mhi_event_ctxt *er_ctxt =
		&mhi_cntrl->mhi_ctxt->er_ctxt[mhi_event->er_index];
	struct mhi_chan *mhi_chan;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	u32 chan;
	int count = 0;
	dma_addr_t ptr = er_ctxt->rp;

	/*
	 * This is a quick check to avoid unnecessary event processing
	 * in case MHI is already in error state, but it's still possible
	 * to transition to error state while processing events
	 */
	if (unlikely(MHI_EVENT_ACCESS_INVALID(mhi_cntrl->pm_state)))
		return -EIO;

	if (!is_valid_ring_ptr(ev_ring, ptr)) {
		dev_err(&mhi_cntrl->mhi_dev->dev,
			"Event ring rp points outside of the event ring\n");
		return -EIO;
	}

	dev_rp = mhi_to_virtual(ev_ring, ptr);
	local_rp = ev_ring->rp;

	while (dev_rp != local_rp) {
		enum mhi_pkt_type type = MHI_TRE_GET_EV_TYPE(local_rp);

		switch (type) {
		case MHI_PKT_TYPE_BW_REQ_EVENT:
		{
			struct mhi_link_info *link_info;

			link_info = &mhi_cntrl->mhi_link_info;
			write_lock_irq(&mhi_cntrl->pm_lock);
			link_info->target_link_speed =
				MHI_TRE_GET_EV_LINKSPEED(local_rp);
			link_info->target_link_width =
				MHI_TRE_GET_EV_LINKWIDTH(local_rp);
			write_unlock_irq(&mhi_cntrl->pm_lock);
			dev_dbg(dev, "Received BW_REQ event\n");
			mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_BW_REQ);
			break;
		}
		case MHI_PKT_TYPE_STATE_CHANGE_EVENT:
		{
			enum mhi_state new_state;

			new_state = MHI_TRE_GET_EV_STATE(local_rp);

			dev_dbg(dev, "State change event to state: %s\n",
				TO_MHI_STATE_STR(new_state));

			switch (new_state) {
			case MHI_STATE_M0:
				mhi_pm_m0_transition(mhi_cntrl);
				break;
			case MHI_STATE_M1:
				mhi_pm_m1_transition(mhi_cntrl);
				break;
			case MHI_STATE_M3:
				mhi_pm_m3_transition(mhi_cntrl);
				break;
			case MHI_STATE_SYS_ERR:
			{
				enum mhi_pm_state new_state;

				/* skip SYS_ERROR handling if RDDM supported */
				if (mhi_cntrl->ee == MHI_EE_RDDM ||
				    mhi_cntrl->rddm_image)
					break;

				dev_dbg(dev, "System error detected\n");
				write_lock_irq(&mhi_cntrl->pm_lock);
				new_state = mhi_tryset_pm_state(mhi_cntrl,
							MHI_PM_SYS_ERR_DETECT);
				write_unlock_irq(&mhi_cntrl->pm_lock);
				if (new_state == MHI_PM_SYS_ERR_DETECT)
					mhi_pm_sys_err_handler(mhi_cntrl);
				break;
			}
			default:
				dev_err(dev, "Invalid state: %s\n",
					TO_MHI_STATE_STR(new_state));
			}

			break;
		}
		case MHI_PKT_TYPE_CMD_COMPLETION_EVENT:
			mhi_process_cmd_completion(mhi_cntrl, local_rp);
			break;
		case MHI_PKT_TYPE_EE_EVENT:
		{
			enum dev_st_transition st = DEV_ST_TRANSITION_MAX;
			enum mhi_ee_type event = MHI_TRE_GET_EV_EXECENV(local_rp);

			dev_dbg(dev, "Received EE event: %s\n",
				TO_MHI_EXEC_STR(event));
			switch (event) {
			case MHI_EE_SBL:
				st = DEV_ST_TRANSITION_SBL;
				break;
			case MHI_EE_WFW:
			case MHI_EE_AMSS:
				st = DEV_ST_TRANSITION_MISSION_MODE;
				break;
			case MHI_EE_RDDM:
				mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_EE_RDDM);
				write_lock_irq(&mhi_cntrl->pm_lock);
				mhi_cntrl->ee = event;
				write_unlock_irq(&mhi_cntrl->pm_lock);
				wake_up_all(&mhi_cntrl->state_event);
				break;
			default:
				dev_err(dev,
					"Unhandled EE event: 0x%x\n", event);
			}
			if (st != DEV_ST_TRANSITION_MAX)
				mhi_queue_state_transition(mhi_cntrl, st);

			break;
		}
		case MHI_PKT_TYPE_TX_EVENT:
			chan = MHI_TRE_GET_EV_CHID(local_rp);

			WARN_ON(chan >= mhi_cntrl->max_chan);

			/*
			 * Only process the event ring elements whose channel
			 * ID is within the maximum supported range.
			 */
			if (chan < mhi_cntrl->max_chan) {
				mhi_chan = &mhi_cntrl->mhi_chan[chan];
				if (!mhi_chan->configured)
					break;
				parse_xfer_event(mhi_cntrl, local_rp, mhi_chan);
				event_quota--;
			}
			break;
		default:
			dev_err(dev, "Unhandled event type: %d\n", type);
			break;
		}

		mhi_recycle_ev_ring_element(mhi_cntrl, ev_ring);
		local_rp = ev_ring->rp;

		ptr = er_ctxt->rp;
		if (!is_valid_ring_ptr(ev_ring, ptr)) {
			dev_err(&mhi_cntrl->mhi_dev->dev,
				"Event ring rp points outside of the event ring\n");
			return -EIO;
		}

		dev_rp = mhi_to_virtual(ev_ring, ptr);
		count++;
	}

	read_lock_bh(&mhi_cntrl->pm_lock);
	if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl)))
		mhi_ring_er_db(mhi_event);
	read_unlock_bh(&mhi_cntrl->pm_lock);

	return count;
}

int mhi_process_data_event_ring(struct mhi_controller *mhi_cntrl,
				struct mhi_event *mhi_event,
				u32 event_quota)
{
	struct mhi_tre *dev_rp, *local_rp;
	struct mhi_ring *ev_ring = &mhi_event->ring;
	struct mhi_event_ctxt *er_ctxt =
		&mhi_cntrl->mhi_ctxt->er_ctxt[mhi_event->er_index];
	int count = 0;
	u32 chan;
	struct mhi_chan *mhi_chan;
	dma_addr_t ptr = er_ctxt->rp;

	if (unlikely(MHI_EVENT_ACCESS_INVALID(mhi_cntrl->pm_state)))
		return -EIO;

	if (!is_valid_ring_ptr(ev_ring, ptr)) {
		dev_err(&mhi_cntrl->mhi_dev->dev,
			"Event ring rp points outside of the event ring\n");
		return -EIO;
	}

	dev_rp = mhi_to_virtual(ev_ring, ptr);
	local_rp = ev_ring->rp;

	while (dev_rp != local_rp && event_quota > 0) {
		enum mhi_pkt_type type = MHI_TRE_GET_EV_TYPE(local_rp);

		chan = MHI_TRE_GET_EV_CHID(local_rp);

		WARN_ON(chan >= mhi_cntrl->max_chan);

		/*
		 * Only process the event ring elements whose channel
		 * ID is within the maximum supported range.
		 */
		if (chan < mhi_cntrl->max_chan &&
		    mhi_cntrl->mhi_chan[chan].configured) {
			mhi_chan = &mhi_cntrl->mhi_chan[chan];

			if (likely(type == MHI_PKT_TYPE_TX_EVENT)) {
				parse_xfer_event(mhi_cntrl, local_rp, mhi_chan);
				event_quota--;
			} else if (type == MHI_PKT_TYPE_RSC_TX_EVENT) {
				parse_rsc_event(mhi_cntrl, local_rp, mhi_chan);
				event_quota--;
			}
		}

		mhi_recycle_ev_ring_element(mhi_cntrl, ev_ring);
		local_rp = ev_ring->rp;

		ptr = er_ctxt->rp;
		if (!is_valid_ring_ptr(ev_ring, ptr)) {
			dev_err(&mhi_cntrl->mhi_dev->dev,
				"Event ring rp points outside of the event ring\n");
			return -EIO;
		}

		dev_rp = mhi_to_virtual(ev_ring, ptr);
		count++;
	}
	read_lock_bh(&mhi_cntrl->pm_lock);
	if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl)))
		mhi_ring_er_db(mhi_event);
	read_unlock_bh(&mhi_cntrl->pm_lock);

	return count;
}

void mhi_ev_task(unsigned long data)
{
	struct mhi_event *mhi_event = (struct mhi_event *)data;
	struct mhi_controller *mhi_cntrl = mhi_event->mhi_cntrl;

	/* process all pending events */
	spin_lock_bh(&mhi_event->lock);
	mhi_event->process_event(mhi_cntrl, mhi_event, U32_MAX);
	spin_unlock_bh(&mhi_event->lock);
}

void mhi_ctrl_ev_task(unsigned long data)
{
	struct mhi_event *mhi_event = (struct mhi_event *)data;
	struct mhi_controller *mhi_cntrl = mhi_event->mhi_cntrl;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	enum mhi_state state;
	enum mhi_pm_state pm_state = 0;
	int ret;

	/*
	 * We can check the PM state w/o a lock here because there is no way
	 * the PM state can change from reg access valid to no access while
	 * this thread is being executed.
	 */
	if (!MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) {
		/*
		 * We may have a pending event, but we are not allowed to
		 * process it since we are probably in a suspended state,
		 * so trigger a resume.
		 */
		mhi_trigger_resume(mhi_cntrl);

		return;
	}

	/* Process ctrl events */
	ret = mhi_event->process_event(mhi_cntrl, mhi_event, U32_MAX);

	/*
	 * We received an IRQ but there are no events to process; maybe the
	 * device went to SYS_ERR state? Check the state to confirm.
	 */
	if (!ret) {
		write_lock_irq(&mhi_cntrl->pm_lock);
		state = mhi_get_mhi_state(mhi_cntrl);
		if (state == MHI_STATE_SYS_ERR) {
			dev_dbg(dev, "System error detected\n");
			pm_state = mhi_tryset_pm_state(mhi_cntrl,
						       MHI_PM_SYS_ERR_DETECT);
		}
		write_unlock_irq(&mhi_cntrl->pm_lock);
		if (pm_state == MHI_PM_SYS_ERR_DETECT)
			mhi_pm_sys_err_handler(mhi_cntrl);
	}
}

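/*
 * The ring is considered full when advancing the write pointer by one element
 * would make it collide with the read pointer.
 */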
static bool mhi_is_ring_full(struct mhi_controller *mhi_cntrl,
			     struct mhi_ring *ring)
{
	void *tmp = ring->wp + ring->el_size;

	if (tmp >= (ring->base + ring->len))
		tmp = ring->base;

	return (tmp == ring->rp);
}

int mhi_queue_skb(struct mhi_device *mhi_dev, enum dma_data_direction dir,
		  struct sk_buff *skb, size_t len, enum mhi_flags mflags)
{
	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
	struct mhi_chan *mhi_chan = (dir == DMA_TO_DEVICE) ? mhi_dev->ul_chan :
							     mhi_dev->dl_chan;
	struct mhi_ring *tre_ring = &mhi_chan->tre_ring;
	struct mhi_buf_info buf_info = { };
	int ret;

	/* If MHI host pre-allocates buffers then client drivers cannot queue */
	if (mhi_chan->pre_alloc)
		return -EINVAL;

	if (mhi_is_ring_full(mhi_cntrl, tre_ring))
		return -ENOMEM;

	read_lock_bh(&mhi_cntrl->pm_lock);
	if (unlikely(MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state))) {
		read_unlock_bh(&mhi_cntrl->pm_lock);
		return -EIO;
	}

	/* we're in M3 or transitioning to M3 */
	if (MHI_PM_IN_SUSPEND_STATE(mhi_cntrl->pm_state))
		mhi_trigger_resume(mhi_cntrl);

	/* Toggle wake to exit out of M2 */
	mhi_cntrl->wake_toggle(mhi_cntrl);

	buf_info.v_addr = skb->data;
	buf_info.cb_buf = skb;
	buf_info.len = len;

	ret = mhi_gen_tre(mhi_cntrl, mhi_chan, &buf_info, mflags);
	if (unlikely(ret)) {
		read_unlock_bh(&mhi_cntrl->pm_lock);
		return ret;
	}

	if (mhi_chan->dir == DMA_TO_DEVICE)
		atomic_inc(&mhi_cntrl->pending_pkts);

	if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl))) {
		read_lock_bh(&mhi_chan->lock);
		mhi_ring_chan_db(mhi_cntrl, mhi_chan);
		read_unlock_bh(&mhi_chan->lock);
	}

	read_unlock_bh(&mhi_cntrl->pm_lock);

	return 0;
}
EXPORT_SYMBOL_GPL(mhi_queue_skb);

int mhi_queue_dma(struct mhi_device *mhi_dev, enum dma_data_direction dir,
		  struct mhi_buf *mhi_buf, size_t len, enum mhi_flags mflags)
{
	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
	struct mhi_chan *mhi_chan = (dir == DMA_TO_DEVICE) ? mhi_dev->ul_chan :
							     mhi_dev->dl_chan;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	struct mhi_ring *tre_ring = &mhi_chan->tre_ring;
	struct mhi_buf_info buf_info = { };
	int ret;

	/* If MHI host pre-allocates buffers then client drivers cannot queue */
	if (mhi_chan->pre_alloc)
		return -EINVAL;

	if (mhi_is_ring_full(mhi_cntrl, tre_ring))
		return -ENOMEM;

	read_lock_bh(&mhi_cntrl->pm_lock);
	if (unlikely(MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state))) {
		dev_err(dev, "MHI is not in active state, PM state: %s\n",
			to_mhi_pm_state_str(mhi_cntrl->pm_state));
		read_unlock_bh(&mhi_cntrl->pm_lock);

		return -EIO;
	}

	/* we're in M3 or transitioning to M3 */
	if (MHI_PM_IN_SUSPEND_STATE(mhi_cntrl->pm_state))
		mhi_trigger_resume(mhi_cntrl);

	/* Toggle wake to exit out of M2 */
	mhi_cntrl->wake_toggle(mhi_cntrl);

	buf_info.p_addr = mhi_buf->dma_addr;
	buf_info.cb_buf = mhi_buf;
	buf_info.pre_mapped = true;
	buf_info.len = len;

	ret = mhi_gen_tre(mhi_cntrl, mhi_chan, &buf_info, mflags);
	if (unlikely(ret)) {
		read_unlock_bh(&mhi_cntrl->pm_lock);
		return ret;
	}

	if (mhi_chan->dir == DMA_TO_DEVICE)
		atomic_inc(&mhi_cntrl->pending_pkts);

	if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl))) {
		read_lock_bh(&mhi_chan->lock);
		mhi_ring_chan_db(mhi_cntrl, mhi_chan);
		read_unlock_bh(&mhi_chan->lock);
	}

	read_unlock_bh(&mhi_cntrl->pm_lock);

	return 0;
}
EXPORT_SYMBOL_GPL(mhi_queue_dma);

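/*
 * Build a transfer ring element (TRE) from the supplied buffer info: record
 * the buffer in the buffer ring, DMA-map it if the client has not pre-mapped
 * it, fill in the TRE fields (pointer, length, BEI/EOT/EOB/chain bits) and
 * advance the write pointers of both rings.
 */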
int mhi_gen_tre(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan,
		struct mhi_buf_info *info, enum mhi_flags flags)
{
	struct mhi_ring *buf_ring, *tre_ring;
	struct mhi_tre *mhi_tre;
	struct mhi_buf_info *buf_info;
	int eot, eob, chain, bei;
	int ret;

	buf_ring = &mhi_chan->buf_ring;
	tre_ring = &mhi_chan->tre_ring;

	buf_info = buf_ring->wp;
	WARN_ON(buf_info->used);
	buf_info->pre_mapped = info->pre_mapped;
	if (info->pre_mapped)
		buf_info->p_addr = info->p_addr;
	else
		buf_info->v_addr = info->v_addr;
	buf_info->cb_buf = info->cb_buf;
	buf_info->wp = tre_ring->wp;
	buf_info->dir = mhi_chan->dir;
	buf_info->len = info->len;

	if (!info->pre_mapped) {
		ret = mhi_cntrl->map_single(mhi_cntrl, buf_info);
		if (ret)
			return ret;
	}

	eob = !!(flags & MHI_EOB);
	eot = !!(flags & MHI_EOT);
	chain = !!(flags & MHI_CHAIN);
	bei = !!(mhi_chan->intmod);

	mhi_tre = tre_ring->wp;
	mhi_tre->ptr = MHI_TRE_DATA_PTR(buf_info->p_addr);
	mhi_tre->dword[0] = MHI_TRE_DATA_DWORD0(info->len);
	mhi_tre->dword[1] = MHI_TRE_DATA_DWORD1(bei, eot, eob, chain);

	/* increment WP */
	mhi_add_ring_element(mhi_cntrl, tre_ring);
	mhi_add_ring_element(mhi_cntrl, buf_ring);

	return 0;
}

int mhi_queue_buf(struct mhi_device *mhi_dev, enum dma_data_direction dir,
		  void *buf, size_t len, enum mhi_flags mflags)
{
	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
	struct mhi_chan *mhi_chan = (dir == DMA_TO_DEVICE) ? mhi_dev->ul_chan :
							     mhi_dev->dl_chan;
	struct mhi_ring *tre_ring;
	struct mhi_buf_info buf_info = { };
	unsigned long flags;
	int ret;

	/*
	 * This check is here only as a guard; it's always possible for MHI to
	 * enter an error state while executing the rest of the function, which
	 * is not fatal, so we do not need to hold pm_lock.
	 */
	if (unlikely(MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)))
		return -EIO;

	tre_ring = &mhi_chan->tre_ring;
	if (mhi_is_ring_full(mhi_cntrl, tre_ring))
		return -ENOMEM;

	buf_info.v_addr = buf;
	buf_info.cb_buf = buf;
	buf_info.len = len;

	ret = mhi_gen_tre(mhi_cntrl, mhi_chan, &buf_info, mflags);
	if (unlikely(ret))
		return ret;

	read_lock_irqsave(&mhi_cntrl->pm_lock, flags);

	/* we're in M3 or transitioning to M3 */
	if (MHI_PM_IN_SUSPEND_STATE(mhi_cntrl->pm_state))
		mhi_trigger_resume(mhi_cntrl);

	/* Toggle wake to exit out of M2 */
	mhi_cntrl->wake_toggle(mhi_cntrl);

	if (mhi_chan->dir == DMA_TO_DEVICE)
		atomic_inc(&mhi_cntrl->pending_pkts);

	if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl))) {
		unsigned long flags;

		read_lock_irqsave(&mhi_chan->lock, flags);
		mhi_ring_chan_db(mhi_cntrl, mhi_chan);
		read_unlock_irqrestore(&mhi_chan->lock, flags);
	}

	read_unlock_irqrestore(&mhi_cntrl->pm_lock, flags);

	return 0;
}
EXPORT_SYMBOL_GPL(mhi_queue_buf);

int mhi_send_cmd(struct mhi_controller *mhi_cntrl,
		 struct mhi_chan *mhi_chan,
		 enum mhi_cmd_type cmd)
{
	struct mhi_tre *cmd_tre = NULL;
	struct mhi_cmd *mhi_cmd = &mhi_cntrl->mhi_cmd[PRIMARY_CMD_RING];
	struct mhi_ring *ring = &mhi_cmd->ring;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	int chan = 0;

	if (mhi_chan)
		chan = mhi_chan->chan;

	spin_lock_bh(&mhi_cmd->lock);
	if (!get_nr_avail_ring_elements(mhi_cntrl, ring)) {
		spin_unlock_bh(&mhi_cmd->lock);
		return -ENOMEM;
	}

	/* prepare the cmd tre */
	cmd_tre = ring->wp;
	switch (cmd) {
	case MHI_CMD_RESET_CHAN:
		cmd_tre->ptr = MHI_TRE_CMD_RESET_PTR;
		cmd_tre->dword[0] = MHI_TRE_CMD_RESET_DWORD0;
		cmd_tre->dword[1] = MHI_TRE_CMD_RESET_DWORD1(chan);
		break;
	case MHI_CMD_START_CHAN:
		cmd_tre->ptr = MHI_TRE_CMD_START_PTR;
		cmd_tre->dword[0] = MHI_TRE_CMD_START_DWORD0;
		cmd_tre->dword[1] = MHI_TRE_CMD_START_DWORD1(chan);
		break;
	default:
		dev_err(dev, "Command not supported\n");
		break;
	}

	/* queue to hardware */
	mhi_add_ring_element(mhi_cntrl, ring);
	read_lock_bh(&mhi_cntrl->pm_lock);
	if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl)))
		mhi_ring_cmd_db(mhi_cntrl, mhi_cmd);
	read_unlock_bh(&mhi_cntrl->pm_lock);
	spin_unlock_bh(&mhi_cmd->lock);

	return 0;
}

static void __mhi_unprepare_channel(struct mhi_controller *mhi_cntrl,
				    struct mhi_chan *mhi_chan)
{
	int ret;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;

	dev_dbg(dev, "Entered: unprepare channel:%d\n", mhi_chan->chan);

	/* no more processing events for this channel */
	mutex_lock(&mhi_chan->mutex);
	write_lock_irq(&mhi_chan->lock);
	if (mhi_chan->ch_state != MHI_CH_STATE_ENABLED) {
		write_unlock_irq(&mhi_chan->lock);
		mutex_unlock(&mhi_chan->mutex);
		return;
	}

	mhi_chan->ch_state = MHI_CH_STATE_DISABLED;
	write_unlock_irq(&mhi_chan->lock);

	reinit_completion(&mhi_chan->completion);
	read_lock_bh(&mhi_cntrl->pm_lock);
	if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
		read_unlock_bh(&mhi_cntrl->pm_lock);
		goto error_invalid_state;
	}

	mhi_cntrl->wake_toggle(mhi_cntrl);
	read_unlock_bh(&mhi_cntrl->pm_lock);

	mhi_cntrl->runtime_get(mhi_cntrl);
	mhi_cntrl->runtime_put(mhi_cntrl);
	ret = mhi_send_cmd(mhi_cntrl, mhi_chan, MHI_CMD_RESET_CHAN);
	if (ret)
		goto error_invalid_state;

	/* even if it fails we will still reset */
	ret = wait_for_completion_timeout(&mhi_chan->completion,
				msecs_to_jiffies(mhi_cntrl->timeout_ms));
	if (!ret || mhi_chan->ccs != MHI_EV_CC_SUCCESS)
		dev_err(dev,
			"Failed to receive cmd completion, still resetting\n");

error_invalid_state:
	if (!mhi_chan->offload_ch) {
		mhi_reset_chan(mhi_cntrl, mhi_chan);
		mhi_deinit_chan_ctxt(mhi_cntrl, mhi_chan);
	}
	dev_dbg(dev, "chan:%d successfully reset\n", mhi_chan->chan);
	mutex_unlock(&mhi_chan->mutex);
}

int mhi_prepare_channel(struct mhi_controller *mhi_cntrl,
			struct mhi_chan *mhi_chan)
{
	int ret = 0;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;

	dev_dbg(dev, "Preparing channel: %d\n", mhi_chan->chan);

	if (!(BIT(mhi_cntrl->ee) & mhi_chan->ee_mask)) {
		dev_err(dev,
			"Current EE: %s Required EE Mask: 0x%x for chan: %s\n",
			TO_MHI_EXEC_STR(mhi_cntrl->ee), mhi_chan->ee_mask,
			mhi_chan->name);
		return -ENOTCONN;
	}

	mutex_lock(&mhi_chan->mutex);

	/* If the channel is not in disabled state, do not allow it to start */
	if (mhi_chan->ch_state != MHI_CH_STATE_DISABLED) {
		ret = -EIO;
		dev_dbg(dev, "channel: %d is not in disabled state\n",
			mhi_chan->chan);
		goto error_init_chan;
	}

	/* Check if the client manages the channel context for offload channels */
	if (!mhi_chan->offload_ch) {
		ret = mhi_init_chan_ctxt(mhi_cntrl, mhi_chan);
		if (ret)
			goto error_init_chan;
	}

	reinit_completion(&mhi_chan->completion);
	read_lock_bh(&mhi_cntrl->pm_lock);
	if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
		read_unlock_bh(&mhi_cntrl->pm_lock);
		ret = -EIO;
		goto error_pm_state;
	}

	mhi_cntrl->wake_toggle(mhi_cntrl);
	read_unlock_bh(&mhi_cntrl->pm_lock);
	mhi_cntrl->runtime_get(mhi_cntrl);
	mhi_cntrl->runtime_put(mhi_cntrl);

	ret = mhi_send_cmd(mhi_cntrl, mhi_chan, MHI_CMD_START_CHAN);
	if (ret)
		goto error_pm_state;

	ret = wait_for_completion_timeout(&mhi_chan->completion,
				msecs_to_jiffies(mhi_cntrl->timeout_ms));
	if (!ret || mhi_chan->ccs != MHI_EV_CC_SUCCESS) {
		ret = -EIO;
		goto error_pm_state;
	}

	write_lock_irq(&mhi_chan->lock);
	mhi_chan->ch_state = MHI_CH_STATE_ENABLED;
	write_unlock_irq(&mhi_chan->lock);

	/* Pre-allocate buffers for the xfer ring */
	if (mhi_chan->pre_alloc) {
		int nr_el = get_nr_avail_ring_elements(mhi_cntrl,
						       &mhi_chan->tre_ring);
		size_t len = mhi_cntrl->buffer_len;

		while (nr_el--) {
			void *buf;
			struct mhi_buf_info info = { };

			buf = kmalloc(len, GFP_KERNEL);
			if (!buf) {
				ret = -ENOMEM;
				goto error_pre_alloc;
			}

			/* Prepare transfer descriptors */
			info.v_addr = buf;
			info.cb_buf = buf;
			info.len = len;
			ret = mhi_gen_tre(mhi_cntrl, mhi_chan, &info, MHI_EOT);
			if (ret) {
				kfree(buf);
				goto error_pre_alloc;
			}
		}

		read_lock_bh(&mhi_cntrl->pm_lock);
		if (MHI_DB_ACCESS_VALID(mhi_cntrl)) {
			read_lock_irq(&mhi_chan->lock);
			mhi_ring_chan_db(mhi_cntrl, mhi_chan);
			read_unlock_irq(&mhi_chan->lock);
		}
		read_unlock_bh(&mhi_cntrl->pm_lock);
	}

	mutex_unlock(&mhi_chan->mutex);

	dev_dbg(dev, "Chan: %d successfully moved to start state\n",
		mhi_chan->chan);

	return 0;

error_pm_state:
	if (!mhi_chan->offload_ch)
		mhi_deinit_chan_ctxt(mhi_cntrl, mhi_chan);

error_init_chan:
	mutex_unlock(&mhi_chan->mutex);

	return ret;

error_pre_alloc:
	mutex_unlock(&mhi_chan->mutex);
	__mhi_unprepare_channel(mhi_cntrl, mhi_chan);

	return ret;
}

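/*
 * Walk the event ring and rewrite pending TX events for the channel being
 * reset as MHI_PKT_TYPE_STALE_EVENT, so that later event processing skips
 * completions for a channel that is going away.
 */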
static void mhi_mark_stale_events(struct mhi_controller *mhi_cntrl,
				  struct mhi_event *mhi_event,
				  struct mhi_event_ctxt *er_ctxt,
				  int chan)

{
	struct mhi_tre *dev_rp, *local_rp;
	struct mhi_ring *ev_ring;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	unsigned long flags;
	dma_addr_t ptr;

	dev_dbg(dev, "Marking all events for chan: %d as stale\n", chan);

	ev_ring = &mhi_event->ring;

	/* mark all stale events related to channel as STALE event */
	spin_lock_irqsave(&mhi_event->lock, flags);

	ptr = er_ctxt->rp;
	if (!is_valid_ring_ptr(ev_ring, ptr)) {
		dev_err(&mhi_cntrl->mhi_dev->dev,
			"Event ring rp points outside of the event ring\n");
		dev_rp = ev_ring->rp;
	} else {
		dev_rp = mhi_to_virtual(ev_ring, ptr);
	}

	local_rp = ev_ring->rp;
	while (dev_rp != local_rp) {
		if (MHI_TRE_GET_EV_TYPE(local_rp) == MHI_PKT_TYPE_TX_EVENT &&
		    chan == MHI_TRE_GET_EV_CHID(local_rp))
			local_rp->dword[1] = MHI_TRE_EV_DWORD1(chan,
					MHI_PKT_TYPE_STALE_EVENT);
		local_rp++;
		if (local_rp == (ev_ring->base + ev_ring->len))
			local_rp = ev_ring->base;
	}

	dev_dbg(dev, "Finished marking events as stale events\n");
	spin_unlock_irqrestore(&mhi_event->lock, flags);
}

static void mhi_reset_data_chan(struct mhi_controller *mhi_cntrl,
				struct mhi_chan *mhi_chan)
{
	struct mhi_ring *buf_ring, *tre_ring;
	struct mhi_result result;

	/* Reset any pending buffers */
	buf_ring = &mhi_chan->buf_ring;
	tre_ring = &mhi_chan->tre_ring;
	result.transaction_status = -ENOTCONN;
	result.bytes_xferd = 0;
	while (tre_ring->rp != tre_ring->wp) {
		struct mhi_buf_info *buf_info = buf_ring->rp;

		if (mhi_chan->dir == DMA_TO_DEVICE)
			atomic_dec(&mhi_cntrl->pending_pkts);

		if (!buf_info->pre_mapped)
			mhi_cntrl->unmap_single(mhi_cntrl, buf_info);

		mhi_del_ring_element(mhi_cntrl, buf_ring);
		mhi_del_ring_element(mhi_cntrl, tre_ring);

		if (mhi_chan->pre_alloc) {
			kfree(buf_info->cb_buf);
		} else {
			result.buf_addr = buf_info->cb_buf;
			mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result);
		}
	}
}

void mhi_reset_chan(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan)
{
	struct mhi_event *mhi_event;
	struct mhi_event_ctxt *er_ctxt;
	int chan = mhi_chan->chan;

	/* Nothing to reset, client doesn't queue buffers */
	if (mhi_chan->offload_ch)
		return;

	read_lock_bh(&mhi_cntrl->pm_lock);
	mhi_event = &mhi_cntrl->mhi_event[mhi_chan->er_index];
	er_ctxt = &mhi_cntrl->mhi_ctxt->er_ctxt[mhi_chan->er_index];

	mhi_mark_stale_events(mhi_cntrl, mhi_event, er_ctxt, chan);

	mhi_reset_data_chan(mhi_cntrl, mhi_chan);

	read_unlock_bh(&mhi_cntrl->pm_lock);
}

/* Move channel to start state */
int mhi_prepare_for_transfer(struct mhi_device *mhi_dev)
{
	int ret, dir;
	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
	struct mhi_chan *mhi_chan;

	for (dir = 0; dir < 2; dir++) {
		mhi_chan = dir ? mhi_dev->dl_chan : mhi_dev->ul_chan;
		if (!mhi_chan)
			continue;

		ret = mhi_prepare_channel(mhi_cntrl, mhi_chan);
		if (ret)
			goto error_open_chan;
	}

	return 0;

error_open_chan:
	for (--dir; dir >= 0; dir--) {
		mhi_chan = dir ? mhi_dev->dl_chan : mhi_dev->ul_chan;
		if (!mhi_chan)
			continue;

		__mhi_unprepare_channel(mhi_cntrl, mhi_chan);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(mhi_prepare_for_transfer);

void mhi_unprepare_from_transfer(struct mhi_device *mhi_dev)
{
	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
	struct mhi_chan *mhi_chan;
	int dir;

	for (dir = 0; dir < 2; dir++) {
		mhi_chan = dir ? mhi_dev->ul_chan : mhi_dev->dl_chan;
		if (!mhi_chan)
			continue;

		__mhi_unprepare_channel(mhi_cntrl, mhi_chan);
	}
}
EXPORT_SYMBOL_GPL(mhi_unprepare_from_transfer);

int mhi_poll(struct mhi_device *mhi_dev, u32 budget)
{
	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
	struct mhi_chan *mhi_chan = mhi_dev->dl_chan;
	struct mhi_event *mhi_event = &mhi_cntrl->mhi_event[mhi_chan->er_index];
	int ret;

	spin_lock_bh(&mhi_event->lock);
	ret = mhi_event->process_event(mhi_cntrl, mhi_event, budget);
	spin_unlock_bh(&mhi_event->lock);

	return ret;
}
EXPORT_SYMBOL_GPL(mhi_poll);