1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
4 *
5 */
6
7 #include <linux/delay.h>
8 #include <linux/device.h>
9 #include <linux/dma-direction.h>
10 #include <linux/dma-mapping.h>
11 #include <linux/interrupt.h>
12 #include <linux/list.h>
13 #include <linux/mhi.h>
14 #include <linux/module.h>
15 #include <linux/slab.h>
16 #include <linux/wait.h>
17 #include "internal.h"
18
19 /*
20 * Not all MHI state transitions are synchronous. Transitions like Linkdown,
21 * SYS_ERR, and shutdown can happen anytime asynchronously. This function will
22 * transition to a new state only if we're allowed to.
23 *
24 * Priority increases as we go down. For instance, from any state in L0, the
25 * transition can be made to states in L1, L2 and L3. A notable exception to
26 * this rule is state DISABLE. From DISABLE state we can only transition to
27 * POR state. Also, while in L2 state, the host cannot jump back to the
28 * previous L1 or L0 states.
29 *
30 * Valid transitions:
31 * L0: DISABLE <--> POR
32 * POR <--> POR
33 * POR -> M0 -> M2 --> M0
34 * POR -> FW_DL_ERR
35 * FW_DL_ERR <--> FW_DL_ERR
36 * M0 <--> M0
37 * M0 -> FW_DL_ERR
38 * M0 -> M3_ENTER -> M3 -> M3_EXIT --> M0
39 * L1: SYS_ERR_DETECT -> SYS_ERR_PROCESS --> POR
40 * L2: SHUTDOWN_PROCESS -> DISABLE
41 * L3: LD_ERR_FATAL_DETECT <--> LD_ERR_FATAL_DETECT
42 * LD_ERR_FATAL_DETECT -> SHUTDOWN_PROCESS
43 */
44 static struct mhi_pm_transitions const dev_state_transitions[] = {
45 /* L0 States */
46 {
47 MHI_PM_DISABLE,
48 MHI_PM_POR
49 },
50 {
51 MHI_PM_POR,
52 MHI_PM_POR | MHI_PM_DISABLE | MHI_PM_M0 |
53 MHI_PM_SYS_ERR_DETECT | MHI_PM_SHUTDOWN_PROCESS |
54 MHI_PM_LD_ERR_FATAL_DETECT | MHI_PM_FW_DL_ERR
55 },
56 {
57 MHI_PM_M0,
58 MHI_PM_M0 | MHI_PM_M2 | MHI_PM_M3_ENTER |
59 MHI_PM_SYS_ERR_DETECT | MHI_PM_SHUTDOWN_PROCESS |
60 MHI_PM_LD_ERR_FATAL_DETECT | MHI_PM_FW_DL_ERR
61 },
62 {
63 MHI_PM_M2,
64 MHI_PM_M0 | MHI_PM_SYS_ERR_DETECT | MHI_PM_SHUTDOWN_PROCESS |
65 MHI_PM_LD_ERR_FATAL_DETECT
66 },
67 {
68 MHI_PM_M3_ENTER,
69 MHI_PM_M3 | MHI_PM_SYS_ERR_DETECT | MHI_PM_SHUTDOWN_PROCESS |
70 MHI_PM_LD_ERR_FATAL_DETECT
71 },
72 {
73 MHI_PM_M3,
74 MHI_PM_M3_EXIT | MHI_PM_SYS_ERR_DETECT |
75 MHI_PM_SHUTDOWN_PROCESS | MHI_PM_LD_ERR_FATAL_DETECT
76 },
77 {
78 MHI_PM_M3_EXIT,
79 MHI_PM_M0 | MHI_PM_SYS_ERR_DETECT | MHI_PM_SHUTDOWN_PROCESS |
80 MHI_PM_LD_ERR_FATAL_DETECT
81 },
82 {
83 MHI_PM_FW_DL_ERR,
84 MHI_PM_FW_DL_ERR | MHI_PM_SYS_ERR_DETECT |
85 MHI_PM_SHUTDOWN_PROCESS | MHI_PM_LD_ERR_FATAL_DETECT
86 },
87 /* L1 States */
88 {
89 MHI_PM_SYS_ERR_DETECT,
90 MHI_PM_SYS_ERR_PROCESS | MHI_PM_SHUTDOWN_PROCESS |
91 MHI_PM_LD_ERR_FATAL_DETECT
92 },
93 {
94 MHI_PM_SYS_ERR_PROCESS,
95 MHI_PM_POR | MHI_PM_SHUTDOWN_PROCESS |
96 MHI_PM_LD_ERR_FATAL_DETECT
97 },
98 /* L2 States */
99 {
100 MHI_PM_SHUTDOWN_PROCESS,
101 MHI_PM_DISABLE | MHI_PM_LD_ERR_FATAL_DETECT
102 },
103 /* L3 States */
104 {
105 MHI_PM_LD_ERR_FATAL_DETECT,
106 MHI_PM_LD_ERR_FATAL_DETECT | MHI_PM_SHUTDOWN_PROCESS
107 },
108 };
109
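/*
 * Attempt to move the PM state machine to @state according to the
 * dev_state_transitions table above. Callers in this file take
 * mhi_cntrl->pm_lock as a writer around the call, for example:
 *
 *	write_lock_irq(&mhi_cntrl->pm_lock);
 *	cur_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_M0);
 *	write_unlock_irq(&mhi_cntrl->pm_lock);
 *
 * Returns the resulting pm_state: the requested state if the transition
 * was allowed, otherwise the unchanged current state.
 */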
110 enum mhi_pm_state __must_check mhi_tryset_pm_state(struct mhi_controller *mhi_cntrl,
111 enum mhi_pm_state state)
112 {
113 unsigned long cur_state = mhi_cntrl->pm_state;
114 int index = find_last_bit(&cur_state, 32);
115
116 if (unlikely(index >= ARRAY_SIZE(dev_state_transitions)))
117 return cur_state;
118
119 if (unlikely(dev_state_transitions[index].from_state != cur_state))
120 return cur_state;
121
122 if (unlikely(!(dev_state_transitions[index].to_states & state)))
123 return cur_state;
124
125 mhi_cntrl->pm_state = state;
126 return mhi_cntrl->pm_state;
127 }
128
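/*
 * Request an MHI state change by writing the MHICTRL register: RESET is
 * requested through the dedicated RESET field, all other states through
 * the MHISTATE field.
 */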
129 void mhi_set_mhi_state(struct mhi_controller *mhi_cntrl, enum mhi_state state)
130 {
131 if (state == MHI_STATE_RESET) {
132 mhi_write_reg_field(mhi_cntrl, mhi_cntrl->regs, MHICTRL,
133 MHICTRL_RESET_MASK, MHICTRL_RESET_SHIFT, 1);
134 } else {
135 mhi_write_reg_field(mhi_cntrl, mhi_cntrl->regs, MHICTRL,
136 MHICTRL_MHISTATE_MASK,
137 MHICTRL_MHISTATE_SHIFT, state);
138 }
139 }
140
141 /* NOP for backward compatibility, the host is allowed to ring DBs in M2 state */
142 static void mhi_toggle_dev_wake_nop(struct mhi_controller *mhi_cntrl)
143 {
144 }
145
146 static void mhi_toggle_dev_wake(struct mhi_controller *mhi_cntrl)
147 {
148 mhi_cntrl->wake_get(mhi_cntrl, false);
149 mhi_cntrl->wake_put(mhi_cntrl, true);
150 }
151
152 /* Handle device ready state transition */
153 int mhi_ready_state_transition(struct mhi_controller *mhi_cntrl)
154 {
155 void __iomem *base = mhi_cntrl->regs;
156 struct mhi_event *mhi_event;
157 enum mhi_pm_state cur_state;
158 struct device *dev = &mhi_cntrl->mhi_dev->dev;
159 u32 reset = 1, ready = 0;
160 int ret, i;
161
162 /* Wait for RESET to be cleared and READY bit to be set by the device */
163 wait_event_timeout(mhi_cntrl->state_event,
164 MHI_PM_IN_FATAL_STATE(mhi_cntrl->pm_state) ||
165 mhi_read_reg_field(mhi_cntrl, base, MHICTRL,
166 MHICTRL_RESET_MASK,
167 MHICTRL_RESET_SHIFT, &reset) ||
168 mhi_read_reg_field(mhi_cntrl, base, MHISTATUS,
169 MHISTATUS_READY_MASK,
170 MHISTATUS_READY_SHIFT, &ready) ||
171 (!reset && ready),
172 msecs_to_jiffies(mhi_cntrl->timeout_ms));
173
174 /* Check if device entered error state */
175 if (MHI_PM_IN_FATAL_STATE(mhi_cntrl->pm_state)) {
176 dev_err(dev, "Device link is not accessible\n");
177 return -EIO;
178 }
179
180 /* Timeout if device did not transition to ready state */
181 if (reset || !ready) {
182 dev_err(dev, "Device Ready timeout\n");
183 return -ETIMEDOUT;
184 }
185
186 dev_dbg(dev, "Device in READY State\n");
187 write_lock_irq(&mhi_cntrl->pm_lock);
188 cur_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_POR);
189 mhi_cntrl->dev_state = MHI_STATE_READY;
190 write_unlock_irq(&mhi_cntrl->pm_lock);
191
192 if (cur_state != MHI_PM_POR) {
193 dev_err(dev, "Error moving to state %s from %s\n",
194 to_mhi_pm_state_str(MHI_PM_POR),
195 to_mhi_pm_state_str(cur_state));
196 return -EIO;
197 }
198
199 read_lock_bh(&mhi_cntrl->pm_lock);
200 if (!MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) {
201 dev_err(dev, "Device registers not accessible\n");
202 goto error_mmio;
203 }
204
205 /* Configure MMIO registers */
206 ret = mhi_init_mmio(mhi_cntrl);
207 if (ret) {
208 dev_err(dev, "Error configuring MMIO registers\n");
209 goto error_mmio;
210 }
211
212 /* Add elements to all SW event rings */
213 mhi_event = mhi_cntrl->mhi_event;
214 for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
215 struct mhi_ring *ring = &mhi_event->ring;
216
217 /* Skip if this is an offload or HW event */
218 if (mhi_event->offload_ev || mhi_event->hw_ring)
219 continue;
220
221 ring->wp = ring->base + ring->len - ring->el_size;
222 *ring->ctxt_wp = ring->iommu_base + ring->len - ring->el_size;
223 /* Make sure the wp update is committed before the doorbell is rung */
224 smp_wmb();
225
226 /* Ring the event ring db */
227 spin_lock_irq(&mhi_event->lock);
228 mhi_ring_er_db(mhi_event);
229 spin_unlock_irq(&mhi_event->lock);
230 }
231
232 /* Set MHI to M0 state */
233 mhi_set_mhi_state(mhi_cntrl, MHI_STATE_M0);
234 read_unlock_bh(&mhi_cntrl->pm_lock);
235
236 return 0;
237
238 error_mmio:
239 read_unlock_bh(&mhi_cntrl->pm_lock);
240
241 return -EIO;
242 }
243
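/*
 * Handle the device's transition to M0: record the new state, then re-ring
 * the event, command and channel doorbells that may have pending work so
 * the device resumes processing after waking up from M2/M3.
 */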
244 int mhi_pm_m0_transition(struct mhi_controller *mhi_cntrl)
245 {
246 enum mhi_pm_state cur_state;
247 struct mhi_chan *mhi_chan;
248 struct device *dev = &mhi_cntrl->mhi_dev->dev;
249 int i;
250
251 write_lock_irq(&mhi_cntrl->pm_lock);
252 mhi_cntrl->dev_state = MHI_STATE_M0;
253 cur_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_M0);
254 write_unlock_irq(&mhi_cntrl->pm_lock);
255 if (unlikely(cur_state != MHI_PM_M0)) {
256 dev_err(dev, "Unable to transition to M0 state\n");
257 return -EIO;
258 }
259 mhi_cntrl->M0++;
260
261 /* Wake up the device */
262 read_lock_bh(&mhi_cntrl->pm_lock);
263 mhi_cntrl->wake_get(mhi_cntrl, true);
264
265 /* Ring all event rings and CMD ring only if we're in mission mode */
266 if (MHI_IN_MISSION_MODE(mhi_cntrl->ee)) {
267 struct mhi_event *mhi_event = mhi_cntrl->mhi_event;
268 struct mhi_cmd *mhi_cmd =
269 &mhi_cntrl->mhi_cmd[PRIMARY_CMD_RING];
270
271 for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
272 if (mhi_event->offload_ev)
273 continue;
274
275 spin_lock_irq(&mhi_event->lock);
276 mhi_ring_er_db(mhi_event);
277 spin_unlock_irq(&mhi_event->lock);
278 }
279
280 /* Only ring primary cmd ring if ring is not empty */
281 spin_lock_irq(&mhi_cmd->lock);
282 if (mhi_cmd->ring.rp != mhi_cmd->ring.wp)
283 mhi_ring_cmd_db(mhi_cntrl, mhi_cmd);
284 spin_unlock_irq(&mhi_cmd->lock);
285 }
286
287 /* Ring channel DB registers */
288 mhi_chan = mhi_cntrl->mhi_chan;
289 for (i = 0; i < mhi_cntrl->max_chan; i++, mhi_chan++) {
290 struct mhi_ring *tre_ring = &mhi_chan->tre_ring;
291
292 if (mhi_chan->db_cfg.reset_req) {
293 write_lock_irq(&mhi_chan->lock);
294 mhi_chan->db_cfg.db_mode = true;
295 write_unlock_irq(&mhi_chan->lock);
296 }
297
298 read_lock_irq(&mhi_chan->lock);
299
300 /* Only ring DB if ring is not empty */
301 if (tre_ring->base && tre_ring->wp != tre_ring->rp &&
302 mhi_chan->ch_state == MHI_CH_STATE_ENABLED)
303 mhi_ring_chan_db(mhi_cntrl, mhi_chan);
304 read_unlock_irq(&mhi_chan->lock);
305 }
306
307 mhi_cntrl->wake_put(mhi_cntrl, false);
308 read_unlock_bh(&mhi_cntrl->pm_lock);
309 wake_up_all(&mhi_cntrl->state_event);
310
311 return 0;
312 }
313
314 /*
315 * After receiving the MHI state change event from the device indicating the
316 * transition to M1 state, the host can transition the device to M2 state
317 * for keeping it in low power state.
318 */
319 void mhi_pm_m1_transition(struct mhi_controller *mhi_cntrl)
320 {
321 enum mhi_pm_state state;
322 struct device *dev = &mhi_cntrl->mhi_dev->dev;
323
324 write_lock_irq(&mhi_cntrl->pm_lock);
325 state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_M2);
326 if (state == MHI_PM_M2) {
327 mhi_set_mhi_state(mhi_cntrl, MHI_STATE_M2);
328 mhi_cntrl->dev_state = MHI_STATE_M2;
329
330 write_unlock_irq(&mhi_cntrl->pm_lock);
331
332 mhi_cntrl->M2++;
333 wake_up_all(&mhi_cntrl->state_event);
334
335 /* If there are any pending resources, exit M2 immediately */
336 if (unlikely(atomic_read(&mhi_cntrl->pending_pkts) ||
337 atomic_read(&mhi_cntrl->dev_wake))) {
338 dev_dbg(dev,
339 "Exiting M2, pending_pkts: %d dev_wake: %d\n",
340 atomic_read(&mhi_cntrl->pending_pkts),
341 atomic_read(&mhi_cntrl->dev_wake));
342 read_lock_bh(&mhi_cntrl->pm_lock);
343 mhi_cntrl->wake_get(mhi_cntrl, true);
344 mhi_cntrl->wake_put(mhi_cntrl, true);
345 read_unlock_bh(&mhi_cntrl->pm_lock);
346 } else {
347 mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_IDLE);
348 }
349 } else {
350 write_unlock_irq(&mhi_cntrl->pm_lock);
351 }
352 }
353
354 /* MHI M3 completion handler */
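/*
 * Called once the device reports entry into M3 (e.g. after mhi_pm_suspend()
 * requested MHI_STATE_M3): records the new state and wakes up any waiters
 * blocked on state_event.
 */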
355 int mhi_pm_m3_transition(struct mhi_controller *mhi_cntrl)
356 {
357 enum mhi_pm_state state;
358 struct device *dev = &mhi_cntrl->mhi_dev->dev;
359
360 write_lock_irq(&mhi_cntrl->pm_lock);
361 mhi_cntrl->dev_state = MHI_STATE_M3;
362 state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_M3);
363 write_unlock_irq(&mhi_cntrl->pm_lock);
364 if (state != MHI_PM_M3) {
365 dev_err(dev, "Unable to transition to M3 state\n");
366 return -EIO;
367 }
368
369 mhi_cntrl->M3++;
370 wake_up_all(&mhi_cntrl->state_event);
371
372 return 0;
373 }
374
375 /* Handle device Mission Mode transition */
376 static int mhi_pm_mission_mode_transition(struct mhi_controller *mhi_cntrl)
377 {
378 struct mhi_event *mhi_event;
379 struct device *dev = &mhi_cntrl->mhi_dev->dev;
380 enum mhi_ee_type current_ee = mhi_cntrl->ee;
381 int i, ret;
382
383 dev_dbg(dev, "Processing Mission Mode transition\n");
384
385 write_lock_irq(&mhi_cntrl->pm_lock);
386 if (MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state))
387 mhi_cntrl->ee = mhi_get_exec_env(mhi_cntrl);
388 write_unlock_irq(&mhi_cntrl->pm_lock);
389
390 if (!MHI_IN_MISSION_MODE(mhi_cntrl->ee))
391 return -EIO;
392
393 wake_up_all(&mhi_cntrl->state_event);
394
395 device_for_each_child(&mhi_cntrl->mhi_dev->dev, &current_ee,
396 mhi_destroy_device);
397 mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_EE_MISSION_MODE);
398
399 /* Force MHI to be in M0 state before continuing */
400 ret = __mhi_device_get_sync(mhi_cntrl);
401 if (ret)
402 return ret;
403
404 read_lock_bh(&mhi_cntrl->pm_lock);
405
406 if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
407 ret = -EIO;
408 goto error_mission_mode;
409 }
410
411 /* Add elements to all HW event rings */
412 mhi_event = mhi_cntrl->mhi_event;
413 for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
414 struct mhi_ring *ring = &mhi_event->ring;
415
416 if (mhi_event->offload_ev || !mhi_event->hw_ring)
417 continue;
418
419 ring->wp = ring->base + ring->len - ring->el_size;
420 *ring->ctxt_wp = ring->iommu_base + ring->len - ring->el_size;
421 /* Make sure the wp update is committed before the doorbell is rung */
422 smp_wmb();
423
424 spin_lock_irq(&mhi_event->lock);
425 if (MHI_DB_ACCESS_VALID(mhi_cntrl))
426 mhi_ring_er_db(mhi_event);
427 spin_unlock_irq(&mhi_event->lock);
428 }
429
430 read_unlock_bh(&mhi_cntrl->pm_lock);
431
432 /*
433 * The MHI devices are only created when the client device switches its
434 * Execution Environment (EE) to either SBL or AMSS states
435 */
436 mhi_create_devices(mhi_cntrl);
437
438 read_lock_bh(&mhi_cntrl->pm_lock);
439
440 error_mission_mode:
441 mhi_cntrl->wake_put(mhi_cntrl, false);
442 read_unlock_bh(&mhi_cntrl->pm_lock);
443
444 return ret;
445 }
446
447 /* Handle SYS_ERR and Shutdown transitions */
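/*
 * Rough sequence: notify the controller driver (SYS_ERR only), move the PM
 * state machine, assert MHI RESET so the device stops accessing host memory
 * (when register access is still possible), quiesce the event ring tasklets,
 * destroy child MHI devices, reset the command and event ring contexts, and
 * finally either restart via the READY transition (SYS_ERR) or drop to
 * MHI_PM_DISABLE (shutdown).
 */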
448 static void mhi_pm_disable_transition(struct mhi_controller *mhi_cntrl,
449 enum mhi_pm_state transition_state)
450 {
451 enum mhi_pm_state cur_state, prev_state;
452 struct mhi_event *mhi_event;
453 struct mhi_cmd_ctxt *cmd_ctxt;
454 struct mhi_cmd *mhi_cmd;
455 struct mhi_event_ctxt *er_ctxt;
456 struct device *dev = &mhi_cntrl->mhi_dev->dev;
457 int ret, i;
458
459 dev_dbg(dev, "Transitioning from PM state: %s to: %s\n",
460 to_mhi_pm_state_str(mhi_cntrl->pm_state),
461 to_mhi_pm_state_str(transition_state));
462
463 /* We must notify the MHI controller driver first so it can clean up */
464 if (transition_state == MHI_PM_SYS_ERR_PROCESS)
465 mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_SYS_ERROR);
466
467 mutex_lock(&mhi_cntrl->pm_mutex);
468 write_lock_irq(&mhi_cntrl->pm_lock);
469 prev_state = mhi_cntrl->pm_state;
470 cur_state = mhi_tryset_pm_state(mhi_cntrl, transition_state);
471 if (cur_state == transition_state) {
472 mhi_cntrl->ee = MHI_EE_DISABLE_TRANSITION;
473 mhi_cntrl->dev_state = MHI_STATE_RESET;
474 }
475 write_unlock_irq(&mhi_cntrl->pm_lock);
476
477 /* Wake up threads waiting for state transition */
478 wake_up_all(&mhi_cntrl->state_event);
479
480 if (cur_state != transition_state) {
481 dev_err(dev, "Failed to transition to state: %s from: %s\n",
482 to_mhi_pm_state_str(transition_state),
483 to_mhi_pm_state_str(cur_state));
484 mutex_unlock(&mhi_cntrl->pm_mutex);
485 return;
486 }
487
488 /* Trigger MHI RESET so that the device will not access host memory */
489 if (MHI_REG_ACCESS_VALID(prev_state)) {
490 u32 in_reset = -1;
491 unsigned long timeout = msecs_to_jiffies(mhi_cntrl->timeout_ms);
492
493 /* Skip MHI RESET if in RDDM state */
494 if (mhi_cntrl->rddm_image && mhi_get_exec_env(mhi_cntrl) == MHI_EE_RDDM)
495 goto skip_mhi_reset;
496
497 dev_dbg(dev, "Triggering MHI Reset in device\n");
498 mhi_set_mhi_state(mhi_cntrl, MHI_STATE_RESET);
499
500 /* Wait for the reset bit to be cleared by the device */
501 ret = wait_event_timeout(mhi_cntrl->state_event,
502 mhi_read_reg_field(mhi_cntrl,
503 mhi_cntrl->regs,
504 MHICTRL,
505 MHICTRL_RESET_MASK,
506 MHICTRL_RESET_SHIFT,
507 &in_reset) ||
508 !in_reset, timeout);
509 if ((!ret || in_reset) && cur_state == MHI_PM_SYS_ERR_PROCESS) {
510 dev_err(dev, "Device failed to exit MHI Reset state\n");
511 mutex_unlock(&mhi_cntrl->pm_mutex);
512 return;
513 }
514
515 /*
516 * Device will clear BHI_INTVEC as a part of RESET processing,
517 * hence re-program it
518 */
519 mhi_write_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_INTVEC, 0);
520 }
521
522 skip_mhi_reset:
523 dev_dbg(dev,
524 "Waiting for all pending event ring processing to complete\n");
525 mhi_event = mhi_cntrl->mhi_event;
526 for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
527 if (mhi_event->offload_ev)
528 continue;
529 tasklet_kill(&mhi_event->task);
530 }
531
532 /* Release lock and wait for all pending threads to complete */
533 mutex_unlock(&mhi_cntrl->pm_mutex);
534 dev_dbg(dev, "Waiting for all pending threads to complete\n");
535 wake_up_all(&mhi_cntrl->state_event);
536
537 dev_dbg(dev, "Reset all active channels and remove MHI devices\n");
538 device_for_each_child(mhi_cntrl->cntrl_dev, NULL, mhi_destroy_device);
539
540 mutex_lock(&mhi_cntrl->pm_mutex);
541
542 WARN_ON(atomic_read(&mhi_cntrl->dev_wake));
543 WARN_ON(atomic_read(&mhi_cntrl->pending_pkts));
544
545 /* Reset the ev rings and cmd rings */
546 dev_dbg(dev, "Resetting EV CTXT and CMD CTXT\n");
547 mhi_cmd = mhi_cntrl->mhi_cmd;
548 cmd_ctxt = mhi_cntrl->mhi_ctxt->cmd_ctxt;
549 for (i = 0; i < NR_OF_CMD_RINGS; i++, mhi_cmd++, cmd_ctxt++) {
550 struct mhi_ring *ring = &mhi_cmd->ring;
551
552 ring->rp = ring->base;
553 ring->wp = ring->base;
554 cmd_ctxt->rp = cmd_ctxt->rbase;
555 cmd_ctxt->wp = cmd_ctxt->rbase;
556 }
557
558 mhi_event = mhi_cntrl->mhi_event;
559 er_ctxt = mhi_cntrl->mhi_ctxt->er_ctxt;
560 for (i = 0; i < mhi_cntrl->total_ev_rings; i++, er_ctxt++,
561 mhi_event++) {
562 struct mhi_ring *ring = &mhi_event->ring;
563
564 /* Skip offload events */
565 if (mhi_event->offload_ev)
566 continue;
567
568 ring->rp = ring->base;
569 ring->wp = ring->base;
570 er_ctxt->rp = er_ctxt->rbase;
571 er_ctxt->wp = er_ctxt->rbase;
572 }
573
574 if (cur_state == MHI_PM_SYS_ERR_PROCESS) {
575 mhi_ready_state_transition(mhi_cntrl);
576 } else {
577 /* Move to disable state */
578 write_lock_irq(&mhi_cntrl->pm_lock);
579 cur_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_DISABLE);
580 write_unlock_irq(&mhi_cntrl->pm_lock);
581 if (unlikely(cur_state != MHI_PM_DISABLE))
582 dev_err(dev, "Error moving from PM state: %s to: %s\n",
583 to_mhi_pm_state_str(cur_state),
584 to_mhi_pm_state_str(MHI_PM_DISABLE));
585 }
586
587 dev_dbg(dev, "Exiting with PM state: %s, MHI state: %s\n",
588 to_mhi_pm_state_str(mhi_cntrl->pm_state),
589 TO_MHI_STATE_STR(mhi_cntrl->dev_state));
590
591 mutex_unlock(&mhi_cntrl->pm_mutex);
592 }
593
594 /* Queue a new work item and schedule work */
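/*
 * The item is allocated with GFP_ATOMIC, so this is safe to call from atomic
 * context. A typical call, as used by the SYS_ERR handler below:
 *
 *	mhi_queue_state_transition(mhi_cntrl, DEV_ST_TRANSITION_SYS_ERR);
 *
 * The queued item is processed later by mhi_pm_st_worker().
 */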
595 int mhi_queue_state_transition(struct mhi_controller *mhi_cntrl,
596 enum dev_st_transition state)
597 {
598 struct state_transition *item = kmalloc(sizeof(*item), GFP_ATOMIC);
599 unsigned long flags;
600
601 if (!item)
602 return -ENOMEM;
603
604 item->state = state;
605 spin_lock_irqsave(&mhi_cntrl->transition_lock, flags);
606 list_add_tail(&item->node, &mhi_cntrl->transition_list);
607 spin_unlock_irqrestore(&mhi_cntrl->transition_lock, flags);
608
609 schedule_work(&mhi_cntrl->st_worker);
610
611 return 0;
612 }
613
614 /* SYS_ERR handler */
615 void mhi_pm_sys_err_handler(struct mhi_controller *mhi_cntrl)
616 {
617 struct device *dev = &mhi_cntrl->mhi_dev->dev;
618
619 /* skip if controller supports RDDM */
620 if (mhi_cntrl->rddm_image) {
621 dev_dbg(dev, "Controller supports RDDM, skip SYS_ERROR\n");
622 return;
623 }
624
625 mhi_queue_state_transition(mhi_cntrl, DEV_ST_TRANSITION_SYS_ERR);
626 }
627
628 /* Device State Transition worker */
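/*
 * Drains mhi_cntrl->transition_list in FIFO order: each queued item is
 * handled by the matching transition helper and then freed.
 */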
629 void mhi_pm_st_worker(struct work_struct *work)
630 {
631 struct state_transition *itr, *tmp;
632 LIST_HEAD(head);
633 struct mhi_controller *mhi_cntrl = container_of(work,
634 struct mhi_controller,
635 st_worker);
636 struct device *dev = &mhi_cntrl->mhi_dev->dev;
637
638 spin_lock_irq(&mhi_cntrl->transition_lock);
639 list_splice_tail_init(&mhi_cntrl->transition_list, &head);
640 spin_unlock_irq(&mhi_cntrl->transition_lock);
641
642 list_for_each_entry_safe(itr, tmp, &head, node) {
643 list_del(&itr->node);
644 dev_dbg(dev, "Handling state transition: %s\n",
645 TO_DEV_STATE_TRANS_STR(itr->state));
646
647 switch (itr->state) {
648 case DEV_ST_TRANSITION_PBL:
649 write_lock_irq(&mhi_cntrl->pm_lock);
650 if (MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state))
651 mhi_cntrl->ee = mhi_get_exec_env(mhi_cntrl);
652 write_unlock_irq(&mhi_cntrl->pm_lock);
653 if (MHI_IN_PBL(mhi_cntrl->ee))
654 mhi_fw_load_handler(mhi_cntrl);
655 break;
656 case DEV_ST_TRANSITION_SBL:
657 write_lock_irq(&mhi_cntrl->pm_lock);
658 mhi_cntrl->ee = MHI_EE_SBL;
659 write_unlock_irq(&mhi_cntrl->pm_lock);
660 /*
661 * The MHI devices are only created when the client
662 * device switches its Execution Environment (EE) to
663 * either SBL or AMSS states
664 */
665 mhi_create_devices(mhi_cntrl);
666 break;
667 case DEV_ST_TRANSITION_MISSION_MODE:
668 mhi_pm_mission_mode_transition(mhi_cntrl);
669 break;
670 case DEV_ST_TRANSITION_READY:
671 mhi_ready_state_transition(mhi_cntrl);
672 break;
673 case DEV_ST_TRANSITION_SYS_ERR:
674 mhi_pm_disable_transition
675 (mhi_cntrl, MHI_PM_SYS_ERR_PROCESS);
676 break;
677 case DEV_ST_TRANSITION_DISABLE:
678 mhi_pm_disable_transition
679 (mhi_cntrl, MHI_PM_SHUTDOWN_PROCESS);
680 break;
681 default:
682 break;
683 }
684 kfree(itr);
685 }
686 }
687
688 int mhi_pm_suspend(struct mhi_controller *mhi_cntrl)
689 {
690 struct mhi_chan *itr, *tmp;
691 struct device *dev = &mhi_cntrl->mhi_dev->dev;
692 enum mhi_pm_state new_state;
693 int ret;
694
695 if (mhi_cntrl->pm_state == MHI_PM_DISABLE)
696 return -EINVAL;
697
698 if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state))
699 return -EIO;
700
701 /* Return busy if there are any pending resources */
702 if (atomic_read(&mhi_cntrl->dev_wake) ||
703 atomic_read(&mhi_cntrl->pending_pkts))
704 return -EBUSY;
705
706 /* Take MHI out of M2 state */
707 read_lock_bh(&mhi_cntrl->pm_lock);
708 mhi_cntrl->wake_get(mhi_cntrl, false);
709 read_unlock_bh(&mhi_cntrl->pm_lock);
710
711 ret = wait_event_timeout(mhi_cntrl->state_event,
712 mhi_cntrl->dev_state == MHI_STATE_M0 ||
713 mhi_cntrl->dev_state == MHI_STATE_M1 ||
714 MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state),
715 msecs_to_jiffies(mhi_cntrl->timeout_ms));
716
717 read_lock_bh(&mhi_cntrl->pm_lock);
718 mhi_cntrl->wake_put(mhi_cntrl, false);
719 read_unlock_bh(&mhi_cntrl->pm_lock);
720
721 if (!ret || MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
722 dev_err(dev,
723 "Could not enter M0/M1 state");
724 return -EIO;
725 }
726
727 write_lock_irq(&mhi_cntrl->pm_lock);
728
729 if (atomic_read(&mhi_cntrl->dev_wake) ||
730 atomic_read(&mhi_cntrl->pending_pkts)) {
731 write_unlock_irq(&mhi_cntrl->pm_lock);
732 return -EBUSY;
733 }
734
735 dev_info(dev, "Allowing M3 transition\n");
736 new_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_M3_ENTER);
737 if (new_state != MHI_PM_M3_ENTER) {
738 write_unlock_irq(&mhi_cntrl->pm_lock);
739 dev_err(dev,
740 "Error setting to PM state: %s from: %s\n",
741 to_mhi_pm_state_str(MHI_PM_M3_ENTER),
742 to_mhi_pm_state_str(mhi_cntrl->pm_state));
743 return -EIO;
744 }
745
746 /* Set MHI to M3 and wait for completion */
747 mhi_set_mhi_state(mhi_cntrl, MHI_STATE_M3);
748 write_unlock_irq(&mhi_cntrl->pm_lock);
749 dev_info(dev, "Wait for M3 completion\n");
750
751 ret = wait_event_timeout(mhi_cntrl->state_event,
752 mhi_cntrl->dev_state == MHI_STATE_M3 ||
753 MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state),
754 msecs_to_jiffies(mhi_cntrl->timeout_ms));
755
756 if (!ret || MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
757 dev_err(dev,
758 "Did not enter M3 state, MHI state: %s, PM state: %s\n",
759 TO_MHI_STATE_STR(mhi_cntrl->dev_state),
760 to_mhi_pm_state_str(mhi_cntrl->pm_state));
761 return -EIO;
762 }
763
764 /* Notify clients about entering LPM */
765 list_for_each_entry_safe(itr, tmp, &mhi_cntrl->lpm_chans, node) {
766 mutex_lock(&itr->mutex);
767 if (itr->mhi_dev)
768 mhi_notify(itr->mhi_dev, MHI_CB_LPM_ENTER);
769 mutex_unlock(&itr->mutex);
770 }
771
772 return 0;
773 }
774 EXPORT_SYMBOL_GPL(mhi_pm_suspend);
775
776 int mhi_pm_resume(struct mhi_controller *mhi_cntrl)
777 {
778 struct mhi_chan *itr, *tmp;
779 struct device *dev = &mhi_cntrl->mhi_dev->dev;
780 enum mhi_pm_state cur_state;
781 int ret;
782
783 dev_info(dev, "Entered with PM state: %s, MHI state: %s\n",
784 to_mhi_pm_state_str(mhi_cntrl->pm_state),
785 TO_MHI_STATE_STR(mhi_cntrl->dev_state));
786
787 if (mhi_cntrl->pm_state == MHI_PM_DISABLE)
788 return 0;
789
790 if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state))
791 return -EIO;
792
793 /* Notify clients about exiting LPM */
794 list_for_each_entry_safe(itr, tmp, &mhi_cntrl->lpm_chans, node) {
795 mutex_lock(&itr->mutex);
796 if (itr->mhi_dev)
797 mhi_notify(itr->mhi_dev, MHI_CB_LPM_EXIT);
798 mutex_unlock(&itr->mutex);
799 }
800
801 write_lock_irq(&mhi_cntrl->pm_lock);
802 cur_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_M3_EXIT);
803 if (cur_state != MHI_PM_M3_EXIT) {
804 write_unlock_irq(&mhi_cntrl->pm_lock);
805 dev_info(dev,
806 "Error setting to PM state: %s from: %s\n",
807 to_mhi_pm_state_str(MHI_PM_M3_EXIT),
808 to_mhi_pm_state_str(mhi_cntrl->pm_state));
809 return -EIO;
810 }
811
812 /* Set MHI to M0 and wait for completion */
813 mhi_set_mhi_state(mhi_cntrl, MHI_STATE_M0);
814 write_unlock_irq(&mhi_cntrl->pm_lock);
815
816 ret = wait_event_timeout(mhi_cntrl->state_event,
817 mhi_cntrl->dev_state == MHI_STATE_M0 ||
818 mhi_cntrl->dev_state == MHI_STATE_M2 ||
819 MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state),
820 msecs_to_jiffies(mhi_cntrl->timeout_ms));
821
822 if (!ret || MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
823 dev_err(dev,
824 "Did not enter M0 state, MHI state: %s, PM state: %s\n",
825 TO_MHI_STATE_STR(mhi_cntrl->dev_state),
826 to_mhi_pm_state_str(mhi_cntrl->pm_state));
827 return -EIO;
828 }
829
830 return 0;
831 }
832 EXPORT_SYMBOL_GPL(mhi_pm_resume);
833
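/*
 * Assert device wake and wait for the device to reach M0. On success the
 * wake vote taken here is left asserted and the caller is responsible for
 * dropping it with mhi_cntrl->wake_put(); on failure the vote is dropped
 * before returning -EIO.
 */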
834 int __mhi_device_get_sync(struct mhi_controller *mhi_cntrl)
835 {
836 int ret;
837
838 /* Wake up the device */
839 read_lock_bh(&mhi_cntrl->pm_lock);
840 mhi_cntrl->wake_get(mhi_cntrl, true);
841 if (MHI_PM_IN_SUSPEND_STATE(mhi_cntrl->pm_state))
842 mhi_trigger_resume(mhi_cntrl);
843 read_unlock_bh(&mhi_cntrl->pm_lock);
844
845 ret = wait_event_timeout(mhi_cntrl->state_event,
846 mhi_cntrl->pm_state == MHI_PM_M0 ||
847 MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state),
848 msecs_to_jiffies(mhi_cntrl->timeout_ms));
849
850 if (!ret || MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
851 read_lock_bh(&mhi_cntrl->pm_lock);
852 mhi_cntrl->wake_put(mhi_cntrl, false);
853 read_unlock_bh(&mhi_cntrl->pm_lock);
854 return -EIO;
855 }
856
857 return 0;
858 }
859
860 /* Assert device wake db */
861 static void mhi_assert_dev_wake(struct mhi_controller *mhi_cntrl, bool force)
862 {
863 unsigned long flags;
864
865 /*
866 * If force flag is set, then increment the wake count value and
867 * ring wake db
868 */
869 if (unlikely(force)) {
870 spin_lock_irqsave(&mhi_cntrl->wlock, flags);
871 atomic_inc(&mhi_cntrl->dev_wake);
872 if (MHI_WAKE_DB_FORCE_SET_VALID(mhi_cntrl->pm_state) &&
873 !mhi_cntrl->wake_set) {
874 mhi_write_db(mhi_cntrl, mhi_cntrl->wake_db, 1);
875 mhi_cntrl->wake_set = true;
876 }
877 spin_unlock_irqrestore(&mhi_cntrl->wlock, flags);
878 } else {
879 /*
880 * If resources are already requested, then just increment
881 * the wake count value and return
882 */
883 if (likely(atomic_add_unless(&mhi_cntrl->dev_wake, 1, 0)))
884 return;
885
886 spin_lock_irqsave(&mhi_cntrl->wlock, flags);
887 if ((atomic_inc_return(&mhi_cntrl->dev_wake) == 1) &&
888 MHI_WAKE_DB_SET_VALID(mhi_cntrl->pm_state) &&
889 !mhi_cntrl->wake_set) {
890 mhi_write_db(mhi_cntrl, mhi_cntrl->wake_db, 1);
891 mhi_cntrl->wake_set = true;
892 }
893 spin_unlock_irqrestore(&mhi_cntrl->wlock, flags);
894 }
895 }
896
897 /* De-assert device wake db */
898 static void mhi_deassert_dev_wake(struct mhi_controller *mhi_cntrl,
899 bool override)
900 {
901 unsigned long flags;
902
903 /*
904 * Only continue if there is a single resource, else just decrement
905 * and return
906 */
907 if (likely(atomic_add_unless(&mhi_cntrl->dev_wake, -1, 1)))
908 return;
909
910 spin_lock_irqsave(&mhi_cntrl->wlock, flags);
911 if ((atomic_dec_return(&mhi_cntrl->dev_wake) == 0) &&
912 MHI_WAKE_DB_CLEAR_VALID(mhi_cntrl->pm_state) && !override &&
913 mhi_cntrl->wake_set) {
914 mhi_write_db(mhi_cntrl, mhi_cntrl->wake_db, 0);
915 mhi_cntrl->wake_set = false;
916 }
917 spin_unlock_irqrestore(&mhi_cntrl->wlock, flags);
918 }
919
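/*
 * Kick off the power up sequence: install default wake ops if the controller
 * driver did not provide any, initialize the device context and IRQs,
 * discover the BHI/BHIE register offsets, sanity check the execution
 * environment (recovering from a stale SYS_ERR if needed), then queue the
 * PBL or READY state transition. The remainder of the bring up runs
 * asynchronously from mhi_pm_st_worker().
 */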
920 int mhi_async_power_up(struct mhi_controller *mhi_cntrl)
921 {
922 enum mhi_state state;
923 enum mhi_ee_type current_ee;
924 enum dev_st_transition next_state;
925 struct device *dev = &mhi_cntrl->mhi_dev->dev;
926 u32 val;
927 int ret;
928
929 dev_info(dev, "Requested to power ON\n");
930
931 if (mhi_cntrl->nr_irqs < 1)
932 return -EINVAL;
933
934 /* Supply default wake routines if not provided by controller driver */
935 if (!mhi_cntrl->wake_get || !mhi_cntrl->wake_put ||
936 !mhi_cntrl->wake_toggle) {
937 mhi_cntrl->wake_get = mhi_assert_dev_wake;
938 mhi_cntrl->wake_put = mhi_deassert_dev_wake;
939 mhi_cntrl->wake_toggle = (mhi_cntrl->db_access & MHI_PM_M2) ?
940 mhi_toggle_dev_wake_nop : mhi_toggle_dev_wake;
941 }
942
943 mutex_lock(&mhi_cntrl->pm_mutex);
944 mhi_cntrl->pm_state = MHI_PM_DISABLE;
945
946 if (!mhi_cntrl->pre_init) {
947 /* Setup device context */
948 ret = mhi_init_dev_ctxt(mhi_cntrl);
949 if (ret)
950 goto error_dev_ctxt;
951 }
952
953 ret = mhi_init_irq_setup(mhi_cntrl);
954 if (ret)
955 goto error_setup_irq;
956
957 /* Setup BHI offset & INTVEC */
958 write_lock_irq(&mhi_cntrl->pm_lock);
959 ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->regs, BHIOFF, &val);
960 if (ret) {
961 write_unlock_irq(&mhi_cntrl->pm_lock);
962 goto error_bhi_offset;
963 }
964
965 mhi_cntrl->bhi = mhi_cntrl->regs + val;
966
967 /* Setup BHIE offset */
968 if (mhi_cntrl->fbc_download) {
969 ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->regs, BHIEOFF, &val);
970 if (ret) {
971 write_unlock_irq(&mhi_cntrl->pm_lock);
972 dev_err(dev, "Error reading BHIE offset\n");
973 goto error_bhi_offset;
974 }
975
976 mhi_cntrl->bhie = mhi_cntrl->regs + val;
977 }
978
979 mhi_write_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_INTVEC, 0);
980 mhi_cntrl->pm_state = MHI_PM_POR;
981 mhi_cntrl->ee = MHI_EE_MAX;
982 current_ee = mhi_get_exec_env(mhi_cntrl);
983 write_unlock_irq(&mhi_cntrl->pm_lock);
984
985 /* Confirm that the device is in valid exec env */
986 if (!MHI_IN_PBL(current_ee) && current_ee != MHI_EE_AMSS) {
987 dev_err(dev, "Not a valid EE for power on\n");
988 ret = -EIO;
989 goto error_bhi_offset;
990 }
991
992 state = mhi_get_mhi_state(mhi_cntrl);
993 if (state == MHI_STATE_SYS_ERR) {
994 mhi_set_mhi_state(mhi_cntrl, MHI_STATE_RESET);
995 ret = wait_event_timeout(mhi_cntrl->state_event,
996 MHI_PM_IN_FATAL_STATE(mhi_cntrl->pm_state) ||
997 mhi_read_reg_field(mhi_cntrl,
998 mhi_cntrl->regs,
999 MHICTRL,
1000 MHICTRL_RESET_MASK,
1001 MHICTRL_RESET_SHIFT,
1002 &val) ||
1003 !val,
1004 msecs_to_jiffies(mhi_cntrl->timeout_ms));
1005 if (!ret) {
1006 ret = -EIO;
1007 dev_info(dev, "Failed to reset MHI due to syserr state\n");
1008 goto error_bhi_offset;
1009 }
1010
1011 /*
1012 * Device clears BHI_INTVEC as part of RESET processing,
1013 * hence re-program it
1014 */
1015 mhi_write_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_INTVEC, 0);
1016 }
1017
1018 /* Transition to next state */
1019 next_state = MHI_IN_PBL(current_ee) ?
1020 DEV_ST_TRANSITION_PBL : DEV_ST_TRANSITION_READY;
1021
1022 mhi_queue_state_transition(mhi_cntrl, next_state);
1023
1024 mutex_unlock(&mhi_cntrl->pm_mutex);
1025
1026 dev_info(dev, "Power on setup success\n");
1027
1028 return 0;
1029
1030 error_bhi_offset:
1031 mhi_deinit_free_irq(mhi_cntrl);
1032
1033 error_setup_irq:
1034 if (!mhi_cntrl->pre_init)
1035 mhi_deinit_dev_ctxt(mhi_cntrl);
1036
1037 error_dev_ctxt:
1038 mutex_unlock(&mhi_cntrl->pm_mutex);
1039
1040 return ret;
1041 }
1042 EXPORT_SYMBOL_GPL(mhi_async_power_up);
1043
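/*
 * Tear down the MHI stack: for a non-graceful shutdown the PM state is first
 * forced to LD_ERR_FATAL_DETECT so no further device access is attempted,
 * then the DISABLE transition is queued and flushed, IRQs are freed and,
 * unless the controller pre-initialized the context, the device context and
 * any firmware image tables are released.
 */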
1044 void mhi_power_down(struct mhi_controller *mhi_cntrl, bool graceful)
1045 {
1046 enum mhi_pm_state cur_state;
1047 struct device *dev = &mhi_cntrl->mhi_dev->dev;
1048
1049 /* If it's not a graceful shutdown, force MHI to linkdown state */
1050 if (!graceful) {
1051 mutex_lock(&mhi_cntrl->pm_mutex);
1052 write_lock_irq(&mhi_cntrl->pm_lock);
1053 cur_state = mhi_tryset_pm_state(mhi_cntrl,
1054 MHI_PM_LD_ERR_FATAL_DETECT);
1055 write_unlock_irq(&mhi_cntrl->pm_lock);
1056 mutex_unlock(&mhi_cntrl->pm_mutex);
1057 if (cur_state != MHI_PM_LD_ERR_FATAL_DETECT)
1058 dev_dbg(dev, "Failed to move to state: %s from: %s\n",
1059 to_mhi_pm_state_str(MHI_PM_LD_ERR_FATAL_DETECT),
1060 to_mhi_pm_state_str(mhi_cntrl->pm_state));
1061 }
1062
1063 mhi_queue_state_transition(mhi_cntrl, DEV_ST_TRANSITION_DISABLE);
1064
1065 /* Wait for shutdown to complete */
1066 flush_work(&mhi_cntrl->st_worker);
1067
1068 mhi_deinit_free_irq(mhi_cntrl);
1069
1070 if (!mhi_cntrl->pre_init) {
1071 /* Free all allocated resources */
1072 if (mhi_cntrl->fbc_image) {
1073 mhi_free_bhie_table(mhi_cntrl, mhi_cntrl->fbc_image);
1074 mhi_cntrl->fbc_image = NULL;
1075 }
1076 mhi_deinit_dev_ctxt(mhi_cntrl);
1077 }
1078 }
1079 EXPORT_SYMBOL_GPL(mhi_power_down);
1080
1081 int mhi_sync_power_up(struct mhi_controller *mhi_cntrl)
1082 {
1083 int ret = mhi_async_power_up(mhi_cntrl);
1084
1085 if (ret)
1086 return ret;
1087
1088 wait_event_timeout(mhi_cntrl->state_event,
1089 MHI_IN_MISSION_MODE(mhi_cntrl->ee) ||
1090 MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state),
1091 msecs_to_jiffies(mhi_cntrl->timeout_ms));
1092
1093 ret = (MHI_IN_MISSION_MODE(mhi_cntrl->ee)) ? 0 : -ETIMEDOUT;
1094 if (ret)
1095 mhi_power_down(mhi_cntrl, false);
1096
1097 return ret;
1098 }
1099 EXPORT_SYMBOL(mhi_sync_power_up);
1100
1101 int mhi_force_rddm_mode(struct mhi_controller *mhi_cntrl)
1102 {
1103 struct device *dev = &mhi_cntrl->mhi_dev->dev;
1104 int ret;
1105
1106 /* Check if device is already in RDDM */
1107 if (mhi_cntrl->ee == MHI_EE_RDDM)
1108 return 0;
1109
1110 dev_dbg(dev, "Triggering SYS_ERR to force RDDM state\n");
1111 mhi_set_mhi_state(mhi_cntrl, MHI_STATE_SYS_ERR);
1112
1113 /* Wait for RDDM event */
1114 ret = wait_event_timeout(mhi_cntrl->state_event,
1115 mhi_cntrl->ee == MHI_EE_RDDM,
1116 msecs_to_jiffies(mhi_cntrl->timeout_ms));
1117 ret = ret ? 0 : -EIO;
1118
1119 return ret;
1120 }
1121 EXPORT_SYMBOL_GPL(mhi_force_rddm_mode);
1122
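/*
 * Per-device wake vote: mhi_device_get()/mhi_device_put() calls must be
 * balanced. Each get bumps mhi_dev->dev_wake, triggers a resume if the
 * controller is suspended and takes a controller level vote via wake_get().
 */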
1123 void mhi_device_get(struct mhi_device *mhi_dev)
1124 {
1125 struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
1126
1127 mhi_dev->dev_wake++;
1128 read_lock_bh(&mhi_cntrl->pm_lock);
1129 if (MHI_PM_IN_SUSPEND_STATE(mhi_cntrl->pm_state))
1130 mhi_trigger_resume(mhi_cntrl);
1131
1132 mhi_cntrl->wake_get(mhi_cntrl, true);
1133 read_unlock_bh(&mhi_cntrl->pm_lock);
1134 }
1135 EXPORT_SYMBOL_GPL(mhi_device_get);
1136
1137 int mhi_device_get_sync(struct mhi_device *mhi_dev)
1138 {
1139 struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
1140 int ret;
1141
1142 ret = __mhi_device_get_sync(mhi_cntrl);
1143 if (!ret)
1144 mhi_dev->dev_wake++;
1145
1146 return ret;
1147 }
1148 EXPORT_SYMBOL_GPL(mhi_device_get_sync);
1149
1150 void mhi_device_put(struct mhi_device *mhi_dev)
1151 {
1152 struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
1153
1154 mhi_dev->dev_wake--;
1155 read_lock_bh(&mhi_cntrl->pm_lock);
1156 if (MHI_PM_IN_SUSPEND_STATE(mhi_cntrl->pm_state))
1157 mhi_trigger_resume(mhi_cntrl);
1158
1159 mhi_cntrl->wake_put(mhi_cntrl, false);
1160 read_unlock_bh(&mhi_cntrl->pm_lock);
1161 }
1162 EXPORT_SYMBOL_GPL(mhi_device_put);
1163