// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
 *
 */

#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dma-direction.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/mhi.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/wait.h>
#include "internal.h"

/*
 * Not all MHI state transitions are synchronous. Transitions like Linkdown,
 * SYS_ERR, and shutdown can happen anytime asynchronously. This function will
 * transition to a new state only if we're allowed to.
 *
 * Priority increases as we go down. For instance, from any state in L0, the
 * transition can be made to states in L1, L2 and L3. A notable exception to
 * this rule is state DISABLE. From DISABLE state we can only transition to
 * POR state. Also, while in L2 state, user cannot jump back to previous
 * L1 or L0 states.
 *
 * Valid transitions:
 * L0: DISABLE <--> POR
 *     POR <--> POR
 *     POR -> M0 -> M2 --> M0
 *     POR -> FW_DL_ERR
 *     FW_DL_ERR <--> FW_DL_ERR
 *     M0 <--> M0
 *     M0 -> FW_DL_ERR
 *     M0 -> M3_ENTER -> M3 -> M3_EXIT --> M0
 * L1: SYS_ERR_DETECT -> SYS_ERR_PROCESS --> POR
 * L2: SHUTDOWN_PROCESS -> DISABLE
 * L3: LD_ERR_FATAL_DETECT <--> LD_ERR_FATAL_DETECT
 *     LD_ERR_FATAL_DETECT -> SHUTDOWN_PROCESS
 */
static const struct mhi_pm_transitions dev_state_transitions[] = {
	/* L0 States */
	{
		MHI_PM_DISABLE,
		MHI_PM_POR
	},
	{
		MHI_PM_POR,
		MHI_PM_POR | MHI_PM_DISABLE | MHI_PM_M0 |
		MHI_PM_SYS_ERR_DETECT | MHI_PM_SHUTDOWN_PROCESS |
		MHI_PM_LD_ERR_FATAL_DETECT | MHI_PM_FW_DL_ERR
	},
	{
		MHI_PM_M0,
		MHI_PM_M0 | MHI_PM_M2 | MHI_PM_M3_ENTER |
		MHI_PM_SYS_ERR_DETECT | MHI_PM_SHUTDOWN_PROCESS |
		MHI_PM_LD_ERR_FATAL_DETECT | MHI_PM_FW_DL_ERR
	},
	{
		MHI_PM_M2,
		MHI_PM_M0 | MHI_PM_SYS_ERR_DETECT | MHI_PM_SHUTDOWN_PROCESS |
		MHI_PM_LD_ERR_FATAL_DETECT
	},
	{
		MHI_PM_M3_ENTER,
		MHI_PM_M3 | MHI_PM_SYS_ERR_DETECT | MHI_PM_SHUTDOWN_PROCESS |
		MHI_PM_LD_ERR_FATAL_DETECT
	},
	{
		MHI_PM_M3,
		MHI_PM_M3_EXIT | MHI_PM_SYS_ERR_DETECT |
		MHI_PM_LD_ERR_FATAL_DETECT
	},
	{
		MHI_PM_M3_EXIT,
		MHI_PM_M0 | MHI_PM_SYS_ERR_DETECT | MHI_PM_SHUTDOWN_PROCESS |
		MHI_PM_LD_ERR_FATAL_DETECT
	},
	{
		MHI_PM_FW_DL_ERR,
		MHI_PM_FW_DL_ERR | MHI_PM_SYS_ERR_DETECT |
		MHI_PM_SHUTDOWN_PROCESS | MHI_PM_LD_ERR_FATAL_DETECT
	},
	/* L1 States */
	{
		MHI_PM_SYS_ERR_DETECT,
		MHI_PM_SYS_ERR_PROCESS | MHI_PM_SHUTDOWN_PROCESS |
		MHI_PM_LD_ERR_FATAL_DETECT
	},
	{
		MHI_PM_SYS_ERR_PROCESS,
		MHI_PM_POR | MHI_PM_SHUTDOWN_PROCESS |
		MHI_PM_LD_ERR_FATAL_DETECT
	},
	/* L2 States */
	{
		MHI_PM_SHUTDOWN_PROCESS,
		MHI_PM_DISABLE | MHI_PM_LD_ERR_FATAL_DETECT
	},
	/* L3 States */
	{
		MHI_PM_LD_ERR_FATAL_DETECT,
		MHI_PM_LD_ERR_FATAL_DETECT | MHI_PM_DISABLE
	},
};

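/*
 * Attempt to move the state machine to @state. A note on the contract,
 * inferred from the call sites in this file: the caller must hold pm_lock
 * as a writer. Returns the new pm_state if the transition table above
 * allows it, otherwise the unchanged current state, hence __must_check.
 */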
enum mhi_pm_state __must_check mhi_tryset_pm_state(struct mhi_controller *mhi_cntrl,
						   enum mhi_pm_state state)
{
	unsigned long cur_state = mhi_cntrl->pm_state;
	int index = find_last_bit(&cur_state, 32);

	if (unlikely(index >= ARRAY_SIZE(dev_state_transitions)))
		return cur_state;

	if (unlikely(dev_state_transitions[index].from_state != cur_state))
		return cur_state;

	if (unlikely(!(dev_state_transitions[index].to_states & state)))
		return cur_state;

	mhi_cntrl->pm_state = state;
	return mhi_cntrl->pm_state;
}

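/*
 * Set the device MHI state. RESET is requested through its own MHICTRL
 * bit-field, separate from the MHISTATE field, hence the special case.
 */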
void mhi_set_mhi_state(struct mhi_controller *mhi_cntrl, enum mhi_state state)
{
	if (state == MHI_STATE_RESET) {
		mhi_write_reg_field(mhi_cntrl, mhi_cntrl->regs, MHICTRL,
				    MHICTRL_RESET_MASK, MHICTRL_RESET_SHIFT, 1);
	} else {
		mhi_write_reg_field(mhi_cntrl, mhi_cntrl->regs, MHICTRL,
				    MHICTRL_MHISTATE_MASK,
				    MHICTRL_MHISTATE_SHIFT, state);
	}
}

/* NOP for backward compatibility */
static void mhi_toggle_dev_wake_nop(struct mhi_controller *mhi_cntrl)
{
}

static void mhi_toggle_dev_wake(struct mhi_controller *mhi_cntrl)
{
	mhi_cntrl->wake_get(mhi_cntrl, false);
	mhi_cntrl->wake_put(mhi_cntrl, true);
}

/* Handle device ready state transition */
int mhi_ready_state_transition(struct mhi_controller *mhi_cntrl)
{
	void __iomem *base = mhi_cntrl->regs;
	struct mhi_event *mhi_event;
	enum mhi_pm_state cur_state;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	u32 reset = 1, ready = 0;
	int ret, i;

	/* Wait for RESET to be cleared and READY bit to be set by the device */
	wait_event_timeout(mhi_cntrl->state_event,
			   MHI_PM_IN_FATAL_STATE(mhi_cntrl->pm_state) ||
			   mhi_read_reg_field(mhi_cntrl, base, MHICTRL,
					      MHICTRL_RESET_MASK,
					      MHICTRL_RESET_SHIFT, &reset) ||
			   mhi_read_reg_field(mhi_cntrl, base, MHISTATUS,
					      MHISTATUS_READY_MASK,
					      MHISTATUS_READY_SHIFT, &ready) ||
			   (!reset && ready),
			   msecs_to_jiffies(mhi_cntrl->timeout_ms));

	/* Check if device entered error state */
	if (MHI_PM_IN_FATAL_STATE(mhi_cntrl->pm_state)) {
		dev_err(dev, "Device link is not accessible\n");
		return -EIO;
	}

	/* Timeout if device did not transition to ready state */
	if (reset || !ready) {
		dev_err(dev, "Device Ready timeout\n");
		return -ETIMEDOUT;
	}

	dev_dbg(dev, "Device in READY State\n");
	write_lock_irq(&mhi_cntrl->pm_lock);
	cur_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_POR);
	mhi_cntrl->dev_state = MHI_STATE_READY;
	write_unlock_irq(&mhi_cntrl->pm_lock);

	if (cur_state != MHI_PM_POR) {
		dev_err(dev, "Error moving to state %s from %s\n",
			to_mhi_pm_state_str(MHI_PM_POR),
			to_mhi_pm_state_str(cur_state));
		return -EIO;
	}

	read_lock_bh(&mhi_cntrl->pm_lock);
	if (!MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) {
		dev_err(dev, "Device registers not accessible\n");
		goto error_mmio;
	}

	/* Configure MMIO registers */
	ret = mhi_init_mmio(mhi_cntrl);
	if (ret) {
		dev_err(dev, "Error configuring MMIO registers\n");
		goto error_mmio;
	}

	/* Add elements to all SW event rings */
	mhi_event = mhi_cntrl->mhi_event;
	for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
		struct mhi_ring *ring = &mhi_event->ring;

		/* Skip if this is an offload or HW event */
		if (mhi_event->offload_ev || mhi_event->hw_ring)
			continue;

		ring->wp = ring->base + ring->len - ring->el_size;
		*ring->ctxt_wp = ring->iommu_base + ring->len - ring->el_size;
		/* Update all cores */
		smp_wmb();

		/* Ring the event ring db */
		spin_lock_irq(&mhi_event->lock);
		mhi_ring_er_db(mhi_event);
		spin_unlock_irq(&mhi_event->lock);
	}

	/* Set MHI to M0 state */
	mhi_set_mhi_state(mhi_cntrl, MHI_STATE_M0);
	read_unlock_bh(&mhi_cntrl->pm_lock);

	return 0;

error_mmio:
	read_unlock_bh(&mhi_cntrl->pm_lock);

	return -EIO;
}

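/*
 * M0 entry handler. The doorbell rings below replay any work that may have
 * accumulated while doorbell access was invalid (for example, while the
 * device was in M2 or M3).
 */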
int mhi_pm_m0_transition(struct mhi_controller *mhi_cntrl)
{
	enum mhi_pm_state cur_state;
	struct mhi_chan *mhi_chan;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	int i;

	write_lock_irq(&mhi_cntrl->pm_lock);
	mhi_cntrl->dev_state = MHI_STATE_M0;
	cur_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_M0);
	write_unlock_irq(&mhi_cntrl->pm_lock);
	if (unlikely(cur_state != MHI_PM_M0)) {
		dev_err(dev, "Unable to transition to M0 state\n");
		return -EIO;
	}
	mhi_cntrl->M0++;

	/* Wake up the device */
	read_lock_bh(&mhi_cntrl->pm_lock);
	mhi_cntrl->wake_get(mhi_cntrl, true);

	/* Ring all event rings and CMD ring only if we're in mission mode */
	if (MHI_IN_MISSION_MODE(mhi_cntrl->ee)) {
		struct mhi_event *mhi_event = mhi_cntrl->mhi_event;
		struct mhi_cmd *mhi_cmd =
			&mhi_cntrl->mhi_cmd[PRIMARY_CMD_RING];

		for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
			if (mhi_event->offload_ev)
				continue;

			spin_lock_irq(&mhi_event->lock);
			mhi_ring_er_db(mhi_event);
			spin_unlock_irq(&mhi_event->lock);
		}

		/* Only ring primary cmd ring if ring is not empty */
		spin_lock_irq(&mhi_cmd->lock);
		if (mhi_cmd->ring.rp != mhi_cmd->ring.wp)
			mhi_ring_cmd_db(mhi_cntrl, mhi_cmd);
		spin_unlock_irq(&mhi_cmd->lock);
	}

	/* Ring channel DB registers */
	mhi_chan = mhi_cntrl->mhi_chan;
	for (i = 0; i < mhi_cntrl->max_chan; i++, mhi_chan++) {
		struct mhi_ring *tre_ring = &mhi_chan->tre_ring;

		if (mhi_chan->db_cfg.reset_req) {
			write_lock_irq(&mhi_chan->lock);
			mhi_chan->db_cfg.db_mode = true;
			write_unlock_irq(&mhi_chan->lock);
		}

		read_lock_irq(&mhi_chan->lock);

		/* Only ring DB if ring is not empty */
		if (tre_ring->base && tre_ring->wp != tre_ring->rp)
			mhi_ring_chan_db(mhi_cntrl, mhi_chan);
		read_unlock_irq(&mhi_chan->lock);
	}

	mhi_cntrl->wake_put(mhi_cntrl, false);
	read_unlock_bh(&mhi_cntrl->pm_lock);
	wake_up_all(&mhi_cntrl->state_event);

	return 0;
}

/*
 * After receiving the MHI state change event from the device indicating the
 * transition to M1 state, the host can transition the device to M2 state
 * for keeping it in low power state.
 */
void mhi_pm_m1_transition(struct mhi_controller *mhi_cntrl)
{
	enum mhi_pm_state state;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;

	write_lock_irq(&mhi_cntrl->pm_lock);
	state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_M2);
	if (state == MHI_PM_M2) {
		mhi_set_mhi_state(mhi_cntrl, MHI_STATE_M2);
		mhi_cntrl->dev_state = MHI_STATE_M2;

		write_unlock_irq(&mhi_cntrl->pm_lock);

		mhi_cntrl->M2++;
		wake_up_all(&mhi_cntrl->state_event);

		/* If there are any pending resources, exit M2 immediately */
		if (unlikely(atomic_read(&mhi_cntrl->pending_pkts) ||
			     atomic_read(&mhi_cntrl->dev_wake))) {
			dev_dbg(dev,
				"Exiting M2, pending_pkts: %d dev_wake: %d\n",
				atomic_read(&mhi_cntrl->pending_pkts),
				atomic_read(&mhi_cntrl->dev_wake));
			read_lock_bh(&mhi_cntrl->pm_lock);
			mhi_cntrl->wake_get(mhi_cntrl, true);
			mhi_cntrl->wake_put(mhi_cntrl, true);
			read_unlock_bh(&mhi_cntrl->pm_lock);
		} else {
			mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_IDLE);
		}
	} else {
		write_unlock_irq(&mhi_cntrl->pm_lock);
	}
}

/* MHI M3 completion handler */
int mhi_pm_m3_transition(struct mhi_controller *mhi_cntrl)
{
	enum mhi_pm_state state;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;

	write_lock_irq(&mhi_cntrl->pm_lock);
	mhi_cntrl->dev_state = MHI_STATE_M3;
	state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_M3);
	write_unlock_irq(&mhi_cntrl->pm_lock);
	if (state != MHI_PM_M3) {
		dev_err(dev, "Unable to transition to M3 state\n");
		return -EIO;
	}

	mhi_cntrl->M3++;
	wake_up_all(&mhi_cntrl->state_event);

	return 0;
}

/* Handle device Mission Mode transition */
static int mhi_pm_mission_mode_transition(struct mhi_controller *mhi_cntrl)
{
	struct mhi_event *mhi_event;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	int i, ret;

	dev_dbg(dev, "Processing Mission Mode transition\n");

	write_lock_irq(&mhi_cntrl->pm_lock);
	if (MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state))
		mhi_cntrl->ee = mhi_get_exec_env(mhi_cntrl);

	if (!MHI_IN_MISSION_MODE(mhi_cntrl->ee)) {
		mhi_cntrl->pm_state = MHI_PM_LD_ERR_FATAL_DETECT;
		write_unlock_irq(&mhi_cntrl->pm_lock);
		wake_up_all(&mhi_cntrl->state_event);
		return -EIO;
	}
	write_unlock_irq(&mhi_cntrl->pm_lock);

	wake_up_all(&mhi_cntrl->state_event);

	mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_EE_MISSION_MODE);

	/* Force MHI to be in M0 state before continuing */
	ret = __mhi_device_get_sync(mhi_cntrl);
	if (ret)
		return ret;

	read_lock_bh(&mhi_cntrl->pm_lock);

	if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
		ret = -EIO;
		goto error_mission_mode;
	}

	/* Add elements to all HW event rings */
	mhi_event = mhi_cntrl->mhi_event;
	for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
		struct mhi_ring *ring = &mhi_event->ring;

		if (mhi_event->offload_ev || !mhi_event->hw_ring)
			continue;

		ring->wp = ring->base + ring->len - ring->el_size;
		*ring->ctxt_wp = ring->iommu_base + ring->len - ring->el_size;
		/* Update to all cores */
		smp_wmb();

		/* Ring the event ring db */
		spin_lock_irq(&mhi_event->lock);
		if (MHI_DB_ACCESS_VALID(mhi_cntrl))
			mhi_ring_er_db(mhi_event);
		spin_unlock_irq(&mhi_event->lock);
	}

	read_unlock_bh(&mhi_cntrl->pm_lock);

	/*
	 * The MHI devices are only created when the client device switches its
	 * Execution Environment (EE) to either SBL or AMSS states
	 */
	mhi_create_devices(mhi_cntrl);

	read_lock_bh(&mhi_cntrl->pm_lock);

error_mission_mode:
	mhi_cntrl->wake_put(mhi_cntrl, false);
	read_unlock_bh(&mhi_cntrl->pm_lock);

	return ret;
}

/* Handle shutdown transitions */
static void mhi_pm_disable_transition(struct mhi_controller *mhi_cntrl)
{
	enum mhi_pm_state cur_state;
	struct mhi_event *mhi_event;
	struct mhi_cmd_ctxt *cmd_ctxt;
	struct mhi_cmd *mhi_cmd;
	struct mhi_event_ctxt *er_ctxt;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	int ret, i;

	dev_dbg(dev, "Processing disable transition with PM state: %s\n",
		to_mhi_pm_state_str(mhi_cntrl->pm_state));

	mutex_lock(&mhi_cntrl->pm_mutex);

	/* Trigger MHI RESET so that the device will not access host memory */
	if (!MHI_PM_IN_FATAL_STATE(mhi_cntrl->pm_state)) {
		u32 in_reset = -1;
		unsigned long timeout = msecs_to_jiffies(mhi_cntrl->timeout_ms);

		dev_dbg(dev, "Triggering MHI Reset in device\n");
		mhi_set_mhi_state(mhi_cntrl, MHI_STATE_RESET);

		/* Wait for the reset bit to be cleared by the device */
		ret = wait_event_timeout(mhi_cntrl->state_event,
					 mhi_read_reg_field(mhi_cntrl,
							    mhi_cntrl->regs,
							    MHICTRL,
							    MHICTRL_RESET_MASK,
							    MHICTRL_RESET_SHIFT,
							    &in_reset) ||
					 !in_reset, timeout);
		if (!ret || in_reset)
			dev_err(dev, "Device failed to exit MHI Reset state\n");

		/*
		 * Device will clear BHI_INTVEC as a part of RESET processing,
		 * hence re-program it
		 */
		mhi_write_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_INTVEC, 0);
	}

	dev_dbg(dev,
		"Waiting for all pending event ring processing to complete\n");
	mhi_event = mhi_cntrl->mhi_event;
	for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
		if (mhi_event->offload_ev)
			continue;
		free_irq(mhi_cntrl->irq[mhi_event->irq], mhi_event);
		tasklet_kill(&mhi_event->task);
	}

	/* Release lock and wait for all pending threads to complete */
	mutex_unlock(&mhi_cntrl->pm_mutex);
	dev_dbg(dev, "Waiting for all pending threads to complete\n");
	wake_up_all(&mhi_cntrl->state_event);

	dev_dbg(dev, "Reset all active channels and remove MHI devices\n");
	device_for_each_child(&mhi_cntrl->mhi_dev->dev, NULL, mhi_destroy_device);

	mutex_lock(&mhi_cntrl->pm_mutex);

	WARN_ON(atomic_read(&mhi_cntrl->dev_wake));
	WARN_ON(atomic_read(&mhi_cntrl->pending_pkts));

	/* Reset the ev rings and cmd rings */
	dev_dbg(dev, "Resetting EV CTXT and CMD CTXT\n");
	mhi_cmd = mhi_cntrl->mhi_cmd;
	cmd_ctxt = mhi_cntrl->mhi_ctxt->cmd_ctxt;
	for (i = 0; i < NR_OF_CMD_RINGS; i++, mhi_cmd++, cmd_ctxt++) {
		struct mhi_ring *ring = &mhi_cmd->ring;

		ring->rp = ring->base;
		ring->wp = ring->base;
		cmd_ctxt->rp = cmd_ctxt->rbase;
		cmd_ctxt->wp = cmd_ctxt->rbase;
	}

	mhi_event = mhi_cntrl->mhi_event;
	er_ctxt = mhi_cntrl->mhi_ctxt->er_ctxt;
	for (i = 0; i < mhi_cntrl->total_ev_rings; i++, er_ctxt++,
		     mhi_event++) {
		struct mhi_ring *ring = &mhi_event->ring;

		/* Skip offload events */
		if (mhi_event->offload_ev)
			continue;

		ring->rp = ring->base;
		ring->wp = ring->base;
		er_ctxt->rp = er_ctxt->rbase;
		er_ctxt->wp = er_ctxt->rbase;
	}

	/* Move to disable state */
	write_lock_irq(&mhi_cntrl->pm_lock);
	cur_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_DISABLE);
	write_unlock_irq(&mhi_cntrl->pm_lock);
	if (unlikely(cur_state != MHI_PM_DISABLE))
		dev_err(dev, "Error moving from PM state: %s to: %s\n",
			to_mhi_pm_state_str(cur_state),
			to_mhi_pm_state_str(MHI_PM_DISABLE));

	dev_dbg(dev, "Exiting with PM state: %s, MHI state: %s\n",
		to_mhi_pm_state_str(mhi_cntrl->pm_state),
		TO_MHI_STATE_STR(mhi_cntrl->dev_state));

	mutex_unlock(&mhi_cntrl->pm_mutex);
}

/* Handle system error transitions */
static void mhi_pm_sys_error_transition(struct mhi_controller *mhi_cntrl)
{
	enum mhi_pm_state cur_state, prev_state;
	struct mhi_event *mhi_event;
	struct mhi_cmd_ctxt *cmd_ctxt;
	struct mhi_cmd *mhi_cmd;
	struct mhi_event_ctxt *er_ctxt;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	int ret, i;

	dev_dbg(dev, "Transitioning from PM state: %s to: %s\n",
		to_mhi_pm_state_str(mhi_cntrl->pm_state),
		to_mhi_pm_state_str(MHI_PM_SYS_ERR_PROCESS));

	/* We must notify MHI control driver so it can clean up first */
	mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_SYS_ERROR);

	mutex_lock(&mhi_cntrl->pm_mutex);
	write_lock_irq(&mhi_cntrl->pm_lock);
	prev_state = mhi_cntrl->pm_state;
	cur_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_SYS_ERR_PROCESS);
	write_unlock_irq(&mhi_cntrl->pm_lock);

	if (cur_state != MHI_PM_SYS_ERR_PROCESS) {
		dev_err(dev, "Failed to transition from PM state: %s to: %s\n",
			to_mhi_pm_state_str(cur_state),
			to_mhi_pm_state_str(MHI_PM_SYS_ERR_PROCESS));
		goto exit_sys_error_transition;
	}

	mhi_cntrl->ee = MHI_EE_DISABLE_TRANSITION;
	mhi_cntrl->dev_state = MHI_STATE_RESET;

	/* Wake up threads waiting for state transition */
	wake_up_all(&mhi_cntrl->state_event);

	/* Trigger MHI RESET so that the device will not access host memory */
	if (MHI_REG_ACCESS_VALID(prev_state)) {
		u32 in_reset = -1;
		unsigned long timeout = msecs_to_jiffies(mhi_cntrl->timeout_ms);

		dev_dbg(dev, "Triggering MHI Reset in device\n");
		mhi_set_mhi_state(mhi_cntrl, MHI_STATE_RESET);

		/* Wait for the reset bit to be cleared by the device */
		ret = wait_event_timeout(mhi_cntrl->state_event,
					 mhi_read_reg_field(mhi_cntrl,
							    mhi_cntrl->regs,
							    MHICTRL,
							    MHICTRL_RESET_MASK,
							    MHICTRL_RESET_SHIFT,
							    &in_reset) ||
					 !in_reset, timeout);
		if (!ret || in_reset) {
			dev_err(dev, "Device failed to exit MHI Reset state\n");
			goto exit_sys_error_transition;
		}

		/*
		 * Device will clear BHI_INTVEC as a part of RESET processing,
		 * hence re-program it
		 */
		mhi_write_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_INTVEC, 0);
	}

	dev_dbg(dev,
		"Waiting for all pending event ring processing to complete\n");
	mhi_event = mhi_cntrl->mhi_event;
	for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
		if (mhi_event->offload_ev)
			continue;
		tasklet_kill(&mhi_event->task);
	}

	/* Release lock and wait for all pending threads to complete */
	mutex_unlock(&mhi_cntrl->pm_mutex);
	dev_dbg(dev, "Waiting for all pending threads to complete\n");
	wake_up_all(&mhi_cntrl->state_event);

	dev_dbg(dev, "Reset all active channels and remove MHI devices\n");
	device_for_each_child(&mhi_cntrl->mhi_dev->dev, NULL, mhi_destroy_device);

	mutex_lock(&mhi_cntrl->pm_mutex);

	WARN_ON(atomic_read(&mhi_cntrl->dev_wake));
	WARN_ON(atomic_read(&mhi_cntrl->pending_pkts));

	/* Reset the ev rings and cmd rings */
	dev_dbg(dev, "Resetting EV CTXT and CMD CTXT\n");
	mhi_cmd = mhi_cntrl->mhi_cmd;
	cmd_ctxt = mhi_cntrl->mhi_ctxt->cmd_ctxt;
	for (i = 0; i < NR_OF_CMD_RINGS; i++, mhi_cmd++, cmd_ctxt++) {
		struct mhi_ring *ring = &mhi_cmd->ring;

		ring->rp = ring->base;
		ring->wp = ring->base;
		cmd_ctxt->rp = cmd_ctxt->rbase;
		cmd_ctxt->wp = cmd_ctxt->rbase;
	}

	mhi_event = mhi_cntrl->mhi_event;
	er_ctxt = mhi_cntrl->mhi_ctxt->er_ctxt;
	for (i = 0; i < mhi_cntrl->total_ev_rings; i++, er_ctxt++,
		     mhi_event++) {
		struct mhi_ring *ring = &mhi_event->ring;

		/* Skip offload events */
		if (mhi_event->offload_ev)
			continue;

		ring->rp = ring->base;
		ring->wp = ring->base;
		er_ctxt->rp = er_ctxt->rbase;
		er_ctxt->wp = er_ctxt->rbase;
	}

	mhi_ready_state_transition(mhi_cntrl);

exit_sys_error_transition:
	dev_dbg(dev, "Exiting with PM state: %s, MHI state: %s\n",
		to_mhi_pm_state_str(mhi_cntrl->pm_state),
		TO_MHI_STATE_STR(mhi_cntrl->dev_state));

	mutex_unlock(&mhi_cntrl->pm_mutex);
}

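/*
 * Queue a new state-transition work item for the state worker. GFP_ATOMIC
 * is used since callers may run in atomic context (for example, error
 * handling driven from event ring processing).
 */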
int mhi_queue_state_transition(struct mhi_controller *mhi_cntrl,
			       enum dev_st_transition state)
{
	struct state_transition *item = kmalloc(sizeof(*item), GFP_ATOMIC);
	unsigned long flags;

	if (!item)
		return -ENOMEM;

	item->state = state;
	spin_lock_irqsave(&mhi_cntrl->transition_lock, flags);
	list_add_tail(&item->node, &mhi_cntrl->transition_list);
	spin_unlock_irqrestore(&mhi_cntrl->transition_lock, flags);

	queue_work(mhi_cntrl->hiprio_wq, &mhi_cntrl->st_worker);

	return 0;
}

/* SYS_ERR worker */
void mhi_pm_sys_err_handler(struct mhi_controller *mhi_cntrl)
{
	struct device *dev = &mhi_cntrl->mhi_dev->dev;

	/* skip if controller supports RDDM */
	if (mhi_cntrl->rddm_image) {
		dev_dbg(dev, "Controller supports RDDM, skip SYS_ERROR\n");
		return;
	}

	mhi_queue_state_transition(mhi_cntrl, DEV_ST_TRANSITION_SYS_ERR);
}

/* Device State Transition worker */
void mhi_pm_st_worker(struct work_struct *work)
{
	struct state_transition *itr, *tmp;
	LIST_HEAD(head);
	struct mhi_controller *mhi_cntrl = container_of(work,
							struct mhi_controller,
							st_worker);
	struct device *dev = &mhi_cntrl->mhi_dev->dev;

	spin_lock_irq(&mhi_cntrl->transition_lock);
	list_splice_tail_init(&mhi_cntrl->transition_list, &head);
	spin_unlock_irq(&mhi_cntrl->transition_lock);

	list_for_each_entry_safe(itr, tmp, &head, node) {
		list_del(&itr->node);
		dev_dbg(dev, "Handling state transition: %s\n",
			TO_DEV_STATE_TRANS_STR(itr->state));

		switch (itr->state) {
		case DEV_ST_TRANSITION_PBL:
			write_lock_irq(&mhi_cntrl->pm_lock);
			if (MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state))
				mhi_cntrl->ee = mhi_get_exec_env(mhi_cntrl);
			write_unlock_irq(&mhi_cntrl->pm_lock);
			if (MHI_IN_PBL(mhi_cntrl->ee))
				mhi_fw_load_handler(mhi_cntrl);
			break;
		case DEV_ST_TRANSITION_SBL:
			write_lock_irq(&mhi_cntrl->pm_lock);
			mhi_cntrl->ee = MHI_EE_SBL;
			write_unlock_irq(&mhi_cntrl->pm_lock);

			/*
			 * The MHI devices are only created when the client
			 * device switches its Execution Environment (EE) to
			 * either SBL or AMSS states
			 */
			mhi_create_devices(mhi_cntrl);
			break;
		case DEV_ST_TRANSITION_MISSION_MODE:
			mhi_pm_mission_mode_transition(mhi_cntrl);
			break;
		case DEV_ST_TRANSITION_READY:
			mhi_ready_state_transition(mhi_cntrl);
			break;
		case DEV_ST_TRANSITION_SYS_ERR:
			mhi_pm_sys_error_transition(mhi_cntrl);
			break;
		case DEV_ST_TRANSITION_DISABLE:
			mhi_pm_disable_transition(mhi_cntrl);
			break;
		default:
			break;
		}
		kfree(itr);
	}
}

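/*
 * Suspend path: take the device to M0/M1 first, then request M3 and wait
 * for the device to confirm. Returns -EBUSY while clients hold wake votes
 * or packets are in flight, so a caller may retry later.
 */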
int mhi_pm_suspend(struct mhi_controller *mhi_cntrl)
{
	struct mhi_chan *itr, *tmp;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	enum mhi_pm_state new_state;
	int ret;

	if (mhi_cntrl->pm_state == MHI_PM_DISABLE)
		return -EINVAL;

	if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state))
		return -EIO;

	/* Return busy if there are any pending resources */
	if (atomic_read(&mhi_cntrl->dev_wake) ||
	    atomic_read(&mhi_cntrl->pending_pkts))
		return -EBUSY;

	/* Take MHI out of M2 state */
	read_lock_bh(&mhi_cntrl->pm_lock);
	mhi_cntrl->wake_get(mhi_cntrl, false);
	read_unlock_bh(&mhi_cntrl->pm_lock);

	ret = wait_event_timeout(mhi_cntrl->state_event,
				 mhi_cntrl->dev_state == MHI_STATE_M0 ||
				 mhi_cntrl->dev_state == MHI_STATE_M1 ||
				 MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state),
				 msecs_to_jiffies(mhi_cntrl->timeout_ms));

	read_lock_bh(&mhi_cntrl->pm_lock);
	mhi_cntrl->wake_put(mhi_cntrl, false);
	read_unlock_bh(&mhi_cntrl->pm_lock);

	if (!ret || MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
		dev_err(dev, "Could not enter M0/M1 state\n");
		return -EIO;
	}

	write_lock_irq(&mhi_cntrl->pm_lock);

	if (atomic_read(&mhi_cntrl->dev_wake) ||
	    atomic_read(&mhi_cntrl->pending_pkts)) {
		write_unlock_irq(&mhi_cntrl->pm_lock);
		return -EBUSY;
	}

	dev_info(dev, "Allowing M3 transition\n");
	new_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_M3_ENTER);
	if (new_state != MHI_PM_M3_ENTER) {
		write_unlock_irq(&mhi_cntrl->pm_lock);
		dev_err(dev,
			"Error setting to PM state: %s from: %s\n",
			to_mhi_pm_state_str(MHI_PM_M3_ENTER),
			to_mhi_pm_state_str(mhi_cntrl->pm_state));
		return -EIO;
	}

	/* Set MHI to M3 and wait for completion */
	mhi_set_mhi_state(mhi_cntrl, MHI_STATE_M3);
	write_unlock_irq(&mhi_cntrl->pm_lock);
	dev_info(dev, "Wait for M3 completion\n");

	ret = wait_event_timeout(mhi_cntrl->state_event,
				 mhi_cntrl->dev_state == MHI_STATE_M3 ||
				 MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state),
				 msecs_to_jiffies(mhi_cntrl->timeout_ms));

	if (!ret || MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
		dev_err(dev,
			"Did not enter M3 state, MHI state: %s, PM state: %s\n",
			TO_MHI_STATE_STR(mhi_cntrl->dev_state),
			to_mhi_pm_state_str(mhi_cntrl->pm_state));
		return -EIO;
	}

	/* Notify clients about entering LPM */
	list_for_each_entry_safe(itr, tmp, &mhi_cntrl->lpm_chans, node) {
		mutex_lock(&itr->mutex);
		if (itr->mhi_dev)
			mhi_notify(itr->mhi_dev, MHI_CB_LPM_ENTER);
		mutex_unlock(&itr->mutex);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(mhi_pm_suspend);

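/*
 * Resume path, the mirror of mhi_pm_suspend(): notify LPM clients, then
 * move M3 -> M3_EXIT -> M0 and wait for the device to report M0.
 */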
int mhi_pm_resume(struct mhi_controller *mhi_cntrl)
{
	struct mhi_chan *itr, *tmp;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	enum mhi_pm_state cur_state;
	int ret;

	dev_info(dev, "Entered with PM state: %s, MHI state: %s\n",
		 to_mhi_pm_state_str(mhi_cntrl->pm_state),
		 TO_MHI_STATE_STR(mhi_cntrl->dev_state));

	if (mhi_cntrl->pm_state == MHI_PM_DISABLE)
		return 0;

	if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state))
		return -EIO;

	/* Notify clients about exiting LPM */
	list_for_each_entry_safe(itr, tmp, &mhi_cntrl->lpm_chans, node) {
		mutex_lock(&itr->mutex);
		if (itr->mhi_dev)
			mhi_notify(itr->mhi_dev, MHI_CB_LPM_EXIT);
		mutex_unlock(&itr->mutex);
	}

	write_lock_irq(&mhi_cntrl->pm_lock);
	cur_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_M3_EXIT);
	if (cur_state != MHI_PM_M3_EXIT) {
		write_unlock_irq(&mhi_cntrl->pm_lock);
		dev_info(dev,
			 "Error setting to PM state: %s from: %s\n",
			 to_mhi_pm_state_str(MHI_PM_M3_EXIT),
			 to_mhi_pm_state_str(mhi_cntrl->pm_state));
		return -EIO;
	}

	/* Set MHI to M0 and wait for completion */
	mhi_set_mhi_state(mhi_cntrl, MHI_STATE_M0);
	write_unlock_irq(&mhi_cntrl->pm_lock);

	ret = wait_event_timeout(mhi_cntrl->state_event,
				 mhi_cntrl->dev_state == MHI_STATE_M0 ||
				 MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state),
				 msecs_to_jiffies(mhi_cntrl->timeout_ms));

	if (!ret || MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
		dev_err(dev,
			"Did not enter M0 state, MHI state: %s, PM state: %s\n",
			TO_MHI_STATE_STR(mhi_cntrl->dev_state),
			to_mhi_pm_state_str(mhi_cntrl->pm_state));
		return -EIO;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(mhi_pm_resume);

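/*
 * Take a device wake vote, trigger a resume if the device is suspended,
 * and block until the device reaches M0 or an error state. The vote is
 * dropped again on failure so the wake count stays balanced.
 */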
int __mhi_device_get_sync(struct mhi_controller *mhi_cntrl)
{
	int ret;

	/* Wake up the device */
	read_lock_bh(&mhi_cntrl->pm_lock);
	if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
		read_unlock_bh(&mhi_cntrl->pm_lock);
		return -EIO;
	}
	mhi_cntrl->wake_get(mhi_cntrl, true);
	if (MHI_PM_IN_SUSPEND_STATE(mhi_cntrl->pm_state))
		mhi_trigger_resume(mhi_cntrl);
	read_unlock_bh(&mhi_cntrl->pm_lock);

	ret = wait_event_timeout(mhi_cntrl->state_event,
				 mhi_cntrl->pm_state == MHI_PM_M0 ||
				 MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state),
				 msecs_to_jiffies(mhi_cntrl->timeout_ms));

	if (!ret || MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
		read_lock_bh(&mhi_cntrl->pm_lock);
		mhi_cntrl->wake_put(mhi_cntrl, false);
		read_unlock_bh(&mhi_cntrl->pm_lock);
		return -EIO;
	}

	return 0;
}

/* Assert device wake db */
static void mhi_assert_dev_wake(struct mhi_controller *mhi_cntrl, bool force)
{
	unsigned long flags;

	/*
	 * If force flag is set, then increment the wake count value and
	 * ring wake db
	 */
	if (unlikely(force)) {
		spin_lock_irqsave(&mhi_cntrl->wlock, flags);
		atomic_inc(&mhi_cntrl->dev_wake);
		if (MHI_WAKE_DB_FORCE_SET_VALID(mhi_cntrl->pm_state) &&
		    !mhi_cntrl->wake_set) {
			mhi_write_db(mhi_cntrl, mhi_cntrl->wake_db, 1);
			mhi_cntrl->wake_set = true;
		}
		spin_unlock_irqrestore(&mhi_cntrl->wlock, flags);
	} else {
		/*
		 * If resources are already requested, then just increment
		 * the wake count value and return
		 */
		if (likely(atomic_add_unless(&mhi_cntrl->dev_wake, 1, 0)))
			return;

		spin_lock_irqsave(&mhi_cntrl->wlock, flags);
		if ((atomic_inc_return(&mhi_cntrl->dev_wake) == 1) &&
		    MHI_WAKE_DB_SET_VALID(mhi_cntrl->pm_state) &&
		    !mhi_cntrl->wake_set) {
			mhi_write_db(mhi_cntrl, mhi_cntrl->wake_db, 1);
			mhi_cntrl->wake_set = true;
		}
		spin_unlock_irqrestore(&mhi_cntrl->wlock, flags);
	}
}

/* De-assert device wake db */
static void mhi_deassert_dev_wake(struct mhi_controller *mhi_cntrl,
				  bool override)
{
	unsigned long flags;

	/*
	 * Only continue if there is a single resource, else just decrement
	 * and return
	 */
	if (likely(atomic_add_unless(&mhi_cntrl->dev_wake, -1, 1)))
		return;

	spin_lock_irqsave(&mhi_cntrl->wlock, flags);
	if ((atomic_dec_return(&mhi_cntrl->dev_wake) == 0) &&
	    MHI_WAKE_DB_CLEAR_VALID(mhi_cntrl->pm_state) && !override &&
	    mhi_cntrl->wake_set) {
		mhi_write_db(mhi_cntrl, mhi_cntrl->wake_db, 0);
		mhi_cntrl->wake_set = false;
	}
	spin_unlock_irqrestore(&mhi_cntrl->wlock, flags);
}

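/*
 * Non-blocking power up: set up IRQs and the BHI/BHIE register space,
 * validate the execution environment, then queue the first state transition
 * (PBL for firmware download, READY otherwise). The remaining bring-up runs
 * asynchronously in the state worker; see mhi_sync_power_up() for a
 * blocking wrapper.
 */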
int mhi_async_power_up(struct mhi_controller *mhi_cntrl)
{
	enum mhi_state state;
	enum mhi_ee_type current_ee;
	enum dev_st_transition next_state;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	u32 val;
	int ret;

	dev_info(dev, "Requested to power ON\n");

	/* Supply default wake routines if not provided by controller driver */
	if (!mhi_cntrl->wake_get || !mhi_cntrl->wake_put ||
	    !mhi_cntrl->wake_toggle) {
		mhi_cntrl->wake_get = mhi_assert_dev_wake;
		mhi_cntrl->wake_put = mhi_deassert_dev_wake;
		mhi_cntrl->wake_toggle = (mhi_cntrl->db_access & MHI_PM_M2) ?
			mhi_toggle_dev_wake_nop : mhi_toggle_dev_wake;
	}

	mutex_lock(&mhi_cntrl->pm_mutex);
	mhi_cntrl->pm_state = MHI_PM_DISABLE;

	if (!mhi_cntrl->pre_init) {
		/* Setup device context */
		ret = mhi_init_dev_ctxt(mhi_cntrl);
		if (ret)
			goto error_dev_ctxt;
	}

	ret = mhi_init_irq_setup(mhi_cntrl);
	if (ret)
		goto error_setup_irq;

	/* Setup BHI offset & INTVEC */
	write_lock_irq(&mhi_cntrl->pm_lock);
	ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->regs, BHIOFF, &val);
	if (ret) {
		write_unlock_irq(&mhi_cntrl->pm_lock);
		goto error_bhi_offset;
	}

	mhi_cntrl->bhi = mhi_cntrl->regs + val;

	/* Setup BHIE offset */
	if (mhi_cntrl->fbc_download) {
		ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->regs, BHIEOFF, &val);
		if (ret) {
			write_unlock_irq(&mhi_cntrl->pm_lock);
			dev_err(dev, "Error reading BHIE offset\n");
			goto error_bhi_offset;
		}

		mhi_cntrl->bhie = mhi_cntrl->regs + val;
	}

	mhi_write_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_INTVEC, 0);
	mhi_cntrl->pm_state = MHI_PM_POR;
	mhi_cntrl->ee = MHI_EE_MAX;
	current_ee = mhi_get_exec_env(mhi_cntrl);
	write_unlock_irq(&mhi_cntrl->pm_lock);

	/* Confirm that the device is in valid exec env */
	if (!MHI_IN_PBL(current_ee) && current_ee != MHI_EE_AMSS) {
		dev_err(dev, "Not a valid EE for power on\n");
		ret = -EIO;
		goto error_bhi_offset;
	}

	state = mhi_get_mhi_state(mhi_cntrl);
	if (state == MHI_STATE_SYS_ERR) {
		mhi_set_mhi_state(mhi_cntrl, MHI_STATE_RESET);
		ret = wait_event_timeout(mhi_cntrl->state_event,
				MHI_PM_IN_FATAL_STATE(mhi_cntrl->pm_state) ||
					mhi_read_reg_field(mhi_cntrl,
							   mhi_cntrl->regs,
							   MHICTRL,
							   MHICTRL_RESET_MASK,
							   MHICTRL_RESET_SHIFT,
							   &val) ||
					!val,
				msecs_to_jiffies(mhi_cntrl->timeout_ms));
		if (!ret) {
			ret = -EIO;
			dev_info(dev, "Failed to reset MHI due to syserr state\n");
			goto error_bhi_offset;
		}
		ret = 0;

		/*
		 * Device will clear BHI_INTVEC as a part of RESET processing,
		 * hence re-program it
		 */
		mhi_write_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_INTVEC, 0);
	}

	/* Transition to next state */
	next_state = MHI_IN_PBL(current_ee) ?
		DEV_ST_TRANSITION_PBL : DEV_ST_TRANSITION_READY;

	mhi_queue_state_transition(mhi_cntrl, next_state);

	mutex_unlock(&mhi_cntrl->pm_mutex);

	dev_info(dev, "Power on setup success\n");

	return 0;

error_bhi_offset:
	mhi_deinit_free_irq(mhi_cntrl);

error_setup_irq:
	if (!mhi_cntrl->pre_init)
		mhi_deinit_dev_ctxt(mhi_cntrl);

error_dev_ctxt:
	mutex_unlock(&mhi_cntrl->pm_mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(mhi_async_power_up);

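/*
 * Tear down the controller. A graceful shutdown goes through
 * SHUTDOWN_PROCESS; otherwise the state machine is forced to
 * LD_ERR_FATAL_DETECT so that no further device access is attempted. The
 * actual cleanup happens in the DISABLE transition on the state worker,
 * which is flushed here before resources are freed.
 */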
void mhi_power_down(struct mhi_controller *mhi_cntrl, bool graceful)
{
	enum mhi_pm_state cur_state, transition_state;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;

	/* If it's not a graceful shutdown, force MHI to linkdown state */
	transition_state = (graceful) ? MHI_PM_SHUTDOWN_PROCESS :
			   MHI_PM_LD_ERR_FATAL_DETECT;

	mutex_lock(&mhi_cntrl->pm_mutex);
	write_lock_irq(&mhi_cntrl->pm_lock);
	cur_state = mhi_tryset_pm_state(mhi_cntrl, transition_state);
	if (cur_state != transition_state) {
		dev_err(dev, "Failed to move to state: %s from: %s\n",
			to_mhi_pm_state_str(transition_state),
			to_mhi_pm_state_str(mhi_cntrl->pm_state));
		/* Force link down or error fatal detected state */
		mhi_cntrl->pm_state = MHI_PM_LD_ERR_FATAL_DETECT;
	}

	/* Mark device inactive to avoid any further host processing */
	mhi_cntrl->ee = MHI_EE_DISABLE_TRANSITION;
	mhi_cntrl->dev_state = MHI_STATE_RESET;

	wake_up_all(&mhi_cntrl->state_event);

	write_unlock_irq(&mhi_cntrl->pm_lock);
	mutex_unlock(&mhi_cntrl->pm_mutex);

	mhi_queue_state_transition(mhi_cntrl, DEV_ST_TRANSITION_DISABLE);

	/* Wait for shutdown to complete */
	flush_work(&mhi_cntrl->st_worker);

	free_irq(mhi_cntrl->irq[0], mhi_cntrl);

	if (!mhi_cntrl->pre_init) {
		/* Free all allocated resources */
		if (mhi_cntrl->fbc_image) {
			mhi_free_bhie_table(mhi_cntrl, mhi_cntrl->fbc_image);
			mhi_cntrl->fbc_image = NULL;
		}
		mhi_deinit_dev_ctxt(mhi_cntrl);
	}
}
EXPORT_SYMBOL_GPL(mhi_power_down);

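/*
 * Blocking wrapper around mhi_async_power_up(): wait until the device
 * reaches mission mode, and power the controller back down on timeout.
 */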
int mhi_sync_power_up(struct mhi_controller *mhi_cntrl)
{
	int ret = mhi_async_power_up(mhi_cntrl);

	if (ret)
		return ret;

	wait_event_timeout(mhi_cntrl->state_event,
			   MHI_IN_MISSION_MODE(mhi_cntrl->ee) ||
			   MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state),
			   msecs_to_jiffies(mhi_cntrl->timeout_ms));

	ret = (MHI_IN_MISSION_MODE(mhi_cntrl->ee)) ? 0 : -ETIMEDOUT;
	if (ret)
		mhi_power_down(mhi_cntrl, false);

	return ret;
}
EXPORT_SYMBOL(mhi_sync_power_up);

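/*
 * Force the device into RDDM (RAM dump mode) by raising SYS_ERR, then wait
 * for the resulting execution environment change to be reported.
 */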
int mhi_force_rddm_mode(struct mhi_controller *mhi_cntrl)
{
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	int ret;

	/* Check if device is already in RDDM */
	if (mhi_cntrl->ee == MHI_EE_RDDM)
		return 0;

	dev_dbg(dev, "Triggering SYS_ERR to force RDDM state\n");
	mhi_set_mhi_state(mhi_cntrl, MHI_STATE_SYS_ERR);

	/* Wait for RDDM event */
	ret = wait_event_timeout(mhi_cntrl->state_event,
				 mhi_cntrl->ee == MHI_EE_RDDM,
				 msecs_to_jiffies(mhi_cntrl->timeout_ms));
	ret = ret ? 0 : -EIO;

	return ret;
}
EXPORT_SYMBOL_GPL(mhi_force_rddm_mode);

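/*
 * Client-facing wake-vote helpers. mhi_device_get() votes without blocking,
 * mhi_device_get_sync() additionally waits for M0, and mhi_device_put()
 * drops the vote. Votes are tracked per device in dev_wake as well as in
 * the controller-wide atomic counter.
 */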
void mhi_device_get(struct mhi_device *mhi_dev)
{
	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;

	mhi_dev->dev_wake++;
	read_lock_bh(&mhi_cntrl->pm_lock);
	if (MHI_PM_IN_SUSPEND_STATE(mhi_cntrl->pm_state))
		mhi_trigger_resume(mhi_cntrl);

	mhi_cntrl->wake_get(mhi_cntrl, true);
	read_unlock_bh(&mhi_cntrl->pm_lock);
}
EXPORT_SYMBOL_GPL(mhi_device_get);

int mhi_device_get_sync(struct mhi_device *mhi_dev)
{
	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
	int ret;

	ret = __mhi_device_get_sync(mhi_cntrl);
	if (!ret)
		mhi_dev->dev_wake++;

	return ret;
}
EXPORT_SYMBOL_GPL(mhi_device_get_sync);

void mhi_device_put(struct mhi_device *mhi_dev)
{
	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;

	mhi_dev->dev_wake--;
	read_lock_bh(&mhi_cntrl->pm_lock);
	if (MHI_PM_IN_SUSPEND_STATE(mhi_cntrl->pm_state))
		mhi_trigger_resume(mhi_cntrl);

	mhi_cntrl->wake_put(mhi_cntrl, false);
	read_unlock_bh(&mhi_cntrl->pm_lock);
}
EXPORT_SYMBOL_GPL(mhi_device_put);