// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
 *
 */

#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dma-direction.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/mhi.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/wait.h>
#include "internal.h"

/*
 * Not all MHI state transitions are synchronous. Transitions like Linkdown,
 * SYS_ERR, and shutdown can happen anytime asynchronously. This function will
 * transition to a new state only if we're allowed to.
 *
 * Priority increases as we go down. For instance, from any state in L0, the
 * transition can be made to states in L1, L2 and L3. A notable exception to
 * this rule is state DISABLE.  From DISABLE state we can only transition to
 * POR state. Also, while in L2 state, user cannot jump back to previous
 * L1 or L0 states.
 *
 * Valid transitions:
 * L0: DISABLE <--> POR
 *     POR <--> POR
 *     POR -> M0 -> M2 --> M0
 *     POR -> FW_DL_ERR
 *     FW_DL_ERR <--> FW_DL_ERR
 *     M0 <--> M0
 *     M0 -> FW_DL_ERR
 *     M0 -> M3_ENTER -> M3 -> M3_EXIT --> M0
 * L1: SYS_ERR_DETECT -> SYS_ERR_PROCESS --> POR
 * L2: SHUTDOWN_PROCESS -> DISABLE
 * L3: LD_ERR_FATAL_DETECT <--> LD_ERR_FATAL_DETECT
 *     LD_ERR_FATAL_DETECT -> SHUTDOWN_PROCESS
 */
static struct mhi_pm_transitions const dev_state_transitions[] = {
	/* L0 States */
	{
		MHI_PM_DISABLE,
		MHI_PM_POR
	},
	{
		MHI_PM_POR,
		MHI_PM_POR | MHI_PM_DISABLE | MHI_PM_M0 |
		MHI_PM_SYS_ERR_DETECT | MHI_PM_SHUTDOWN_PROCESS |
		MHI_PM_LD_ERR_FATAL_DETECT | MHI_PM_FW_DL_ERR
	},
	{
		MHI_PM_M0,
		MHI_PM_M0 | MHI_PM_M2 | MHI_PM_M3_ENTER |
		MHI_PM_SYS_ERR_DETECT | MHI_PM_SHUTDOWN_PROCESS |
		MHI_PM_LD_ERR_FATAL_DETECT | MHI_PM_FW_DL_ERR
	},
	{
		MHI_PM_M2,
		MHI_PM_M0 | MHI_PM_SYS_ERR_DETECT | MHI_PM_SHUTDOWN_PROCESS |
		MHI_PM_LD_ERR_FATAL_DETECT
	},
	{
		MHI_PM_M3_ENTER,
		MHI_PM_M3 | MHI_PM_SYS_ERR_DETECT | MHI_PM_SHUTDOWN_PROCESS |
		MHI_PM_LD_ERR_FATAL_DETECT
	},
	{
		MHI_PM_M3,
		MHI_PM_M3_EXIT | MHI_PM_SYS_ERR_DETECT |
		MHI_PM_SHUTDOWN_PROCESS | MHI_PM_LD_ERR_FATAL_DETECT
	},
	{
		MHI_PM_M3_EXIT,
		MHI_PM_M0 | MHI_PM_SYS_ERR_DETECT | MHI_PM_SHUTDOWN_PROCESS |
		MHI_PM_LD_ERR_FATAL_DETECT
	},
	{
		MHI_PM_FW_DL_ERR,
		MHI_PM_FW_DL_ERR | MHI_PM_SYS_ERR_DETECT |
		MHI_PM_SHUTDOWN_PROCESS | MHI_PM_LD_ERR_FATAL_DETECT
	},
	/* L1 States */
	{
		MHI_PM_SYS_ERR_DETECT,
		MHI_PM_SYS_ERR_PROCESS | MHI_PM_SHUTDOWN_PROCESS |
		MHI_PM_LD_ERR_FATAL_DETECT
	},
	{
		MHI_PM_SYS_ERR_PROCESS,
		MHI_PM_POR | MHI_PM_SHUTDOWN_PROCESS |
		MHI_PM_LD_ERR_FATAL_DETECT
	},
	/* L2 States */
	{
		MHI_PM_SHUTDOWN_PROCESS,
		MHI_PM_DISABLE | MHI_PM_LD_ERR_FATAL_DETECT
	},
	/* L3 States */
	{
		MHI_PM_LD_ERR_FATAL_DETECT,
		MHI_PM_LD_ERR_FATAL_DETECT | MHI_PM_SHUTDOWN_PROCESS
	},
};

enum mhi_pm_state __must_check mhi_tryset_pm_state(struct mhi_controller *mhi_cntrl,
						   enum mhi_pm_state state)
{
	unsigned long cur_state = mhi_cntrl->pm_state;
	int index = find_last_bit(&cur_state, 32);

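	/*
	 * pm_state is always a single-bit flag, so the position of its set
	 * bit doubles as the index into dev_state_transitions[], whose
	 * entries are kept in the same bit order.
	 */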
	if (unlikely(index >= ARRAY_SIZE(dev_state_transitions)))
		return cur_state;

	if (unlikely(dev_state_transitions[index].from_state != cur_state))
		return cur_state;

	if (unlikely(!(dev_state_transitions[index].to_states & state)))
		return cur_state;

	mhi_cntrl->pm_state = state;
	return mhi_cntrl->pm_state;
}

void mhi_set_mhi_state(struct mhi_controller *mhi_cntrl, enum mhi_state state)
{
	if (state == MHI_STATE_RESET) {
		mhi_write_reg_field(mhi_cntrl, mhi_cntrl->regs, MHICTRL,
				    MHICTRL_RESET_MASK, MHICTRL_RESET_SHIFT, 1);
	} else {
		mhi_write_reg_field(mhi_cntrl, mhi_cntrl->regs, MHICTRL,
				    MHICTRL_MHISTATE_MASK,
				    MHICTRL_MHISTATE_SHIFT, state);
	}
}

/* NOP for backward compatibility, host allowed to ring DB in M2 state */
static void mhi_toggle_dev_wake_nop(struct mhi_controller *mhi_cntrl)
{
}

static void mhi_toggle_dev_wake(struct mhi_controller *mhi_cntrl)
{
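	/* Assert device wake, then drop the vote without clearing the DB */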
	mhi_cntrl->wake_get(mhi_cntrl, false);
	mhi_cntrl->wake_put(mhi_cntrl, true);
}

/* Handle device ready state transition */
int mhi_ready_state_transition(struct mhi_controller *mhi_cntrl)
{
	void __iomem *base = mhi_cntrl->regs;
	struct mhi_event *mhi_event;
	enum mhi_pm_state cur_state;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	u32 reset = 1, ready = 0;
	int ret, i;

	/* Wait for RESET to be cleared and READY bit to be set by the device */
	wait_event_timeout(mhi_cntrl->state_event,
			   MHI_PM_IN_FATAL_STATE(mhi_cntrl->pm_state) ||
			   mhi_read_reg_field(mhi_cntrl, base, MHICTRL,
					      MHICTRL_RESET_MASK,
					      MHICTRL_RESET_SHIFT, &reset) ||
			   mhi_read_reg_field(mhi_cntrl, base, MHISTATUS,
					      MHISTATUS_READY_MASK,
					      MHISTATUS_READY_SHIFT, &ready) ||
			   (!reset && ready),
			   msecs_to_jiffies(mhi_cntrl->timeout_ms));

	/* Check if device entered error state */
	if (MHI_PM_IN_FATAL_STATE(mhi_cntrl->pm_state)) {
		dev_err(dev, "Device link is not accessible\n");
		return -EIO;
	}

	/* Timeout if device did not transition to ready state */
	if (reset || !ready) {
		dev_err(dev, "Device Ready timeout\n");
		return -ETIMEDOUT;
	}

	dev_dbg(dev, "Device in READY State\n");
	write_lock_irq(&mhi_cntrl->pm_lock);
	cur_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_POR);
	mhi_cntrl->dev_state = MHI_STATE_READY;
	write_unlock_irq(&mhi_cntrl->pm_lock);

	if (cur_state != MHI_PM_POR) {
		dev_err(dev, "Error moving to state %s from %s\n",
			to_mhi_pm_state_str(MHI_PM_POR),
			to_mhi_pm_state_str(cur_state));
		return -EIO;
	}

	read_lock_bh(&mhi_cntrl->pm_lock);
	if (!MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) {
		dev_err(dev, "Device registers not accessible\n");
		goto error_mmio;
	}

	/* Configure MMIO registers */
	ret = mhi_init_mmio(mhi_cntrl);
	if (ret) {
		dev_err(dev, "Error configuring MMIO registers\n");
		goto error_mmio;
	}

	/* Add elements to all SW event rings */
	mhi_event = mhi_cntrl->mhi_event;
	for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
		struct mhi_ring *ring = &mhi_event->ring;

		/* Skip if this is an offload or HW event */
		if (mhi_event->offload_ev || mhi_event->hw_ring)
			continue;

		ring->wp = ring->base + ring->len - ring->el_size;
		*ring->ctxt_wp = ring->iommu_base + ring->len - ring->el_size;
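		/* Publish the updated context WP before ringing the doorbell */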
		smp_wmb();

		/* Ring the event ring db */
		spin_lock_irq(&mhi_event->lock);
		mhi_ring_er_db(mhi_event);
		spin_unlock_irq(&mhi_event->lock);
	}

	/* Set MHI to M0 state */
	mhi_set_mhi_state(mhi_cntrl, MHI_STATE_M0);
	read_unlock_bh(&mhi_cntrl->pm_lock);

	return 0;

error_mmio:
	read_unlock_bh(&mhi_cntrl->pm_lock);

	return -EIO;
}

int mhi_pm_m0_transition(struct mhi_controller *mhi_cntrl)
{
	enum mhi_pm_state cur_state;
	struct mhi_chan *mhi_chan;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	int i;

	write_lock_irq(&mhi_cntrl->pm_lock);
	mhi_cntrl->dev_state = MHI_STATE_M0;
	cur_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_M0);
	write_unlock_irq(&mhi_cntrl->pm_lock);
	if (unlikely(cur_state != MHI_PM_M0)) {
		dev_err(dev, "Unable to transition to M0 state\n");
		return -EIO;
	}

	/* Wake up the device */
	read_lock_bh(&mhi_cntrl->pm_lock);
	mhi_cntrl->wake_get(mhi_cntrl, true);

	/* Ring all event rings and CMD ring only if we're in mission mode */
	if (MHI_IN_MISSION_MODE(mhi_cntrl->ee)) {
		struct mhi_event *mhi_event = mhi_cntrl->mhi_event;
		struct mhi_cmd *mhi_cmd =
			&mhi_cntrl->mhi_cmd[PRIMARY_CMD_RING];

		for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
			if (mhi_event->offload_ev)
				continue;

			spin_lock_irq(&mhi_event->lock);
			mhi_ring_er_db(mhi_event);
			spin_unlock_irq(&mhi_event->lock);
		}

		/* Only ring primary cmd ring if ring is not empty */
		spin_lock_irq(&mhi_cmd->lock);
		if (mhi_cmd->ring.rp != mhi_cmd->ring.wp)
			mhi_ring_cmd_db(mhi_cntrl, mhi_cmd);
		spin_unlock_irq(&mhi_cmd->lock);
	}

	/* Ring channel DB registers */
	mhi_chan = mhi_cntrl->mhi_chan;
	for (i = 0; i < mhi_cntrl->max_chan; i++, mhi_chan++) {
		struct mhi_ring *tre_ring = &mhi_chan->tre_ring;

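		/* Re-enable doorbell mode for channels that requested a DB reset */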
		if (mhi_chan->db_cfg.reset_req) {
			write_lock_irq(&mhi_chan->lock);
			mhi_chan->db_cfg.db_mode = true;
			write_unlock_irq(&mhi_chan->lock);
		}

		read_lock_irq(&mhi_chan->lock);

		/* Only ring DB if ring is not empty */
		if (tre_ring->base && tre_ring->wp != tre_ring->rp)
			mhi_ring_chan_db(mhi_cntrl, mhi_chan);
		read_unlock_irq(&mhi_chan->lock);
	}

	mhi_cntrl->wake_put(mhi_cntrl, false);
	read_unlock_bh(&mhi_cntrl->pm_lock);
	wake_up_all(&mhi_cntrl->state_event);

	return 0;
}

/*
 * After receiving the MHI state change event from the device indicating the
 * transition to M1 state, the host can transition the device to M2 state
 * for keeping it in low power state.
 */
void mhi_pm_m1_transition(struct mhi_controller *mhi_cntrl)
{
	enum mhi_pm_state state;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;

	write_lock_irq(&mhi_cntrl->pm_lock);
	state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_M2);
	if (state == MHI_PM_M2) {
		mhi_set_mhi_state(mhi_cntrl, MHI_STATE_M2);
		mhi_cntrl->dev_state = MHI_STATE_M2;

		write_unlock_irq(&mhi_cntrl->pm_lock);
		wake_up_all(&mhi_cntrl->state_event);

		/* If there are any pending resources, exit M2 immediately */
		if (unlikely(atomic_read(&mhi_cntrl->pending_pkts) ||
			     atomic_read(&mhi_cntrl->dev_wake))) {
			dev_dbg(dev,
				"Exiting M2, pending_pkts: %d dev_wake: %d\n",
				atomic_read(&mhi_cntrl->pending_pkts),
				atomic_read(&mhi_cntrl->dev_wake));
			read_lock_bh(&mhi_cntrl->pm_lock);
			mhi_cntrl->wake_get(mhi_cntrl, true);
			mhi_cntrl->wake_put(mhi_cntrl, true);
			read_unlock_bh(&mhi_cntrl->pm_lock);
		} else {
			mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_IDLE);
		}
	} else {
		write_unlock_irq(&mhi_cntrl->pm_lock);
	}
}

/* MHI M3 completion handler */
int mhi_pm_m3_transition(struct mhi_controller *mhi_cntrl)
{
	enum mhi_pm_state state;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;

	write_lock_irq(&mhi_cntrl->pm_lock);
	mhi_cntrl->dev_state = MHI_STATE_M3;
	state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_M3);
	write_unlock_irq(&mhi_cntrl->pm_lock);
	if (state != MHI_PM_M3) {
		dev_err(dev, "Unable to transition to M3 state\n");
		return -EIO;
	}

	wake_up_all(&mhi_cntrl->state_event);

	return 0;
}

/* Handle device Mission Mode transition */
static int mhi_pm_mission_mode_transition(struct mhi_controller *mhi_cntrl)
{
	struct mhi_event *mhi_event;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	int i, ret;

	dev_dbg(dev, "Processing Mission Mode transition\n");

	write_lock_irq(&mhi_cntrl->pm_lock);
	if (MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state))
		mhi_cntrl->ee = mhi_get_exec_env(mhi_cntrl);
	write_unlock_irq(&mhi_cntrl->pm_lock);

	if (!MHI_IN_MISSION_MODE(mhi_cntrl->ee))
		return -EIO;

	wake_up_all(&mhi_cntrl->state_event);

	mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_EE_MISSION_MODE);

	/* Force MHI to be in M0 state before continuing */
	ret = __mhi_device_get_sync(mhi_cntrl);
	if (ret)
		return ret;

	read_lock_bh(&mhi_cntrl->pm_lock);

	if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
		ret = -EIO;
		goto error_mission_mode;
	}

	/* Add elements to all HW event rings */
	mhi_event = mhi_cntrl->mhi_event;
	for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
		struct mhi_ring *ring = &mhi_event->ring;

		if (mhi_event->offload_ev || !mhi_event->hw_ring)
			continue;

		ring->wp = ring->base + ring->len - ring->el_size;
		*ring->ctxt_wp = ring->iommu_base + ring->len - ring->el_size;
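		/* Publish the updated context WP before ringing the doorbell */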
		smp_wmb();

		spin_lock_irq(&mhi_event->lock);
		if (MHI_DB_ACCESS_VALID(mhi_cntrl))
			mhi_ring_er_db(mhi_event);
		spin_unlock_irq(&mhi_event->lock);
	}

	read_unlock_bh(&mhi_cntrl->pm_lock);

	/*
	 * The MHI devices are only created when the client device switches its
	 * Execution Environment (EE) to either SBL or AMSS states
	 */
	mhi_create_devices(mhi_cntrl);

	read_lock_bh(&mhi_cntrl->pm_lock);

error_mission_mode:
	mhi_cntrl->wake_put(mhi_cntrl, false);
	read_unlock_bh(&mhi_cntrl->pm_lock);

	return ret;
}

/* Handle SYS_ERR and Shutdown transitions */
static void mhi_pm_disable_transition(struct mhi_controller *mhi_cntrl,
				      enum mhi_pm_state transition_state)
{
	enum mhi_pm_state cur_state, prev_state;
	struct mhi_event *mhi_event;
	struct mhi_cmd_ctxt *cmd_ctxt;
	struct mhi_cmd *mhi_cmd;
	struct mhi_event_ctxt *er_ctxt;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	int ret, i;

	dev_dbg(dev, "Transitioning from PM state: %s to: %s\n",
		to_mhi_pm_state_str(mhi_cntrl->pm_state),
		to_mhi_pm_state_str(transition_state));

	/* We must notify MHI control driver so it can clean up first */
	if (transition_state == MHI_PM_SYS_ERR_PROCESS)
		mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_SYS_ERROR);

	mutex_lock(&mhi_cntrl->pm_mutex);
	write_lock_irq(&mhi_cntrl->pm_lock);
	prev_state = mhi_cntrl->pm_state;
	cur_state = mhi_tryset_pm_state(mhi_cntrl, transition_state);
	if (cur_state == transition_state) {
		mhi_cntrl->ee = MHI_EE_DISABLE_TRANSITION;
		mhi_cntrl->dev_state = MHI_STATE_RESET;
	}
	write_unlock_irq(&mhi_cntrl->pm_lock);

	/* Wake up threads waiting for state transition */
	wake_up_all(&mhi_cntrl->state_event);

	if (cur_state != transition_state) {
		dev_err(dev, "Failed to transition to state: %s from: %s\n",
			to_mhi_pm_state_str(transition_state),
			to_mhi_pm_state_str(cur_state));
		mutex_unlock(&mhi_cntrl->pm_mutex);
		return;
	}

	/* Trigger MHI RESET so that the device will not access host memory */
	if (MHI_REG_ACCESS_VALID(prev_state)) {
		u32 in_reset = -1;
		unsigned long timeout = msecs_to_jiffies(mhi_cntrl->timeout_ms);

		dev_dbg(dev, "Triggering MHI Reset in device\n");
		mhi_set_mhi_state(mhi_cntrl, MHI_STATE_RESET);

		/* Wait for the reset bit to be cleared by the device */
		ret = wait_event_timeout(mhi_cntrl->state_event,
					 mhi_read_reg_field(mhi_cntrl,
							    mhi_cntrl->regs,
							    MHICTRL,
							    MHICTRL_RESET_MASK,
							    MHICTRL_RESET_SHIFT,
							    &in_reset) ||
					 !in_reset, timeout);
		if ((!ret || in_reset) && cur_state == MHI_PM_SYS_ERR_PROCESS) {
			dev_err(dev, "Device failed to exit MHI Reset state\n");
			mutex_unlock(&mhi_cntrl->pm_mutex);
			return;
		}

		/*
		 * Device will clear BHI_INTVEC as a part of RESET processing,
		 * hence re-program it
		 */
		mhi_write_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_INTVEC, 0);
	}

	dev_dbg(dev,
		"Waiting for all pending event ring processing to complete\n");
	mhi_event = mhi_cntrl->mhi_event;
	for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
		if (mhi_event->offload_ev)
			continue;
		tasklet_kill(&mhi_event->task);
	}

	/* Release lock and wait for all pending threads to complete */
	mutex_unlock(&mhi_cntrl->pm_mutex);
	dev_dbg(dev, "Waiting for all pending threads to complete\n");
	wake_up_all(&mhi_cntrl->state_event);

	dev_dbg(dev, "Reset all active channels and remove MHI devices\n");
	device_for_each_child(mhi_cntrl->cntrl_dev, NULL, mhi_destroy_device);

	mutex_lock(&mhi_cntrl->pm_mutex);

	WARN_ON(atomic_read(&mhi_cntrl->dev_wake));
	WARN_ON(atomic_read(&mhi_cntrl->pending_pkts));

	/* Reset the ev rings and cmd rings */
	dev_dbg(dev, "Resetting EV CTXT and CMD CTXT\n");
	mhi_cmd = mhi_cntrl->mhi_cmd;
	cmd_ctxt = mhi_cntrl->mhi_ctxt->cmd_ctxt;
	for (i = 0; i < NR_OF_CMD_RINGS; i++, mhi_cmd++, cmd_ctxt++) {
		struct mhi_ring *ring = &mhi_cmd->ring;

		ring->rp = ring->base;
		ring->wp = ring->base;
		cmd_ctxt->rp = cmd_ctxt->rbase;
		cmd_ctxt->wp = cmd_ctxt->rbase;
	}

	mhi_event = mhi_cntrl->mhi_event;
	er_ctxt = mhi_cntrl->mhi_ctxt->er_ctxt;
	for (i = 0; i < mhi_cntrl->total_ev_rings; i++, er_ctxt++,
	     mhi_event++) {
		struct mhi_ring *ring = &mhi_event->ring;

		/* Skip offload events */
		if (mhi_event->offload_ev)
			continue;

		ring->rp = ring->base;
		ring->wp = ring->base;
		er_ctxt->rp = er_ctxt->rbase;
		er_ctxt->wp = er_ctxt->rbase;
	}

	if (cur_state == MHI_PM_SYS_ERR_PROCESS) {
		mhi_ready_state_transition(mhi_cntrl);
	} else {
		/* Move to disable state */
		write_lock_irq(&mhi_cntrl->pm_lock);
		cur_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_DISABLE);
		write_unlock_irq(&mhi_cntrl->pm_lock);
		if (unlikely(cur_state != MHI_PM_DISABLE))
			dev_err(dev, "Error moving from PM state: %s to: %s\n",
				to_mhi_pm_state_str(cur_state),
				to_mhi_pm_state_str(MHI_PM_DISABLE));
	}

	dev_dbg(dev, "Exiting with PM state: %s, MHI state: %s\n",
		to_mhi_pm_state_str(mhi_cntrl->pm_state),
		TO_MHI_STATE_STR(mhi_cntrl->dev_state));

	mutex_unlock(&mhi_cntrl->pm_mutex);
}

/* Queue a new work item and schedule work */
int mhi_queue_state_transition(struct mhi_controller *mhi_cntrl,
			       enum dev_st_transition state)
{
	struct state_transition *item = kmalloc(sizeof(*item), GFP_ATOMIC);
	unsigned long flags;

	if (!item)
		return -ENOMEM;

	item->state = state;
	spin_lock_irqsave(&mhi_cntrl->transition_lock, flags);
	list_add_tail(&item->node, &mhi_cntrl->transition_list);
	spin_unlock_irqrestore(&mhi_cntrl->transition_lock, flags);

	schedule_work(&mhi_cntrl->st_worker);

	return 0;
}

/* SYS_ERR worker */
void mhi_pm_sys_err_handler(struct mhi_controller *mhi_cntrl)
{
	struct device *dev = &mhi_cntrl->mhi_dev->dev;

	/* Skip if controller supports RDDM */
	if (mhi_cntrl->rddm_image) {
		dev_dbg(dev, "Controller supports RDDM, skip SYS_ERROR\n");
		return;
	}

	mhi_queue_state_transition(mhi_cntrl, DEV_ST_TRANSITION_SYS_ERR);
}

/* Device State Transition worker */
void mhi_pm_st_worker(struct work_struct *work)
{
	struct state_transition *itr, *tmp;
	LIST_HEAD(head);
	struct mhi_controller *mhi_cntrl = container_of(work,
							struct mhi_controller,
							st_worker);
	struct device *dev = &mhi_cntrl->mhi_dev->dev;

	spin_lock_irq(&mhi_cntrl->transition_lock);
	list_splice_tail_init(&mhi_cntrl->transition_list, &head);
	spin_unlock_irq(&mhi_cntrl->transition_lock);

	list_for_each_entry_safe(itr, tmp, &head, node) {
		list_del(&itr->node);
		dev_dbg(dev, "Handling state transition: %s\n",
			TO_DEV_STATE_TRANS_STR(itr->state));

		switch (itr->state) {
		case DEV_ST_TRANSITION_PBL:
			write_lock_irq(&mhi_cntrl->pm_lock);
			if (MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state))
				mhi_cntrl->ee = mhi_get_exec_env(mhi_cntrl);
			write_unlock_irq(&mhi_cntrl->pm_lock);
			if (MHI_IN_PBL(mhi_cntrl->ee))
				mhi_fw_load_handler(mhi_cntrl);
			break;
		case DEV_ST_TRANSITION_SBL:
			write_lock_irq(&mhi_cntrl->pm_lock);
			mhi_cntrl->ee = MHI_EE_SBL;
			write_unlock_irq(&mhi_cntrl->pm_lock);

			/*
			 * The MHI devices are only created when the client
			 * device switches its Execution Environment (EE) to
			 * either SBL or AMSS states
			 */
			mhi_create_devices(mhi_cntrl);
			break;
		case DEV_ST_TRANSITION_MISSION_MODE:
			mhi_pm_mission_mode_transition(mhi_cntrl);
			break;
		case DEV_ST_TRANSITION_READY:
			mhi_ready_state_transition(mhi_cntrl);
			break;
		case DEV_ST_TRANSITION_SYS_ERR:
			mhi_pm_disable_transition
				(mhi_cntrl, MHI_PM_SYS_ERR_PROCESS);
			break;
		case DEV_ST_TRANSITION_DISABLE:
			mhi_pm_disable_transition
				(mhi_cntrl, MHI_PM_SHUTDOWN_PROCESS);
			break;
		default:
			break;
		}
		kfree(itr);
	}
}

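/*
 * Move the device to M3 so the controller can suspend the link. Returns
 * -EBUSY if clients still hold device wake votes.
 */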
int mhi_pm_suspend(struct mhi_controller *mhi_cntrl)
{
	struct mhi_chan *itr, *tmp;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	enum mhi_pm_state new_state;
	int ret;

	if (mhi_cntrl->pm_state == MHI_PM_DISABLE)
		return -EINVAL;

	if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state))
		return -EIO;

	/* Return busy if there are any pending resources */
	if (atomic_read(&mhi_cntrl->dev_wake))
		return -EBUSY;

	/* Take MHI out of M2 state */
	read_lock_bh(&mhi_cntrl->pm_lock);
	mhi_cntrl->wake_get(mhi_cntrl, false);
	read_unlock_bh(&mhi_cntrl->pm_lock);

	ret = wait_event_timeout(mhi_cntrl->state_event,
				 mhi_cntrl->dev_state == MHI_STATE_M0 ||
				 mhi_cntrl->dev_state == MHI_STATE_M1 ||
				 MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state),
				 msecs_to_jiffies(mhi_cntrl->timeout_ms));

	read_lock_bh(&mhi_cntrl->pm_lock);
	mhi_cntrl->wake_put(mhi_cntrl, false);
	read_unlock_bh(&mhi_cntrl->pm_lock);

	if (!ret || MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
		dev_err(dev, "Could not enter M0/M1 state\n");
		return -EIO;
	}

	write_lock_irq(&mhi_cntrl->pm_lock);

	if (atomic_read(&mhi_cntrl->dev_wake)) {
		write_unlock_irq(&mhi_cntrl->pm_lock);
		return -EBUSY;
	}

	dev_info(dev, "Allowing M3 transition\n");
	new_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_M3_ENTER);
	if (new_state != MHI_PM_M3_ENTER) {
		write_unlock_irq(&mhi_cntrl->pm_lock);
		dev_err(dev,
			"Error setting to PM state: %s from: %s\n",
			to_mhi_pm_state_str(MHI_PM_M3_ENTER),
			to_mhi_pm_state_str(mhi_cntrl->pm_state));
		return -EIO;
	}

	/* Set MHI to M3 and wait for completion */
	mhi_set_mhi_state(mhi_cntrl, MHI_STATE_M3);
	write_unlock_irq(&mhi_cntrl->pm_lock);
	dev_info(dev, "Wait for M3 completion\n");

	ret = wait_event_timeout(mhi_cntrl->state_event,
				 mhi_cntrl->dev_state == MHI_STATE_M3 ||
				 MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state),
				 msecs_to_jiffies(mhi_cntrl->timeout_ms));

	if (!ret || MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
		dev_err(dev,
			"Did not enter M3 state, MHI state: %s, PM state: %s\n",
			TO_MHI_STATE_STR(mhi_cntrl->dev_state),
			to_mhi_pm_state_str(mhi_cntrl->pm_state));
		return -EIO;
	}

	/* Notify clients about entering LPM */
	list_for_each_entry_safe(itr, tmp, &mhi_cntrl->lpm_chans, node) {
		mutex_lock(&itr->mutex);
		if (itr->mhi_dev)
			mhi_notify(itr->mhi_dev, MHI_CB_LPM_ENTER);
		mutex_unlock(&itr->mutex);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(mhi_pm_suspend);

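/* Bring the device from M3 back to M0 once the link has been resumed */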
int mhi_pm_resume(struct mhi_controller *mhi_cntrl)
{
	struct mhi_chan *itr, *tmp;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	enum mhi_pm_state cur_state;
	int ret;

	dev_info(dev, "Entered with PM state: %s, MHI state: %s\n",
		 to_mhi_pm_state_str(mhi_cntrl->pm_state),
		 TO_MHI_STATE_STR(mhi_cntrl->dev_state));

	if (mhi_cntrl->pm_state == MHI_PM_DISABLE)
		return 0;

	if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state))
		return -EIO;

	/* Notify clients about exiting LPM */
	list_for_each_entry_safe(itr, tmp, &mhi_cntrl->lpm_chans, node) {
		mutex_lock(&itr->mutex);
		if (itr->mhi_dev)
			mhi_notify(itr->mhi_dev, MHI_CB_LPM_EXIT);
		mutex_unlock(&itr->mutex);
	}

	write_lock_irq(&mhi_cntrl->pm_lock);
	cur_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_M3_EXIT);
	if (cur_state != MHI_PM_M3_EXIT) {
		write_unlock_irq(&mhi_cntrl->pm_lock);
		dev_err(dev,
			"Error setting to PM state: %s from: %s\n",
			to_mhi_pm_state_str(MHI_PM_M3_EXIT),
			to_mhi_pm_state_str(mhi_cntrl->pm_state));
		return -EIO;
	}

	/* Set MHI to M0 and wait for completion */
	mhi_set_mhi_state(mhi_cntrl, MHI_STATE_M0);
	write_unlock_irq(&mhi_cntrl->pm_lock);

	ret = wait_event_timeout(mhi_cntrl->state_event,
				 mhi_cntrl->dev_state == MHI_STATE_M0 ||
				 MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state),
				 msecs_to_jiffies(mhi_cntrl->timeout_ms));

	if (!ret || MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
		dev_err(dev,
			"Did not enter M0 state, MHI state: %s, PM state: %s\n",
			TO_MHI_STATE_STR(mhi_cntrl->dev_state),
			to_mhi_pm_state_str(mhi_cntrl->pm_state));
		return -EIO;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(mhi_pm_resume);

int __mhi_device_get_sync(struct mhi_controller *mhi_cntrl)
{
	int ret;

	/* Wake up the device */
	read_lock_bh(&mhi_cntrl->pm_lock);
	mhi_cntrl->wake_get(mhi_cntrl, true);
	if (MHI_PM_IN_SUSPEND_STATE(mhi_cntrl->pm_state)) {
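		/* Kick runtime PM so the controller driver resumes the link */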
		pm_wakeup_event(&mhi_cntrl->mhi_dev->dev, 0);
		mhi_cntrl->runtime_get(mhi_cntrl);
		mhi_cntrl->runtime_put(mhi_cntrl);
	}
	read_unlock_bh(&mhi_cntrl->pm_lock);

	ret = wait_event_timeout(mhi_cntrl->state_event,
				 mhi_cntrl->pm_state == MHI_PM_M0 ||
				 MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state),
				 msecs_to_jiffies(mhi_cntrl->timeout_ms));

	if (!ret || MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
		read_lock_bh(&mhi_cntrl->pm_lock);
		mhi_cntrl->wake_put(mhi_cntrl, false);
		read_unlock_bh(&mhi_cntrl->pm_lock);
		return -EIO;
	}

	return 0;
}

/* Assert device wake db */
static void mhi_assert_dev_wake(struct mhi_controller *mhi_cntrl, bool force)
{
	unsigned long flags;

	/*
	 * If force flag is set, then increment the wake count value and
	 * ring wake db
	 */
	if (unlikely(force)) {
		spin_lock_irqsave(&mhi_cntrl->wlock, flags);
		atomic_inc(&mhi_cntrl->dev_wake);
		if (MHI_WAKE_DB_FORCE_SET_VALID(mhi_cntrl->pm_state) &&
		    !mhi_cntrl->wake_set) {
			mhi_write_db(mhi_cntrl, mhi_cntrl->wake_db, 1);
			mhi_cntrl->wake_set = true;
		}
		spin_unlock_irqrestore(&mhi_cntrl->wlock, flags);
	} else {
		/*
		 * If resources are requested already, then just increment
		 * the wake count value and return
		 */
		if (likely(atomic_add_unless(&mhi_cntrl->dev_wake, 1, 0)))
			return;

		spin_lock_irqsave(&mhi_cntrl->wlock, flags);
		if ((atomic_inc_return(&mhi_cntrl->dev_wake) == 1) &&
		    MHI_WAKE_DB_SET_VALID(mhi_cntrl->pm_state) &&
		    !mhi_cntrl->wake_set) {
			mhi_write_db(mhi_cntrl, mhi_cntrl->wake_db, 1);
			mhi_cntrl->wake_set = true;
		}
		spin_unlock_irqrestore(&mhi_cntrl->wlock, flags);
	}
}

/* De-assert device wake db */
static void mhi_deassert_dev_wake(struct mhi_controller *mhi_cntrl,
				  bool override)
{
	unsigned long flags;

	/*
	 * Only continue if there is a single resource, since we need to
	 * de-assert the device wake db
	 */
	if (likely(atomic_add_unless(&mhi_cntrl->dev_wake, -1, 1)))
		return;

	spin_lock_irqsave(&mhi_cntrl->wlock, flags);
	if ((atomic_dec_return(&mhi_cntrl->dev_wake) == 0) &&
	    MHI_WAKE_DB_CLEAR_VALID(mhi_cntrl->pm_state) && !override &&
	    mhi_cntrl->wake_set) {
		mhi_write_db(mhi_cntrl, mhi_cntrl->wake_db, 0);
		mhi_cntrl->wake_set = false;
	}
	spin_unlock_irqrestore(&mhi_cntrl->wlock, flags);
}

int mhi_async_power_up(struct mhi_controller *mhi_cntrl)
{
	enum mhi_state state;
	enum mhi_ee_type current_ee;
	enum dev_st_transition next_state;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	u32 val;
	int ret;

	dev_info(dev, "Requested to power ON\n");

	if (mhi_cntrl->nr_irqs < mhi_cntrl->total_ev_rings)
		return -EINVAL;

	/* Supply default wake routines if not provided by controller driver */
	if (!mhi_cntrl->wake_get || !mhi_cntrl->wake_put ||
	    !mhi_cntrl->wake_toggle) {
		mhi_cntrl->wake_get = mhi_assert_dev_wake;
		mhi_cntrl->wake_put = mhi_deassert_dev_wake;
		mhi_cntrl->wake_toggle = (mhi_cntrl->db_access & MHI_PM_M2) ?
			mhi_toggle_dev_wake_nop : mhi_toggle_dev_wake;
	}

	mutex_lock(&mhi_cntrl->pm_mutex);
	mhi_cntrl->pm_state = MHI_PM_DISABLE;

	if (!mhi_cntrl->pre_init) {
		/* Setup device context */
		ret = mhi_init_dev_ctxt(mhi_cntrl);
		if (ret)
			goto error_dev_ctxt;
	}

	ret = mhi_init_irq_setup(mhi_cntrl);
	if (ret)
		goto error_setup_irq;

	/* Setup BHI offset & INTVEC */
	write_lock_irq(&mhi_cntrl->pm_lock);
	ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->regs, BHIOFF, &val);
	if (ret) {
		write_unlock_irq(&mhi_cntrl->pm_lock);
		goto error_bhi_offset;
	}

	mhi_cntrl->bhi = mhi_cntrl->regs + val;

	/* Setup BHIE offset */
	if (mhi_cntrl->fbc_download) {
		ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->regs, BHIEOFF, &val);
		if (ret) {
			write_unlock_irq(&mhi_cntrl->pm_lock);
			dev_err(dev, "Error reading BHIE offset\n");
			goto error_bhi_offset;
		}

		mhi_cntrl->bhie = mhi_cntrl->regs + val;
	}

	mhi_write_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_INTVEC, 0);
	mhi_cntrl->pm_state = MHI_PM_POR;
	mhi_cntrl->ee = MHI_EE_MAX;
	current_ee = mhi_get_exec_env(mhi_cntrl);
	write_unlock_irq(&mhi_cntrl->pm_lock);

	/* Confirm that the device is in valid exec env */
	if (!MHI_IN_PBL(current_ee) && current_ee != MHI_EE_AMSS) {
		dev_err(dev, "Not a valid EE for power on\n");
		ret = -EIO;
		goto error_bhi_offset;
	}

	state = mhi_get_mhi_state(mhi_cntrl);
	if (state == MHI_STATE_SYS_ERR) {
		mhi_set_mhi_state(mhi_cntrl, MHI_STATE_RESET);
		ret = wait_event_timeout(mhi_cntrl->state_event,
				MHI_PM_IN_FATAL_STATE(mhi_cntrl->pm_state) ||
					mhi_read_reg_field(mhi_cntrl,
							   mhi_cntrl->regs,
							   MHICTRL,
							   MHICTRL_RESET_MASK,
							   MHICTRL_RESET_SHIFT,
							   &val) ||
					!val,
				msecs_to_jiffies(mhi_cntrl->timeout_ms));
		if (!ret) {
			ret = -EIO;
			dev_info(dev, "Failed to reset MHI due to syserr state\n");
			goto error_bhi_offset;
		}

		/*
		 * Device will clear BHI_INTVEC as a part of RESET processing,
		 * hence re-program it
		 */
		mhi_write_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_INTVEC, 0);
	}

	/* Transition to next state */
	next_state = MHI_IN_PBL(current_ee) ?
		DEV_ST_TRANSITION_PBL : DEV_ST_TRANSITION_READY;

	mhi_queue_state_transition(mhi_cntrl, next_state);

	mutex_unlock(&mhi_cntrl->pm_mutex);

	dev_info(dev, "Power on setup success\n");

	return 0;

error_bhi_offset:
	mhi_deinit_free_irq(mhi_cntrl);

error_setup_irq:
	if (!mhi_cntrl->pre_init)
		mhi_deinit_dev_ctxt(mhi_cntrl);

error_dev_ctxt:
	mutex_unlock(&mhi_cntrl->pm_mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(mhi_async_power_up);

void mhi_power_down(struct mhi_controller *mhi_cntrl, bool graceful)
{
	enum mhi_pm_state cur_state;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;

	/* If it's not a graceful shutdown, force MHI to linkdown state */
	if (!graceful) {
		mutex_lock(&mhi_cntrl->pm_mutex);
		write_lock_irq(&mhi_cntrl->pm_lock);
		cur_state = mhi_tryset_pm_state(mhi_cntrl,
						MHI_PM_LD_ERR_FATAL_DETECT);
		write_unlock_irq(&mhi_cntrl->pm_lock);
		mutex_unlock(&mhi_cntrl->pm_mutex);
		if (cur_state != MHI_PM_LD_ERR_FATAL_DETECT)
			dev_dbg(dev, "Failed to move to state: %s from: %s\n",
				to_mhi_pm_state_str(MHI_PM_LD_ERR_FATAL_DETECT),
				to_mhi_pm_state_str(mhi_cntrl->pm_state));
	}

	mhi_queue_state_transition(mhi_cntrl, DEV_ST_TRANSITION_DISABLE);

	/* Wait for shutdown to complete */
	flush_work(&mhi_cntrl->st_worker);

	mhi_deinit_free_irq(mhi_cntrl);

	if (!mhi_cntrl->pre_init) {
		/* Free all allocated resources */
		if (mhi_cntrl->fbc_image) {
			mhi_free_bhie_table(mhi_cntrl, mhi_cntrl->fbc_image);
			mhi_cntrl->fbc_image = NULL;
		}
		mhi_deinit_dev_ctxt(mhi_cntrl);
	}
}
EXPORT_SYMBOL_GPL(mhi_power_down);

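/*
 * Like mhi_async_power_up(), but also waits for the device to enter
 * mission mode (or an error state) before returning.
 */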
int mhi_sync_power_up(struct mhi_controller *mhi_cntrl)
{
	int ret = mhi_async_power_up(mhi_cntrl);

	if (ret)
		return ret;

	wait_event_timeout(mhi_cntrl->state_event,
			   MHI_IN_MISSION_MODE(mhi_cntrl->ee) ||
			   MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state),
			   msecs_to_jiffies(mhi_cntrl->timeout_ms));

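	/* Power down if the device failed to reach mission mode in time */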
	ret = (MHI_IN_MISSION_MODE(mhi_cntrl->ee)) ? 0 : -ETIMEDOUT;
	if (ret)
		mhi_power_down(mhi_cntrl, false);

	return ret;
}
EXPORT_SYMBOL(mhi_sync_power_up);

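/* Trigger SYS_ERR and wait for the device to enter RDDM */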
int mhi_force_rddm_mode(struct mhi_controller *mhi_cntrl)
{
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	int ret;

	/* Check if device is already in RDDM */
	if (mhi_cntrl->ee == MHI_EE_RDDM)
		return 0;

	dev_dbg(dev, "Triggering SYS_ERR to force RDDM state\n");
	mhi_set_mhi_state(mhi_cntrl, MHI_STATE_SYS_ERR);

	/* Wait for RDDM event */
	ret = wait_event_timeout(mhi_cntrl->state_event,
				 mhi_cntrl->ee == MHI_EE_RDDM,
				 msecs_to_jiffies(mhi_cntrl->timeout_ms));
	ret = ret ? 0 : -EIO;

	return ret;
}
EXPORT_SYMBOL_GPL(mhi_force_rddm_mode);

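/* Assert device wake on behalf of a client device */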
void mhi_device_get(struct mhi_device *mhi_dev)
{
	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;

	mhi_dev->dev_wake++;
	read_lock_bh(&mhi_cntrl->pm_lock);
	mhi_cntrl->wake_get(mhi_cntrl, true);
	read_unlock_bh(&mhi_cntrl->pm_lock);
}
EXPORT_SYMBOL_GPL(mhi_device_get);

int mhi_device_get_sync(struct mhi_device *mhi_dev)
{
	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
	int ret;

	ret = __mhi_device_get_sync(mhi_cntrl);
	if (!ret)
		mhi_dev->dev_wake++;

	return ret;
}
EXPORT_SYMBOL_GPL(mhi_device_get_sync);

void mhi_device_put(struct mhi_device *mhi_dev)
{
	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;

	mhi_dev->dev_wake--;
	read_lock_bh(&mhi_cntrl->pm_lock);
	if (MHI_PM_IN_SUSPEND_STATE(mhi_cntrl->pm_state)) {
		mhi_cntrl->runtime_get(mhi_cntrl);
		mhi_cntrl->runtime_put(mhi_cntrl);
	}

	mhi_cntrl->wake_put(mhi_cntrl, false);
	read_unlock_bh(&mhi_cntrl->pm_lock);
}
EXPORT_SYMBOL_GPL(mhi_device_put);