linux/drivers/bus/mhi/core/pm.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
 *
 */

#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dma-direction.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/mhi.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/wait.h>
#include "internal.h"

/*
 * Not all MHI state transitions are synchronous. Transitions like Linkdown,
 * SYS_ERR, and shutdown can happen at any time asynchronously. This function
 * will transition to a new state only if we're allowed to.
 *
 * Priority increases as we go down the list. For instance, from any state in
 * L0, a transition can be made to states in L1, L2 and L3. A notable
 * exception to this rule is the DISABLE state: from DISABLE we can only
 * transition to POR. Also, while in the L2 state, we cannot jump back to the
 * L1 or L0 states.
 *
 * Valid transitions:
 * L0: DISABLE <--> POR
 *     POR <--> POR
 *     POR -> M0 -> M2 -> M0
 *     POR -> FW_DL_ERR
 *     FW_DL_ERR <--> FW_DL_ERR
 *     M0 <--> M0
 *     M0 -> FW_DL_ERR
 *     M0 -> M3_ENTER -> M3 -> M3_EXIT -> M0
 * L1: SYS_ERR_DETECT -> SYS_ERR_PROCESS -> POR
 * L2: SHUTDOWN_PROCESS -> LD_ERR_FATAL_DETECT
 *     SHUTDOWN_PROCESS -> DISABLE
 * L3: LD_ERR_FATAL_DETECT <--> LD_ERR_FATAL_DETECT
 *     LD_ERR_FATAL_DETECT -> DISABLE
 */
static struct mhi_pm_transitions const dev_state_transitions[] = {
        /* L0 States */
        {
                MHI_PM_DISABLE,
                MHI_PM_POR
        },
        {
                MHI_PM_POR,
                MHI_PM_POR | MHI_PM_DISABLE | MHI_PM_M0 |
                MHI_PM_SYS_ERR_DETECT | MHI_PM_SHUTDOWN_PROCESS |
                MHI_PM_LD_ERR_FATAL_DETECT | MHI_PM_FW_DL_ERR
        },
        {
                MHI_PM_M0,
                MHI_PM_M0 | MHI_PM_M2 | MHI_PM_M3_ENTER |
                MHI_PM_SYS_ERR_DETECT | MHI_PM_SHUTDOWN_PROCESS |
                MHI_PM_LD_ERR_FATAL_DETECT | MHI_PM_FW_DL_ERR
        },
        {
                MHI_PM_M2,
                MHI_PM_M0 | MHI_PM_SYS_ERR_DETECT | MHI_PM_SHUTDOWN_PROCESS |
                MHI_PM_LD_ERR_FATAL_DETECT
        },
        {
                MHI_PM_M3_ENTER,
                MHI_PM_M3 | MHI_PM_SYS_ERR_DETECT | MHI_PM_SHUTDOWN_PROCESS |
                MHI_PM_LD_ERR_FATAL_DETECT
        },
        {
                MHI_PM_M3,
                MHI_PM_M3_EXIT | MHI_PM_SYS_ERR_DETECT |
                MHI_PM_LD_ERR_FATAL_DETECT
        },
        {
                MHI_PM_M3_EXIT,
                MHI_PM_M0 | MHI_PM_SYS_ERR_DETECT | MHI_PM_SHUTDOWN_PROCESS |
                MHI_PM_LD_ERR_FATAL_DETECT
        },
        {
                MHI_PM_FW_DL_ERR,
                MHI_PM_FW_DL_ERR | MHI_PM_SYS_ERR_DETECT |
                MHI_PM_SHUTDOWN_PROCESS | MHI_PM_LD_ERR_FATAL_DETECT
        },
        /* L1 States */
        {
                MHI_PM_SYS_ERR_DETECT,
                MHI_PM_SYS_ERR_PROCESS | MHI_PM_SHUTDOWN_PROCESS |
                MHI_PM_LD_ERR_FATAL_DETECT
        },
        {
                MHI_PM_SYS_ERR_PROCESS,
                MHI_PM_POR | MHI_PM_SHUTDOWN_PROCESS |
                MHI_PM_LD_ERR_FATAL_DETECT
        },
        /* L2 States */
        {
                MHI_PM_SHUTDOWN_PROCESS,
                MHI_PM_DISABLE | MHI_PM_LD_ERR_FATAL_DETECT
        },
        /* L3 States */
        {
                MHI_PM_LD_ERR_FATAL_DETECT,
                MHI_PM_LD_ERR_FATAL_DETECT | MHI_PM_DISABLE
        },
};

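/*
 * Attempt to transition to the requested PM state, honoring the table above.
 * Callers hold pm_lock in write mode and must check the return value: if it
 * does not equal the requested state, the transition was rejected.
 */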
enum mhi_pm_state __must_check mhi_tryset_pm_state(struct mhi_controller *mhi_cntrl,
                                                   enum mhi_pm_state state)
{
        unsigned long cur_state = mhi_cntrl->pm_state;
        int index = find_last_bit(&cur_state, 32);

        if (unlikely(index >= ARRAY_SIZE(dev_state_transitions)))
                return cur_state;

        if (unlikely(dev_state_transitions[index].from_state != cur_state))
                return cur_state;

        if (unlikely(!(dev_state_transitions[index].to_states & state)))
                return cur_state;

        mhi_cntrl->pm_state = state;
        return mhi_cntrl->pm_state;
}

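/* Set the device MHI state by writing MHICTRL (MHI RESET has its own field) */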
void mhi_set_mhi_state(struct mhi_controller *mhi_cntrl, enum mhi_state state)
{
        if (state == MHI_STATE_RESET) {
                mhi_write_reg_field(mhi_cntrl, mhi_cntrl->regs, MHICTRL,
                                    MHICTRL_RESET_MASK, MHICTRL_RESET_SHIFT, 1);
        } else {
                mhi_write_reg_field(mhi_cntrl, mhi_cntrl->regs, MHICTRL,
                                    MHICTRL_MHISTATE_MASK,
                                    MHICTRL_MHISTATE_SHIFT, state);
        }
}

/* NOP for backward compatibility, host allowed to ring DB in M2 state */
static void mhi_toggle_dev_wake_nop(struct mhi_controller *mhi_cntrl)
{
}

static void mhi_toggle_dev_wake(struct mhi_controller *mhi_cntrl)
{
        mhi_cntrl->wake_get(mhi_cntrl, false);
        mhi_cntrl->wake_put(mhi_cntrl, true);
}

/* Handle device ready state transition */
int mhi_ready_state_transition(struct mhi_controller *mhi_cntrl)
{
        struct mhi_event *mhi_event;
        enum mhi_pm_state cur_state;
        struct device *dev = &mhi_cntrl->mhi_dev->dev;
        u32 interval_us = 25000; /* poll register field every 25 milliseconds */
        int ret, i;

        /* Check if device entered error state */
        if (MHI_PM_IN_FATAL_STATE(mhi_cntrl->pm_state)) {
                dev_err(dev, "Device link is not accessible\n");
                return -EIO;
        }

        /* Wait for RESET to be cleared and READY bit to be set by the device */
        ret = mhi_poll_reg_field(mhi_cntrl, mhi_cntrl->regs, MHICTRL,
                                 MHICTRL_RESET_MASK, MHICTRL_RESET_SHIFT, 0,
                                 interval_us);
        if (ret) {
                dev_err(dev, "Device failed to clear MHI Reset\n");
                return ret;
        }

        ret = mhi_poll_reg_field(mhi_cntrl, mhi_cntrl->regs, MHISTATUS,
                                 MHISTATUS_READY_MASK, MHISTATUS_READY_SHIFT, 1,
                                 interval_us);
        if (ret) {
                dev_err(dev, "Device failed to enter MHI Ready\n");
                return ret;
        }

        dev_dbg(dev, "Device in READY State\n");
        write_lock_irq(&mhi_cntrl->pm_lock);
        cur_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_POR);
        mhi_cntrl->dev_state = MHI_STATE_READY;
        write_unlock_irq(&mhi_cntrl->pm_lock);

        if (cur_state != MHI_PM_POR) {
                dev_err(dev, "Error moving to state %s from %s\n",
                        to_mhi_pm_state_str(MHI_PM_POR),
                        to_mhi_pm_state_str(cur_state));
                return -EIO;
        }

        read_lock_bh(&mhi_cntrl->pm_lock);
        if (!MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) {
                dev_err(dev, "Device registers not accessible\n");
                goto error_mmio;
        }

        /* Configure MMIO registers */
        ret = mhi_init_mmio(mhi_cntrl);
        if (ret) {
                dev_err(dev, "Error configuring MMIO registers\n");
                goto error_mmio;
        }

        /* Add elements to all SW event rings */
        mhi_event = mhi_cntrl->mhi_event;
        for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
                struct mhi_ring *ring = &mhi_event->ring;

                /* Skip if this is an offload or HW event */
                if (mhi_event->offload_ev || mhi_event->hw_ring)
                        continue;

                ring->wp = ring->base + ring->len - ring->el_size;
                *ring->ctxt_wp = ring->iommu_base + ring->len - ring->el_size;
                /* Update all cores */
                smp_wmb();

                /* Ring the event ring db */
                spin_lock_irq(&mhi_event->lock);
                mhi_ring_er_db(mhi_event);
                spin_unlock_irq(&mhi_event->lock);
        }

        /* Set MHI to M0 state */
        mhi_set_mhi_state(mhi_cntrl, MHI_STATE_M0);
        read_unlock_bh(&mhi_cntrl->pm_lock);

        return 0;

error_mmio:
        read_unlock_bh(&mhi_cntrl->pm_lock);

        return -EIO;
}

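/* Handle device M0 state transition */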
int mhi_pm_m0_transition(struct mhi_controller *mhi_cntrl)
{
        enum mhi_pm_state cur_state;
        struct mhi_chan *mhi_chan;
        struct device *dev = &mhi_cntrl->mhi_dev->dev;
        int i;

        write_lock_irq(&mhi_cntrl->pm_lock);
        mhi_cntrl->dev_state = MHI_STATE_M0;
        cur_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_M0);
        write_unlock_irq(&mhi_cntrl->pm_lock);
        if (unlikely(cur_state != MHI_PM_M0)) {
                dev_err(dev, "Unable to transition to M0 state\n");
                return -EIO;
        }
        mhi_cntrl->M0++;

        /* Wake up the device */
        read_lock_bh(&mhi_cntrl->pm_lock);
        mhi_cntrl->wake_get(mhi_cntrl, true);

        /* Ring all event rings and CMD ring only if we're in mission mode */
        if (MHI_IN_MISSION_MODE(mhi_cntrl->ee)) {
                struct mhi_event *mhi_event = mhi_cntrl->mhi_event;
                struct mhi_cmd *mhi_cmd =
                        &mhi_cntrl->mhi_cmd[PRIMARY_CMD_RING];

                for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
                        if (mhi_event->offload_ev)
                                continue;

                        spin_lock_irq(&mhi_event->lock);
                        mhi_ring_er_db(mhi_event);
                        spin_unlock_irq(&mhi_event->lock);
                }

                /* Only ring primary cmd ring if ring is not empty */
                spin_lock_irq(&mhi_cmd->lock);
                if (mhi_cmd->ring.rp != mhi_cmd->ring.wp)
                        mhi_ring_cmd_db(mhi_cntrl, mhi_cmd);
                spin_unlock_irq(&mhi_cmd->lock);
        }

        /* Ring channel DB registers */
        mhi_chan = mhi_cntrl->mhi_chan;
        for (i = 0; i < mhi_cntrl->max_chan; i++, mhi_chan++) {
                struct mhi_ring *tre_ring = &mhi_chan->tre_ring;

                if (mhi_chan->db_cfg.reset_req) {
                        write_lock_irq(&mhi_chan->lock);
                        mhi_chan->db_cfg.db_mode = true;
                        write_unlock_irq(&mhi_chan->lock);
                }

                read_lock_irq(&mhi_chan->lock);

                /* Only ring DB if ring is not empty */
                if (tre_ring->base && tre_ring->wp != tre_ring->rp)
                        mhi_ring_chan_db(mhi_cntrl, mhi_chan);
                read_unlock_irq(&mhi_chan->lock);
        }

        mhi_cntrl->wake_put(mhi_cntrl, false);
        read_unlock_bh(&mhi_cntrl->pm_lock);
        wake_up_all(&mhi_cntrl->state_event);

        return 0;
}

/*
 * After receiving the MHI state change event from the device indicating the
 * transition to M1 state, the host can transition the device to the M2 state
 * to keep it in a low power state.
 */
void mhi_pm_m1_transition(struct mhi_controller *mhi_cntrl)
{
        enum mhi_pm_state state;
        struct device *dev = &mhi_cntrl->mhi_dev->dev;

        write_lock_irq(&mhi_cntrl->pm_lock);
        state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_M2);
        if (state == MHI_PM_M2) {
                mhi_set_mhi_state(mhi_cntrl, MHI_STATE_M2);
                mhi_cntrl->dev_state = MHI_STATE_M2;

                write_unlock_irq(&mhi_cntrl->pm_lock);

                mhi_cntrl->M2++;
                wake_up_all(&mhi_cntrl->state_event);

                /* If there are any pending resources, exit M2 immediately */
                if (unlikely(atomic_read(&mhi_cntrl->pending_pkts) ||
                             atomic_read(&mhi_cntrl->dev_wake))) {
                        dev_dbg(dev,
                                "Exiting M2, pending_pkts: %d dev_wake: %d\n",
                                atomic_read(&mhi_cntrl->pending_pkts),
                                atomic_read(&mhi_cntrl->dev_wake));
                        read_lock_bh(&mhi_cntrl->pm_lock);
                        mhi_cntrl->wake_get(mhi_cntrl, true);
                        mhi_cntrl->wake_put(mhi_cntrl, true);
                        read_unlock_bh(&mhi_cntrl->pm_lock);
                } else {
                        mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_IDLE);
                }
        } else {
                write_unlock_irq(&mhi_cntrl->pm_lock);
        }
}

/* MHI M3 completion handler */
int mhi_pm_m3_transition(struct mhi_controller *mhi_cntrl)
{
        enum mhi_pm_state state;
        struct device *dev = &mhi_cntrl->mhi_dev->dev;

        write_lock_irq(&mhi_cntrl->pm_lock);
        mhi_cntrl->dev_state = MHI_STATE_M3;
        state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_M3);
        write_unlock_irq(&mhi_cntrl->pm_lock);
        if (state != MHI_PM_M3) {
                dev_err(dev, "Unable to transition to M3 state\n");
                return -EIO;
        }

        mhi_cntrl->M3++;
        wake_up_all(&mhi_cntrl->state_event);

        return 0;
}

/* Handle device Mission Mode transition */
static int mhi_pm_mission_mode_transition(struct mhi_controller *mhi_cntrl)
{
        struct mhi_event *mhi_event;
        struct device *dev = &mhi_cntrl->mhi_dev->dev;
        enum mhi_ee_type ee = MHI_EE_MAX, current_ee = mhi_cntrl->ee;
        int i, ret;

        dev_dbg(dev, "Processing Mission Mode transition\n");

        write_lock_irq(&mhi_cntrl->pm_lock);
        if (MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state))
                ee = mhi_get_exec_env(mhi_cntrl);

        if (!MHI_IN_MISSION_MODE(ee)) {
                mhi_cntrl->pm_state = MHI_PM_LD_ERR_FATAL_DETECT;
                write_unlock_irq(&mhi_cntrl->pm_lock);
                wake_up_all(&mhi_cntrl->state_event);
                return -EIO;
        }
        mhi_cntrl->ee = ee;
        write_unlock_irq(&mhi_cntrl->pm_lock);

        wake_up_all(&mhi_cntrl->state_event);

        device_for_each_child(&mhi_cntrl->mhi_dev->dev, &current_ee,
                              mhi_destroy_device);
        mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_EE_MISSION_MODE);

        /* Force MHI to be in M0 state before continuing */
        ret = __mhi_device_get_sync(mhi_cntrl);
        if (ret)
                return ret;

        read_lock_bh(&mhi_cntrl->pm_lock);

        if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
                ret = -EIO;
                goto error_mission_mode;
        }

        /* Add elements to all HW event rings */
        mhi_event = mhi_cntrl->mhi_event;
        for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
                struct mhi_ring *ring = &mhi_event->ring;

                if (mhi_event->offload_ev || !mhi_event->hw_ring)
                        continue;

                ring->wp = ring->base + ring->len - ring->el_size;
                *ring->ctxt_wp = ring->iommu_base + ring->len - ring->el_size;
                /* Update to all cores */
                smp_wmb();

                spin_lock_irq(&mhi_event->lock);
                if (MHI_DB_ACCESS_VALID(mhi_cntrl))
                        mhi_ring_er_db(mhi_event);
                spin_unlock_irq(&mhi_event->lock);
        }

        read_unlock_bh(&mhi_cntrl->pm_lock);

        /*
         * The MHI devices are only created when the client device switches
         * its Execution Environment (EE) to either the SBL or AMSS state
         */
        mhi_create_devices(mhi_cntrl);

        read_lock_bh(&mhi_cntrl->pm_lock);

error_mission_mode:
        mhi_cntrl->wake_put(mhi_cntrl, false);
        read_unlock_bh(&mhi_cntrl->pm_lock);

        return ret;
}

/* Handle shutdown transitions */
static void mhi_pm_disable_transition(struct mhi_controller *mhi_cntrl)
{
        enum mhi_pm_state cur_state;
        struct mhi_event *mhi_event;
        struct mhi_cmd_ctxt *cmd_ctxt;
        struct mhi_cmd *mhi_cmd;
        struct mhi_event_ctxt *er_ctxt;
        struct device *dev = &mhi_cntrl->mhi_dev->dev;
        int ret, i;

        dev_dbg(dev, "Processing disable transition with PM state: %s\n",
                to_mhi_pm_state_str(mhi_cntrl->pm_state));

        mutex_lock(&mhi_cntrl->pm_mutex);

        /* Trigger MHI RESET so that the device will not access host memory */
        if (!MHI_PM_IN_FATAL_STATE(mhi_cntrl->pm_state)) {
                dev_dbg(dev, "Triggering MHI Reset in device\n");
                mhi_set_mhi_state(mhi_cntrl, MHI_STATE_RESET);

                /* Wait for the reset bit to be cleared by the device */
                ret = mhi_poll_reg_field(mhi_cntrl, mhi_cntrl->regs, MHICTRL,
                                         MHICTRL_RESET_MASK,
                                         MHICTRL_RESET_SHIFT, 0, 25000);
                if (ret)
                        dev_err(dev, "Device failed to clear MHI Reset\n");

                /*
                 * Device will clear BHI_INTVEC as a part of RESET processing,
                 * hence re-program it
                 */
                mhi_write_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_INTVEC, 0);
        }

        dev_dbg(dev,
                "Waiting for all pending event ring processing to complete\n");
        mhi_event = mhi_cntrl->mhi_event;
        for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
                if (mhi_event->offload_ev)
                        continue;
                free_irq(mhi_cntrl->irq[mhi_event->irq], mhi_event);
                tasklet_kill(&mhi_event->task);
        }

        /* Release lock and wait for all pending threads to complete */
        mutex_unlock(&mhi_cntrl->pm_mutex);
        dev_dbg(dev, "Waiting for all pending threads to complete\n");
        wake_up_all(&mhi_cntrl->state_event);

        dev_dbg(dev, "Reset all active channels and remove MHI devices\n");
        device_for_each_child(&mhi_cntrl->mhi_dev->dev, NULL, mhi_destroy_device);

        mutex_lock(&mhi_cntrl->pm_mutex);

        WARN_ON(atomic_read(&mhi_cntrl->dev_wake));
        WARN_ON(atomic_read(&mhi_cntrl->pending_pkts));

        /* Reset the ev rings and cmd rings */
        dev_dbg(dev, "Resetting EV CTXT and CMD CTXT\n");
        mhi_cmd = mhi_cntrl->mhi_cmd;
        cmd_ctxt = mhi_cntrl->mhi_ctxt->cmd_ctxt;
        for (i = 0; i < NR_OF_CMD_RINGS; i++, mhi_cmd++, cmd_ctxt++) {
                struct mhi_ring *ring = &mhi_cmd->ring;

                ring->rp = ring->base;
                ring->wp = ring->base;
                cmd_ctxt->rp = cmd_ctxt->rbase;
                cmd_ctxt->wp = cmd_ctxt->rbase;
        }

        mhi_event = mhi_cntrl->mhi_event;
        er_ctxt = mhi_cntrl->mhi_ctxt->er_ctxt;
        for (i = 0; i < mhi_cntrl->total_ev_rings; i++, er_ctxt++,
             mhi_event++) {
                struct mhi_ring *ring = &mhi_event->ring;

                /* Skip offload events */
                if (mhi_event->offload_ev)
                        continue;

                ring->rp = ring->base;
                ring->wp = ring->base;
                er_ctxt->rp = er_ctxt->rbase;
                er_ctxt->wp = er_ctxt->rbase;
        }

        /* Move to disable state */
        write_lock_irq(&mhi_cntrl->pm_lock);
        cur_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_DISABLE);
        write_unlock_irq(&mhi_cntrl->pm_lock);
        if (unlikely(cur_state != MHI_PM_DISABLE))
                dev_err(dev, "Error moving from PM state: %s to: %s\n",
                        to_mhi_pm_state_str(cur_state),
                        to_mhi_pm_state_str(MHI_PM_DISABLE));

        dev_dbg(dev, "Exiting with PM state: %s, MHI state: %s\n",
                to_mhi_pm_state_str(mhi_cntrl->pm_state),
                TO_MHI_STATE_STR(mhi_cntrl->dev_state));

        mutex_unlock(&mhi_cntrl->pm_mutex);
}

/* Handle system error transitions */
static void mhi_pm_sys_error_transition(struct mhi_controller *mhi_cntrl)
{
        enum mhi_pm_state cur_state, prev_state;
        enum dev_st_transition next_state;
        struct mhi_event *mhi_event;
        struct mhi_cmd_ctxt *cmd_ctxt;
        struct mhi_cmd *mhi_cmd;
        struct mhi_event_ctxt *er_ctxt;
        struct device *dev = &mhi_cntrl->mhi_dev->dev;
        int ret, i;

        dev_dbg(dev, "Transitioning from PM state: %s to: %s\n",
                to_mhi_pm_state_str(mhi_cntrl->pm_state),
                to_mhi_pm_state_str(MHI_PM_SYS_ERR_PROCESS));

        /* We must notify the MHI control driver so it can clean up first */
        mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_SYS_ERROR);

        mutex_lock(&mhi_cntrl->pm_mutex);
        write_lock_irq(&mhi_cntrl->pm_lock);
        prev_state = mhi_cntrl->pm_state;
        cur_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_SYS_ERR_PROCESS);
        write_unlock_irq(&mhi_cntrl->pm_lock);

        if (cur_state != MHI_PM_SYS_ERR_PROCESS) {
                dev_err(dev, "Failed to transition from PM state: %s to: %s\n",
                        to_mhi_pm_state_str(cur_state),
                        to_mhi_pm_state_str(MHI_PM_SYS_ERR_PROCESS));
                goto exit_sys_error_transition;
        }

        mhi_cntrl->ee = MHI_EE_DISABLE_TRANSITION;
        mhi_cntrl->dev_state = MHI_STATE_RESET;

        /* Wake up threads waiting for state transition */
        wake_up_all(&mhi_cntrl->state_event);

        /* Trigger MHI RESET so that the device will not access host memory */
        if (MHI_REG_ACCESS_VALID(prev_state)) {
                u32 in_reset = -1;
                unsigned long timeout = msecs_to_jiffies(mhi_cntrl->timeout_ms);

                dev_dbg(dev, "Triggering MHI Reset in device\n");
                mhi_set_mhi_state(mhi_cntrl, MHI_STATE_RESET);

                /* Wait for the reset bit to be cleared by the device */
                ret = wait_event_timeout(mhi_cntrl->state_event,
                                         mhi_read_reg_field(mhi_cntrl,
                                                            mhi_cntrl->regs,
                                                            MHICTRL,
                                                            MHICTRL_RESET_MASK,
                                                            MHICTRL_RESET_SHIFT,
                                                            &in_reset) ||
                                         !in_reset, timeout);
                if (!ret || in_reset) {
                        dev_err(dev, "Device failed to exit MHI Reset state\n");
                        goto exit_sys_error_transition;
                }

                /*
                 * Device will clear BHI_INTVEC as a part of RESET processing,
                 * hence re-program it
                 */
                mhi_write_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_INTVEC, 0);
        }

        dev_dbg(dev,
                "Waiting for all pending event ring processing to complete\n");
        mhi_event = mhi_cntrl->mhi_event;
        for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
                if (mhi_event->offload_ev)
                        continue;
                tasklet_kill(&mhi_event->task);
        }

        /* Release lock and wait for all pending threads to complete */
        mutex_unlock(&mhi_cntrl->pm_mutex);
        dev_dbg(dev, "Waiting for all pending threads to complete\n");
        wake_up_all(&mhi_cntrl->state_event);

        dev_dbg(dev, "Reset all active channels and remove MHI devices\n");
        device_for_each_child(&mhi_cntrl->mhi_dev->dev, NULL, mhi_destroy_device);

        mutex_lock(&mhi_cntrl->pm_mutex);

        WARN_ON(atomic_read(&mhi_cntrl->dev_wake));
        WARN_ON(atomic_read(&mhi_cntrl->pending_pkts));

        /* Reset the ev rings and cmd rings */
        dev_dbg(dev, "Resetting EV CTXT and CMD CTXT\n");
        mhi_cmd = mhi_cntrl->mhi_cmd;
        cmd_ctxt = mhi_cntrl->mhi_ctxt->cmd_ctxt;
        for (i = 0; i < NR_OF_CMD_RINGS; i++, mhi_cmd++, cmd_ctxt++) {
                struct mhi_ring *ring = &mhi_cmd->ring;

                ring->rp = ring->base;
                ring->wp = ring->base;
                cmd_ctxt->rp = cmd_ctxt->rbase;
                cmd_ctxt->wp = cmd_ctxt->rbase;
        }

        mhi_event = mhi_cntrl->mhi_event;
        er_ctxt = mhi_cntrl->mhi_ctxt->er_ctxt;
        for (i = 0; i < mhi_cntrl->total_ev_rings; i++, er_ctxt++,
             mhi_event++) {
                struct mhi_ring *ring = &mhi_event->ring;

                /* Skip offload events */
                if (mhi_event->offload_ev)
                        continue;

                ring->rp = ring->base;
                ring->wp = ring->base;
                er_ctxt->rp = er_ctxt->rbase;
                er_ctxt->wp = er_ctxt->rbase;
        }

        /* Transition to next state */
        if (MHI_IN_PBL(mhi_get_exec_env(mhi_cntrl))) {
                write_lock_irq(&mhi_cntrl->pm_lock);
                cur_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_POR);
                write_unlock_irq(&mhi_cntrl->pm_lock);
                if (cur_state != MHI_PM_POR) {
                        dev_err(dev, "Error moving to state %s from %s\n",
                                to_mhi_pm_state_str(MHI_PM_POR),
                                to_mhi_pm_state_str(cur_state));
                        goto exit_sys_error_transition;
                }
                next_state = DEV_ST_TRANSITION_PBL;
        } else {
                next_state = DEV_ST_TRANSITION_READY;
        }

        mhi_queue_state_transition(mhi_cntrl, next_state);

exit_sys_error_transition:
        dev_dbg(dev, "Exiting with PM state: %s, MHI state: %s\n",
                to_mhi_pm_state_str(mhi_cntrl->pm_state),
                TO_MHI_STATE_STR(mhi_cntrl->dev_state));

        mutex_unlock(&mhi_cntrl->pm_mutex);
}

/* Queue a new work item and schedule work */
int mhi_queue_state_transition(struct mhi_controller *mhi_cntrl,
                               enum dev_st_transition state)
{
        struct state_transition *item = kmalloc(sizeof(*item), GFP_ATOMIC);
        unsigned long flags;

        if (!item)
                return -ENOMEM;

        item->state = state;
        spin_lock_irqsave(&mhi_cntrl->transition_lock, flags);
        list_add_tail(&item->node, &mhi_cntrl->transition_list);
        spin_unlock_irqrestore(&mhi_cntrl->transition_lock, flags);

        queue_work(mhi_cntrl->hiprio_wq, &mhi_cntrl->st_worker);

        return 0;
}

/* SYS_ERR handler */
void mhi_pm_sys_err_handler(struct mhi_controller *mhi_cntrl)
{
        struct device *dev = &mhi_cntrl->mhi_dev->dev;

        /* Skip if the controller supports RDDM */
        if (mhi_cntrl->rddm_image) {
                dev_dbg(dev, "Controller supports RDDM, skip SYS_ERROR\n");
                return;
        }

        mhi_queue_state_transition(mhi_cntrl, DEV_ST_TRANSITION_SYS_ERR);
}

/* Device State Transition worker */
void mhi_pm_st_worker(struct work_struct *work)
{
        struct state_transition *itr, *tmp;
        LIST_HEAD(head);
        struct mhi_controller *mhi_cntrl = container_of(work,
                                                        struct mhi_controller,
                                                        st_worker);
        struct device *dev = &mhi_cntrl->mhi_dev->dev;

        spin_lock_irq(&mhi_cntrl->transition_lock);
        list_splice_tail_init(&mhi_cntrl->transition_list, &head);
        spin_unlock_irq(&mhi_cntrl->transition_lock);

        list_for_each_entry_safe(itr, tmp, &head, node) {
                list_del(&itr->node);
                dev_dbg(dev, "Handling state transition: %s\n",
                        TO_DEV_STATE_TRANS_STR(itr->state));

                switch (itr->state) {
                case DEV_ST_TRANSITION_PBL:
                        write_lock_irq(&mhi_cntrl->pm_lock);
                        if (MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state))
                                mhi_cntrl->ee = mhi_get_exec_env(mhi_cntrl);
                        write_unlock_irq(&mhi_cntrl->pm_lock);
                        mhi_fw_load_handler(mhi_cntrl);
                        break;
                case DEV_ST_TRANSITION_SBL:
                        write_lock_irq(&mhi_cntrl->pm_lock);
                        mhi_cntrl->ee = MHI_EE_SBL;
                        write_unlock_irq(&mhi_cntrl->pm_lock);
                        /*
                         * The MHI devices are only created when the client
                         * device switches its Execution Environment (EE) to
                         * either the SBL or AMSS state
                         */
                        mhi_create_devices(mhi_cntrl);
                        if (mhi_cntrl->fbc_download)
                                mhi_download_amss_image(mhi_cntrl);
                        break;
                case DEV_ST_TRANSITION_MISSION_MODE:
                        mhi_pm_mission_mode_transition(mhi_cntrl);
                        break;
                case DEV_ST_TRANSITION_FP:
                        write_lock_irq(&mhi_cntrl->pm_lock);
                        mhi_cntrl->ee = MHI_EE_FP;
                        write_unlock_irq(&mhi_cntrl->pm_lock);
                        mhi_create_devices(mhi_cntrl);
                        break;
                case DEV_ST_TRANSITION_READY:
                        mhi_ready_state_transition(mhi_cntrl);
                        break;
                case DEV_ST_TRANSITION_SYS_ERR:
                        mhi_pm_sys_error_transition(mhi_cntrl);
                        break;
                case DEV_ST_TRANSITION_DISABLE:
                        mhi_pm_disable_transition(mhi_cntrl);
                        break;
                default:
                        break;
                }
                kfree(itr);
        }
}

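/* Suspend the device by moving it to the M3 state */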
int mhi_pm_suspend(struct mhi_controller *mhi_cntrl)
{
        struct mhi_chan *itr, *tmp;
        struct device *dev = &mhi_cntrl->mhi_dev->dev;
        enum mhi_pm_state new_state;
        int ret;

        if (mhi_cntrl->pm_state == MHI_PM_DISABLE)
                return -EINVAL;

        if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state))
                return -EIO;

        /* Return busy if there are any pending resources */
        if (atomic_read(&mhi_cntrl->dev_wake) ||
            atomic_read(&mhi_cntrl->pending_pkts))
                return -EBUSY;

        /* Take MHI out of M2 state */
        read_lock_bh(&mhi_cntrl->pm_lock);
        mhi_cntrl->wake_get(mhi_cntrl, false);
        read_unlock_bh(&mhi_cntrl->pm_lock);

        ret = wait_event_timeout(mhi_cntrl->state_event,
                                 mhi_cntrl->dev_state == MHI_STATE_M0 ||
                                 mhi_cntrl->dev_state == MHI_STATE_M1 ||
                                 MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state),
                                 msecs_to_jiffies(mhi_cntrl->timeout_ms));

        read_lock_bh(&mhi_cntrl->pm_lock);
        mhi_cntrl->wake_put(mhi_cntrl, false);
        read_unlock_bh(&mhi_cntrl->pm_lock);

        if (!ret || MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
                dev_err(dev, "Could not enter M0/M1 state\n");
                return -EIO;
        }

        write_lock_irq(&mhi_cntrl->pm_lock);

        if (atomic_read(&mhi_cntrl->dev_wake) ||
            atomic_read(&mhi_cntrl->pending_pkts)) {
                write_unlock_irq(&mhi_cntrl->pm_lock);
                return -EBUSY;
        }

        dev_dbg(dev, "Allowing M3 transition\n");
        new_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_M3_ENTER);
        if (new_state != MHI_PM_M3_ENTER) {
                write_unlock_irq(&mhi_cntrl->pm_lock);
                dev_err(dev,
                        "Error setting to PM state: %s from: %s\n",
                        to_mhi_pm_state_str(MHI_PM_M3_ENTER),
                        to_mhi_pm_state_str(mhi_cntrl->pm_state));
                return -EIO;
        }

        /* Set MHI to M3 and wait for completion */
        mhi_set_mhi_state(mhi_cntrl, MHI_STATE_M3);
        write_unlock_irq(&mhi_cntrl->pm_lock);
        dev_dbg(dev, "Waiting for M3 completion\n");

        ret = wait_event_timeout(mhi_cntrl->state_event,
                                 mhi_cntrl->dev_state == MHI_STATE_M3 ||
                                 MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state),
                                 msecs_to_jiffies(mhi_cntrl->timeout_ms));

        if (!ret || MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
                dev_err(dev,
                        "Did not enter M3 state, MHI state: %s, PM state: %s\n",
                        TO_MHI_STATE_STR(mhi_cntrl->dev_state),
                        to_mhi_pm_state_str(mhi_cntrl->pm_state));
                return -EIO;
        }

        /* Notify clients about entering LPM */
        list_for_each_entry_safe(itr, tmp, &mhi_cntrl->lpm_chans, node) {
                mutex_lock(&itr->mutex);
                if (itr->mhi_dev)
                        mhi_notify(itr->mhi_dev, MHI_CB_LPM_ENTER);
                mutex_unlock(&itr->mutex);
        }

        return 0;
}
EXPORT_SYMBOL_GPL(mhi_pm_suspend);

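/* Resume the device from the M3 state back to M0 */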
int mhi_pm_resume(struct mhi_controller *mhi_cntrl)
{
        struct mhi_chan *itr, *tmp;
        struct device *dev = &mhi_cntrl->mhi_dev->dev;
        enum mhi_pm_state cur_state;
        int ret;

        dev_dbg(dev, "Entered with PM state: %s, MHI state: %s\n",
                to_mhi_pm_state_str(mhi_cntrl->pm_state),
                TO_MHI_STATE_STR(mhi_cntrl->dev_state));

        if (mhi_cntrl->pm_state == MHI_PM_DISABLE)
                return 0;

        if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state))
                return -EIO;

        if (mhi_get_mhi_state(mhi_cntrl) != MHI_STATE_M3)
                return -EINVAL;

        /* Notify clients about exiting LPM */
        list_for_each_entry_safe(itr, tmp, &mhi_cntrl->lpm_chans, node) {
                mutex_lock(&itr->mutex);
                if (itr->mhi_dev)
                        mhi_notify(itr->mhi_dev, MHI_CB_LPM_EXIT);
                mutex_unlock(&itr->mutex);
        }

        write_lock_irq(&mhi_cntrl->pm_lock);
        cur_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_M3_EXIT);
        if (cur_state != MHI_PM_M3_EXIT) {
                write_unlock_irq(&mhi_cntrl->pm_lock);
                dev_info(dev,
                         "Error setting to PM state: %s from: %s\n",
                         to_mhi_pm_state_str(MHI_PM_M3_EXIT),
                         to_mhi_pm_state_str(mhi_cntrl->pm_state));
                return -EIO;
        }

        /* Set MHI to M0 and wait for completion */
        mhi_set_mhi_state(mhi_cntrl, MHI_STATE_M0);
        write_unlock_irq(&mhi_cntrl->pm_lock);

        ret = wait_event_timeout(mhi_cntrl->state_event,
                                 mhi_cntrl->dev_state == MHI_STATE_M0 ||
                                 mhi_cntrl->dev_state == MHI_STATE_M2 ||
                                 MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state),
                                 msecs_to_jiffies(mhi_cntrl->timeout_ms));

        if (!ret || MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
                dev_err(dev,
                        "Did not enter M0 state, MHI state: %s, PM state: %s\n",
                        TO_MHI_STATE_STR(mhi_cntrl->dev_state),
                        to_mhi_pm_state_str(mhi_cntrl->pm_state));
                return -EIO;
        }

        return 0;
}
EXPORT_SYMBOL_GPL(mhi_pm_resume);

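/* Assert device wake and wait until the device enters M0, dropping the wake
 * vote on failure
 */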
int __mhi_device_get_sync(struct mhi_controller *mhi_cntrl)
{
        int ret;

        /* Wake up the device */
        read_lock_bh(&mhi_cntrl->pm_lock);
        if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
                read_unlock_bh(&mhi_cntrl->pm_lock);
                return -EIO;
        }
        mhi_cntrl->wake_get(mhi_cntrl, true);
        if (MHI_PM_IN_SUSPEND_STATE(mhi_cntrl->pm_state))
                mhi_trigger_resume(mhi_cntrl);
        read_unlock_bh(&mhi_cntrl->pm_lock);

        ret = wait_event_timeout(mhi_cntrl->state_event,
                                 mhi_cntrl->pm_state == MHI_PM_M0 ||
                                 MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state),
                                 msecs_to_jiffies(mhi_cntrl->timeout_ms));

        if (!ret || MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
                read_lock_bh(&mhi_cntrl->pm_lock);
                mhi_cntrl->wake_put(mhi_cntrl, false);
                read_unlock_bh(&mhi_cntrl->pm_lock);
                return -EIO;
        }

        return 0;
}

/* Assert device wake db */
static void mhi_assert_dev_wake(struct mhi_controller *mhi_cntrl, bool force)
{
        unsigned long flags;

        /*
         * If force flag is set, then increment the wake count value and
         * ring wake db
         */
        if (unlikely(force)) {
                spin_lock_irqsave(&mhi_cntrl->wlock, flags);
                atomic_inc(&mhi_cntrl->dev_wake);
                if (MHI_WAKE_DB_FORCE_SET_VALID(mhi_cntrl->pm_state) &&
                    !mhi_cntrl->wake_set) {
                        mhi_write_db(mhi_cntrl, mhi_cntrl->wake_db, 1);
                        mhi_cntrl->wake_set = true;
                }
                spin_unlock_irqrestore(&mhi_cntrl->wlock, flags);
        } else {
                /*
                 * If resources are already requested, then just increment
                 * the wake count value and return
                 */
                if (likely(atomic_add_unless(&mhi_cntrl->dev_wake, 1, 0)))
                        return;

                spin_lock_irqsave(&mhi_cntrl->wlock, flags);
                if ((atomic_inc_return(&mhi_cntrl->dev_wake) == 1) &&
                    MHI_WAKE_DB_SET_VALID(mhi_cntrl->pm_state) &&
                    !mhi_cntrl->wake_set) {
                        mhi_write_db(mhi_cntrl, mhi_cntrl->wake_db, 1);
                        mhi_cntrl->wake_set = true;
                }
                spin_unlock_irqrestore(&mhi_cntrl->wlock, flags);
        }
}

/* De-assert device wake db */
static void mhi_deassert_dev_wake(struct mhi_controller *mhi_cntrl,
                                  bool override)
{
        unsigned long flags;

        /*
         * Only continue if there is a single resource, else just decrement
         * and return
         */
        if (likely(atomic_add_unless(&mhi_cntrl->dev_wake, -1, 1)))
                return;

        spin_lock_irqsave(&mhi_cntrl->wlock, flags);
        if ((atomic_dec_return(&mhi_cntrl->dev_wake) == 0) &&
            MHI_WAKE_DB_CLEAR_VALID(mhi_cntrl->pm_state) && !override &&
            mhi_cntrl->wake_set) {
                mhi_write_db(mhi_cntrl, mhi_cntrl->wake_db, 0);
                mhi_cntrl->wake_set = false;
        }
        spin_unlock_irqrestore(&mhi_cntrl->wlock, flags);
}

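/* Set up IRQs and BHI, then queue the first state transition to start the
 * power up sequence without waiting for it to complete
 */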
int mhi_async_power_up(struct mhi_controller *mhi_cntrl)
{
        enum mhi_state state;
        enum mhi_ee_type current_ee;
        enum dev_st_transition next_state;
        struct device *dev = &mhi_cntrl->mhi_dev->dev;
        u32 val;
        int ret;

        dev_info(dev, "Requested to power ON\n");

        /* Supply default wake routines if not provided by controller driver */
        if (!mhi_cntrl->wake_get || !mhi_cntrl->wake_put ||
            !mhi_cntrl->wake_toggle) {
                mhi_cntrl->wake_get = mhi_assert_dev_wake;
                mhi_cntrl->wake_put = mhi_deassert_dev_wake;
                mhi_cntrl->wake_toggle = (mhi_cntrl->db_access & MHI_PM_M2) ?
                        mhi_toggle_dev_wake_nop : mhi_toggle_dev_wake;
        }

        mutex_lock(&mhi_cntrl->pm_mutex);
        mhi_cntrl->pm_state = MHI_PM_DISABLE;

        ret = mhi_init_irq_setup(mhi_cntrl);
        if (ret)
                goto error_setup_irq;

        /* Setup BHI INTVEC */
        write_lock_irq(&mhi_cntrl->pm_lock);
        mhi_write_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_INTVEC, 0);
        mhi_cntrl->pm_state = MHI_PM_POR;
        mhi_cntrl->ee = MHI_EE_MAX;
        current_ee = mhi_get_exec_env(mhi_cntrl);
        write_unlock_irq(&mhi_cntrl->pm_lock);

        /* Confirm that the device is in valid exec env */
        if (!MHI_IN_PBL(current_ee) && current_ee != MHI_EE_AMSS) {
                dev_err(dev, "%s is not a valid EE for power on\n",
                        TO_MHI_EXEC_STR(current_ee));
                ret = -EIO;
                goto error_async_power_up;
        }

        state = mhi_get_mhi_state(mhi_cntrl);
        dev_dbg(dev, "Attempting power on with EE: %s, state: %s\n",
                TO_MHI_EXEC_STR(current_ee), TO_MHI_STATE_STR(state));

        if (state == MHI_STATE_SYS_ERR) {
                mhi_set_mhi_state(mhi_cntrl, MHI_STATE_RESET);
                ret = wait_event_timeout(mhi_cntrl->state_event,
                                MHI_PM_IN_FATAL_STATE(mhi_cntrl->pm_state) ||
                                        mhi_read_reg_field(mhi_cntrl,
                                                           mhi_cntrl->regs,
                                                           MHICTRL,
                                                           MHICTRL_RESET_MASK,
                                                           MHICTRL_RESET_SHIFT,
                                                           &val) ||
                                        !val,
                                msecs_to_jiffies(mhi_cntrl->timeout_ms));
                if (!ret) {
                        ret = -EIO;
                        dev_info(dev, "Failed to reset MHI due to syserr state\n");
                        goto error_async_power_up;
                }

                /*
                 * Device will clear BHI_INTVEC as a part of RESET processing,
                 * hence re-program it
                 */
                mhi_write_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_INTVEC, 0);
        }

        /* Transition to next state */
        next_state = MHI_IN_PBL(current_ee) ?
                DEV_ST_TRANSITION_PBL : DEV_ST_TRANSITION_READY;

        mhi_queue_state_transition(mhi_cntrl, next_state);

        mutex_unlock(&mhi_cntrl->pm_mutex);

        dev_info(dev, "Power on setup success\n");

        return 0;

error_async_power_up:
        mhi_deinit_free_irq(mhi_cntrl);

error_setup_irq:
        mhi_cntrl->pm_state = MHI_PM_DISABLE;
        mutex_unlock(&mhi_cntrl->pm_mutex);

        return ret;
}
EXPORT_SYMBOL_GPL(mhi_async_power_up);

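/* Power down the device and wait for the shutdown to complete */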
void mhi_power_down(struct mhi_controller *mhi_cntrl, bool graceful)
{
        enum mhi_pm_state cur_state, transition_state;
        struct device *dev = &mhi_cntrl->mhi_dev->dev;

        mutex_lock(&mhi_cntrl->pm_mutex);
        write_lock_irq(&mhi_cntrl->pm_lock);
        cur_state = mhi_cntrl->pm_state;
        if (cur_state == MHI_PM_DISABLE) {
                write_unlock_irq(&mhi_cntrl->pm_lock);
                mutex_unlock(&mhi_cntrl->pm_mutex);
                return; /* Already powered down */
        }

        /* If it's not a graceful shutdown, force MHI to linkdown state */
        transition_state = (graceful) ? MHI_PM_SHUTDOWN_PROCESS :
                           MHI_PM_LD_ERR_FATAL_DETECT;

        cur_state = mhi_tryset_pm_state(mhi_cntrl, transition_state);
        if (cur_state != transition_state) {
                dev_err(dev, "Failed to move to state: %s from: %s\n",
                        to_mhi_pm_state_str(transition_state),
                        to_mhi_pm_state_str(mhi_cntrl->pm_state));
                /* Force link down or error fatal detected state */
                mhi_cntrl->pm_state = MHI_PM_LD_ERR_FATAL_DETECT;
        }

        /* Mark device inactive to avoid any further host processing */
        mhi_cntrl->ee = MHI_EE_DISABLE_TRANSITION;
        mhi_cntrl->dev_state = MHI_STATE_RESET;

        wake_up_all(&mhi_cntrl->state_event);

        write_unlock_irq(&mhi_cntrl->pm_lock);
        mutex_unlock(&mhi_cntrl->pm_mutex);

        mhi_queue_state_transition(mhi_cntrl, DEV_ST_TRANSITION_DISABLE);

        /* Wait for shutdown to complete */
        flush_work(&mhi_cntrl->st_worker);

        free_irq(mhi_cntrl->irq[0], mhi_cntrl);
}
EXPORT_SYMBOL_GPL(mhi_power_down);

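/* Power up the device and wait until it enters mission mode, powering it
 * back down on timeout
 */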
int mhi_sync_power_up(struct mhi_controller *mhi_cntrl)
{
        int ret = mhi_async_power_up(mhi_cntrl);

        if (ret)
                return ret;

        wait_event_timeout(mhi_cntrl->state_event,
                           MHI_IN_MISSION_MODE(mhi_cntrl->ee) ||
                           MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state),
                           msecs_to_jiffies(mhi_cntrl->timeout_ms));

        ret = (MHI_IN_MISSION_MODE(mhi_cntrl->ee)) ? 0 : -ETIMEDOUT;
        if (ret)
                mhi_power_down(mhi_cntrl, false);

        return ret;
}
EXPORT_SYMBOL(mhi_sync_power_up);

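/* Force the device into RDDM mode by triggering SYS_ERR */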
int mhi_force_rddm_mode(struct mhi_controller *mhi_cntrl)
{
        struct device *dev = &mhi_cntrl->mhi_dev->dev;
        int ret;

        /* Check if device is already in RDDM */
        if (mhi_cntrl->ee == MHI_EE_RDDM)
                return 0;

        dev_dbg(dev, "Triggering SYS_ERR to force RDDM state\n");
        mhi_set_mhi_state(mhi_cntrl, MHI_STATE_SYS_ERR);

        /* Wait for RDDM event */
        ret = wait_event_timeout(mhi_cntrl->state_event,
                                 mhi_cntrl->ee == MHI_EE_RDDM,
                                 msecs_to_jiffies(mhi_cntrl->timeout_ms));
        ret = ret ? 0 : -EIO;

        return ret;
}
EXPORT_SYMBOL_GPL(mhi_force_rddm_mode);

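/* Grab a device wake vote on behalf of a client device, resuming the device
 * if it is suspended
 */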
void mhi_device_get(struct mhi_device *mhi_dev)
{
        struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;

        mhi_dev->dev_wake++;
        read_lock_bh(&mhi_cntrl->pm_lock);
        if (MHI_PM_IN_SUSPEND_STATE(mhi_cntrl->pm_state))
                mhi_trigger_resume(mhi_cntrl);

        mhi_cntrl->wake_get(mhi_cntrl, true);
        read_unlock_bh(&mhi_cntrl->pm_lock);
}
EXPORT_SYMBOL_GPL(mhi_device_get);

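/* Grab a device wake vote and wait for the device to enter M0 */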
int mhi_device_get_sync(struct mhi_device *mhi_dev)
{
        struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
        int ret;

        ret = __mhi_device_get_sync(mhi_cntrl);
        if (!ret)
                mhi_dev->dev_wake++;

        return ret;
}
EXPORT_SYMBOL_GPL(mhi_device_get_sync);

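/* Release a client device's wake vote */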
void mhi_device_put(struct mhi_device *mhi_dev)
{
        struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;

        mhi_dev->dev_wake--;
        read_lock_bh(&mhi_cntrl->pm_lock);
        if (MHI_PM_IN_SUSPEND_STATE(mhi_cntrl->pm_state))
                mhi_trigger_resume(mhi_cntrl);

        mhi_cntrl->wake_put(mhi_cntrl, false);
        read_unlock_bh(&mhi_cntrl->pm_lock);
}
EXPORT_SYMBOL_GPL(mhi_device_put);