linux/drivers/bus/mhi/core/main.c
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
   4 *
   5 */
   6
   7#include <linux/device.h>
   8#include <linux/dma-direction.h>
   9#include <linux/dma-mapping.h>
  10#include <linux/interrupt.h>
  11#include <linux/list.h>
  12#include <linux/mhi.h>
  13#include <linux/module.h>
  14#include <linux/skbuff.h>
  15#include <linux/slab.h>
  16#include "internal.h"
  17
  18int __must_check mhi_read_reg(struct mhi_controller *mhi_cntrl,
  19                              void __iomem *base, u32 offset, u32 *out)
  20{
  21        return mhi_cntrl->read_reg(mhi_cntrl, base + offset, out);
  22}
  23
  24int __must_check mhi_read_reg_field(struct mhi_controller *mhi_cntrl,
  25                                    void __iomem *base, u32 offset,
  26                                    u32 mask, u32 shift, u32 *out)
  27{
  28        u32 tmp;
  29        int ret;
  30
  31        ret = mhi_read_reg(mhi_cntrl, base, offset, &tmp);
  32        if (ret)
  33                return ret;
  34
  35        *out = (tmp & mask) >> shift;
  36
  37        return 0;
  38}
  39
  40void mhi_write_reg(struct mhi_controller *mhi_cntrl, void __iomem *base,
  41                   u32 offset, u32 val)
  42{
  43        mhi_cntrl->write_reg(mhi_cntrl, base + offset, val);
  44}
  45
  46void mhi_write_reg_field(struct mhi_controller *mhi_cntrl, void __iomem *base,
  47                         u32 offset, u32 mask, u32 shift, u32 val)
  48{
  49        int ret;
  50        u32 tmp;
  51
  52        ret = mhi_read_reg(mhi_cntrl, base, offset, &tmp);
  53        if (ret)
  54                return;
  55
  56        tmp &= ~mask;
  57        tmp |= (val << shift);
  58        mhi_write_reg(mhi_cntrl, base, offset, tmp);
  59}
  60
  61void mhi_write_db(struct mhi_controller *mhi_cntrl, void __iomem *db_addr,
  62                  dma_addr_t db_val)
  63{
  64        mhi_write_reg(mhi_cntrl, db_addr, 4, upper_32_bits(db_val));
  65        mhi_write_reg(mhi_cntrl, db_addr, 0, lower_32_bits(db_val));
  66}
  67
  68void mhi_db_brstmode(struct mhi_controller *mhi_cntrl,
  69                     struct db_cfg *db_cfg,
  70                     void __iomem *db_addr,
  71                     dma_addr_t db_val)
  72{
  73        if (db_cfg->db_mode) {
  74                db_cfg->db_val = db_val;
  75                mhi_write_db(mhi_cntrl, db_addr, db_val);
  76                db_cfg->db_mode = 0;
  77        }
  78}
  79
  80void mhi_db_brstmode_disable(struct mhi_controller *mhi_cntrl,
  81                             struct db_cfg *db_cfg,
  82                             void __iomem *db_addr,
  83                             dma_addr_t db_val)
  84{
  85        db_cfg->db_val = db_val;
  86        mhi_write_db(mhi_cntrl, db_addr, db_val);
  87}
  88
  89void mhi_ring_er_db(struct mhi_event *mhi_event)
  90{
  91        struct mhi_ring *ring = &mhi_event->ring;
  92
  93        mhi_event->db_cfg.process_db(mhi_event->mhi_cntrl, &mhi_event->db_cfg,
  94                                     ring->db_addr, *ring->ctxt_wp);
  95}
  96
  97void mhi_ring_cmd_db(struct mhi_controller *mhi_cntrl, struct mhi_cmd *mhi_cmd)
  98{
  99        dma_addr_t db;
 100        struct mhi_ring *ring = &mhi_cmd->ring;
 101
 102        db = ring->iommu_base + (ring->wp - ring->base);
 103        *ring->ctxt_wp = db;
 104        mhi_write_db(mhi_cntrl, ring->db_addr, db);
 105}
 106
 107void mhi_ring_chan_db(struct mhi_controller *mhi_cntrl,
 108                      struct mhi_chan *mhi_chan)
 109{
 110        struct mhi_ring *ring = &mhi_chan->tre_ring;
 111        dma_addr_t db;
 112
 113        db = ring->iommu_base + (ring->wp - ring->base);
 114        *ring->ctxt_wp = db;
 115        mhi_chan->db_cfg.process_db(mhi_cntrl, &mhi_chan->db_cfg,
 116                                    ring->db_addr, db);
 117}
 118
 119enum mhi_ee_type mhi_get_exec_env(struct mhi_controller *mhi_cntrl)
 120{
 121        u32 exec;
 122        int ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_EXECENV, &exec);
 123
 124        return (ret) ? MHI_EE_MAX : exec;
 125}
 126
 127enum mhi_state mhi_get_mhi_state(struct mhi_controller *mhi_cntrl)
 128{
 129        u32 state;
 130        int ret = mhi_read_reg_field(mhi_cntrl, mhi_cntrl->regs, MHISTATUS,
 131                                     MHISTATUS_MHISTATE_MASK,
 132                                     MHISTATUS_MHISTATE_SHIFT, &state);
 133        return ret ? MHI_STATE_MAX : state;
 134}
 135
 136int mhi_map_single_no_bb(struct mhi_controller *mhi_cntrl,
 137                         struct mhi_buf_info *buf_info)
 138{
 139        buf_info->p_addr = dma_map_single(mhi_cntrl->cntrl_dev,
 140                                          buf_info->v_addr, buf_info->len,
 141                                          buf_info->dir);
 142        if (dma_mapping_error(mhi_cntrl->cntrl_dev, buf_info->p_addr))
 143                return -ENOMEM;
 144
 145        return 0;
 146}
 147
 148int mhi_map_single_use_bb(struct mhi_controller *mhi_cntrl,
 149                          struct mhi_buf_info *buf_info)
 150{
 151        void *buf = mhi_alloc_coherent(mhi_cntrl, buf_info->len,
 152                                       &buf_info->p_addr, GFP_ATOMIC);
 153
 154        if (!buf)
 155                return -ENOMEM;
 156
 157        if (buf_info->dir == DMA_TO_DEVICE)
 158                memcpy(buf, buf_info->v_addr, buf_info->len);
 159
 160        buf_info->bb_addr = buf;
 161
 162        return 0;
 163}
 164
 165void mhi_unmap_single_no_bb(struct mhi_controller *mhi_cntrl,
 166                            struct mhi_buf_info *buf_info)
 167{
 168        dma_unmap_single(mhi_cntrl->cntrl_dev, buf_info->p_addr, buf_info->len,
 169                         buf_info->dir);
 170}
 171
 172void mhi_unmap_single_use_bb(struct mhi_controller *mhi_cntrl,
 173                             struct mhi_buf_info *buf_info)
 174{
 175        if (buf_info->dir == DMA_FROM_DEVICE)
 176                memcpy(buf_info->v_addr, buf_info->bb_addr, buf_info->len);
 177
 178        mhi_free_coherent(mhi_cntrl, buf_info->len, buf_info->bb_addr,
 179                          buf_info->p_addr);
 180}
 181
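/*
 * Number of free elements in a ring. One element is always kept unused so
 * that a completely full ring (WP one element behind RP) can be told apart
 * from an empty ring (WP == RP).
 */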
 182static int get_nr_avail_ring_elements(struct mhi_controller *mhi_cntrl,
 183                                      struct mhi_ring *ring)
 184{
 185        int nr_el;
 186
 187        if (ring->wp < ring->rp) {
 188                nr_el = ((ring->rp - ring->wp) / ring->el_size) - 1;
 189        } else {
 190                nr_el = (ring->rp - ring->base) / ring->el_size;
 191                nr_el += ((ring->base + ring->len - ring->wp) /
 192                          ring->el_size) - 1;
 193        }
 194
 195        return nr_el;
 196}
 197
 198static void *mhi_to_virtual(struct mhi_ring *ring, dma_addr_t addr)
 199{
 200        return (addr - ring->iommu_base) + ring->base;
 201}
 202
 203static void mhi_add_ring_element(struct mhi_controller *mhi_cntrl,
 204                                 struct mhi_ring *ring)
 205{
 206        ring->wp += ring->el_size;
 207        if (ring->wp >= (ring->base + ring->len))
 208                ring->wp = ring->base;
 209        /* Ensure the updated WP is visible to other CPUs */
 210        smp_wmb();
 211}
 212
 213static void mhi_del_ring_element(struct mhi_controller *mhi_cntrl,
 214                                 struct mhi_ring *ring)
 215{
 216        ring->rp += ring->el_size;
 217        if (ring->rp >= (ring->base + ring->len))
 218                ring->rp = ring->base;
 219        /* Ensure the updated RP is visible to other CPUs */
 220        smp_wmb();
 221}
 222
 223int mhi_destroy_device(struct device *dev, void *data)
 224{
 225        struct mhi_device *mhi_dev;
 226        struct mhi_controller *mhi_cntrl;
 227
 228        if (dev->bus != &mhi_bus_type)
 229                return 0;
 230
 231        mhi_dev = to_mhi_device(dev);
 232        mhi_cntrl = mhi_dev->mhi_cntrl;
 233
 234        /* Only destroy virtual devices that are attached to the bus */
 235        if (mhi_dev->dev_type == MHI_DEVICE_CONTROLLER)
 236                return 0;
 237
 238        /*
 239         * For the suspend and resume case, this function will get called
 240         * without mhi_unregister_controller(). Hence, we need to drop the
 241         * references to mhi_dev created for ul and dl channels. We can
 242         * be sure that there will be no instances of mhi_dev left after
 243         * this.
 244         */
 245        if (mhi_dev->ul_chan)
 246                put_device(&mhi_dev->ul_chan->mhi_dev->dev);
 247
 248        if (mhi_dev->dl_chan)
 249                put_device(&mhi_dev->dl_chan->mhi_dev->dev);
 250
 251        dev_dbg(&mhi_cntrl->mhi_dev->dev, "destroy device for chan:%s\n",
 252                 mhi_dev->name);
 253
 254        /* Notify the client and remove the device from MHI bus */
 255        device_del(dev);
 256        put_device(dev);
 257
 258        return 0;
 259}
 260
 261void mhi_notify(struct mhi_device *mhi_dev, enum mhi_callback cb_reason)
 262{
 263        struct mhi_driver *mhi_drv;
 264
 265        if (!mhi_dev->dev.driver)
 266                return;
 267
 268        mhi_drv = to_mhi_driver(mhi_dev->dev.driver);
 269
 270        if (mhi_drv->status_cb)
 271                mhi_drv->status_cb(mhi_dev, cb_reason);
 272}
 273EXPORT_SYMBOL_GPL(mhi_notify);
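
/*
 * Illustrative sketch (not part of the original driver): a client driver
 * servicing a client-managed event ring would typically react to the
 * MHI_CB_PENDING_DATA notification delivered via mhi_notify() by draining
 * its DL channel with mhi_poll(). Since the callback may run from hard
 * interrupt context, the example defers the actual polling to a work item.
 * The example_ctx structure, its rx_work member and all example_* names
 * are hypothetical.
 *
 *	static void example_status_cb(struct mhi_device *mhi_dev,
 *				      enum mhi_callback cb)
 *	{
 *		struct example_ctx *ctx = dev_get_drvdata(&mhi_dev->dev);
 *
 *		if (cb == MHI_CB_PENDING_DATA)
 *			schedule_work(&ctx->rx_work);
 *	}
 *
 * The worker would then call mhi_poll(mhi_dev, budget) to process the
 * pending events.
 */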
 274
 275/* Bind MHI channels to MHI devices */
 276void mhi_create_devices(struct mhi_controller *mhi_cntrl)
 277{
 278        struct mhi_chan *mhi_chan;
 279        struct mhi_device *mhi_dev;
 280        struct device *dev = &mhi_cntrl->mhi_dev->dev;
 281        int i, ret;
 282
 283        mhi_chan = mhi_cntrl->mhi_chan;
 284        for (i = 0; i < mhi_cntrl->max_chan; i++, mhi_chan++) {
 285                if (!mhi_chan->configured || mhi_chan->mhi_dev ||
 286                    !(mhi_chan->ee_mask & BIT(mhi_cntrl->ee)))
 287                        continue;
 288                mhi_dev = mhi_alloc_device(mhi_cntrl);
 289                if (IS_ERR(mhi_dev))
 290                        return;
 291
 292                mhi_dev->dev_type = MHI_DEVICE_XFER;
 293                switch (mhi_chan->dir) {
 294                case DMA_TO_DEVICE:
 295                        mhi_dev->ul_chan = mhi_chan;
 296                        mhi_dev->ul_chan_id = mhi_chan->chan;
 297                        break;
 298                case DMA_FROM_DEVICE:
 299                        /* We use dl_chan as offload channels */
 300                        mhi_dev->dl_chan = mhi_chan;
 301                        mhi_dev->dl_chan_id = mhi_chan->chan;
 302                        break;
 303                default:
 304                        dev_err(dev, "Direction not supported\n");
 305                        put_device(&mhi_dev->dev);
 306                        return;
 307                }
 308
 309                get_device(&mhi_dev->dev);
 310                mhi_chan->mhi_dev = mhi_dev;
 311
 312                /* Check next channel if it matches */
 313                if ((i + 1) < mhi_cntrl->max_chan && mhi_chan[1].configured) {
 314                        if (!strcmp(mhi_chan[1].name, mhi_chan->name)) {
 315                                i++;
 316                                mhi_chan++;
 317                                if (mhi_chan->dir == DMA_TO_DEVICE) {
 318                                        mhi_dev->ul_chan = mhi_chan;
 319                                        mhi_dev->ul_chan_id = mhi_chan->chan;
 320                                } else {
 321                                        mhi_dev->dl_chan = mhi_chan;
 322                                        mhi_dev->dl_chan_id = mhi_chan->chan;
 323                                }
 324                                get_device(&mhi_dev->dev);
 325                                mhi_chan->mhi_dev = mhi_dev;
 326                        }
 327                }
 328
 329                /* Channel name is the same for both UL and DL */
 330                mhi_dev->name = mhi_chan->name;
 331                dev_set_name(&mhi_dev->dev, "%s_%s",
 332                             dev_name(mhi_cntrl->cntrl_dev),
 333                             mhi_dev->name);
 334
 335                /* Init wakeup source if available */
 336                if (mhi_dev->dl_chan && mhi_dev->dl_chan->wake_capable)
 337                        device_init_wakeup(&mhi_dev->dev, true);
 338
 339                ret = device_add(&mhi_dev->dev);
 340                if (ret)
 341                        put_device(&mhi_dev->dev);
 342        }
 343}
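
/*
 * Illustrative sketch (not part of the original driver): the mhi_device
 * instances created above are matched against client drivers by channel
 * name. A minimal client driver registration could look roughly like the
 * following; the "EXAMPLE" channel name and all example_* symbols are
 * hypothetical.
 *
 *	static const struct mhi_device_id example_id_table[] = {
 *		{ .chan = "EXAMPLE" },
 *		{}
 *	};
 *
 *	static struct mhi_driver example_driver = {
 *		.id_table = example_id_table,
 *		.probe = example_probe,
 *		.remove = example_remove,
 *		.ul_xfer_cb = example_ul_cb,
 *		.dl_xfer_cb = example_dl_cb,
 *		.status_cb = example_status_cb,
 *		.driver = {
 *			.name = "example_mhi_client",
 *		},
 *	};
 *	module_mhi_driver(example_driver);
 */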
 344
 345irqreturn_t mhi_irq_handler(int irq_number, void *dev)
 346{
 347        struct mhi_event *mhi_event = dev;
 348        struct mhi_controller *mhi_cntrl = mhi_event->mhi_cntrl;
 349        struct mhi_event_ctxt *er_ctxt =
 350                &mhi_cntrl->mhi_ctxt->er_ctxt[mhi_event->er_index];
 351        struct mhi_ring *ev_ring = &mhi_event->ring;
 352        void *dev_rp = mhi_to_virtual(ev_ring, er_ctxt->rp);
 353
 354        /* Only proceed if event ring has pending events */
 355        if (ev_ring->rp == dev_rp)
 356                return IRQ_HANDLED;
 357
 358        /* For client managed event ring, notify pending data */
 359        if (mhi_event->cl_manage) {
 360                struct mhi_chan *mhi_chan = mhi_event->mhi_chan;
 361                struct mhi_device *mhi_dev = mhi_chan->mhi_dev;
 362
 363                if (mhi_dev)
 364                        mhi_notify(mhi_dev, MHI_CB_PENDING_DATA);
 365        } else {
 366                tasklet_schedule(&mhi_event->task);
 367        }
 368
 369        return IRQ_HANDLED;
 370}
 371
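/*
 * Threaded half of the MHI controller interrupt. The hard IRQ handler
 * (mhi_intvec_handler() below) only wakes up waiters and defers to this
 * thread, which samples the execution environment and MHI state to detect
 * SYS_ERR or an RDDM transition.
 */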
 372irqreturn_t mhi_intvec_threaded_handler(int irq_number, void *priv)
 373{
 374        struct mhi_controller *mhi_cntrl = priv;
 375        struct device *dev = &mhi_cntrl->mhi_dev->dev;
 376        enum mhi_state state = MHI_STATE_MAX;
 377        enum mhi_pm_state pm_state = 0;
 378        enum mhi_ee_type ee = 0;
 379
 380        write_lock_irq(&mhi_cntrl->pm_lock);
 381        if (!MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) {
 382                write_unlock_irq(&mhi_cntrl->pm_lock);
 383                goto exit_intvec;
 384        }
 385
 386        state = mhi_get_mhi_state(mhi_cntrl);
 387        ee = mhi_cntrl->ee;
 388        mhi_cntrl->ee = mhi_get_exec_env(mhi_cntrl);
 389        dev_dbg(dev, "local ee:%s device ee:%s dev_state:%s\n",
 390                TO_MHI_EXEC_STR(mhi_cntrl->ee), TO_MHI_EXEC_STR(ee),
 391                TO_MHI_STATE_STR(state));
 392
 393        if (state == MHI_STATE_SYS_ERR) {
 394                dev_dbg(dev, "System error detected\n");
 395                pm_state = mhi_tryset_pm_state(mhi_cntrl,
 396                                               MHI_PM_SYS_ERR_DETECT);
 397        }
 398        write_unlock_irq(&mhi_cntrl->pm_lock);
 399
 400        /* If device supports RDDM, don't bother processing SYS error */
 401        if (mhi_cntrl->rddm_image) {
 402                if (mhi_cntrl->ee == MHI_EE_RDDM && mhi_cntrl->ee != ee) {
 403                        mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_EE_RDDM);
 404                        wake_up_all(&mhi_cntrl->state_event);
 405                }
 406                goto exit_intvec;
 407        }
 408
 409        if (pm_state == MHI_PM_SYS_ERR_DETECT) {
 410                wake_up_all(&mhi_cntrl->state_event);
 411
 412                /* For fatal errors, we let controller decide next step */
 413                if (MHI_IN_PBL(ee))
 414                        mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_FATAL_ERROR);
 415                else
 416                        mhi_pm_sys_err_handler(mhi_cntrl);
 417        }
 418
 419exit_intvec:
 420
 421        return IRQ_HANDLED;
 422}
 423
 424irqreturn_t mhi_intvec_handler(int irq_number, void *dev)
 425{
 426        struct mhi_controller *mhi_cntrl = dev;
 427
 428        /* Wake up events waiting for state change */
 429        wake_up_all(&mhi_cntrl->state_event);
 430
 431        return IRQ_WAKE_THREAD;
 432}
 433
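/*
 * Return a processed event ring element back to the device: advance the
 * local RP and WP as well as the write pointer in the event ring context
 * (wrapping at the end of the ring) so the device can reuse the slot.
 */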
 434static void mhi_recycle_ev_ring_element(struct mhi_controller *mhi_cntrl,
 435                                        struct mhi_ring *ring)
 436{
 437        dma_addr_t ctxt_wp;
 438
 439        /* Update the WP */
 440        ring->wp += ring->el_size;
 441        ctxt_wp = *ring->ctxt_wp + ring->el_size;
 442
 443        if (ring->wp >= (ring->base + ring->len)) {
 444                ring->wp = ring->base;
 445                ctxt_wp = ring->iommu_base;
 446        }
 447
 448        *ring->ctxt_wp = ctxt_wp;
 449
 450        /* Update the RP */
 451        ring->rp += ring->el_size;
 452        if (ring->rp >= (ring->base + ring->len))
 453                ring->rp = ring->base;
 454
 455        /* Update to all cores */
 456        smp_wmb();
 457}
 458
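/*
 * Process a transfer completion event for a channel: walk the TRE ring
 * from the local RP up to the element the event points at, unmap buffers
 * (unless the client pre-mapped them), hand each result to the client's
 * xfer_cb, and handle OOB/DB_MODE events by re-ringing the channel
 * doorbell.
 */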
 459static int parse_xfer_event(struct mhi_controller *mhi_cntrl,
 460                            struct mhi_tre *event,
 461                            struct mhi_chan *mhi_chan)
 462{
 463        struct mhi_ring *buf_ring, *tre_ring;
 464        struct device *dev = &mhi_cntrl->mhi_dev->dev;
 465        struct mhi_result result;
 466        unsigned long flags = 0;
 467        u32 ev_code;
 468
 469        ev_code = MHI_TRE_GET_EV_CODE(event);
 470        buf_ring = &mhi_chan->buf_ring;
 471        tre_ring = &mhi_chan->tre_ring;
 472
 473        result.transaction_status = (ev_code == MHI_EV_CC_OVERFLOW) ?
 474                -EOVERFLOW : 0;
 475
 476        /*
 477         * If it's a DB event, then we need to grab the lock as a
 478         * writer and with preemption disabled, because we have to
 479         * update the DB register and another thread could be
 480         * trying to do the same.
 481         */
 482        if (ev_code >= MHI_EV_CC_OOB)
 483                write_lock_irqsave(&mhi_chan->lock, flags);
 484        else
 485                read_lock_bh(&mhi_chan->lock);
 486
 487        if (mhi_chan->ch_state != MHI_CH_STATE_ENABLED)
 488                goto end_process_tx_event;
 489
 490        switch (ev_code) {
 491        case MHI_EV_CC_OVERFLOW:
 492        case MHI_EV_CC_EOB:
 493        case MHI_EV_CC_EOT:
 494        {
 495                dma_addr_t ptr = MHI_TRE_GET_EV_PTR(event);
 496                struct mhi_tre *local_rp, *ev_tre;
 497                void *dev_rp;
 498                struct mhi_buf_info *buf_info;
 499                u16 xfer_len;
 500
 501                /* Get the TRB this event points to */
 502                ev_tre = mhi_to_virtual(tre_ring, ptr);
 503
 504                dev_rp = ev_tre + 1;
 505                if (dev_rp >= (tre_ring->base + tre_ring->len))
 506                        dev_rp = tre_ring->base;
 507
 508                result.dir = mhi_chan->dir;
 509
 510                local_rp = tre_ring->rp;
 511                while (local_rp != dev_rp) {
 512                        buf_info = buf_ring->rp;
 513                        /* If it's the last TRE, get length from the event */
 514                        if (local_rp == ev_tre)
 515                                xfer_len = MHI_TRE_GET_EV_LEN(event);
 516                        else
 517                                xfer_len = buf_info->len;
 518
 519                        /* Unmap if it's not pre-mapped by client */
 520                        if (likely(!buf_info->pre_mapped))
 521                                mhi_cntrl->unmap_single(mhi_cntrl, buf_info);
 522
 523                        result.buf_addr = buf_info->cb_buf;
 524
 525                        /* truncate to buf len if xfer_len is larger */
 526                        result.bytes_xferd =
 527                                min_t(u16, xfer_len, buf_info->len);
 528                        mhi_del_ring_element(mhi_cntrl, buf_ring);
 529                        mhi_del_ring_element(mhi_cntrl, tre_ring);
 530                        local_rp = tre_ring->rp;
 531
 532                        /* notify client */
 533                        mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result);
 534
 535                        if (mhi_chan->dir == DMA_TO_DEVICE)
 536                                atomic_dec(&mhi_cntrl->pending_pkts);
 537
 538                        /*
 539                         * Recycle the buffer if it is pre-allocated.
 540                         * If recycling fails, there is not much we can
 541                         * do apart from dropping the packet.
 542                         */
 543                        if (mhi_chan->pre_alloc) {
 544                                if (mhi_queue_buf(mhi_chan->mhi_dev,
 545                                                  mhi_chan->dir,
 546                                                  buf_info->cb_buf,
 547                                                  buf_info->len, MHI_EOT)) {
 548                                        dev_err(dev,
 549                                                "Error recycling buffer for chan:%d\n",
 550                                                mhi_chan->chan);
 551                                        kfree(buf_info->cb_buf);
 552                                }
 553                        }
 554                }
 555                break;
 556        } /* CC_EOT */
 557        case MHI_EV_CC_OOB:
 558        case MHI_EV_CC_DB_MODE:
 559        {
 560                unsigned long flags;
 561
 562                mhi_chan->db_cfg.db_mode = 1;
 563                read_lock_irqsave(&mhi_cntrl->pm_lock, flags);
 564                if (tre_ring->wp != tre_ring->rp &&
 565                    MHI_DB_ACCESS_VALID(mhi_cntrl)) {
 566                        mhi_ring_chan_db(mhi_cntrl, mhi_chan);
 567                }
 568                read_unlock_irqrestore(&mhi_cntrl->pm_lock, flags);
 569                break;
 570        }
 571        case MHI_EV_CC_BAD_TRE:
 572        default:
 573                dev_err(dev, "Unknown event 0x%x\n", ev_code);
 574                break;
 575        } /* switch(MHI_EV_READ_CODE(EV_TRB_CODE,event)) */
 576
 577end_process_tx_event:
 578        if (ev_code >= MHI_EV_CC_OOB)
 579                write_unlock_irqrestore(&mhi_chan->lock, flags);
 580        else
 581                read_unlock_bh(&mhi_chan->lock);
 582
 583        return 0;
 584}
 585
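/*
 * Process an RSC transfer completion event. The event carries a cookie
 * which is the offset of the buffer descriptor within the buffer ring;
 * the descriptor is handed back to the client and the transfer ring RP is
 * advanced (see the note below about out-of-order completions).
 */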
 586static int parse_rsc_event(struct mhi_controller *mhi_cntrl,
 587                           struct mhi_tre *event,
 588                           struct mhi_chan *mhi_chan)
 589{
 590        struct mhi_ring *buf_ring, *tre_ring;
 591        struct mhi_buf_info *buf_info;
 592        struct mhi_result result;
 593        int ev_code;
 594        u32 cookie; /* offset to local descriptor */
 595        u16 xfer_len;
 596
 597        buf_ring = &mhi_chan->buf_ring;
 598        tre_ring = &mhi_chan->tre_ring;
 599
 600        ev_code = MHI_TRE_GET_EV_CODE(event);
 601        cookie = MHI_TRE_GET_EV_COOKIE(event);
 602        xfer_len = MHI_TRE_GET_EV_LEN(event);
 603
 604        /* Received an out-of-bounds cookie */
 605        WARN_ON(cookie >= buf_ring->len);
 606
 607        buf_info = buf_ring->base + cookie;
 608
 609        result.transaction_status = (ev_code == MHI_EV_CC_OVERFLOW) ?
 610                -EOVERFLOW : 0;
 611
 612        /* truncate to buf len if xfer_len is larger */
 613        result.bytes_xferd = min_t(u16, xfer_len, buf_info->len);
 614        result.buf_addr = buf_info->cb_buf;
 615        result.dir = mhi_chan->dir;
 616
 617        read_lock_bh(&mhi_chan->lock);
 618
 619        if (mhi_chan->ch_state != MHI_CH_STATE_ENABLED)
 620                goto end_process_rsc_event;
 621
 622        WARN_ON(!buf_info->used);
 623
 624        /* notify the client */
 625        mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result);
 626
 627        /*
 628         * Note: We arbitrarily increment RP even though the completion
 629         * packet we just processed may not correspond to it. We can do this
 630         * because the device is guaranteed to cache descriptors in the order
 631         * it receives them, so even if the completion event is for a later
 632         * descriptor we can reuse all descriptors in between.
 633         * Example:
 634         * The transfer ring has descriptors: A, B, C, D
 635         * The last descriptor the host queued is D (WP) and the first
 636         * descriptor the host queued is A (RP).
 637         * The completion event we just serviced is for descriptor C.
 638         * Then we can safely queue descriptors to replace A, B, and C
 639         * even though the host has not received completions for A and B.
 640         */
 641        mhi_del_ring_element(mhi_cntrl, tre_ring);
 642        buf_info->used = false;
 643
 644end_process_rsc_event:
 645        read_unlock_bh(&mhi_chan->lock);
 646
 647        return 0;
 648}
 649
 650static void mhi_process_cmd_completion(struct mhi_controller *mhi_cntrl,
 651                                       struct mhi_tre *tre)
 652{
 653        dma_addr_t ptr = MHI_TRE_GET_EV_PTR(tre);
 654        struct mhi_cmd *cmd_ring = &mhi_cntrl->mhi_cmd[PRIMARY_CMD_RING];
 655        struct mhi_ring *mhi_ring = &cmd_ring->ring;
 656        struct mhi_tre *cmd_pkt;
 657        struct mhi_chan *mhi_chan;
 658        u32 chan;
 659
 660        cmd_pkt = mhi_to_virtual(mhi_ring, ptr);
 661
 662        chan = MHI_TRE_GET_CMD_CHID(cmd_pkt);
 663        mhi_chan = &mhi_cntrl->mhi_chan[chan];
 664        write_lock_bh(&mhi_chan->lock);
 665        mhi_chan->ccs = MHI_TRE_GET_EV_CODE(tre);
 666        complete(&mhi_chan->completion);
 667        write_unlock_bh(&mhi_chan->lock);
 668
 669        mhi_del_ring_element(mhi_cntrl, mhi_ring);
 670}
 671
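/*
 * Process events on the control event ring: device state changes, command
 * completions, execution environment changes, bandwidth requests and any
 * transfer events that are routed here. Returns the number of events
 * processed or a negative error code.
 */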
 672int mhi_process_ctrl_ev_ring(struct mhi_controller *mhi_cntrl,
 673                             struct mhi_event *mhi_event,
 674                             u32 event_quota)
 675{
 676        struct mhi_tre *dev_rp, *local_rp;
 677        struct mhi_ring *ev_ring = &mhi_event->ring;
 678        struct mhi_event_ctxt *er_ctxt =
 679                &mhi_cntrl->mhi_ctxt->er_ctxt[mhi_event->er_index];
 680        struct mhi_chan *mhi_chan;
 681        struct device *dev = &mhi_cntrl->mhi_dev->dev;
 682        u32 chan;
 683        int count = 0;
 684
 685        /*
 686         * This is a quick check to avoid unnecessary event processing
 687         * in case MHI is already in an error state. However, it is still
 688         * possible to transition to an error state while processing events.
 689         */
 690        if (unlikely(MHI_EVENT_ACCESS_INVALID(mhi_cntrl->pm_state)))
 691                return -EIO;
 692
 693        dev_rp = mhi_to_virtual(ev_ring, er_ctxt->rp);
 694        local_rp = ev_ring->rp;
 695
 696        while (dev_rp != local_rp) {
 697                enum mhi_pkt_type type = MHI_TRE_GET_EV_TYPE(local_rp);
 698
 699                switch (type) {
 700                case MHI_PKT_TYPE_BW_REQ_EVENT:
 701                {
 702                        struct mhi_link_info *link_info;
 703
 704                        link_info = &mhi_cntrl->mhi_link_info;
 705                        write_lock_irq(&mhi_cntrl->pm_lock);
 706                        link_info->target_link_speed =
 707                                MHI_TRE_GET_EV_LINKSPEED(local_rp);
 708                        link_info->target_link_width =
 709                                MHI_TRE_GET_EV_LINKWIDTH(local_rp);
 710                        write_unlock_irq(&mhi_cntrl->pm_lock);
 711                        dev_dbg(dev, "Received BW_REQ event\n");
 712                        mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_BW_REQ);
 713                        break;
 714                }
 715                case MHI_PKT_TYPE_STATE_CHANGE_EVENT:
 716                {
 717                        enum mhi_state new_state;
 718
 719                        new_state = MHI_TRE_GET_EV_STATE(local_rp);
 720
 721                        dev_dbg(dev, "State change event to state: %s\n",
 722                                TO_MHI_STATE_STR(new_state));
 723
 724                        switch (new_state) {
 725                        case MHI_STATE_M0:
 726                                mhi_pm_m0_transition(mhi_cntrl);
 727                                break;
 728                        case MHI_STATE_M1:
 729                                mhi_pm_m1_transition(mhi_cntrl);
 730                                break;
 731                        case MHI_STATE_M3:
 732                                mhi_pm_m3_transition(mhi_cntrl);
 733                                break;
 734                        case MHI_STATE_SYS_ERR:
 735                        {
 736                                enum mhi_pm_state new_state;
 737
 738                                /* skip SYS_ERROR handling if RDDM supported */
 739                                if (mhi_cntrl->ee == MHI_EE_RDDM ||
 740                                    mhi_cntrl->rddm_image)
 741                                        break;
 742
 743                                dev_dbg(dev, "System error detected\n");
 744                                write_lock_irq(&mhi_cntrl->pm_lock);
 745                                new_state = mhi_tryset_pm_state(mhi_cntrl,
 746                                                        MHI_PM_SYS_ERR_DETECT);
 747                                write_unlock_irq(&mhi_cntrl->pm_lock);
 748                                if (new_state == MHI_PM_SYS_ERR_DETECT)
 749                                        mhi_pm_sys_err_handler(mhi_cntrl);
 750                                break;
 751                        }
 752                        default:
 753                                dev_err(dev, "Invalid state: %s\n",
 754                                        TO_MHI_STATE_STR(new_state));
 755                        }
 756
 757                        break;
 758                }
 759                case MHI_PKT_TYPE_CMD_COMPLETION_EVENT:
 760                        mhi_process_cmd_completion(mhi_cntrl, local_rp);
 761                        break;
 762                case MHI_PKT_TYPE_EE_EVENT:
 763                {
 764                        enum dev_st_transition st = DEV_ST_TRANSITION_MAX;
 765                        enum mhi_ee_type event = MHI_TRE_GET_EV_EXECENV(local_rp);
 766
 767                        dev_dbg(dev, "Received EE event: %s\n",
 768                                TO_MHI_EXEC_STR(event));
 769                        switch (event) {
 770                        case MHI_EE_SBL:
 771                                st = DEV_ST_TRANSITION_SBL;
 772                                break;
 773                        case MHI_EE_WFW:
 774                        case MHI_EE_AMSS:
 775                                st = DEV_ST_TRANSITION_MISSION_MODE;
 776                                break;
 777                        case MHI_EE_RDDM:
 778                                mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_EE_RDDM);
 779                                write_lock_irq(&mhi_cntrl->pm_lock);
 780                                mhi_cntrl->ee = event;
 781                                write_unlock_irq(&mhi_cntrl->pm_lock);
 782                                wake_up_all(&mhi_cntrl->state_event);
 783                                break;
 784                        default:
 785                                dev_err(dev,
 786                                        "Unhandled EE event: 0x%x\n", event);
 787                        }
 788                        if (st != DEV_ST_TRANSITION_MAX)
 789                                mhi_queue_state_transition(mhi_cntrl, st);
 790
 791                        break;
 792                }
 793                case MHI_PKT_TYPE_TX_EVENT:
 794                        chan = MHI_TRE_GET_EV_CHID(local_rp);
 795
 796                        WARN_ON(chan >= mhi_cntrl->max_chan);
 797
 798                        /*
 799                         * Only process the event ring elements whose channel
 800                         * ID is within the maximum supported range.
 801                         */
 802                        if (chan < mhi_cntrl->max_chan) {
 803                                mhi_chan = &mhi_cntrl->mhi_chan[chan];
 804                                parse_xfer_event(mhi_cntrl, local_rp, mhi_chan);
 805                                event_quota--;
 806                        }
 807                        break;
 808                default:
 809                        dev_err(dev, "Unhandled event type: %d\n", type);
 810                        break;
 811                }
 812
 813                mhi_recycle_ev_ring_element(mhi_cntrl, ev_ring);
 814                local_rp = ev_ring->rp;
 815                dev_rp = mhi_to_virtual(ev_ring, er_ctxt->rp);
 816                count++;
 817        }
 818
 819        read_lock_bh(&mhi_cntrl->pm_lock);
 820        if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl)))
 821                mhi_ring_er_db(mhi_event);
 822        read_unlock_bh(&mhi_cntrl->pm_lock);
 823
 824        return count;
 825}
 826
 827int mhi_process_data_event_ring(struct mhi_controller *mhi_cntrl,
 828                                struct mhi_event *mhi_event,
 829                                u32 event_quota)
 830{
 831        struct mhi_tre *dev_rp, *local_rp;
 832        struct mhi_ring *ev_ring = &mhi_event->ring;
 833        struct mhi_event_ctxt *er_ctxt =
 834                &mhi_cntrl->mhi_ctxt->er_ctxt[mhi_event->er_index];
 835        int count = 0;
 836        u32 chan;
 837        struct mhi_chan *mhi_chan;
 838
 839        if (unlikely(MHI_EVENT_ACCESS_INVALID(mhi_cntrl->pm_state)))
 840                return -EIO;
 841
 842        dev_rp = mhi_to_virtual(ev_ring, er_ctxt->rp);
 843        local_rp = ev_ring->rp;
 844
 845        while (dev_rp != local_rp && event_quota > 0) {
 846                enum mhi_pkt_type type = MHI_TRE_GET_EV_TYPE(local_rp);
 847
 848                chan = MHI_TRE_GET_EV_CHID(local_rp);
 849
 850                WARN_ON(chan >= mhi_cntrl->max_chan);
 851
 852                /*
 853                 * Only process the event ring elements whose channel
 854                 * ID is within the maximum supported range.
 855                 */
 856                if (chan < mhi_cntrl->max_chan) {
 857                        mhi_chan = &mhi_cntrl->mhi_chan[chan];
 858
 859                        if (likely(type == MHI_PKT_TYPE_TX_EVENT)) {
 860                                parse_xfer_event(mhi_cntrl, local_rp, mhi_chan);
 861                                event_quota--;
 862                        } else if (type == MHI_PKT_TYPE_RSC_TX_EVENT) {
 863                                parse_rsc_event(mhi_cntrl, local_rp, mhi_chan);
 864                                event_quota--;
 865                        }
 866                }
 867
 868                mhi_recycle_ev_ring_element(mhi_cntrl, ev_ring);
 869                local_rp = ev_ring->rp;
 870                dev_rp = mhi_to_virtual(ev_ring, er_ctxt->rp);
 871                count++;
 872        }
 873        read_lock_bh(&mhi_cntrl->pm_lock);
 874        if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl)))
 875                mhi_ring_er_db(mhi_event);
 876        read_unlock_bh(&mhi_cntrl->pm_lock);
 877
 878        return count;
 879}
 880
 881void mhi_ev_task(unsigned long data)
 882{
 883        struct mhi_event *mhi_event = (struct mhi_event *)data;
 884        struct mhi_controller *mhi_cntrl = mhi_event->mhi_cntrl;
 885
 886        /* process all pending events */
 887        spin_lock_bh(&mhi_event->lock);
 888        mhi_event->process_event(mhi_cntrl, mhi_event, U32_MAX);
 889        spin_unlock_bh(&mhi_event->lock);
 890}
 891
 892void mhi_ctrl_ev_task(unsigned long data)
 893{
 894        struct mhi_event *mhi_event = (struct mhi_event *)data;
 895        struct mhi_controller *mhi_cntrl = mhi_event->mhi_cntrl;
 896        struct device *dev = &mhi_cntrl->mhi_dev->dev;
 897        enum mhi_state state;
 898        enum mhi_pm_state pm_state = 0;
 899        int ret;
 900
 901        /*
 902         * We can check the PM state without a lock here because there is
 903         * no way the PM state can change from reg access valid to no access
 904         * while this thread is executing.
 905         */
 906        if (!MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) {
 907                /*
 908                 * We may have a pending event, but we are not allowed to
 909                 * process it since we are probably in a suspended state,
 910                 * so trigger a resume.
 911                 */
 912                mhi_trigger_resume(mhi_cntrl);
 913
 914                return;
 915        }
 916
 917        /* Process ctrl events */
 918        ret = mhi_event->process_event(mhi_cntrl, mhi_event, U32_MAX);
 919
 920        /*
 921         * We received an IRQ but there were no events to process. Maybe
 922         * the device went to SYS_ERR state? Check the state to confirm.
 923         */
 924        if (!ret) {
 925                write_lock_irq(&mhi_cntrl->pm_lock);
 926                state = mhi_get_mhi_state(mhi_cntrl);
 927                if (state == MHI_STATE_SYS_ERR) {
 928                        dev_dbg(dev, "System error detected\n");
 929                        pm_state = mhi_tryset_pm_state(mhi_cntrl,
 930                                                       MHI_PM_SYS_ERR_DETECT);
 931                }
 932                write_unlock_irq(&mhi_cntrl->pm_lock);
 933                if (pm_state == MHI_PM_SYS_ERR_DETECT)
 934                        mhi_pm_sys_err_handler(mhi_cntrl);
 935        }
 936}
 937
 938static bool mhi_is_ring_full(struct mhi_controller *mhi_cntrl,
 939                             struct mhi_ring *ring)
 940{
 941        void *tmp = ring->wp + ring->el_size;
 942
 943        if (tmp >= (ring->base + ring->len))
 944                tmp = ring->base;
 945
 946        return (tmp == ring->rp);
 947}
 948
 949int mhi_queue_skb(struct mhi_device *mhi_dev, enum dma_data_direction dir,
 950                  struct sk_buff *skb, size_t len, enum mhi_flags mflags)
 951{
 952        struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
 953        struct mhi_chan *mhi_chan = (dir == DMA_TO_DEVICE) ? mhi_dev->ul_chan :
 954                                                             mhi_dev->dl_chan;
 955        struct mhi_ring *tre_ring = &mhi_chan->tre_ring;
 956        struct mhi_buf_info buf_info = { };
 957        int ret;
 958
 959        /* If MHI host pre-allocates buffers then client drivers cannot queue */
 960        if (mhi_chan->pre_alloc)
 961                return -EINVAL;
 962
 963        if (mhi_is_ring_full(mhi_cntrl, tre_ring))
 964                return -ENOMEM;
 965
 966        read_lock_bh(&mhi_cntrl->pm_lock);
 967        if (unlikely(MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state))) {
 968                read_unlock_bh(&mhi_cntrl->pm_lock);
 969                return -EIO;
 970        }
 971
 972        /* we're in M3 or transitioning to M3 */
 973        if (MHI_PM_IN_SUSPEND_STATE(mhi_cntrl->pm_state))
 974                mhi_trigger_resume(mhi_cntrl);
 975
 976        /* Toggle wake to exit out of M2 */
 977        mhi_cntrl->wake_toggle(mhi_cntrl);
 978
 979        buf_info.v_addr = skb->data;
 980        buf_info.cb_buf = skb;
 981        buf_info.len = len;
 982
 983        ret = mhi_gen_tre(mhi_cntrl, mhi_chan, &buf_info, mflags);
 984        if (unlikely(ret)) {
 985                read_unlock_bh(&mhi_cntrl->pm_lock);
 986                return ret;
 987        }
 988
 989        if (mhi_chan->dir == DMA_TO_DEVICE)
 990                atomic_inc(&mhi_cntrl->pending_pkts);
 991
 992        if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl))) {
 993                read_lock_bh(&mhi_chan->lock);
 994                mhi_ring_chan_db(mhi_cntrl, mhi_chan);
 995                read_unlock_bh(&mhi_chan->lock);
 996        }
 997
 998        read_unlock_bh(&mhi_cntrl->pm_lock);
 999
1000        return 0;
1001}
1002EXPORT_SYMBOL_GPL(mhi_queue_skb);
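
/*
 * Illustrative sketch (not part of the original driver): a client driver
 * replenishing its DL channel with receive buffers via mhi_queue_skb().
 * The helper name and the 2 KB buffer size are assumptions for this
 * example; the completion for each buffer is delivered through the
 * driver's dl_xfer_cb.
 *
 *	static int example_queue_rx(struct mhi_device *mhi_dev)
 *	{
 *		size_t len = 2048;
 *		struct sk_buff *skb = alloc_skb(len, GFP_KERNEL);
 *		int ret;
 *
 *		if (!skb)
 *			return -ENOMEM;
 *
 *		ret = mhi_queue_skb(mhi_dev, DMA_FROM_DEVICE, skb, len, MHI_EOT);
 *		if (ret)
 *			kfree_skb(skb);
 *
 *		return ret;
 *	}
 */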
1003
1004int mhi_queue_dma(struct mhi_device *mhi_dev, enum dma_data_direction dir,
1005                  struct mhi_buf *mhi_buf, size_t len, enum mhi_flags mflags)
1006{
1007        struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
1008        struct mhi_chan *mhi_chan = (dir == DMA_TO_DEVICE) ? mhi_dev->ul_chan :
1009                                                             mhi_dev->dl_chan;
1010        struct device *dev = &mhi_cntrl->mhi_dev->dev;
1011        struct mhi_ring *tre_ring = &mhi_chan->tre_ring;
1012        struct mhi_buf_info buf_info = { };
1013        int ret;
1014
1015        /* If MHI host pre-allocates buffers then client drivers cannot queue */
1016        if (mhi_chan->pre_alloc)
1017                return -EINVAL;
1018
1019        if (mhi_is_ring_full(mhi_cntrl, tre_ring))
1020                return -ENOMEM;
1021
1022        read_lock_bh(&mhi_cntrl->pm_lock);
1023        if (unlikely(MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state))) {
1024                dev_err(dev, "MHI is not in active state, PM state: %s\n",
1025                        to_mhi_pm_state_str(mhi_cntrl->pm_state));
1026                read_unlock_bh(&mhi_cntrl->pm_lock);
1027
1028                return -EIO;
1029        }
1030
1031        /* we're in M3 or transitioning to M3 */
1032        if (MHI_PM_IN_SUSPEND_STATE(mhi_cntrl->pm_state))
1033                mhi_trigger_resume(mhi_cntrl);
1034
1035        /* Toggle wake to exit out of M2 */
1036        mhi_cntrl->wake_toggle(mhi_cntrl);
1037
1038        buf_info.p_addr = mhi_buf->dma_addr;
1039        buf_info.cb_buf = mhi_buf;
1040        buf_info.pre_mapped = true;
1041        buf_info.len = len;
1042
1043        ret = mhi_gen_tre(mhi_cntrl, mhi_chan, &buf_info, mflags);
1044        if (unlikely(ret)) {
1045                read_unlock_bh(&mhi_cntrl->pm_lock);
1046                return ret;
1047        }
1048
1049        if (mhi_chan->dir == DMA_TO_DEVICE)
1050                atomic_inc(&mhi_cntrl->pending_pkts);
1051
1052        if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl))) {
1053                read_lock_bh(&mhi_chan->lock);
1054                mhi_ring_chan_db(mhi_cntrl, mhi_chan);
1055                read_unlock_bh(&mhi_chan->lock);
1056        }
1057
1058        read_unlock_bh(&mhi_cntrl->pm_lock);
1059
1060        return 0;
1061}
1062EXPORT_SYMBOL_GPL(mhi_queue_dma);
1063
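/*
 * Build a TRE for the given buffer: record the buffer in the buffer ring,
 * map it for DMA unless the client pre-mapped it, fill in the ring
 * element and advance both ring write pointers. Callers are responsible
 * for ringing the channel doorbell.
 */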
1064int mhi_gen_tre(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan,
1065                        struct mhi_buf_info *info, enum mhi_flags flags)
1066{
1067        struct mhi_ring *buf_ring, *tre_ring;
1068        struct mhi_tre *mhi_tre;
1069        struct mhi_buf_info *buf_info;
1070        int eot, eob, chain, bei;
1071        int ret;
1072
1073        buf_ring = &mhi_chan->buf_ring;
1074        tre_ring = &mhi_chan->tre_ring;
1075
1076        buf_info = buf_ring->wp;
1077        WARN_ON(buf_info->used);
1078        buf_info->pre_mapped = info->pre_mapped;
1079        if (info->pre_mapped)
1080                buf_info->p_addr = info->p_addr;
1081        else
1082                buf_info->v_addr = info->v_addr;
1083        buf_info->cb_buf = info->cb_buf;
1084        buf_info->wp = tre_ring->wp;
1085        buf_info->dir = mhi_chan->dir;
1086        buf_info->len = info->len;
1087
1088        if (!info->pre_mapped) {
1089                ret = mhi_cntrl->map_single(mhi_cntrl, buf_info);
1090                if (ret)
1091                        return ret;
1092        }
1093
1094        eob = !!(flags & MHI_EOB);
1095        eot = !!(flags & MHI_EOT);
1096        chain = !!(flags & MHI_CHAIN);
1097        bei = !!(mhi_chan->intmod);
1098
1099        mhi_tre = tre_ring->wp;
1100        mhi_tre->ptr = MHI_TRE_DATA_PTR(buf_info->p_addr);
1101        mhi_tre->dword[0] = MHI_TRE_DATA_DWORD0(info->len);
1102        mhi_tre->dword[1] = MHI_TRE_DATA_DWORD1(bei, eot, eob, chain);
1103
1104        /* increment WP */
1105        mhi_add_ring_element(mhi_cntrl, tre_ring);
1106        mhi_add_ring_element(mhi_cntrl, buf_ring);
1107
1108        return 0;
1109}
1110
1111int mhi_queue_buf(struct mhi_device *mhi_dev, enum dma_data_direction dir,
1112                  void *buf, size_t len, enum mhi_flags mflags)
1113{
1114        struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
1115        struct mhi_chan *mhi_chan = (dir == DMA_TO_DEVICE) ? mhi_dev->ul_chan :
1116                                                             mhi_dev->dl_chan;
1117        struct mhi_ring *tre_ring;
1118        struct mhi_buf_info buf_info = { };
1119        unsigned long flags;
1120        int ret;
1121
1122        /*
1123         * This check is here only as a guard; it is always possible for
1124         * MHI to enter an error state while the rest of this function
1125         * executes. That is not fatal, so we do not need to hold pm_lock.
1126         */
1127        if (unlikely(MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)))
1128                return -EIO;
1129
1130        tre_ring = &mhi_chan->tre_ring;
1131        if (mhi_is_ring_full(mhi_cntrl, tre_ring))
1132                return -ENOMEM;
1133
1134        buf_info.v_addr = buf;
1135        buf_info.cb_buf = buf;
1136        buf_info.len = len;
1137
1138        ret = mhi_gen_tre(mhi_cntrl, mhi_chan, &buf_info, mflags);
1139        if (unlikely(ret))
1140                return ret;
1141
1142        read_lock_irqsave(&mhi_cntrl->pm_lock, flags);
1143
1144        /* we're in M3 or transitioning to M3 */
1145        if (MHI_PM_IN_SUSPEND_STATE(mhi_cntrl->pm_state))
1146                mhi_trigger_resume(mhi_cntrl);
1147
1148        /* Toggle wake to exit out of M2 */
1149        mhi_cntrl->wake_toggle(mhi_cntrl);
1150
1151        if (mhi_chan->dir == DMA_TO_DEVICE)
1152                atomic_inc(&mhi_cntrl->pending_pkts);
1153
1154        if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl))) {
1155                unsigned long flags;
1156
1157                read_lock_irqsave(&mhi_chan->lock, flags);
1158                mhi_ring_chan_db(mhi_cntrl, mhi_chan);
1159                read_unlock_irqrestore(&mhi_chan->lock, flags);
1160        }
1161
1162        read_unlock_irqrestore(&mhi_cntrl->pm_lock, flags);
1163
1164        return 0;
1165}
1166EXPORT_SYMBOL_GPL(mhi_queue_buf);
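
/*
 * Illustrative sketch (not part of the original driver): sending a
 * kmalloc'ed buffer on the UL channel with mhi_queue_buf(). All
 * example_* names are hypothetical; the buffer must stay valid until the
 * driver's ul_xfer_cb reports the completion, where it can be freed.
 *
 *	static int example_send(struct mhi_device *mhi_dev, void *data,
 *				size_t len)
 *	{
 *		void *buf = kmemdup(data, len, GFP_KERNEL);
 *		int ret;
 *
 *		if (!buf)
 *			return -ENOMEM;
 *
 *		ret = mhi_queue_buf(mhi_dev, DMA_TO_DEVICE, buf, len, MHI_EOT);
 *		if (ret)
 *			kfree(buf);
 *
 *		return ret;
 *	}
 *
 *	static void example_ul_cb(struct mhi_device *mhi_dev,
 *				  struct mhi_result *result)
 *	{
 *		kfree(result->buf_addr);
 *	}
 */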
1167
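/*
 * Queue a channel command (start/reset) on the primary command ring and
 * ring the command doorbell. The completion code is reported back through
 * mhi_process_cmd_completion() via the channel's completion object.
 */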
1168int mhi_send_cmd(struct mhi_controller *mhi_cntrl,
1169                 struct mhi_chan *mhi_chan,
1170                 enum mhi_cmd_type cmd)
1171{
1172        struct mhi_tre *cmd_tre = NULL;
1173        struct mhi_cmd *mhi_cmd = &mhi_cntrl->mhi_cmd[PRIMARY_CMD_RING];
1174        struct mhi_ring *ring = &mhi_cmd->ring;
1175        struct device *dev = &mhi_cntrl->mhi_dev->dev;
1176        int chan = 0;
1177
1178        if (mhi_chan)
1179                chan = mhi_chan->chan;
1180
1181        spin_lock_bh(&mhi_cmd->lock);
1182        if (!get_nr_avail_ring_elements(mhi_cntrl, ring)) {
1183                spin_unlock_bh(&mhi_cmd->lock);
1184                return -ENOMEM;
1185        }
1186
1187        /* prepare the cmd tre */
1188        cmd_tre = ring->wp;
1189        switch (cmd) {
1190        case MHI_CMD_RESET_CHAN:
1191                cmd_tre->ptr = MHI_TRE_CMD_RESET_PTR;
1192                cmd_tre->dword[0] = MHI_TRE_CMD_RESET_DWORD0;
1193                cmd_tre->dword[1] = MHI_TRE_CMD_RESET_DWORD1(chan);
1194                break;
1195        case MHI_CMD_START_CHAN:
1196                cmd_tre->ptr = MHI_TRE_CMD_START_PTR;
1197                cmd_tre->dword[0] = MHI_TRE_CMD_START_DWORD0;
1198                cmd_tre->dword[1] = MHI_TRE_CMD_START_DWORD1(chan);
1199                break;
1200        default:
1201                dev_err(dev, "Command not supported\n");
1202                break;
1203        }
1204
1205        /* queue to hardware */
1206        mhi_add_ring_element(mhi_cntrl, ring);
1207        read_lock_bh(&mhi_cntrl->pm_lock);
1208        if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl)))
1209                mhi_ring_cmd_db(mhi_cntrl, mhi_cmd);
1210        read_unlock_bh(&mhi_cntrl->pm_lock);
1211        spin_unlock_bh(&mhi_cmd->lock);
1212
1213        return 0;
1214}
1215
1216static void __mhi_unprepare_channel(struct mhi_controller *mhi_cntrl,
1217                                    struct mhi_chan *mhi_chan)
1218{
1219        int ret;
1220        struct device *dev = &mhi_cntrl->mhi_dev->dev;
1221
1222        dev_dbg(dev, "Entered: unprepare channel:%d\n", mhi_chan->chan);
1223
1224        /* No more processing of events for this channel */
1225        mutex_lock(&mhi_chan->mutex);
1226        write_lock_irq(&mhi_chan->lock);
1227        if (mhi_chan->ch_state != MHI_CH_STATE_ENABLED) {
1228                write_unlock_irq(&mhi_chan->lock);
1229                mutex_unlock(&mhi_chan->mutex);
1230                return;
1231        }
1232
1233        mhi_chan->ch_state = MHI_CH_STATE_DISABLED;
1234        write_unlock_irq(&mhi_chan->lock);
1235
1236        reinit_completion(&mhi_chan->completion);
1237        read_lock_bh(&mhi_cntrl->pm_lock);
1238        if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
1239                read_unlock_bh(&mhi_cntrl->pm_lock);
1240                goto error_invalid_state;
1241        }
1242
1243        mhi_cntrl->wake_toggle(mhi_cntrl);
1244        read_unlock_bh(&mhi_cntrl->pm_lock);
1245
1246        mhi_cntrl->runtime_get(mhi_cntrl);
1247        mhi_cntrl->runtime_put(mhi_cntrl);
1248        ret = mhi_send_cmd(mhi_cntrl, mhi_chan, MHI_CMD_RESET_CHAN);
1249        if (ret)
1250                goto error_invalid_state;
1251
1252        /* Even if the command fails, we will still reset the channel */
1253        ret = wait_for_completion_timeout(&mhi_chan->completion,
1254                                msecs_to_jiffies(mhi_cntrl->timeout_ms));
1255        if (!ret || mhi_chan->ccs != MHI_EV_CC_SUCCESS)
1256                dev_err(dev,
1257                        "Failed to receive cmd completion, still resetting\n");
1258
1259error_invalid_state:
1260        if (!mhi_chan->offload_ch) {
1261                mhi_reset_chan(mhi_cntrl, mhi_chan);
1262                mhi_deinit_chan_ctxt(mhi_cntrl, mhi_chan);
1263        }
1264        dev_dbg(dev, "chan:%d successfully reset\n", mhi_chan->chan);
1265        mutex_unlock(&mhi_chan->mutex);
1266}
1267
1268int mhi_prepare_channel(struct mhi_controller *mhi_cntrl,
1269                        struct mhi_chan *mhi_chan)
1270{
1271        int ret = 0;
1272        struct device *dev = &mhi_cntrl->mhi_dev->dev;
1273
1274        dev_dbg(dev, "Preparing channel: %d\n", mhi_chan->chan);
1275
1276        if (!(BIT(mhi_cntrl->ee) & mhi_chan->ee_mask)) {
1277                dev_err(dev,
1278                        "Current EE: %s Required EE Mask: 0x%x for chan: %s\n",
1279                        TO_MHI_EXEC_STR(mhi_cntrl->ee), mhi_chan->ee_mask,
1280                        mhi_chan->name);
1281                return -ENOTCONN;
1282        }
1283
1284        mutex_lock(&mhi_chan->mutex);
1285
1286        /* If the channel is not in disabled state, do not allow it to start */
1287        if (mhi_chan->ch_state != MHI_CH_STATE_DISABLED) {
1288                ret = -EIO;
1289                dev_dbg(dev, "channel: %d is not in disabled state\n",
1290                        mhi_chan->chan);
1291                goto error_init_chan;
1292        }
1293
1294        /* The client manages the channel context for offload channels */
1295        if (!mhi_chan->offload_ch) {
1296                ret = mhi_init_chan_ctxt(mhi_cntrl, mhi_chan);
1297                if (ret)
1298                        goto error_init_chan;
1299        }
1300
1301        reinit_completion(&mhi_chan->completion);
1302        read_lock_bh(&mhi_cntrl->pm_lock);
1303        if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
1304                read_unlock_bh(&mhi_cntrl->pm_lock);
1305                ret = -EIO;
1306                goto error_pm_state;
1307        }
1308
1309        mhi_cntrl->wake_toggle(mhi_cntrl);
1310        read_unlock_bh(&mhi_cntrl->pm_lock);
1311        mhi_cntrl->runtime_get(mhi_cntrl);
1312        mhi_cntrl->runtime_put(mhi_cntrl);
1313
1314        ret = mhi_send_cmd(mhi_cntrl, mhi_chan, MHI_CMD_START_CHAN);
1315        if (ret)
1316                goto error_pm_state;
1317
1318        ret = wait_for_completion_timeout(&mhi_chan->completion,
1319                                msecs_to_jiffies(mhi_cntrl->timeout_ms));
1320        if (!ret || mhi_chan->ccs != MHI_EV_CC_SUCCESS) {
1321                ret = -EIO;
1322                goto error_pm_state;
1323        }
1324
1325        write_lock_irq(&mhi_chan->lock);
1326        mhi_chan->ch_state = MHI_CH_STATE_ENABLED;
1327        write_unlock_irq(&mhi_chan->lock);
1328
1329        /* Pre-allocate buffers for the transfer ring */
1330        if (mhi_chan->pre_alloc) {
1331                int nr_el = get_nr_avail_ring_elements(mhi_cntrl,
1332                                                       &mhi_chan->tre_ring);
1333                size_t len = mhi_cntrl->buffer_len;
1334
1335                while (nr_el--) {
1336                        void *buf;
1337                        struct mhi_buf_info info = { };
1338                        buf = kmalloc(len, GFP_KERNEL);
1339                        if (!buf) {
1340                                ret = -ENOMEM;
1341                                goto error_pre_alloc;
1342                        }
1343
1344                        /* Prepare transfer descriptors */
1345                        info.v_addr = buf;
1346                        info.cb_buf = buf;
1347                        info.len = len;
1348                        ret = mhi_gen_tre(mhi_cntrl, mhi_chan, &info, MHI_EOT);
1349                        if (ret) {
1350                                kfree(buf);
1351                                goto error_pre_alloc;
1352                        }
1353                }
1354
1355                read_lock_bh(&mhi_cntrl->pm_lock);
1356                if (MHI_DB_ACCESS_VALID(mhi_cntrl)) {
1357                        read_lock_irq(&mhi_chan->lock);
1358                        mhi_ring_chan_db(mhi_cntrl, mhi_chan);
1359                        read_unlock_irq(&mhi_chan->lock);
1360                }
1361                read_unlock_bh(&mhi_cntrl->pm_lock);
1362        }
1363
1364        mutex_unlock(&mhi_chan->mutex);
1365
1366        dev_dbg(dev, "Chan: %d successfully moved to start state\n",
1367                mhi_chan->chan);
1368
1369        return 0;
1370
1371error_pm_state:
1372        if (!mhi_chan->offload_ch)
1373                mhi_deinit_chan_ctxt(mhi_cntrl, mhi_chan);
1374
1375error_init_chan:
1376        mutex_unlock(&mhi_chan->mutex);
1377
1378        return ret;
1379
1380error_pre_alloc:
1381        mutex_unlock(&mhi_chan->mutex);
1382        __mhi_unprepare_channel(mhi_cntrl, mhi_chan);
1383
1384        return ret;
1385}
1386
1387static void mhi_mark_stale_events(struct mhi_controller *mhi_cntrl,
1388                                  struct mhi_event *mhi_event,
1389                                  struct mhi_event_ctxt *er_ctxt,
1390                                  int chan)
1391
1392{
1393        struct mhi_tre *dev_rp, *local_rp;
1394        struct mhi_ring *ev_ring;
1395        struct device *dev = &mhi_cntrl->mhi_dev->dev;
1396        unsigned long flags;
1397
1398        dev_dbg(dev, "Marking all events for chan: %d as stale\n", chan);
1399
1400        ev_ring = &mhi_event->ring;
1401
1402        /* Mark all pending events related to this channel as STALE */
1403        spin_lock_irqsave(&mhi_event->lock, flags);
1404        dev_rp = mhi_to_virtual(ev_ring, er_ctxt->rp);
1405
1406        local_rp = ev_ring->rp;
1407        while (dev_rp != local_rp) {
1408                if (MHI_TRE_GET_EV_TYPE(local_rp) == MHI_PKT_TYPE_TX_EVENT &&
1409                    chan == MHI_TRE_GET_EV_CHID(local_rp))
1410                        local_rp->dword[1] = MHI_TRE_EV_DWORD1(chan,
1411                                        MHI_PKT_TYPE_STALE_EVENT);
1412                local_rp++;
1413                if (local_rp == (ev_ring->base + ev_ring->len))
1414                        local_rp = ev_ring->base;
1415        }
1416
1417        dev_dbg(dev, "Finished marking events as stale events\n");
1418        spin_unlock_irqrestore(&mhi_event->lock, flags);
1419}
1420
1421static void mhi_reset_data_chan(struct mhi_controller *mhi_cntrl,
1422                                struct mhi_chan *mhi_chan)
1423{
1424        struct mhi_ring *buf_ring, *tre_ring;
1425        struct mhi_result result;
1426
1427        /* Reset any pending buffers */
1428        buf_ring = &mhi_chan->buf_ring;
1429        tre_ring = &mhi_chan->tre_ring;
1430        result.transaction_status = -ENOTCONN;
1431        result.bytes_xferd = 0;
1432        while (tre_ring->rp != tre_ring->wp) {
1433                struct mhi_buf_info *buf_info = buf_ring->rp;
1434
1435                if (mhi_chan->dir == DMA_TO_DEVICE)
1436                        atomic_dec(&mhi_cntrl->pending_pkts);
1437
1438                if (!buf_info->pre_mapped)
1439                        mhi_cntrl->unmap_single(mhi_cntrl, buf_info);
1440
1441                mhi_del_ring_element(mhi_cntrl, buf_ring);
1442                mhi_del_ring_element(mhi_cntrl, tre_ring);
1443
1444                if (mhi_chan->pre_alloc) {
1445                        kfree(buf_info->cb_buf);
1446                } else {
1447                        result.buf_addr = buf_info->cb_buf;
1448                        mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result);
1449                }
1450        }
1451}
1452
1453void mhi_reset_chan(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan)
1454{
1455        struct mhi_event *mhi_event;
1456        struct mhi_event_ctxt *er_ctxt;
1457        int chan = mhi_chan->chan;
1458
1459        /* Nothing to reset, client doesn't queue buffers */
1460        if (mhi_chan->offload_ch)
1461                return;
1462
1463        read_lock_bh(&mhi_cntrl->pm_lock);
1464        mhi_event = &mhi_cntrl->mhi_event[mhi_chan->er_index];
1465        er_ctxt = &mhi_cntrl->mhi_ctxt->er_ctxt[mhi_chan->er_index];
1466
1467        mhi_mark_stale_events(mhi_cntrl, mhi_event, er_ctxt, chan);
1468
1469        mhi_reset_data_chan(mhi_cntrl, mhi_chan);
1470
1471        read_unlock_bh(&mhi_cntrl->pm_lock);
1472}
1473
1474/* Move channel to start state */
1475int mhi_prepare_for_transfer(struct mhi_device *mhi_dev)
1476{
1477        int ret, dir;
1478        struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
1479        struct mhi_chan *mhi_chan;
1480
1481        for (dir = 0; dir < 2; dir++) {
1482                mhi_chan = dir ? mhi_dev->dl_chan : mhi_dev->ul_chan;
1483                if (!mhi_chan)
1484                        continue;
1485
1486                ret = mhi_prepare_channel(mhi_cntrl, mhi_chan);
1487                if (ret)
1488                        goto error_open_chan;
1489        }
1490
1491        return 0;
1492
1493error_open_chan:
1494        for (--dir; dir >= 0; dir--) {
1495                mhi_chan = dir ? mhi_dev->dl_chan : mhi_dev->ul_chan;
1496                if (!mhi_chan)
1497                        continue;
1498
1499                __mhi_unprepare_channel(mhi_cntrl, mhi_chan);
1500        }
1501
1502        return ret;
1503}
1504EXPORT_SYMBOL_GPL(mhi_prepare_for_transfer);
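
/*
 * Illustrative sketch (not part of the original driver): a client driver
 * probe() typically moves its channels to the started state with
 * mhi_prepare_for_transfer() and tears them down again in remove() with
 * mhi_unprepare_from_transfer(). Queueing the initial set of RX buffers
 * would usually follow a successful prepare. All example_* names are
 * hypothetical.
 *
 *	static int example_probe(struct mhi_device *mhi_dev,
 *				 const struct mhi_device_id *id)
 *	{
 *		return mhi_prepare_for_transfer(mhi_dev);
 *	}
 *
 *	static void example_remove(struct mhi_device *mhi_dev)
 *	{
 *		mhi_unprepare_from_transfer(mhi_dev);
 *	}
 */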
1505
1506void mhi_unprepare_from_transfer(struct mhi_device *mhi_dev)
1507{
1508        struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
1509        struct mhi_chan *mhi_chan;
1510        int dir;
1511
1512        for (dir = 0; dir < 2; dir++) {
1513                mhi_chan = dir ? mhi_dev->ul_chan : mhi_dev->dl_chan;
1514                if (!mhi_chan)
1515                        continue;
1516
1517                __mhi_unprepare_channel(mhi_cntrl, mhi_chan);
1518        }
1519}
1520EXPORT_SYMBOL_GPL(mhi_unprepare_from_transfer);
1521
1522int mhi_poll(struct mhi_device *mhi_dev, u32 budget)
1523{
1524        struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
1525        struct mhi_chan *mhi_chan = mhi_dev->dl_chan;
1526        struct mhi_event *mhi_event = &mhi_cntrl->mhi_event[mhi_chan->er_index];
1527        int ret;
1528
1529        spin_lock_bh(&mhi_event->lock);
1530        ret = mhi_event->process_event(mhi_cntrl, mhi_event, budget);
1531        spin_unlock_bh(&mhi_event->lock);
1532
1533        return ret;
1534}
1535EXPORT_SYMBOL_GPL(mhi_poll);
1536