linux/drivers/bus/mhi/core/main.c
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
   4 *
   5 */
   6
   7#include <linux/delay.h>
   8#include <linux/device.h>
   9#include <linux/dma-direction.h>
  10#include <linux/dma-mapping.h>
  11#include <linux/interrupt.h>
  12#include <linux/list.h>
  13#include <linux/mhi.h>
  14#include <linux/module.h>
  15#include <linux/skbuff.h>
  16#include <linux/slab.h>
  17#include "internal.h"
  18
  19int __must_check mhi_read_reg(struct mhi_controller *mhi_cntrl,
  20                              void __iomem *base, u32 offset, u32 *out)
  21{
  22        return mhi_cntrl->read_reg(mhi_cntrl, base + offset, out);
  23}
  24
  25int __must_check mhi_read_reg_field(struct mhi_controller *mhi_cntrl,
  26                                    void __iomem *base, u32 offset,
  27                                    u32 mask, u32 shift, u32 *out)
  28{
  29        u32 tmp;
  30        int ret;
  31
  32        ret = mhi_read_reg(mhi_cntrl, base, offset, &tmp);
  33        if (ret)
  34                return ret;
  35
  36        *out = (tmp & mask) >> shift;
  37
  38        return 0;
  39}
  40
  41int __must_check mhi_poll_reg_field(struct mhi_controller *mhi_cntrl,
  42                                    void __iomem *base, u32 offset,
  43                                    u32 mask, u32 shift, u32 val, u32 delayus)
  44{
  45        int ret;
  46        u32 out, retry = (mhi_cntrl->timeout_ms * 1000) / delayus;
  47
  48        while (retry--) {
  49                ret = mhi_read_reg_field(mhi_cntrl, base, offset, mask, shift,
  50                                         &out);
  51                if (ret)
  52                        return ret;
  53
  54                if (out == val)
  55                        return 0;
  56
  57                fsleep(delayus);
  58        }
  59
  60        return -ETIMEDOUT;
  61}
  62
  63void mhi_write_reg(struct mhi_controller *mhi_cntrl, void __iomem *base,
  64                   u32 offset, u32 val)
  65{
  66        mhi_cntrl->write_reg(mhi_cntrl, base + offset, val);
  67}
  68
  69void mhi_write_reg_field(struct mhi_controller *mhi_cntrl, void __iomem *base,
  70                         u32 offset, u32 mask, u32 shift, u32 val)
  71{
  72        int ret;
  73        u32 tmp;
  74
  75        ret = mhi_read_reg(mhi_cntrl, base, offset, &tmp);
  76        if (ret)
  77                return;
  78
  79        tmp &= ~mask;
  80        tmp |= (val << shift);
  81        mhi_write_reg(mhi_cntrl, base, offset, tmp);
  82}
  83
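/*
 * Write a 64-bit doorbell value as two 32-bit register writes, upper
 * half first, then the lower half.
 */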
  84void mhi_write_db(struct mhi_controller *mhi_cntrl, void __iomem *db_addr,
  85                  dma_addr_t db_val)
  86{
  87        mhi_write_reg(mhi_cntrl, db_addr, 4, upper_32_bits(db_val));
  88        mhi_write_reg(mhi_cntrl, db_addr, 0, lower_32_bits(db_val));
  89}
  90
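/*
 * Burst mode doorbell: only ring the doorbell when db_mode is set (the
 * device re-arms it via an OOB/DB_MODE completion event), then clear the
 * flag so further ring updates are coalesced until the device asks again.
 */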
  91void mhi_db_brstmode(struct mhi_controller *mhi_cntrl,
  92                     struct db_cfg *db_cfg,
  93                     void __iomem *db_addr,
  94                     dma_addr_t db_val)
  95{
  96        if (db_cfg->db_mode) {
  97                db_cfg->db_val = db_val;
  98                mhi_write_db(mhi_cntrl, db_addr, db_val);
  99                db_cfg->db_mode = 0;
 100        }
 101}
 102
 103void mhi_db_brstmode_disable(struct mhi_controller *mhi_cntrl,
 104                             struct db_cfg *db_cfg,
 105                             void __iomem *db_addr,
 106                             dma_addr_t db_val)
 107{
 108        db_cfg->db_val = db_val;
 109        mhi_write_db(mhi_cntrl, db_addr, db_val);
 110}
 111
 112void mhi_ring_er_db(struct mhi_event *mhi_event)
 113{
 114        struct mhi_ring *ring = &mhi_event->ring;
 115
 116        mhi_event->db_cfg.process_db(mhi_event->mhi_cntrl, &mhi_event->db_cfg,
 117                                     ring->db_addr, *ring->ctxt_wp);
 118}
 119
 120void mhi_ring_cmd_db(struct mhi_controller *mhi_cntrl, struct mhi_cmd *mhi_cmd)
 121{
 122        dma_addr_t db;
 123        struct mhi_ring *ring = &mhi_cmd->ring;
 124
 125        db = ring->iommu_base + (ring->wp - ring->base);
 126        *ring->ctxt_wp = db;
 127        mhi_write_db(mhi_cntrl, ring->db_addr, db);
 128}
 129
 130void mhi_ring_chan_db(struct mhi_controller *mhi_cntrl,
 131                      struct mhi_chan *mhi_chan)
 132{
 133        struct mhi_ring *ring = &mhi_chan->tre_ring;
 134        dma_addr_t db;
 135
 136        db = ring->iommu_base + (ring->wp - ring->base);
 137
 138        /*
 139         * Writes to the new ring element must be visible to the hardware
  140         * before letting the hardware know there is a new element to fetch.
 141         */
 142        dma_wmb();
 143        *ring->ctxt_wp = db;
 144
 145        mhi_chan->db_cfg.process_db(mhi_cntrl, &mhi_chan->db_cfg,
 146                                    ring->db_addr, db);
 147}
 148
 149enum mhi_ee_type mhi_get_exec_env(struct mhi_controller *mhi_cntrl)
 150{
 151        u32 exec;
 152        int ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_EXECENV, &exec);
 153
 154        return (ret) ? MHI_EE_MAX : exec;
 155}
 156EXPORT_SYMBOL_GPL(mhi_get_exec_env);
 157
 158enum mhi_state mhi_get_mhi_state(struct mhi_controller *mhi_cntrl)
 159{
 160        u32 state;
 161        int ret = mhi_read_reg_field(mhi_cntrl, mhi_cntrl->regs, MHISTATUS,
 162                                     MHISTATUS_MHISTATE_MASK,
 163                                     MHISTATUS_MHISTATE_SHIFT, &state);
 164        return ret ? MHI_STATE_MAX : state;
 165}
 166EXPORT_SYMBOL_GPL(mhi_get_mhi_state);
 167
 168void mhi_soc_reset(struct mhi_controller *mhi_cntrl)
 169{
 170        if (mhi_cntrl->reset) {
 171                mhi_cntrl->reset(mhi_cntrl);
 172                return;
 173        }
 174
 175        /* Generic MHI SoC reset */
 176        mhi_write_reg(mhi_cntrl, mhi_cntrl->regs, MHI_SOC_RESET_REQ_OFFSET,
 177                      MHI_SOC_RESET_REQ);
 178}
 179EXPORT_SYMBOL_GPL(mhi_soc_reset);
 180
 181int mhi_map_single_no_bb(struct mhi_controller *mhi_cntrl,
 182                         struct mhi_buf_info *buf_info)
 183{
 184        buf_info->p_addr = dma_map_single(mhi_cntrl->cntrl_dev,
 185                                          buf_info->v_addr, buf_info->len,
 186                                          buf_info->dir);
 187        if (dma_mapping_error(mhi_cntrl->cntrl_dev, buf_info->p_addr))
 188                return -ENOMEM;
 189
 190        return 0;
 191}
 192
 193int mhi_map_single_use_bb(struct mhi_controller *mhi_cntrl,
 194                          struct mhi_buf_info *buf_info)
 195{
 196        void *buf = dma_alloc_coherent(mhi_cntrl->cntrl_dev, buf_info->len,
 197                                       &buf_info->p_addr, GFP_ATOMIC);
 198
 199        if (!buf)
 200                return -ENOMEM;
 201
 202        if (buf_info->dir == DMA_TO_DEVICE)
 203                memcpy(buf, buf_info->v_addr, buf_info->len);
 204
 205        buf_info->bb_addr = buf;
 206
 207        return 0;
 208}
 209
 210void mhi_unmap_single_no_bb(struct mhi_controller *mhi_cntrl,
 211                            struct mhi_buf_info *buf_info)
 212{
 213        dma_unmap_single(mhi_cntrl->cntrl_dev, buf_info->p_addr, buf_info->len,
 214                         buf_info->dir);
 215}
 216
 217void mhi_unmap_single_use_bb(struct mhi_controller *mhi_cntrl,
 218                             struct mhi_buf_info *buf_info)
 219{
 220        if (buf_info->dir == DMA_FROM_DEVICE)
 221                memcpy(buf_info->v_addr, buf_info->bb_addr, buf_info->len);
 222
 223        dma_free_coherent(mhi_cntrl->cntrl_dev, buf_info->len,
 224                          buf_info->bb_addr, buf_info->p_addr);
 225}
 226
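/*
 * Number of elements that can still be queued on the ring: the space
 * between the write and read pointers, keeping one slot unused so a
 * full ring can be distinguished from an empty one.
 */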
 227static int get_nr_avail_ring_elements(struct mhi_controller *mhi_cntrl,
 228                                      struct mhi_ring *ring)
 229{
 230        int nr_el;
 231
 232        if (ring->wp < ring->rp) {
 233                nr_el = ((ring->rp - ring->wp) / ring->el_size) - 1;
 234        } else {
 235                nr_el = (ring->rp - ring->base) / ring->el_size;
 236                nr_el += ((ring->base + ring->len - ring->wp) /
 237                          ring->el_size) - 1;
 238        }
 239
 240        return nr_el;
 241}
 242
 243static void *mhi_to_virtual(struct mhi_ring *ring, dma_addr_t addr)
 244{
 245        return (addr - ring->iommu_base) + ring->base;
 246}
 247
 248static void mhi_add_ring_element(struct mhi_controller *mhi_cntrl,
 249                                 struct mhi_ring *ring)
 250{
 251        ring->wp += ring->el_size;
 252        if (ring->wp >= (ring->base + ring->len))
 253                ring->wp = ring->base;
  254        /* Ensure other CPUs see the updated write pointer */
 255        smp_wmb();
 256}
 257
 258static void mhi_del_ring_element(struct mhi_controller *mhi_cntrl,
 259                                 struct mhi_ring *ring)
 260{
 261        ring->rp += ring->el_size;
 262        if (ring->rp >= (ring->base + ring->len))
 263                ring->rp = ring->base;
  264        /* Ensure other CPUs see the updated read pointer */
 265        smp_wmb();
 266}
 267
 268static bool is_valid_ring_ptr(struct mhi_ring *ring, dma_addr_t addr)
 269{
 270        return addr >= ring->iommu_base && addr < ring->iommu_base + ring->len;
 271}
 272
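/*
 * Bus iterator callback used to tear down client devices, e.g. when the
 * execution environment changes or the controller is unregistered. Drops
 * the channel device references before removing the device from the bus.
 */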
 273int mhi_destroy_device(struct device *dev, void *data)
 274{
 275        struct mhi_chan *ul_chan, *dl_chan;
 276        struct mhi_device *mhi_dev;
 277        struct mhi_controller *mhi_cntrl;
 278        enum mhi_ee_type ee = MHI_EE_MAX;
 279
 280        if (dev->bus != &mhi_bus_type)
 281                return 0;
 282
 283        mhi_dev = to_mhi_device(dev);
 284        mhi_cntrl = mhi_dev->mhi_cntrl;
 285
  286        /* Only destroy virtual devices that are attached to the bus */
 287        if (mhi_dev->dev_type == MHI_DEVICE_CONTROLLER)
 288                return 0;
 289
 290        ul_chan = mhi_dev->ul_chan;
 291        dl_chan = mhi_dev->dl_chan;
 292
  293        /*
  294         * If an execution environment is specified, remove only the devices
  295         * whose channels (per their ee_mask) started in it, as we are moving
  296         * on to a different execution environment.
  297         */
 298        if (data)
 299                ee = *(enum mhi_ee_type *)data;
 300
 301        /*
 302         * For the suspend and resume case, this function will get called
 303         * without mhi_unregister_controller(). Hence, we need to drop the
 304         * references to mhi_dev created for ul and dl channels. We can
 305         * be sure that there will be no instances of mhi_dev left after
 306         * this.
 307         */
 308        if (ul_chan) {
 309                if (ee != MHI_EE_MAX && !(ul_chan->ee_mask & BIT(ee)))
 310                        return 0;
 311
 312                put_device(&ul_chan->mhi_dev->dev);
 313        }
 314
 315        if (dl_chan) {
 316                if (ee != MHI_EE_MAX && !(dl_chan->ee_mask & BIT(ee)))
 317                        return 0;
 318
 319                put_device(&dl_chan->mhi_dev->dev);
 320        }
 321
 322        dev_dbg(&mhi_cntrl->mhi_dev->dev, "destroy device for chan:%s\n",
 323                 mhi_dev->name);
 324
 325        /* Notify the client and remove the device from MHI bus */
 326        device_del(dev);
 327        put_device(dev);
 328
 329        return 0;
 330}
 331
 332int mhi_get_free_desc_count(struct mhi_device *mhi_dev,
 333                                enum dma_data_direction dir)
 334{
 335        struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
 336        struct mhi_chan *mhi_chan = (dir == DMA_TO_DEVICE) ?
 337                mhi_dev->ul_chan : mhi_dev->dl_chan;
 338        struct mhi_ring *tre_ring = &mhi_chan->tre_ring;
 339
 340        return get_nr_avail_ring_elements(mhi_cntrl, tre_ring);
 341}
 342EXPORT_SYMBOL_GPL(mhi_get_free_desc_count);
 343
 344void mhi_notify(struct mhi_device *mhi_dev, enum mhi_callback cb_reason)
 345{
 346        struct mhi_driver *mhi_drv;
 347
 348        if (!mhi_dev->dev.driver)
 349                return;
 350
 351        mhi_drv = to_mhi_driver(mhi_dev->dev.driver);
 352
 353        if (mhi_drv->status_cb)
 354                mhi_drv->status_cb(mhi_dev, cb_reason);
 355}
 356EXPORT_SYMBOL_GPL(mhi_notify);
 357
 358/* Bind MHI channels to MHI devices */
 359void mhi_create_devices(struct mhi_controller *mhi_cntrl)
 360{
 361        struct mhi_chan *mhi_chan;
 362        struct mhi_device *mhi_dev;
 363        struct device *dev = &mhi_cntrl->mhi_dev->dev;
 364        int i, ret;
 365
 366        mhi_chan = mhi_cntrl->mhi_chan;
 367        for (i = 0; i < mhi_cntrl->max_chan; i++, mhi_chan++) {
 368                if (!mhi_chan->configured || mhi_chan->mhi_dev ||
 369                    !(mhi_chan->ee_mask & BIT(mhi_cntrl->ee)))
 370                        continue;
 371                mhi_dev = mhi_alloc_device(mhi_cntrl);
 372                if (IS_ERR(mhi_dev))
 373                        return;
 374
 375                mhi_dev->dev_type = MHI_DEVICE_XFER;
 376                switch (mhi_chan->dir) {
 377                case DMA_TO_DEVICE:
 378                        mhi_dev->ul_chan = mhi_chan;
 379                        mhi_dev->ul_chan_id = mhi_chan->chan;
 380                        break;
 381                case DMA_FROM_DEVICE:
 382                        /* We use dl_chan as offload channels */
 383                        mhi_dev->dl_chan = mhi_chan;
 384                        mhi_dev->dl_chan_id = mhi_chan->chan;
 385                        break;
 386                default:
 387                        dev_err(dev, "Direction not supported\n");
 388                        put_device(&mhi_dev->dev);
 389                        return;
 390                }
 391
 392                get_device(&mhi_dev->dev);
 393                mhi_chan->mhi_dev = mhi_dev;
 394
  395                /* Check if the next channel is the matching half of an UL/DL pair */
 396                if ((i + 1) < mhi_cntrl->max_chan && mhi_chan[1].configured) {
 397                        if (!strcmp(mhi_chan[1].name, mhi_chan->name)) {
 398                                i++;
 399                                mhi_chan++;
 400                                if (mhi_chan->dir == DMA_TO_DEVICE) {
 401                                        mhi_dev->ul_chan = mhi_chan;
 402                                        mhi_dev->ul_chan_id = mhi_chan->chan;
 403                                } else {
 404                                        mhi_dev->dl_chan = mhi_chan;
 405                                        mhi_dev->dl_chan_id = mhi_chan->chan;
 406                                }
 407                                get_device(&mhi_dev->dev);
 408                                mhi_chan->mhi_dev = mhi_dev;
 409                        }
 410                }
 411
  412                /* Channel name is the same for both UL and DL */
 413                mhi_dev->name = mhi_chan->name;
 414                dev_set_name(&mhi_dev->dev, "%s_%s",
 415                             dev_name(&mhi_cntrl->mhi_dev->dev),
 416                             mhi_dev->name);
 417
 418                /* Init wakeup source if available */
 419                if (mhi_dev->dl_chan && mhi_dev->dl_chan->wake_capable)
 420                        device_init_wakeup(&mhi_dev->dev, true);
 421
 422                ret = device_add(&mhi_dev->dev);
 423                if (ret)
 424                        put_device(&mhi_dev->dev);
 425        }
 426}
 427
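/*
 * Event ring interrupt handler: validate the device's read pointer, and
 * if there are pending events either notify the client (for client-managed
 * rings) or schedule the event processing tasklet.
 */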
 428irqreturn_t mhi_irq_handler(int irq_number, void *dev)
 429{
 430        struct mhi_event *mhi_event = dev;
 431        struct mhi_controller *mhi_cntrl = mhi_event->mhi_cntrl;
 432        struct mhi_event_ctxt *er_ctxt =
 433                &mhi_cntrl->mhi_ctxt->er_ctxt[mhi_event->er_index];
 434        struct mhi_ring *ev_ring = &mhi_event->ring;
 435        dma_addr_t ptr = er_ctxt->rp;
 436        void *dev_rp;
 437
 438        if (!is_valid_ring_ptr(ev_ring, ptr)) {
 439                dev_err(&mhi_cntrl->mhi_dev->dev,
 440                        "Event ring rp points outside of the event ring\n");
 441                return IRQ_HANDLED;
 442        }
 443
 444        dev_rp = mhi_to_virtual(ev_ring, ptr);
 445
 446        /* Only proceed if event ring has pending events */
 447        if (ev_ring->rp == dev_rp)
 448                return IRQ_HANDLED;
 449
  450        /* For a client-managed event ring, notify the client of pending data */
 451        if (mhi_event->cl_manage) {
 452                struct mhi_chan *mhi_chan = mhi_event->mhi_chan;
 453                struct mhi_device *mhi_dev = mhi_chan->mhi_dev;
 454
 455                if (mhi_dev)
 456                        mhi_notify(mhi_dev, MHI_CB_PENDING_DATA);
 457        } else {
 458                tasklet_schedule(&mhi_event->task);
 459        }
 460
 461        return IRQ_HANDLED;
 462}
 463
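/*
 * Threaded handler for the controller interrupt vector: sample the MHI
 * state and execution environment, flag SYS_ERR detection under the PM
 * lock, and start RDDM or SYS_ERR handling if the EE has changed.
 */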
 464irqreturn_t mhi_intvec_threaded_handler(int irq_number, void *priv)
 465{
 466        struct mhi_controller *mhi_cntrl = priv;
 467        struct device *dev = &mhi_cntrl->mhi_dev->dev;
 468        enum mhi_state state;
 469        enum mhi_pm_state pm_state = 0;
 470        enum mhi_ee_type ee;
 471
 472        write_lock_irq(&mhi_cntrl->pm_lock);
 473        if (!MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) {
 474                write_unlock_irq(&mhi_cntrl->pm_lock);
 475                goto exit_intvec;
 476        }
 477
 478        state = mhi_get_mhi_state(mhi_cntrl);
 479        ee = mhi_get_exec_env(mhi_cntrl);
 480        dev_dbg(dev, "local ee: %s state: %s device ee: %s state: %s\n",
 481                TO_MHI_EXEC_STR(mhi_cntrl->ee),
 482                TO_MHI_STATE_STR(mhi_cntrl->dev_state),
 483                TO_MHI_EXEC_STR(ee), TO_MHI_STATE_STR(state));
 484
 485        if (state == MHI_STATE_SYS_ERR) {
 486                dev_dbg(dev, "System error detected\n");
 487                pm_state = mhi_tryset_pm_state(mhi_cntrl,
 488                                               MHI_PM_SYS_ERR_DETECT);
 489        }
 490        write_unlock_irq(&mhi_cntrl->pm_lock);
 491
 492        if (pm_state != MHI_PM_SYS_ERR_DETECT || ee == mhi_cntrl->ee)
 493                goto exit_intvec;
 494
 495        switch (ee) {
 496        case MHI_EE_RDDM:
 497                /* proceed if power down is not already in progress */
 498                if (mhi_cntrl->rddm_image && mhi_is_active(mhi_cntrl)) {
 499                        mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_EE_RDDM);
 500                        mhi_cntrl->ee = ee;
 501                        wake_up_all(&mhi_cntrl->state_event);
 502                }
 503                break;
 504        case MHI_EE_PBL:
 505        case MHI_EE_EDL:
 506        case MHI_EE_PTHRU:
 507                mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_FATAL_ERROR);
 508                mhi_cntrl->ee = ee;
 509                wake_up_all(&mhi_cntrl->state_event);
 510                mhi_pm_sys_err_handler(mhi_cntrl);
 511                break;
 512        default:
 513                wake_up_all(&mhi_cntrl->state_event);
 514                mhi_pm_sys_err_handler(mhi_cntrl);
 515                break;
 516        }
 517
 518exit_intvec:
 519
 520        return IRQ_HANDLED;
 521}
 522
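/*
 * Hard IRQ handler for the controller interrupt vector: wake any state
 * change waiters and defer the rest to the threaded handler.
 */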
 523irqreturn_t mhi_intvec_handler(int irq_number, void *dev)
 524{
 525        struct mhi_controller *mhi_cntrl = dev;
 526
  527        /* Wake up any waiters blocked on a state change */
 528        wake_up_all(&mhi_cntrl->state_event);
 529
 530        return IRQ_WAKE_THREAD;
 531}
 532
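/*
 * Recycle a processed event ring element: advance the local and context
 * write pointers so the device can reuse the slot, then advance the read
 * pointer, wrapping around at the end of the ring.
 */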
 533static void mhi_recycle_ev_ring_element(struct mhi_controller *mhi_cntrl,
 534                                        struct mhi_ring *ring)
 535{
 536        dma_addr_t ctxt_wp;
 537
 538        /* Update the WP */
 539        ring->wp += ring->el_size;
 540        ctxt_wp = *ring->ctxt_wp + ring->el_size;
 541
 542        if (ring->wp >= (ring->base + ring->len)) {
 543                ring->wp = ring->base;
 544                ctxt_wp = ring->iommu_base;
 545        }
 546
 547        *ring->ctxt_wp = ctxt_wp;
 548
 549        /* Update the RP */
 550        ring->rp += ring->el_size;
 551        if (ring->rp >= (ring->base + ring->len))
 552                ring->rp = ring->base;
 553
 554        /* Update to all cores */
 555        smp_wmb();
 556}
 557
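/*
 * Process a transfer completion event: walk the TRE ring up to the
 * element the event points at, unmap and return each completed buffer to
 * the client, and re-arm the doorbell for OOB/DB_MODE events.
 */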
 558static int parse_xfer_event(struct mhi_controller *mhi_cntrl,
 559                            struct mhi_tre *event,
 560                            struct mhi_chan *mhi_chan)
 561{
 562        struct mhi_ring *buf_ring, *tre_ring;
 563        struct device *dev = &mhi_cntrl->mhi_dev->dev;
 564        struct mhi_result result;
 565        unsigned long flags = 0;
 566        u32 ev_code;
 567
 568        ev_code = MHI_TRE_GET_EV_CODE(event);
 569        buf_ring = &mhi_chan->buf_ring;
 570        tre_ring = &mhi_chan->tre_ring;
 571
 572        result.transaction_status = (ev_code == MHI_EV_CC_OVERFLOW) ?
 573                -EOVERFLOW : 0;
 574
  575        /*
  576         * If this is a DB event, we need to take the channel lock as a
  577         * writer with interrupts disabled, because we have to update the
  578         * doorbell register and another thread could be trying to do the
  579         * same.
  580         */
 581        if (ev_code >= MHI_EV_CC_OOB)
 582                write_lock_irqsave(&mhi_chan->lock, flags);
 583        else
 584                read_lock_bh(&mhi_chan->lock);
 585
 586        if (mhi_chan->ch_state != MHI_CH_STATE_ENABLED)
 587                goto end_process_tx_event;
 588
 589        switch (ev_code) {
 590        case MHI_EV_CC_OVERFLOW:
 591        case MHI_EV_CC_EOB:
 592        case MHI_EV_CC_EOT:
 593        {
 594                dma_addr_t ptr = MHI_TRE_GET_EV_PTR(event);
 595                struct mhi_tre *local_rp, *ev_tre;
 596                void *dev_rp;
 597                struct mhi_buf_info *buf_info;
 598                u16 xfer_len;
 599
 600                if (!is_valid_ring_ptr(tre_ring, ptr)) {
 601                        dev_err(&mhi_cntrl->mhi_dev->dev,
 602                                "Event element points outside of the tre ring\n");
 603                        break;
 604                }
 605                /* Get the TRB this event points to */
 606                ev_tre = mhi_to_virtual(tre_ring, ptr);
 607
 608                dev_rp = ev_tre + 1;
 609                if (dev_rp >= (tre_ring->base + tre_ring->len))
 610                        dev_rp = tre_ring->base;
 611
 612                result.dir = mhi_chan->dir;
 613
 614                local_rp = tre_ring->rp;
 615                while (local_rp != dev_rp) {
 616                        buf_info = buf_ring->rp;
 617                        /* If it's the last TRE, get length from the event */
 618                        if (local_rp == ev_tre)
 619                                xfer_len = MHI_TRE_GET_EV_LEN(event);
 620                        else
 621                                xfer_len = buf_info->len;
 622
 623                        /* Unmap if it's not pre-mapped by client */
 624                        if (likely(!buf_info->pre_mapped))
 625                                mhi_cntrl->unmap_single(mhi_cntrl, buf_info);
 626
 627                        result.buf_addr = buf_info->cb_buf;
 628
 629                        /* truncate to buf len if xfer_len is larger */
 630                        result.bytes_xferd =
 631                                min_t(u16, xfer_len, buf_info->len);
 632                        mhi_del_ring_element(mhi_cntrl, buf_ring);
 633                        mhi_del_ring_element(mhi_cntrl, tre_ring);
 634                        local_rp = tre_ring->rp;
 635
 636                        /* notify client */
 637                        mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result);
 638
 639                        if (mhi_chan->dir == DMA_TO_DEVICE) {
 640                                atomic_dec(&mhi_cntrl->pending_pkts);
  641                                /* Release the reference taken in mhi_queue() */
 642                                mhi_cntrl->runtime_put(mhi_cntrl);
 643                        }
 644
  645                        /*
  646                         * Recycle the buffer if it is pre-allocated. If
  647                         * requeueing fails, there is not much we can do
  648                         * apart from dropping the packet.
  649                         */
 650                        if (mhi_chan->pre_alloc) {
 651                                if (mhi_queue_buf(mhi_chan->mhi_dev,
 652                                                  mhi_chan->dir,
 653                                                  buf_info->cb_buf,
 654                                                  buf_info->len, MHI_EOT)) {
 655                                        dev_err(dev,
 656                                                "Error recycling buffer for chan:%d\n",
 657                                                mhi_chan->chan);
 658                                        kfree(buf_info->cb_buf);
 659                                }
 660                        }
 661                }
 662                break;
 663        } /* CC_EOT */
 664        case MHI_EV_CC_OOB:
 665        case MHI_EV_CC_DB_MODE:
 666        {
 667                unsigned long pm_lock_flags;
 668
 669                mhi_chan->db_cfg.db_mode = 1;
 670                read_lock_irqsave(&mhi_cntrl->pm_lock, pm_lock_flags);
 671                if (tre_ring->wp != tre_ring->rp &&
 672                    MHI_DB_ACCESS_VALID(mhi_cntrl)) {
 673                        mhi_ring_chan_db(mhi_cntrl, mhi_chan);
 674                }
 675                read_unlock_irqrestore(&mhi_cntrl->pm_lock, pm_lock_flags);
 676                break;
 677        }
 678        case MHI_EV_CC_BAD_TRE:
 679        default:
 680                dev_err(dev, "Unknown event 0x%x\n", ev_code);
 681                break;
 682        } /* switch(MHI_EV_READ_CODE(EV_TRB_CODE,event)) */
 683
 684end_process_tx_event:
 685        if (ev_code >= MHI_EV_CC_OOB)
 686                write_unlock_irqrestore(&mhi_chan->lock, flags);
 687        else
 688                read_unlock_bh(&mhi_chan->lock);
 689
 690        return 0;
 691}
 692
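/*
 * Process an RSC (receive-side coalescing) completion event: the event
 * carries a cookie which is the offset of the completed descriptor in
 * the buffer ring.
 */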
 693static int parse_rsc_event(struct mhi_controller *mhi_cntrl,
 694                           struct mhi_tre *event,
 695                           struct mhi_chan *mhi_chan)
 696{
 697        struct mhi_ring *buf_ring, *tre_ring;
 698        struct mhi_buf_info *buf_info;
 699        struct mhi_result result;
 700        int ev_code;
 701        u32 cookie; /* offset to local descriptor */
 702        u16 xfer_len;
 703
 704        buf_ring = &mhi_chan->buf_ring;
 705        tre_ring = &mhi_chan->tre_ring;
 706
 707        ev_code = MHI_TRE_GET_EV_CODE(event);
 708        cookie = MHI_TRE_GET_EV_COOKIE(event);
 709        xfer_len = MHI_TRE_GET_EV_LEN(event);
 710
  711        /* Received an out-of-bounds cookie */
 712        WARN_ON(cookie >= buf_ring->len);
 713
 714        buf_info = buf_ring->base + cookie;
 715
 716        result.transaction_status = (ev_code == MHI_EV_CC_OVERFLOW) ?
 717                -EOVERFLOW : 0;
 718
 719        /* truncate to buf len if xfer_len is larger */
 720        result.bytes_xferd = min_t(u16, xfer_len, buf_info->len);
 721        result.buf_addr = buf_info->cb_buf;
 722        result.dir = mhi_chan->dir;
 723
 724        read_lock_bh(&mhi_chan->lock);
 725
 726        if (mhi_chan->ch_state != MHI_CH_STATE_ENABLED)
 727                goto end_process_rsc_event;
 728
 729        WARN_ON(!buf_info->used);
 730
 731        /* notify the client */
 732        mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result);
 733
  734        /*
  735         * Note: We increment RP even though the completion packet we just
  736         * processed may not correspond to the descriptor at RP. We can do
  737         * this because the device is guaranteed to cache descriptors in the
  738         * order it receives them, so even if the completion event is for a
  739         * later descriptor, all descriptors before it can be reused.
  740         * Example:
  741         * The transfer ring holds descriptors A, B, C, D.
  742         * The last descriptor the host queued is D (WP) and the first
  743         * one is A (RP).
  744         * The completion event we just serviced is for descriptor C.
  745         * Then we can safely queue descriptors to replace A, B, and C
  746         * even though the host did not receive completions for A and B.
  747         */
 748        mhi_del_ring_element(mhi_cntrl, tre_ring);
 749        buf_info->used = false;
 750
 751end_process_rsc_event:
 752        read_unlock_bh(&mhi_chan->lock);
 753
 754        return 0;
 755}
 756
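/*
 * Handle a command completion event: locate the command packet it refers
 * to, record the completion code for the target channel and wake up the
 * thread waiting in mhi_update_channel_state().
 */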
 757static void mhi_process_cmd_completion(struct mhi_controller *mhi_cntrl,
 758                                       struct mhi_tre *tre)
 759{
 760        dma_addr_t ptr = MHI_TRE_GET_EV_PTR(tre);
 761        struct mhi_cmd *cmd_ring = &mhi_cntrl->mhi_cmd[PRIMARY_CMD_RING];
 762        struct mhi_ring *mhi_ring = &cmd_ring->ring;
 763        struct mhi_tre *cmd_pkt;
 764        struct mhi_chan *mhi_chan;
 765        u32 chan;
 766
 767        if (!is_valid_ring_ptr(mhi_ring, ptr)) {
 768                dev_err(&mhi_cntrl->mhi_dev->dev,
 769                        "Event element points outside of the cmd ring\n");
 770                return;
 771        }
 772
 773        cmd_pkt = mhi_to_virtual(mhi_ring, ptr);
 774
 775        chan = MHI_TRE_GET_CMD_CHID(cmd_pkt);
 776
 777        if (chan < mhi_cntrl->max_chan &&
 778            mhi_cntrl->mhi_chan[chan].configured) {
 779                mhi_chan = &mhi_cntrl->mhi_chan[chan];
 780                write_lock_bh(&mhi_chan->lock);
 781                mhi_chan->ccs = MHI_TRE_GET_EV_CODE(tre);
 782                complete(&mhi_chan->completion);
 783                write_unlock_bh(&mhi_chan->lock);
 784        } else {
 785                dev_err(&mhi_cntrl->mhi_dev->dev,
 786                        "Completion packet for invalid channel ID: %d\n", chan);
 787        }
 788
 789        mhi_del_ring_element(mhi_cntrl, mhi_ring);
 790}
 791
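/*
 * Process the control event ring: bandwidth requests, MHI state change
 * events, command completions, EE change events and any transfer events
 * routed here, then ring the event ring doorbell.
 */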
 792int mhi_process_ctrl_ev_ring(struct mhi_controller *mhi_cntrl,
 793                             struct mhi_event *mhi_event,
 794                             u32 event_quota)
 795{
 796        struct mhi_tre *dev_rp, *local_rp;
 797        struct mhi_ring *ev_ring = &mhi_event->ring;
 798        struct mhi_event_ctxt *er_ctxt =
 799                &mhi_cntrl->mhi_ctxt->er_ctxt[mhi_event->er_index];
 800        struct mhi_chan *mhi_chan;
 801        struct device *dev = &mhi_cntrl->mhi_dev->dev;
 802        u32 chan;
 803        int count = 0;
 804        dma_addr_t ptr = er_ctxt->rp;
 805
  806        /*
  807         * This is a quick check to avoid unnecessary event processing in
  808         * case MHI is already in an error state, but it is still possible
  809         * to transition to the error state while processing events.
  810         */
 811        if (unlikely(MHI_EVENT_ACCESS_INVALID(mhi_cntrl->pm_state)))
 812                return -EIO;
 813
 814        if (!is_valid_ring_ptr(ev_ring, ptr)) {
 815                dev_err(&mhi_cntrl->mhi_dev->dev,
 816                        "Event ring rp points outside of the event ring\n");
 817                return -EIO;
 818        }
 819
 820        dev_rp = mhi_to_virtual(ev_ring, ptr);
 821        local_rp = ev_ring->rp;
 822
 823        while (dev_rp != local_rp) {
 824                enum mhi_pkt_type type = MHI_TRE_GET_EV_TYPE(local_rp);
 825
 826                switch (type) {
 827                case MHI_PKT_TYPE_BW_REQ_EVENT:
 828                {
 829                        struct mhi_link_info *link_info;
 830
 831                        link_info = &mhi_cntrl->mhi_link_info;
 832                        write_lock_irq(&mhi_cntrl->pm_lock);
 833                        link_info->target_link_speed =
 834                                MHI_TRE_GET_EV_LINKSPEED(local_rp);
 835                        link_info->target_link_width =
 836                                MHI_TRE_GET_EV_LINKWIDTH(local_rp);
 837                        write_unlock_irq(&mhi_cntrl->pm_lock);
 838                        dev_dbg(dev, "Received BW_REQ event\n");
 839                        mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_BW_REQ);
 840                        break;
 841                }
 842                case MHI_PKT_TYPE_STATE_CHANGE_EVENT:
 843                {
 844                        enum mhi_state new_state;
 845
 846                        new_state = MHI_TRE_GET_EV_STATE(local_rp);
 847
 848                        dev_dbg(dev, "State change event to state: %s\n",
 849                                TO_MHI_STATE_STR(new_state));
 850
 851                        switch (new_state) {
 852                        case MHI_STATE_M0:
 853                                mhi_pm_m0_transition(mhi_cntrl);
 854                                break;
 855                        case MHI_STATE_M1:
 856                                mhi_pm_m1_transition(mhi_cntrl);
 857                                break;
 858                        case MHI_STATE_M3:
 859                                mhi_pm_m3_transition(mhi_cntrl);
 860                                break;
 861                        case MHI_STATE_SYS_ERR:
 862                        {
 863                                enum mhi_pm_state pm_state;
 864
 865                                dev_dbg(dev, "System error detected\n");
 866                                write_lock_irq(&mhi_cntrl->pm_lock);
 867                                pm_state = mhi_tryset_pm_state(mhi_cntrl,
 868                                                        MHI_PM_SYS_ERR_DETECT);
 869                                write_unlock_irq(&mhi_cntrl->pm_lock);
 870                                if (pm_state == MHI_PM_SYS_ERR_DETECT)
 871                                        mhi_pm_sys_err_handler(mhi_cntrl);
 872                                break;
 873                        }
 874                        default:
 875                                dev_err(dev, "Invalid state: %s\n",
 876                                        TO_MHI_STATE_STR(new_state));
 877                        }
 878
 879                        break;
 880                }
 881                case MHI_PKT_TYPE_CMD_COMPLETION_EVENT:
 882                        mhi_process_cmd_completion(mhi_cntrl, local_rp);
 883                        break;
 884                case MHI_PKT_TYPE_EE_EVENT:
 885                {
 886                        enum dev_st_transition st = DEV_ST_TRANSITION_MAX;
 887                        enum mhi_ee_type event = MHI_TRE_GET_EV_EXECENV(local_rp);
 888
 889                        dev_dbg(dev, "Received EE event: %s\n",
 890                                TO_MHI_EXEC_STR(event));
 891                        switch (event) {
 892                        case MHI_EE_SBL:
 893                                st = DEV_ST_TRANSITION_SBL;
 894                                break;
 895                        case MHI_EE_WFW:
 896                        case MHI_EE_AMSS:
 897                                st = DEV_ST_TRANSITION_MISSION_MODE;
 898                                break;
 899                        case MHI_EE_FP:
 900                                st = DEV_ST_TRANSITION_FP;
 901                                break;
 902                        case MHI_EE_RDDM:
 903                                mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_EE_RDDM);
 904                                write_lock_irq(&mhi_cntrl->pm_lock);
 905                                mhi_cntrl->ee = event;
 906                                write_unlock_irq(&mhi_cntrl->pm_lock);
 907                                wake_up_all(&mhi_cntrl->state_event);
 908                                break;
 909                        default:
 910                                dev_err(dev,
 911                                        "Unhandled EE event: 0x%x\n", type);
 912                        }
 913                        if (st != DEV_ST_TRANSITION_MAX)
 914                                mhi_queue_state_transition(mhi_cntrl, st);
 915
 916                        break;
 917                }
 918                case MHI_PKT_TYPE_TX_EVENT:
 919                        chan = MHI_TRE_GET_EV_CHID(local_rp);
 920
 921                        WARN_ON(chan >= mhi_cntrl->max_chan);
 922
 923                        /*
 924                         * Only process the event ring elements whose channel
 925                         * ID is within the maximum supported range.
 926                         */
 927                        if (chan < mhi_cntrl->max_chan) {
 928                                mhi_chan = &mhi_cntrl->mhi_chan[chan];
 929                                if (!mhi_chan->configured)
 930                                        break;
 931                                parse_xfer_event(mhi_cntrl, local_rp, mhi_chan);
 932                                event_quota--;
 933                        }
 934                        break;
 935                default:
 936                        dev_err(dev, "Unhandled event type: %d\n", type);
 937                        break;
 938                }
 939
 940                mhi_recycle_ev_ring_element(mhi_cntrl, ev_ring);
 941                local_rp = ev_ring->rp;
 942
 943                ptr = er_ctxt->rp;
 944                if (!is_valid_ring_ptr(ev_ring, ptr)) {
 945                        dev_err(&mhi_cntrl->mhi_dev->dev,
 946                                "Event ring rp points outside of the event ring\n");
 947                        return -EIO;
 948                }
 949
 950                dev_rp = mhi_to_virtual(ev_ring, ptr);
 951                count++;
 952        }
 953
 954        read_lock_bh(&mhi_cntrl->pm_lock);
 955        if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl)))
 956                mhi_ring_er_db(mhi_event);
 957        read_unlock_bh(&mhi_cntrl->pm_lock);
 958
 959        return count;
 960}
 961
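/*
 * Process a data event ring: only transfer (TX) and RSC completion
 * events are expected here, bounded by event_quota.
 */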
 962int mhi_process_data_event_ring(struct mhi_controller *mhi_cntrl,
 963                                struct mhi_event *mhi_event,
 964                                u32 event_quota)
 965{
 966        struct mhi_tre *dev_rp, *local_rp;
 967        struct mhi_ring *ev_ring = &mhi_event->ring;
 968        struct mhi_event_ctxt *er_ctxt =
 969                &mhi_cntrl->mhi_ctxt->er_ctxt[mhi_event->er_index];
 970        int count = 0;
 971        u32 chan;
 972        struct mhi_chan *mhi_chan;
 973        dma_addr_t ptr = er_ctxt->rp;
 974
 975        if (unlikely(MHI_EVENT_ACCESS_INVALID(mhi_cntrl->pm_state)))
 976                return -EIO;
 977
 978        if (!is_valid_ring_ptr(ev_ring, ptr)) {
 979                dev_err(&mhi_cntrl->mhi_dev->dev,
 980                        "Event ring rp points outside of the event ring\n");
 981                return -EIO;
 982        }
 983
 984        dev_rp = mhi_to_virtual(ev_ring, ptr);
 985        local_rp = ev_ring->rp;
 986
 987        while (dev_rp != local_rp && event_quota > 0) {
 988                enum mhi_pkt_type type = MHI_TRE_GET_EV_TYPE(local_rp);
 989
 990                chan = MHI_TRE_GET_EV_CHID(local_rp);
 991
 992                WARN_ON(chan >= mhi_cntrl->max_chan);
 993
 994                /*
 995                 * Only process the event ring elements whose channel
 996                 * ID is within the maximum supported range.
 997                 */
 998                if (chan < mhi_cntrl->max_chan &&
 999                    mhi_cntrl->mhi_chan[chan].configured) {
1000                        mhi_chan = &mhi_cntrl->mhi_chan[chan];
1001
1002                        if (likely(type == MHI_PKT_TYPE_TX_EVENT)) {
1003                                parse_xfer_event(mhi_cntrl, local_rp, mhi_chan);
1004                                event_quota--;
1005                        } else if (type == MHI_PKT_TYPE_RSC_TX_EVENT) {
1006                                parse_rsc_event(mhi_cntrl, local_rp, mhi_chan);
1007                                event_quota--;
1008                        }
1009                }
1010
1011                mhi_recycle_ev_ring_element(mhi_cntrl, ev_ring);
1012                local_rp = ev_ring->rp;
1013
1014                ptr = er_ctxt->rp;
1015                if (!is_valid_ring_ptr(ev_ring, ptr)) {
1016                        dev_err(&mhi_cntrl->mhi_dev->dev,
1017                                "Event ring rp points outside of the event ring\n");
1018                        return -EIO;
1019                }
1020
1021                dev_rp = mhi_to_virtual(ev_ring, ptr);
1022                count++;
1023        }
1024        read_lock_bh(&mhi_cntrl->pm_lock);
1025        if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl)))
1026                mhi_ring_er_db(mhi_event);
1027        read_unlock_bh(&mhi_cntrl->pm_lock);
1028
1029        return count;
1030}
1031
1032void mhi_ev_task(unsigned long data)
1033{
1034        struct mhi_event *mhi_event = (struct mhi_event *)data;
1035        struct mhi_controller *mhi_cntrl = mhi_event->mhi_cntrl;
1036
1037        /* process all pending events */
1038        spin_lock_bh(&mhi_event->lock);
1039        mhi_event->process_event(mhi_cntrl, mhi_event, U32_MAX);
1040        spin_unlock_bh(&mhi_event->lock);
1041}
1042
1043void mhi_ctrl_ev_task(unsigned long data)
1044{
1045        struct mhi_event *mhi_event = (struct mhi_event *)data;
1046        struct mhi_controller *mhi_cntrl = mhi_event->mhi_cntrl;
1047        struct device *dev = &mhi_cntrl->mhi_dev->dev;
1048        enum mhi_state state;
1049        enum mhi_pm_state pm_state = 0;
1050        int ret;
1051
 1052        /*
 1053         * We can check the PM state without a lock here because there is
 1054         * no way the PM state can change from reg access valid to no access
 1055         * while this thread is executing.
 1056         */
1057        if (!MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) {
 1058                /*
 1059                 * We may have a pending event but are not allowed to
 1060                 * process it since we are probably in a suspended state,
 1061                 * so trigger a resume.
 1062                 */
1063                mhi_trigger_resume(mhi_cntrl);
1064
1065                return;
1066        }
1067
 1068        /* Process ctrl events */
1069        ret = mhi_event->process_event(mhi_cntrl, mhi_event, U32_MAX);
1070
 1071        /*
 1072         * We received an IRQ but there were no events to process; maybe the
 1073         * device went to the SYS_ERR state. Check the state to confirm.
 1074         */
1075        if (!ret) {
1076                write_lock_irq(&mhi_cntrl->pm_lock);
1077                state = mhi_get_mhi_state(mhi_cntrl);
1078                if (state == MHI_STATE_SYS_ERR) {
1079                        dev_dbg(dev, "System error detected\n");
1080                        pm_state = mhi_tryset_pm_state(mhi_cntrl,
1081                                                       MHI_PM_SYS_ERR_DETECT);
1082                }
1083                write_unlock_irq(&mhi_cntrl->pm_lock);
1084                if (pm_state == MHI_PM_SYS_ERR_DETECT)
1085                        mhi_pm_sys_err_handler(mhi_cntrl);
1086        }
1087}
1088
1089static bool mhi_is_ring_full(struct mhi_controller *mhi_cntrl,
1090                             struct mhi_ring *ring)
1091{
1092        void *tmp = ring->wp + ring->el_size;
1093
1094        if (tmp >= (ring->base + ring->len))
1095                tmp = ring->base;
1096
1097        return (tmp == ring->rp);
1098}
1099
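/*
 * Common queueing path for skb, DMA and raw buffer submissions: generate
 * a TRE for the buffer, take a runtime PM reference, assert device wake
 * and ring the channel doorbell if register access is currently allowed.
 */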
1100static int mhi_queue(struct mhi_device *mhi_dev, struct mhi_buf_info *buf_info,
1101                     enum dma_data_direction dir, enum mhi_flags mflags)
1102{
1103        struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
1104        struct mhi_chan *mhi_chan = (dir == DMA_TO_DEVICE) ? mhi_dev->ul_chan :
1105                                                             mhi_dev->dl_chan;
1106        struct mhi_ring *tre_ring = &mhi_chan->tre_ring;
1107        unsigned long flags;
1108        int ret;
1109
1110        if (unlikely(MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)))
1111                return -EIO;
1112
1113        read_lock_irqsave(&mhi_cntrl->pm_lock, flags);
1114
1115        ret = mhi_is_ring_full(mhi_cntrl, tre_ring);
1116        if (unlikely(ret)) {
1117                ret = -EAGAIN;
1118                goto exit_unlock;
1119        }
1120
1121        ret = mhi_gen_tre(mhi_cntrl, mhi_chan, buf_info, mflags);
1122        if (unlikely(ret))
1123                goto exit_unlock;
1124
 1125        /* The packet is queued; take a usage ref to exit M3 if necessary.
 1126         * For a host->device buffer, the balancing put is done on completion;
 1127         * for a device->host buffer, it is done after ringing the DB.
 1128         */
1129        mhi_cntrl->runtime_get(mhi_cntrl);
1130
 1131        /* Assert dev_wake (to exit/prevent M1/M2) */
1132        mhi_cntrl->wake_toggle(mhi_cntrl);
1133
1134        if (mhi_chan->dir == DMA_TO_DEVICE)
1135                atomic_inc(&mhi_cntrl->pending_pkts);
1136
1137        if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl)))
1138                mhi_ring_chan_db(mhi_cntrl, mhi_chan);
1139
1140        if (dir == DMA_FROM_DEVICE)
1141                mhi_cntrl->runtime_put(mhi_cntrl);
1142
1143exit_unlock:
1144        read_unlock_irqrestore(&mhi_cntrl->pm_lock, flags);
1145
1146        return ret;
1147}
1148
1149int mhi_queue_skb(struct mhi_device *mhi_dev, enum dma_data_direction dir,
1150                  struct sk_buff *skb, size_t len, enum mhi_flags mflags)
1151{
1152        struct mhi_chan *mhi_chan = (dir == DMA_TO_DEVICE) ? mhi_dev->ul_chan :
1153                                                             mhi_dev->dl_chan;
1154        struct mhi_buf_info buf_info = { };
1155
1156        buf_info.v_addr = skb->data;
1157        buf_info.cb_buf = skb;
1158        buf_info.len = len;
1159
1160        if (unlikely(mhi_chan->pre_alloc))
1161                return -EINVAL;
1162
1163        return mhi_queue(mhi_dev, &buf_info, dir, mflags);
1164}
1165EXPORT_SYMBOL_GPL(mhi_queue_skb);
1166
1167int mhi_queue_dma(struct mhi_device *mhi_dev, enum dma_data_direction dir,
1168                  struct mhi_buf *mhi_buf, size_t len, enum mhi_flags mflags)
1169{
1170        struct mhi_chan *mhi_chan = (dir == DMA_TO_DEVICE) ? mhi_dev->ul_chan :
1171                                                             mhi_dev->dl_chan;
1172        struct mhi_buf_info buf_info = { };
1173
1174        buf_info.p_addr = mhi_buf->dma_addr;
1175        buf_info.cb_buf = mhi_buf;
1176        buf_info.pre_mapped = true;
1177        buf_info.len = len;
1178
1179        if (unlikely(mhi_chan->pre_alloc))
1180                return -EINVAL;
1181
1182        return mhi_queue(mhi_dev, &buf_info, dir, mflags);
1183}
1184EXPORT_SYMBOL_GPL(mhi_queue_dma);
1185
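/*
 * Build a single transfer ring element for the given buffer: map it
 * unless the client pre-mapped it, encode the EOT/EOB/chain/BEI flags
 * and advance the TRE and buffer ring write pointers.
 */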
1186int mhi_gen_tre(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan,
1187                        struct mhi_buf_info *info, enum mhi_flags flags)
1188{
1189        struct mhi_ring *buf_ring, *tre_ring;
1190        struct mhi_tre *mhi_tre;
1191        struct mhi_buf_info *buf_info;
1192        int eot, eob, chain, bei;
1193        int ret;
1194
1195        buf_ring = &mhi_chan->buf_ring;
1196        tre_ring = &mhi_chan->tre_ring;
1197
1198        buf_info = buf_ring->wp;
1199        WARN_ON(buf_info->used);
1200        buf_info->pre_mapped = info->pre_mapped;
1201        if (info->pre_mapped)
1202                buf_info->p_addr = info->p_addr;
1203        else
1204                buf_info->v_addr = info->v_addr;
1205        buf_info->cb_buf = info->cb_buf;
1206        buf_info->wp = tre_ring->wp;
1207        buf_info->dir = mhi_chan->dir;
1208        buf_info->len = info->len;
1209
1210        if (!info->pre_mapped) {
1211                ret = mhi_cntrl->map_single(mhi_cntrl, buf_info);
1212                if (ret)
1213                        return ret;
1214        }
1215
1216        eob = !!(flags & MHI_EOB);
1217        eot = !!(flags & MHI_EOT);
1218        chain = !!(flags & MHI_CHAIN);
1219        bei = !!(mhi_chan->intmod);
1220
1221        mhi_tre = tre_ring->wp;
1222        mhi_tre->ptr = MHI_TRE_DATA_PTR(buf_info->p_addr);
1223        mhi_tre->dword[0] = MHI_TRE_DATA_DWORD0(info->len);
1224        mhi_tre->dword[1] = MHI_TRE_DATA_DWORD1(bei, eot, eob, chain);
1225
1226        /* increment WP */
1227        mhi_add_ring_element(mhi_cntrl, tre_ring);
1228        mhi_add_ring_element(mhi_cntrl, buf_ring);
1229
1230        return 0;
1231}
1232
1233int mhi_queue_buf(struct mhi_device *mhi_dev, enum dma_data_direction dir,
1234                  void *buf, size_t len, enum mhi_flags mflags)
1235{
1236        struct mhi_buf_info buf_info = { };
1237
1238        buf_info.v_addr = buf;
1239        buf_info.cb_buf = buf;
1240        buf_info.len = len;
1241
1242        return mhi_queue(mhi_dev, &buf_info, dir, mflags);
1243}
1244EXPORT_SYMBOL_GPL(mhi_queue_buf);
1245
1246bool mhi_queue_is_full(struct mhi_device *mhi_dev, enum dma_data_direction dir)
1247{
1248        struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
1249        struct mhi_chan *mhi_chan = (dir == DMA_TO_DEVICE) ?
1250                                        mhi_dev->ul_chan : mhi_dev->dl_chan;
1251        struct mhi_ring *tre_ring = &mhi_chan->tre_ring;
1252
1253        return mhi_is_ring_full(mhi_cntrl, tre_ring);
1254}
1255EXPORT_SYMBOL_GPL(mhi_queue_is_full);
1256
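/*
 * Queue a channel command (reset, stop or start) on the primary command
 * ring and ring the command doorbell if register access is allowed.
 */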
1257int mhi_send_cmd(struct mhi_controller *mhi_cntrl,
1258                 struct mhi_chan *mhi_chan,
1259                 enum mhi_cmd_type cmd)
1260{
1261        struct mhi_tre *cmd_tre = NULL;
1262        struct mhi_cmd *mhi_cmd = &mhi_cntrl->mhi_cmd[PRIMARY_CMD_RING];
1263        struct mhi_ring *ring = &mhi_cmd->ring;
1264        struct device *dev = &mhi_cntrl->mhi_dev->dev;
1265        int chan = 0;
1266
1267        if (mhi_chan)
1268                chan = mhi_chan->chan;
1269
1270        spin_lock_bh(&mhi_cmd->lock);
1271        if (!get_nr_avail_ring_elements(mhi_cntrl, ring)) {
1272                spin_unlock_bh(&mhi_cmd->lock);
1273                return -ENOMEM;
1274        }
1275
1276        /* prepare the cmd tre */
1277        cmd_tre = ring->wp;
1278        switch (cmd) {
1279        case MHI_CMD_RESET_CHAN:
1280                cmd_tre->ptr = MHI_TRE_CMD_RESET_PTR;
1281                cmd_tre->dword[0] = MHI_TRE_CMD_RESET_DWORD0;
1282                cmd_tre->dword[1] = MHI_TRE_CMD_RESET_DWORD1(chan);
1283                break;
1284        case MHI_CMD_STOP_CHAN:
1285                cmd_tre->ptr = MHI_TRE_CMD_STOP_PTR;
1286                cmd_tre->dword[0] = MHI_TRE_CMD_STOP_DWORD0;
1287                cmd_tre->dword[1] = MHI_TRE_CMD_STOP_DWORD1(chan);
1288                break;
1289        case MHI_CMD_START_CHAN:
1290                cmd_tre->ptr = MHI_TRE_CMD_START_PTR;
1291                cmd_tre->dword[0] = MHI_TRE_CMD_START_DWORD0;
1292                cmd_tre->dword[1] = MHI_TRE_CMD_START_DWORD1(chan);
1293                break;
1294        default:
1295                dev_err(dev, "Command not supported\n");
1296                break;
1297        }
1298
1299        /* queue to hardware */
1300        mhi_add_ring_element(mhi_cntrl, ring);
1301        read_lock_bh(&mhi_cntrl->pm_lock);
1302        if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl)))
1303                mhi_ring_cmd_db(mhi_cntrl, mhi_cmd);
1304        read_unlock_bh(&mhi_cntrl->pm_lock);
1305        spin_unlock_bh(&mhi_cmd->lock);
1306
1307        return 0;
1308}
1309
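/*
 * Move a channel to the requested state by sending the corresponding
 * command to the device and waiting for its completion event.
 */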
1310static int mhi_update_channel_state(struct mhi_controller *mhi_cntrl,
1311                                    struct mhi_chan *mhi_chan,
1312                                    enum mhi_ch_state_type to_state)
1313{
1314        struct device *dev = &mhi_chan->mhi_dev->dev;
1315        enum mhi_cmd_type cmd = MHI_CMD_NOP;
1316        int ret;
1317
1318        dev_dbg(dev, "%d: Updating channel state to: %s\n", mhi_chan->chan,
1319                TO_CH_STATE_TYPE_STR(to_state));
1320
1321        switch (to_state) {
1322        case MHI_CH_STATE_TYPE_RESET:
1323                write_lock_irq(&mhi_chan->lock);
1324                if (mhi_chan->ch_state != MHI_CH_STATE_STOP &&
1325                    mhi_chan->ch_state != MHI_CH_STATE_ENABLED &&
1326                    mhi_chan->ch_state != MHI_CH_STATE_SUSPENDED) {
1327                        write_unlock_irq(&mhi_chan->lock);
1328                        return -EINVAL;
1329                }
1330                mhi_chan->ch_state = MHI_CH_STATE_DISABLED;
1331                write_unlock_irq(&mhi_chan->lock);
1332
1333                cmd = MHI_CMD_RESET_CHAN;
1334                break;
1335        case MHI_CH_STATE_TYPE_STOP:
1336                if (mhi_chan->ch_state != MHI_CH_STATE_ENABLED)
1337                        return -EINVAL;
1338
1339                cmd = MHI_CMD_STOP_CHAN;
1340                break;
1341        case MHI_CH_STATE_TYPE_START:
1342                if (mhi_chan->ch_state != MHI_CH_STATE_STOP &&
1343                    mhi_chan->ch_state != MHI_CH_STATE_DISABLED)
1344                        return -EINVAL;
1345
1346                cmd = MHI_CMD_START_CHAN;
1347                break;
1348        default:
1349                dev_err(dev, "%d: Channel state update to %s not allowed\n",
1350                        mhi_chan->chan, TO_CH_STATE_TYPE_STR(to_state));
1351                return -EINVAL;
1352        }
1353
1354        /* bring host and device out of suspended states */
1355        ret = mhi_device_get_sync(mhi_cntrl->mhi_dev);
1356        if (ret)
1357                return ret;
1358        mhi_cntrl->runtime_get(mhi_cntrl);
1359
1360        reinit_completion(&mhi_chan->completion);
1361        ret = mhi_send_cmd(mhi_cntrl, mhi_chan, cmd);
1362        if (ret) {
1363                dev_err(dev, "%d: Failed to send %s channel command\n",
1364                        mhi_chan->chan, TO_CH_STATE_TYPE_STR(to_state));
1365                goto exit_channel_update;
1366        }
1367
1368        ret = wait_for_completion_timeout(&mhi_chan->completion,
1369                                       msecs_to_jiffies(mhi_cntrl->timeout_ms));
1370        if (!ret || mhi_chan->ccs != MHI_EV_CC_SUCCESS) {
1371                dev_err(dev,
1372                        "%d: Failed to receive %s channel command completion\n",
1373                        mhi_chan->chan, TO_CH_STATE_TYPE_STR(to_state));
1374                ret = -EIO;
1375                goto exit_channel_update;
1376        }
1377
1378        ret = 0;
1379
1380        if (to_state != MHI_CH_STATE_TYPE_RESET) {
1381                write_lock_irq(&mhi_chan->lock);
1382                mhi_chan->ch_state = (to_state == MHI_CH_STATE_TYPE_START) ?
1383                                      MHI_CH_STATE_ENABLED : MHI_CH_STATE_STOP;
1384                write_unlock_irq(&mhi_chan->lock);
1385        }
1386
1387        dev_dbg(dev, "%d: Channel state change to %s successful\n",
1388                mhi_chan->chan, TO_CH_STATE_TYPE_STR(to_state));
1389
1390exit_channel_update:
1391        mhi_cntrl->runtime_put(mhi_cntrl);
1392        mhi_device_put(mhi_cntrl->mhi_dev);
1393
1394        return ret;
1395}
1396
1397static void mhi_unprepare_channel(struct mhi_controller *mhi_cntrl,
1398                                  struct mhi_chan *mhi_chan)
1399{
1400        int ret;
1401        struct device *dev = &mhi_chan->mhi_dev->dev;
1402
1403        mutex_lock(&mhi_chan->mutex);
1404
1405        if (!(BIT(mhi_cntrl->ee) & mhi_chan->ee_mask)) {
1406                dev_dbg(dev, "Current EE: %s Required EE Mask: 0x%x\n",
1407                        TO_MHI_EXEC_STR(mhi_cntrl->ee), mhi_chan->ee_mask);
1408                goto exit_unprepare_channel;
1409        }
1410
 1411        /* No more event processing for this channel */
1412        ret = mhi_update_channel_state(mhi_cntrl, mhi_chan,
1413                                       MHI_CH_STATE_TYPE_RESET);
1414        if (ret)
1415                dev_err(dev, "%d: Failed to reset channel, still resetting\n",
1416                        mhi_chan->chan);
1417
1418exit_unprepare_channel:
1419        write_lock_irq(&mhi_chan->lock);
1420        mhi_chan->ch_state = MHI_CH_STATE_DISABLED;
1421        write_unlock_irq(&mhi_chan->lock);
1422
1423        if (!mhi_chan->offload_ch) {
1424                mhi_reset_chan(mhi_cntrl, mhi_chan);
1425                mhi_deinit_chan_ctxt(mhi_cntrl, mhi_chan);
1426        }
1427        dev_dbg(dev, "%d: successfully reset\n", mhi_chan->chan);
1428
1429        mutex_unlock(&mhi_chan->mutex);
1430}
1431
1432int mhi_prepare_channel(struct mhi_controller *mhi_cntrl,
1433                        struct mhi_chan *mhi_chan)
1434{
1435        int ret = 0;
1436        struct device *dev = &mhi_chan->mhi_dev->dev;
1437
1438        if (!(BIT(mhi_cntrl->ee) & mhi_chan->ee_mask)) {
1439                dev_err(dev, "Current EE: %s Required EE Mask: 0x%x\n",
1440                        TO_MHI_EXEC_STR(mhi_cntrl->ee), mhi_chan->ee_mask);
1441                return -ENOTCONN;
1442        }
1443
1444        mutex_lock(&mhi_chan->mutex);
1445
1446        /* Check if the client manages the channel context (offload channels) */
1447        if (!mhi_chan->offload_ch) {
1448                ret = mhi_init_chan_ctxt(mhi_cntrl, mhi_chan);
1449                if (ret)
1450                        goto error_init_chan;
1451        }
1452
1453        ret = mhi_update_channel_state(mhi_cntrl, mhi_chan,
1454                                       MHI_CH_STATE_TYPE_START);
1455        if (ret)
1456                goto error_pm_state;
1457
1458        /* Pre-allocate and queue buffers to fill the transfer ring */
1459        if (mhi_chan->pre_alloc) {
1460                int nr_el = get_nr_avail_ring_elements(mhi_cntrl,
1461                                                       &mhi_chan->tre_ring);
1462                size_t len = mhi_cntrl->buffer_len;
1463
1464                while (nr_el--) {
1465                        void *buf;
1466                        struct mhi_buf_info info = { };
1467                        buf = kmalloc(len, GFP_KERNEL);
1468                        if (!buf) {
1469                                ret = -ENOMEM;
1470                                goto error_pre_alloc;
1471                        }
1472
1473                        /* Prepare transfer descriptors */
1474                        info.v_addr = buf;
1475                        info.cb_buf = buf;
1476                        info.len = len;
1477                        ret = mhi_gen_tre(mhi_cntrl, mhi_chan, &info, MHI_EOT);
1478                        if (ret) {
1479                                kfree(buf);
1480                                goto error_pre_alloc;
1481                        }
1482                }
1483
1484                read_lock_bh(&mhi_cntrl->pm_lock);
1485                if (MHI_DB_ACCESS_VALID(mhi_cntrl)) {
1486                        read_lock_irq(&mhi_chan->lock);
1487                        mhi_ring_chan_db(mhi_cntrl, mhi_chan);
1488                        read_unlock_irq(&mhi_chan->lock);
1489                }
1490                read_unlock_bh(&mhi_cntrl->pm_lock);
1491        }
1492
1493        mutex_unlock(&mhi_chan->mutex);
1494
1495        return 0;
1496
1497error_pm_state:
1498        if (!mhi_chan->offload_ch)
1499                mhi_deinit_chan_ctxt(mhi_cntrl, mhi_chan);
1500
1501error_init_chan:
1502        mutex_unlock(&mhi_chan->mutex);
1503
1504        return ret;
1505
1506error_pre_alloc:
1507        mutex_unlock(&mhi_chan->mutex);
1508        mhi_unprepare_channel(mhi_cntrl, mhi_chan);
1509
1510        return ret;
1511}
1512
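/*
 * Walk the not-yet-processed portion of the event ring (from the host read
 * pointer up to the device read pointer) and rewrite every transfer
 * completion event belonging to this channel as a STALE event, so later
 * event processing ignores completions for a channel that has been reset.
 */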
1513static void mhi_mark_stale_events(struct mhi_controller *mhi_cntrl,
1514                                  struct mhi_event *mhi_event,
1515                                  struct mhi_event_ctxt *er_ctxt,
1516                                  int chan)
1518{
1519        struct mhi_tre *dev_rp, *local_rp;
1520        struct mhi_ring *ev_ring;
1521        struct device *dev = &mhi_cntrl->mhi_dev->dev;
1522        unsigned long flags;
1523        dma_addr_t ptr;
1524
1525        dev_dbg(dev, "Marking all events for chan: %d as stale\n", chan);
1526
1527        ev_ring = &mhi_event->ring;
1528
1529        /* Mark all pending events related to this channel as STALE events */
1530        spin_lock_irqsave(&mhi_event->lock, flags);
1531
1532        ptr = er_ctxt->rp;
1533        if (!is_valid_ring_ptr(ev_ring, ptr)) {
1534                dev_err(&mhi_cntrl->mhi_dev->dev,
1535                        "Event ring rp points outside of the event ring\n");
1536                dev_rp = ev_ring->rp;
1537        } else {
1538                dev_rp = mhi_to_virtual(ev_ring, ptr);
1539        }
1540
1541        local_rp = ev_ring->rp;
1542        while (dev_rp != local_rp) {
1543                if (MHI_TRE_GET_EV_TYPE(local_rp) == MHI_PKT_TYPE_TX_EVENT &&
1544                    chan == MHI_TRE_GET_EV_CHID(local_rp))
1545                        local_rp->dword[1] = MHI_TRE_EV_DWORD1(chan,
1546                                        MHI_PKT_TYPE_STALE_EVENT);
1547                local_rp++;
1548                if (local_rp == (ev_ring->base + ev_ring->len))
1549                        local_rp = ev_ring->base;
1550        }
1551
1552        dev_dbg(dev, "Finished marking events as stale\n");
1553        spin_unlock_irqrestore(&mhi_event->lock, flags);
1554}
1555
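/*
 * Flush every buffer still queued on the channel's transfer ring: unmap it,
 * drop the corresponding ring elements and either free it (pre_alloc
 * channels) or return it to the client via the xfer_cb callback with
 * -ENOTCONN as the transaction status.
 */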
1556static void mhi_reset_data_chan(struct mhi_controller *mhi_cntrl,
1557                                struct mhi_chan *mhi_chan)
1558{
1559        struct mhi_ring *buf_ring, *tre_ring;
1560        struct mhi_result result;
1561
1562        /* Reset any pending buffers */
1563        buf_ring = &mhi_chan->buf_ring;
1564        tre_ring = &mhi_chan->tre_ring;
1565        result.transaction_status = -ENOTCONN;
1566        result.bytes_xferd = 0;
1567        while (tre_ring->rp != tre_ring->wp) {
1568                struct mhi_buf_info *buf_info = buf_ring->rp;
1569
1570                if (mhi_chan->dir == DMA_TO_DEVICE) {
1571                        atomic_dec(&mhi_cntrl->pending_pkts);
1572                        /* Release the reference got from mhi_queue() */
1573                        mhi_cntrl->runtime_put(mhi_cntrl);
1574                }
1575
1576                if (!buf_info->pre_mapped)
1577                        mhi_cntrl->unmap_single(mhi_cntrl, buf_info);
1578
1579                mhi_del_ring_element(mhi_cntrl, buf_ring);
1580                mhi_del_ring_element(mhi_cntrl, tre_ring);
1581
1582                if (mhi_chan->pre_alloc) {
1583                        kfree(buf_info->cb_buf);
1584                } else {
1585                        result.buf_addr = buf_info->cb_buf;
1586                        mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result);
1587                }
1588        }
1589}
1590
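/*
 * Clean up a channel that is being reset: mark its pending completion events
 * as stale and release every queued buffer. Offload channels are skipped
 * because the MHI core never queues buffers for them.
 */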
1591void mhi_reset_chan(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan)
1592{
1593        struct mhi_event *mhi_event;
1594        struct mhi_event_ctxt *er_ctxt;
1595        int chan = mhi_chan->chan;
1596
1597        /* Nothing to reset, the core never queues buffers for offload channels */
1598        if (mhi_chan->offload_ch)
1599                return;
1600
1601        read_lock_bh(&mhi_cntrl->pm_lock);
1602        mhi_event = &mhi_cntrl->mhi_event[mhi_chan->er_index];
1603        er_ctxt = &mhi_cntrl->mhi_ctxt->er_ctxt[mhi_chan->er_index];
1604
1605        mhi_mark_stale_events(mhi_cntrl, mhi_event, er_ctxt, chan);
1606
1607        mhi_reset_data_chan(mhi_cntrl, mhi_chan);
1608
1609        read_unlock_bh(&mhi_cntrl->pm_lock);
1610}
1611
1612/* Move the device's UL and DL channels to the start state */
1613int mhi_prepare_for_transfer(struct mhi_device *mhi_dev)
1614{
1615        int ret, dir;
1616        struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
1617        struct mhi_chan *mhi_chan;
1618
1619        for (dir = 0; dir < 2; dir++) {
1620                mhi_chan = dir ? mhi_dev->dl_chan : mhi_dev->ul_chan;
1621                if (!mhi_chan)
1622                        continue;
1623
1624                ret = mhi_prepare_channel(mhi_cntrl, mhi_chan);
1625                if (ret)
1626                        goto error_open_chan;
1627        }
1628
1629        return 0;
1630
1631error_open_chan:
1632        for (--dir; dir >= 0; dir--) {
1633                mhi_chan = dir ? mhi_dev->dl_chan : mhi_dev->ul_chan;
1634                if (!mhi_chan)
1635                        continue;
1636
1637                mhi_unprepare_channel(mhi_cntrl, mhi_chan);
1638        }
1639
1640        return ret;
1641}
1642EXPORT_SYMBOL_GPL(mhi_prepare_for_transfer);
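
/*
 * Illustrative sketch only (not part of this driver): how a client driver
 * might typically pair mhi_prepare_for_transfer() with mhi_queue_skb() and
 * mhi_unprepare_from_transfer() in its probe/remove callbacks. The names
 * my_client_probe, my_client_remove and RX_BUF_SIZE are hypothetical;
 * mhi_queue_skb() is the MHI skb queueing API.
 *
 *	static int my_client_probe(struct mhi_device *mhi_dev,
 *				   const struct mhi_device_id *id)
 *	{
 *		struct sk_buff *skb;
 *		int ret;
 *
 *		ret = mhi_prepare_for_transfer(mhi_dev);
 *		if (ret)
 *			return ret;
 *
 *		skb = alloc_skb(RX_BUF_SIZE, GFP_KERNEL);
 *		if (!skb) {
 *			mhi_unprepare_from_transfer(mhi_dev);
 *			return -ENOMEM;
 *		}
 *
 *		ret = mhi_queue_skb(mhi_dev, DMA_FROM_DEVICE, skb,
 *				    RX_BUF_SIZE, MHI_EOT);
 *		if (ret) {
 *			kfree_skb(skb);
 *			mhi_unprepare_from_transfer(mhi_dev);
 *		}
 *
 *		return ret;
 *	}
 *
 *	static void my_client_remove(struct mhi_device *mhi_dev)
 *	{
 *		mhi_unprepare_from_transfer(mhi_dev);
 *	}
 */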
1643
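/*
 * Counterpart of mhi_prepare_for_transfer(): move both channels of the
 * device back to the disabled state, DL channel first, releasing any
 * buffers still queued on them.
 */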
1644void mhi_unprepare_from_transfer(struct mhi_device *mhi_dev)
1645{
1646        struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
1647        struct mhi_chan *mhi_chan;
1648        int dir;
1649
1650        for (dir = 0; dir < 2; dir++) {
1651                mhi_chan = dir ? mhi_dev->ul_chan : mhi_dev->dl_chan;
1652                if (!mhi_chan)
1653                        continue;
1654
1655                mhi_unprepare_channel(mhi_cntrl, mhi_chan);
1656        }
1657}
1658EXPORT_SYMBOL_GPL(mhi_unprepare_from_transfer);
1659
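/*
 * Process up to @budget pending events on the event ring serving the
 * device's DL channel. Returns the value of the ring's process_event
 * handler: the number of events processed, or a negative errno if the
 * device is in an error state.
 */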
1660int mhi_poll(struct mhi_device *mhi_dev, u32 budget)
1661{
1662        struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
1663        struct mhi_chan *mhi_chan = mhi_dev->dl_chan;
1664        struct mhi_event *mhi_event = &mhi_cntrl->mhi_event[mhi_chan->er_index];
1665        int ret;
1666
1667        spin_lock_bh(&mhi_event->lock);
1668        ret = mhi_event->process_event(mhi_cntrl, mhi_event, budget);
1669        spin_unlock_bh(&mhi_event->lock);
1670
1671        return ret;
1672}
1673EXPORT_SYMBOL_GPL(mhi_poll);
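
/*
 * Illustrative sketch only (hypothetical client code, not part of this
 * driver): draining DL completions from a NAPI poll callback, assuming the
 * client keeps its struct mhi_device and napi_struct in a private my_dev
 * structure. my_dev and my_napi_poll are made-up names.
 *
 *	static int my_napi_poll(struct napi_struct *napi, int budget)
 *	{
 *		struct my_dev *mdev = container_of(napi, struct my_dev, napi);
 *		int done;
 *
 *		done = mhi_poll(mdev->mhi_dev, budget);
 *		if (done < 0)
 *			done = 0;
 *		if (done < budget)
 *			napi_complete_done(napi, done);
 *
 *		return done;
 *	}
 */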
1674