linux/drivers/bus/mhi/core/init.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
 *
 */

#include <linux/device.h>
#include <linux/dma-direction.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/mhi.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/wait.h>
#include "internal.h"

const char * const mhi_ee_str[MHI_EE_MAX] = {
        [MHI_EE_PBL] = "PBL",
        [MHI_EE_SBL] = "SBL",
        [MHI_EE_AMSS] = "AMSS",
        [MHI_EE_RDDM] = "RDDM",
        [MHI_EE_WFW] = "WFW",
        [MHI_EE_PTHRU] = "PASS THRU",
        [MHI_EE_EDL] = "EDL",
        [MHI_EE_DISABLE_TRANSITION] = "DISABLE",
        [MHI_EE_NOT_SUPPORTED] = "NOT SUPPORTED",
};

const char * const dev_state_tran_str[DEV_ST_TRANSITION_MAX] = {
        [DEV_ST_TRANSITION_PBL] = "PBL",
        [DEV_ST_TRANSITION_READY] = "READY",
        [DEV_ST_TRANSITION_SBL] = "SBL",
        [DEV_ST_TRANSITION_MISSION_MODE] = "MISSION_MODE",
        [DEV_ST_TRANSITION_SYS_ERR] = "SYS_ERR",
        [DEV_ST_TRANSITION_DISABLE] = "DISABLE",
};

const char * const mhi_state_str[MHI_STATE_MAX] = {
        [MHI_STATE_RESET] = "RESET",
        [MHI_STATE_READY] = "READY",
        [MHI_STATE_M0] = "M0",
        [MHI_STATE_M1] = "M1",
        [MHI_STATE_M2] = "M2",
        [MHI_STATE_M3] = "M3",
        [MHI_STATE_M3_FAST] = "M3_FAST",
        [MHI_STATE_BHI] = "BHI",
        [MHI_STATE_SYS_ERR] = "SYS_ERR",
};

static const char * const mhi_pm_state_str[] = {
        [MHI_PM_STATE_DISABLE] = "DISABLE",
        [MHI_PM_STATE_POR] = "POR",
        [MHI_PM_STATE_M0] = "M0",
        [MHI_PM_STATE_M2] = "M2",
        [MHI_PM_STATE_M3_ENTER] = "M?->M3",
        [MHI_PM_STATE_M3] = "M3",
        [MHI_PM_STATE_M3_EXIT] = "M3->M0",
        [MHI_PM_STATE_FW_DL_ERR] = "FW DL Error",
        [MHI_PM_STATE_SYS_ERR_DETECT] = "SYS_ERR Detect",
        [MHI_PM_STATE_SYS_ERR_PROCESS] = "SYS_ERR Process",
        [MHI_PM_STATE_SHUTDOWN_PROCESS] = "SHUTDOWN Process",
        [MHI_PM_STATE_LD_ERR_FATAL_DETECT] = "LD or Error Fatal Detect",
};

const char *to_mhi_pm_state_str(enum mhi_pm_state state)
{
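        /* The PM state is a bitmask; use the highest set bit to index the name table */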
        int index = find_last_bit((unsigned long *)&state, 32);

        if (index >= ARRAY_SIZE(mhi_pm_state_str))
                return "Invalid State";

        return mhi_pm_state_str[index];
}

/* MHI protocol requires the transfer ring to be aligned with ring length */
static int mhi_alloc_aligned_ring(struct mhi_controller *mhi_cntrl,
                                  struct mhi_ring *ring,
                                  u64 len)
{
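        /*
         * Over-allocate by (len - 1) bytes so that a len-aligned window of
         * len bytes is guaranteed to fit inside the allocation.
         */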
        ring->alloc_size = len + (len - 1);
        ring->pre_aligned = mhi_alloc_coherent(mhi_cntrl, ring->alloc_size,
                                               &ring->dma_handle, GFP_KERNEL);
        if (!ring->pre_aligned)
                return -ENOMEM;

        ring->iommu_base = (ring->dma_handle + (len - 1)) & ~(len - 1);
        ring->base = ring->pre_aligned + (ring->iommu_base - ring->dma_handle);

        return 0;
}

void mhi_deinit_free_irq(struct mhi_controller *mhi_cntrl)
{
        int i;
        struct mhi_event *mhi_event = mhi_cntrl->mhi_event;

        for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
                if (mhi_event->offload_ev)
                        continue;

                free_irq(mhi_cntrl->irq[mhi_event->irq], mhi_event);
        }

        free_irq(mhi_cntrl->irq[0], mhi_cntrl);
}

int mhi_init_irq_setup(struct mhi_controller *mhi_cntrl)
{
        struct mhi_event *mhi_event = mhi_cntrl->mhi_event;
        struct device *dev = &mhi_cntrl->mhi_dev->dev;
        int i, ret;

        /* Setup BHI_INTVEC IRQ */
        ret = request_threaded_irq(mhi_cntrl->irq[0], mhi_intvec_handler,
                                   mhi_intvec_threaded_handler,
                                   IRQF_SHARED | IRQF_NO_SUSPEND,
                                   "bhi", mhi_cntrl);
        if (ret)
                return ret;

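        /* Request one IRQ per non-offload event ring */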
        for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
                if (mhi_event->offload_ev)
                        continue;

                ret = request_irq(mhi_cntrl->irq[mhi_event->irq],
                                  mhi_irq_handler,
                                  IRQF_SHARED | IRQF_NO_SUSPEND,
                                  "mhi", mhi_event);
                if (ret) {
                        dev_err(dev, "Error requesting irq:%d for ev:%d\n",
                                mhi_cntrl->irq[mhi_event->irq], i);
                        goto error_request;
                }
        }

        return 0;

error_request:
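        /* Free only the IRQs requested so far, again skipping offload rings */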
        for (--i, --mhi_event; i >= 0; i--, mhi_event--) {
                if (mhi_event->offload_ev)
                        continue;

                free_irq(mhi_cntrl->irq[mhi_event->irq], mhi_event);
        }
        free_irq(mhi_cntrl->irq[0], mhi_cntrl);

        return ret;
}

void mhi_deinit_dev_ctxt(struct mhi_controller *mhi_cntrl)
{
        int i;
        struct mhi_ctxt *mhi_ctxt = mhi_cntrl->mhi_ctxt;
        struct mhi_cmd *mhi_cmd;
        struct mhi_event *mhi_event;
        struct mhi_ring *ring;

        mhi_cmd = mhi_cntrl->mhi_cmd;
        for (i = 0; i < NR_OF_CMD_RINGS; i++, mhi_cmd++) {
                ring = &mhi_cmd->ring;
                mhi_free_coherent(mhi_cntrl, ring->alloc_size,
                                  ring->pre_aligned, ring->dma_handle);
                ring->base = NULL;
                ring->iommu_base = 0;
        }

        mhi_free_coherent(mhi_cntrl,
                          sizeof(*mhi_ctxt->cmd_ctxt) * NR_OF_CMD_RINGS,
                          mhi_ctxt->cmd_ctxt, mhi_ctxt->cmd_ctxt_addr);

        mhi_event = mhi_cntrl->mhi_event;
        for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
                if (mhi_event->offload_ev)
                        continue;

                ring = &mhi_event->ring;
                mhi_free_coherent(mhi_cntrl, ring->alloc_size,
                                  ring->pre_aligned, ring->dma_handle);
                ring->base = NULL;
                ring->iommu_base = 0;
        }

        mhi_free_coherent(mhi_cntrl, sizeof(*mhi_ctxt->er_ctxt) *
                          mhi_cntrl->total_ev_rings, mhi_ctxt->er_ctxt,
                          mhi_ctxt->er_ctxt_addr);

        mhi_free_coherent(mhi_cntrl, sizeof(*mhi_ctxt->chan_ctxt) *
                          mhi_cntrl->max_chan, mhi_ctxt->chan_ctxt,
                          mhi_ctxt->chan_ctxt_addr);

        kfree(mhi_ctxt);
        mhi_cntrl->mhi_ctxt = NULL;
}

int mhi_init_dev_ctxt(struct mhi_controller *mhi_cntrl)
{
        struct mhi_ctxt *mhi_ctxt;
        struct mhi_chan_ctxt *chan_ctxt;
        struct mhi_event_ctxt *er_ctxt;
        struct mhi_cmd_ctxt *cmd_ctxt;
        struct mhi_chan *mhi_chan;
        struct mhi_event *mhi_event;
        struct mhi_cmd *mhi_cmd;
        u32 tmp;
        int ret = -ENOMEM, i;

        atomic_set(&mhi_cntrl->dev_wake, 0);
        atomic_set(&mhi_cntrl->pending_pkts, 0);

        mhi_ctxt = kzalloc(sizeof(*mhi_ctxt), GFP_KERNEL);
        if (!mhi_ctxt)
                return -ENOMEM;

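        /*
         * The channel, event ring and command contexts below are shared with
         * the device, so allocate them from coherent DMA memory.
         */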
        /* Setup channel ctxt */
        mhi_ctxt->chan_ctxt = mhi_alloc_coherent(mhi_cntrl,
                                                 sizeof(*mhi_ctxt->chan_ctxt) *
                                                 mhi_cntrl->max_chan,
                                                 &mhi_ctxt->chan_ctxt_addr,
                                                 GFP_KERNEL);
        if (!mhi_ctxt->chan_ctxt)
                goto error_alloc_chan_ctxt;

        mhi_chan = mhi_cntrl->mhi_chan;
        chan_ctxt = mhi_ctxt->chan_ctxt;
        for (i = 0; i < mhi_cntrl->max_chan; i++, chan_ctxt++, mhi_chan++) {
                /* Skip if it is an offload channel */
                if (mhi_chan->offload_ch)
                        continue;

                tmp = chan_ctxt->chcfg;
                tmp &= ~CHAN_CTX_CHSTATE_MASK;
                tmp |= (MHI_CH_STATE_DISABLED << CHAN_CTX_CHSTATE_SHIFT);
                tmp &= ~CHAN_CTX_BRSTMODE_MASK;
                tmp |= (mhi_chan->db_cfg.brstmode << CHAN_CTX_BRSTMODE_SHIFT);
                tmp &= ~CHAN_CTX_POLLCFG_MASK;
                tmp |= (mhi_chan->db_cfg.pollcfg << CHAN_CTX_POLLCFG_SHIFT);
                chan_ctxt->chcfg = tmp;

                chan_ctxt->chtype = mhi_chan->type;
                chan_ctxt->erindex = mhi_chan->er_index;

                mhi_chan->ch_state = MHI_CH_STATE_DISABLED;
                mhi_chan->tre_ring.db_addr = (void __iomem *)&chan_ctxt->wp;
        }

        /* Setup event context */
        mhi_ctxt->er_ctxt = mhi_alloc_coherent(mhi_cntrl,
                                               sizeof(*mhi_ctxt->er_ctxt) *
                                               mhi_cntrl->total_ev_rings,
                                               &mhi_ctxt->er_ctxt_addr,
                                               GFP_KERNEL);
        if (!mhi_ctxt->er_ctxt)
                goto error_alloc_er_ctxt;

        er_ctxt = mhi_ctxt->er_ctxt;
        mhi_event = mhi_cntrl->mhi_event;
        for (i = 0; i < mhi_cntrl->total_ev_rings; i++, er_ctxt++,
                     mhi_event++) {
                struct mhi_ring *ring = &mhi_event->ring;

                /* Skip if it is an offload event */
                if (mhi_event->offload_ev)
                        continue;

                tmp = er_ctxt->intmod;
                tmp &= ~EV_CTX_INTMODC_MASK;
                tmp &= ~EV_CTX_INTMODT_MASK;
                tmp |= (mhi_event->intmod << EV_CTX_INTMODT_SHIFT);
                er_ctxt->intmod = tmp;

                er_ctxt->ertype = MHI_ER_TYPE_VALID;
                er_ctxt->msivec = mhi_event->irq;
                mhi_event->db_cfg.db_mode = true;

                ring->el_size = sizeof(struct mhi_tre);
                ring->len = ring->el_size * ring->elements;
                ret = mhi_alloc_aligned_ring(mhi_cntrl, ring, ring->len);
                if (ret)
                        goto error_alloc_er;

                /*
                 * If the read pointer equals the write pointer, then the
                 * ring is empty
                 */
                ring->rp = ring->wp = ring->base;
                er_ctxt->rbase = ring->iommu_base;
                er_ctxt->rp = er_ctxt->wp = er_ctxt->rbase;
                er_ctxt->rlen = ring->len;
                ring->ctxt_wp = &er_ctxt->wp;
        }

        /* Setup cmd context */
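        /* ret may hold 0 from the loop above; reset it for the allocation below */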
        ret = -ENOMEM;
        mhi_ctxt->cmd_ctxt = mhi_alloc_coherent(mhi_cntrl,
                                                sizeof(*mhi_ctxt->cmd_ctxt) *
                                                NR_OF_CMD_RINGS,
                                                &mhi_ctxt->cmd_ctxt_addr,
                                                GFP_KERNEL);
        if (!mhi_ctxt->cmd_ctxt)
                goto error_alloc_er;

        mhi_cmd = mhi_cntrl->mhi_cmd;
        cmd_ctxt = mhi_ctxt->cmd_ctxt;
        for (i = 0; i < NR_OF_CMD_RINGS; i++, mhi_cmd++, cmd_ctxt++) {
                struct mhi_ring *ring = &mhi_cmd->ring;

                ring->el_size = sizeof(struct mhi_tre);
                ring->elements = CMD_EL_PER_RING;
                ring->len = ring->el_size * ring->elements;
                ret = mhi_alloc_aligned_ring(mhi_cntrl, ring, ring->len);
                if (ret)
                        goto error_alloc_cmd;

                ring->rp = ring->wp = ring->base;
                cmd_ctxt->rbase = ring->iommu_base;
                cmd_ctxt->rp = cmd_ctxt->wp = cmd_ctxt->rbase;
                cmd_ctxt->rlen = ring->len;
                ring->ctxt_wp = &cmd_ctxt->wp;
        }

        mhi_cntrl->mhi_ctxt = mhi_ctxt;

        return 0;

error_alloc_cmd:
        for (--i, --mhi_cmd; i >= 0; i--, mhi_cmd--) {
                struct mhi_ring *ring = &mhi_cmd->ring;

                mhi_free_coherent(mhi_cntrl, ring->alloc_size,
                                  ring->pre_aligned, ring->dma_handle);
        }
        mhi_free_coherent(mhi_cntrl,
                          sizeof(*mhi_ctxt->cmd_ctxt) * NR_OF_CMD_RINGS,
                          mhi_ctxt->cmd_ctxt, mhi_ctxt->cmd_ctxt_addr);
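        /* Prepare to unwind every event ring via the error_alloc_er path below */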
        i = mhi_cntrl->total_ev_rings;
        mhi_event = mhi_cntrl->mhi_event + i;

error_alloc_er:
        for (--i, --mhi_event; i >= 0; i--, mhi_event--) {
                struct mhi_ring *ring = &mhi_event->ring;

                if (mhi_event->offload_ev)
                        continue;

                mhi_free_coherent(mhi_cntrl, ring->alloc_size,
                                  ring->pre_aligned, ring->dma_handle);
        }
        mhi_free_coherent(mhi_cntrl, sizeof(*mhi_ctxt->er_ctxt) *
                          mhi_cntrl->total_ev_rings, mhi_ctxt->er_ctxt,
                          mhi_ctxt->er_ctxt_addr);

error_alloc_er_ctxt:
        mhi_free_coherent(mhi_cntrl, sizeof(*mhi_ctxt->chan_ctxt) *
                          mhi_cntrl->max_chan, mhi_ctxt->chan_ctxt,
                          mhi_ctxt->chan_ctxt_addr);

error_alloc_chan_ctxt:
        kfree(mhi_ctxt);

        return ret;
}

int mhi_init_mmio(struct mhi_controller *mhi_cntrl)
{
        u32 val;
        int i, ret;
        struct mhi_chan *mhi_chan;
        struct mhi_event *mhi_event;
        void __iomem *base = mhi_cntrl->regs;
        struct device *dev = &mhi_cntrl->mhi_dev->dev;
        struct {
                u32 offset;
                u32 mask;
                u32 shift;
                u32 val;
        } reg_info[] = {
                {
                        CCABAP_HIGHER, U32_MAX, 0,
                        upper_32_bits(mhi_cntrl->mhi_ctxt->chan_ctxt_addr),
                },
                {
                        CCABAP_LOWER, U32_MAX, 0,
                        lower_32_bits(mhi_cntrl->mhi_ctxt->chan_ctxt_addr),
                },
                {
                        ECABAP_HIGHER, U32_MAX, 0,
                        upper_32_bits(mhi_cntrl->mhi_ctxt->er_ctxt_addr),
                },
                {
                        ECABAP_LOWER, U32_MAX, 0,
                        lower_32_bits(mhi_cntrl->mhi_ctxt->er_ctxt_addr),
                },
                {
                        CRCBAP_HIGHER, U32_MAX, 0,
                        upper_32_bits(mhi_cntrl->mhi_ctxt->cmd_ctxt_addr),
                },
                {
                        CRCBAP_LOWER, U32_MAX, 0,
                        lower_32_bits(mhi_cntrl->mhi_ctxt->cmd_ctxt_addr),
                },
                {
                        MHICFG, MHICFG_NER_MASK, MHICFG_NER_SHIFT,
                        mhi_cntrl->total_ev_rings,
                },
                {
                        MHICFG, MHICFG_NHWER_MASK, MHICFG_NHWER_SHIFT,
                        mhi_cntrl->hw_ev_rings,
                },
                {
                        MHICTRLBASE_HIGHER, U32_MAX, 0,
                        upper_32_bits(mhi_cntrl->iova_start),
                },
                {
                        MHICTRLBASE_LOWER, U32_MAX, 0,
                        lower_32_bits(mhi_cntrl->iova_start),
                },
                {
                        MHIDATABASE_HIGHER, U32_MAX, 0,
                        upper_32_bits(mhi_cntrl->iova_start),
                },
                {
                        MHIDATABASE_LOWER, U32_MAX, 0,
                        lower_32_bits(mhi_cntrl->iova_start),
                },
                {
                        MHICTRLLIMIT_HIGHER, U32_MAX, 0,
                        upper_32_bits(mhi_cntrl->iova_stop),
                },
                {
                        MHICTRLLIMIT_LOWER, U32_MAX, 0,
                        lower_32_bits(mhi_cntrl->iova_stop),
                },
                {
                        MHIDATALIMIT_HIGHER, U32_MAX, 0,
                        upper_32_bits(mhi_cntrl->iova_stop),
                },
                {
                        MHIDATALIMIT_LOWER, U32_MAX, 0,
                        lower_32_bits(mhi_cntrl->iova_stop),
                },
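                /* Sentinel: a zero offset terminates the register write loop below */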
                { 0, 0, 0 }
        };

        dev_dbg(dev, "Initializing MHI registers\n");

        /* Read channel db offset */
        ret = mhi_read_reg_field(mhi_cntrl, base, CHDBOFF, CHDBOFF_CHDBOFF_MASK,
                                 CHDBOFF_CHDBOFF_SHIFT, &val);
        if (ret) {
                dev_err(dev, "Unable to read CHDBOFF register\n");
                return -EIO;
        }

        /* Setup wake db and clear both halves of the doorbell register */
        mhi_cntrl->wake_db = base + val + (8 * MHI_DEV_WAKE_DB);
        mhi_write_reg(mhi_cntrl, mhi_cntrl->wake_db, 4, 0);
        mhi_write_reg(mhi_cntrl, mhi_cntrl->wake_db, 0, 0);
        mhi_cntrl->wake_set = false;

        /* Setup channel db address for each channel in tre_ring */
        mhi_chan = mhi_cntrl->mhi_chan;
        for (i = 0; i < mhi_cntrl->max_chan; i++, val += 8, mhi_chan++)
                mhi_chan->tre_ring.db_addr = base + val;

        /* Read event ring db offset */
        ret = mhi_read_reg_field(mhi_cntrl, base, ERDBOFF, ERDBOFF_ERDBOFF_MASK,
                                 ERDBOFF_ERDBOFF_SHIFT, &val);
        if (ret) {
                dev_err(dev, "Unable to read ERDBOFF register\n");
                return -EIO;
        }

        /* Setup event db address for each ev_ring */
        mhi_event = mhi_cntrl->mhi_event;
        for (i = 0; i < mhi_cntrl->total_ev_rings; i++, val += 8, mhi_event++) {
                if (mhi_event->offload_ev)
                        continue;

                mhi_event->ring.db_addr = base + val;
        }

        /* Setup DB register for the primary CMD ring */
        mhi_cntrl->mhi_cmd[PRIMARY_CMD_RING].ring.db_addr = base + CRDB_LOWER;

        /* Write to MMIO registers */
        for (i = 0; reg_info[i].offset; i++)
                mhi_write_reg_field(mhi_cntrl, base, reg_info[i].offset,
                                    reg_info[i].mask, reg_info[i].shift,
                                    reg_info[i].val);

        return 0;
}

void mhi_deinit_chan_ctxt(struct mhi_controller *mhi_cntrl,
                          struct mhi_chan *mhi_chan)
{
        struct mhi_ring *buf_ring;
        struct mhi_ring *tre_ring;
        struct mhi_chan_ctxt *chan_ctxt;

        buf_ring = &mhi_chan->buf_ring;
        tre_ring = &mhi_chan->tre_ring;
        chan_ctxt = &mhi_cntrl->mhi_ctxt->chan_ctxt[mhi_chan->chan];

        mhi_free_coherent(mhi_cntrl, tre_ring->alloc_size,
                          tre_ring->pre_aligned, tre_ring->dma_handle);
        vfree(buf_ring->base);

        buf_ring->base = tre_ring->base = NULL;
        chan_ctxt->rbase = 0;
}

int mhi_init_chan_ctxt(struct mhi_controller *mhi_cntrl,
                       struct mhi_chan *mhi_chan)
{
        struct mhi_ring *buf_ring;
        struct mhi_ring *tre_ring;
        struct mhi_chan_ctxt *chan_ctxt;
        u32 tmp;
        int ret;

        buf_ring = &mhi_chan->buf_ring;
        tre_ring = &mhi_chan->tre_ring;
        tre_ring->el_size = sizeof(struct mhi_tre);
        tre_ring->len = tre_ring->el_size * tre_ring->elements;
        chan_ctxt = &mhi_cntrl->mhi_ctxt->chan_ctxt[mhi_chan->chan];
        ret = mhi_alloc_aligned_ring(mhi_cntrl, tre_ring, tre_ring->len);
        if (ret)
                return -ENOMEM;

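        /* The buffer ring is host-only bookkeeping, so plain vmalloc memory suffices */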
        buf_ring->el_size = sizeof(struct mhi_buf_info);
        buf_ring->len = buf_ring->el_size * buf_ring->elements;
        buf_ring->base = vzalloc(buf_ring->len);

        if (!buf_ring->base) {
                mhi_free_coherent(mhi_cntrl, tre_ring->alloc_size,
                                  tre_ring->pre_aligned, tre_ring->dma_handle);
                return -ENOMEM;
        }

        tmp = chan_ctxt->chcfg;
        tmp &= ~CHAN_CTX_CHSTATE_MASK;
        tmp |= (MHI_CH_STATE_ENABLED << CHAN_CTX_CHSTATE_SHIFT);
        chan_ctxt->chcfg = tmp;

        chan_ctxt->rbase = tre_ring->iommu_base;
        chan_ctxt->rp = chan_ctxt->wp = chan_ctxt->rbase;
        chan_ctxt->rlen = tre_ring->len;
        tre_ring->ctxt_wp = &chan_ctxt->wp;

        tre_ring->rp = tre_ring->wp = tre_ring->base;
        buf_ring->rp = buf_ring->wp = buf_ring->base;
        mhi_chan->db_cfg.db_mode = 1;

        /* Make the ring and context updates visible to all cores */
        smp_wmb();

        return 0;
}

static int parse_ev_cfg(struct mhi_controller *mhi_cntrl,
                        struct mhi_controller_config *config)
{
        struct mhi_event *mhi_event;
        struct mhi_event_config *event_cfg;
        struct device *dev = &mhi_cntrl->mhi_dev->dev;
        int i, num;

        num = config->num_events;
        mhi_cntrl->total_ev_rings = num;
        mhi_cntrl->mhi_event = kcalloc(num, sizeof(*mhi_cntrl->mhi_event),
                                       GFP_KERNEL);
        if (!mhi_cntrl->mhi_event)
                return -ENOMEM;

        /* Populate event ring */
        mhi_event = mhi_cntrl->mhi_event;
        for (i = 0; i < num; i++) {
                event_cfg = &config->event_cfg[i];

                mhi_event->er_index = i;
                mhi_event->ring.elements = event_cfg->num_elements;
                mhi_event->intmod = event_cfg->irq_moderation_ms;
                mhi_event->irq = event_cfg->irq;

                if (event_cfg->channel != U32_MAX) {
                        /* This event ring has a dedicated channel */
                        mhi_event->chan = event_cfg->channel;
                        if (mhi_event->chan >= mhi_cntrl->max_chan) {
                                dev_err(dev,
                                        "Event Ring channel not available\n");
                                goto error_ev_cfg;
                        }

                        mhi_event->mhi_chan =
                                &mhi_cntrl->mhi_chan[mhi_event->chan];
                }

                /* Priority is fixed to 1 for now */
                mhi_event->priority = 1;

                mhi_event->db_cfg.brstmode = event_cfg->mode;
                if (MHI_INVALID_BRSTMODE(mhi_event->db_cfg.brstmode))
                        goto error_ev_cfg;

                if (mhi_event->db_cfg.brstmode == MHI_DB_BRST_ENABLE)
                        mhi_event->db_cfg.process_db = mhi_db_brstmode;
                else
                        mhi_event->db_cfg.process_db = mhi_db_brstmode_disable;

                mhi_event->data_type = event_cfg->data_type;

                switch (mhi_event->data_type) {
                case MHI_ER_DATA:
                        mhi_event->process_event = mhi_process_data_event_ring;
                        break;
                case MHI_ER_CTRL:
                        mhi_event->process_event = mhi_process_ctrl_ev_ring;
                        break;
                default:
                        dev_err(dev, "Event Ring type not supported\n");
                        goto error_ev_cfg;
                }

                mhi_event->hw_ring = event_cfg->hardware_event;
                if (mhi_event->hw_ring)
                        mhi_cntrl->hw_ev_rings++;
                else
                        mhi_cntrl->sw_ev_rings++;

                mhi_event->cl_manage = event_cfg->client_managed;
                mhi_event->offload_ev = event_cfg->offload_channel;
                mhi_event++;
        }

        /* We need one IRQ for each event ring plus an additional one for BHI */
        mhi_cntrl->nr_irqs_req = mhi_cntrl->total_ev_rings + 1;

        return 0;

error_ev_cfg:

        kfree(mhi_cntrl->mhi_event);
        return -EINVAL;
}

static int parse_ch_cfg(struct mhi_controller *mhi_cntrl,
                        struct mhi_controller_config *config)
{
        struct mhi_channel_config *ch_cfg;
        struct device *dev = &mhi_cntrl->mhi_dev->dev;
        int i;
        u32 chan;

        mhi_cntrl->max_chan = config->max_channels;

        /*
         * The allocation of MHI channels can exceed 32KB in some scenarios,
         * so to avoid possible memory allocation failures, vzalloc is
         * used here
         */
        mhi_cntrl->mhi_chan = vzalloc(mhi_cntrl->max_chan *
                                      sizeof(*mhi_cntrl->mhi_chan));
        if (!mhi_cntrl->mhi_chan)
                return -ENOMEM;

        INIT_LIST_HEAD(&mhi_cntrl->lpm_chans);

        /* Populate channel configurations */
        for (i = 0; i < config->num_channels; i++) {
                struct mhi_chan *mhi_chan;

                ch_cfg = &config->ch_cfg[i];

                chan = ch_cfg->num;
                if (chan >= mhi_cntrl->max_chan) {
                        dev_err(dev, "Channel %d not available\n", chan);
                        goto error_chan_cfg;
                }

                mhi_chan = &mhi_cntrl->mhi_chan[chan];
                mhi_chan->name = ch_cfg->name;
                mhi_chan->chan = chan;

                mhi_chan->tre_ring.elements = ch_cfg->num_elements;
                if (!mhi_chan->tre_ring.elements)
                        goto error_chan_cfg;

                /*
                 * For some channels, the local ring length should be bigger
                 * than the transfer ring length due to internal logical
                 * channels in the device, so that the host can queue more
                 * buffers than the transfer ring length allows. For example,
                 * RSC channels should have a larger local channel length
                 * than the transfer ring length.
                 */
                mhi_chan->buf_ring.elements = ch_cfg->local_elements;
                if (!mhi_chan->buf_ring.elements)
                        mhi_chan->buf_ring.elements = mhi_chan->tre_ring.elements;
                mhi_chan->er_index = ch_cfg->event_ring;
                mhi_chan->dir = ch_cfg->dir;

                /*
                 * For most channels, chtype is identical to the channel
                 * direction. So, if it is not defined, assign the channel
                 * direction to chtype.
                 */
                mhi_chan->type = ch_cfg->type;
                if (!mhi_chan->type)
                        mhi_chan->type = (enum mhi_ch_type)mhi_chan->dir;

                mhi_chan->ee_mask = ch_cfg->ee_mask;
                mhi_chan->db_cfg.pollcfg = ch_cfg->pollcfg;
                mhi_chan->lpm_notify = ch_cfg->lpm_notify;
                mhi_chan->offload_ch = ch_cfg->offload_channel;
                mhi_chan->db_cfg.reset_req = ch_cfg->doorbell_mode_switch;
                mhi_chan->pre_alloc = ch_cfg->auto_queue;
                mhi_chan->auto_start = ch_cfg->auto_start;

                /*
                 * If the MHI host allocates buffers, then the channel
                 * direction should be DMA_FROM_DEVICE
                 */
                if (mhi_chan->pre_alloc && mhi_chan->dir != DMA_FROM_DEVICE) {
                        dev_err(dev, "Invalid channel configuration\n");
                        goto error_chan_cfg;
                }

                /*
                 * Bi-directional and directionless channels must be
                 * offload channels
                 */
                if ((mhi_chan->dir == DMA_BIDIRECTIONAL ||
                     mhi_chan->dir == DMA_NONE) && !mhi_chan->offload_ch) {
                        dev_err(dev, "Invalid channel configuration\n");
                        goto error_chan_cfg;
                }

                if (!mhi_chan->offload_ch) {
                        mhi_chan->db_cfg.brstmode = ch_cfg->doorbell;
                        if (MHI_INVALID_BRSTMODE(mhi_chan->db_cfg.brstmode)) {
                                dev_err(dev, "Invalid doorbell mode\n");
                                goto error_chan_cfg;
                        }
                }

                if (mhi_chan->db_cfg.brstmode == MHI_DB_BRST_ENABLE)
                        mhi_chan->db_cfg.process_db = mhi_db_brstmode;
                else
                        mhi_chan->db_cfg.process_db = mhi_db_brstmode_disable;

                mhi_chan->configured = true;

                if (mhi_chan->lpm_notify)
                        list_add_tail(&mhi_chan->node, &mhi_cntrl->lpm_chans);
        }

        return 0;

error_chan_cfg:
        vfree(mhi_cntrl->mhi_chan);

        return -EINVAL;
}

static int parse_config(struct mhi_controller *mhi_cntrl,
                        struct mhi_controller_config *config)
{
        int ret;

        /* Parse MHI channel configuration */
        ret = parse_ch_cfg(mhi_cntrl, config);
        if (ret)
                return ret;

        /* Parse MHI event configuration */
        ret = parse_ev_cfg(mhi_cntrl, config);
        if (ret)
                goto error_ev_cfg;

        mhi_cntrl->timeout_ms = config->timeout_ms;
        if (!mhi_cntrl->timeout_ms)
                mhi_cntrl->timeout_ms = MHI_TIMEOUT_MS;

        mhi_cntrl->bounce_buf = config->use_bounce_buf;
        mhi_cntrl->buffer_len = config->buf_len;
        if (!mhi_cntrl->buffer_len)
                mhi_cntrl->buffer_len = MHI_MAX_MTU;

        /* By default, host is allowed to ring DB in both M0 and M2 states */
        mhi_cntrl->db_access = MHI_PM_M0 | MHI_PM_M2;
        if (config->m2_no_db)
                mhi_cntrl->db_access &= ~MHI_PM_M2;

        return 0;

error_ev_cfg:
        vfree(mhi_cntrl->mhi_chan);

        return ret;
}

int mhi_register_controller(struct mhi_controller *mhi_cntrl,
                            struct mhi_controller_config *config)
{
        struct mhi_event *mhi_event;
        struct mhi_chan *mhi_chan;
        struct mhi_cmd *mhi_cmd;
        struct mhi_device *mhi_dev;
        u32 soc_info;
        int ret, i;

        if (!mhi_cntrl)
                return -EINVAL;

        if (!mhi_cntrl->runtime_get || !mhi_cntrl->runtime_put ||
            !mhi_cntrl->status_cb || !mhi_cntrl->read_reg ||
            !mhi_cntrl->write_reg)
                return -EINVAL;

        ret = parse_config(mhi_cntrl, config);
        if (ret)
                return -EINVAL;

        mhi_cntrl->mhi_cmd = kcalloc(NR_OF_CMD_RINGS,
                                     sizeof(*mhi_cntrl->mhi_cmd), GFP_KERNEL);
        if (!mhi_cntrl->mhi_cmd) {
                ret = -ENOMEM;
                goto error_alloc_cmd;
        }

        INIT_LIST_HEAD(&mhi_cntrl->transition_list);
        mutex_init(&mhi_cntrl->pm_mutex);
        rwlock_init(&mhi_cntrl->pm_lock);
        spin_lock_init(&mhi_cntrl->transition_lock);
        spin_lock_init(&mhi_cntrl->wlock);
        INIT_WORK(&mhi_cntrl->st_worker, mhi_pm_st_worker);
        init_waitqueue_head(&mhi_cntrl->state_event);

        mhi_cmd = mhi_cntrl->mhi_cmd;
        for (i = 0; i < NR_OF_CMD_RINGS; i++, mhi_cmd++)
                spin_lock_init(&mhi_cmd->lock);

        mhi_event = mhi_cntrl->mhi_event;
        for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
                /* Skip for offload events */
                if (mhi_event->offload_ev)
                        continue;

                mhi_event->mhi_cntrl = mhi_cntrl;
                spin_lock_init(&mhi_event->lock);
                if (mhi_event->data_type == MHI_ER_CTRL)
                        tasklet_init(&mhi_event->task, mhi_ctrl_ev_task,
                                     (ulong)mhi_event);
                else
                        tasklet_init(&mhi_event->task, mhi_ev_task,
                                     (ulong)mhi_event);
        }

        mhi_chan = mhi_cntrl->mhi_chan;
        for (i = 0; i < mhi_cntrl->max_chan; i++, mhi_chan++) {
                mutex_init(&mhi_chan->mutex);
                init_completion(&mhi_chan->completion);
                rwlock_init(&mhi_chan->lock);

                /* Used for setting the BEI field of a TRE */
                mhi_event = &mhi_cntrl->mhi_event[mhi_chan->er_index];
                mhi_chan->intmod = mhi_event->intmod;
        }

        if (mhi_cntrl->bounce_buf) {
                mhi_cntrl->map_single = mhi_map_single_use_bb;
                mhi_cntrl->unmap_single = mhi_unmap_single_use_bb;
        } else {
                mhi_cntrl->map_single = mhi_map_single_no_bb;
                mhi_cntrl->unmap_single = mhi_unmap_single_no_bb;
        }

        /* Read the MHI device info */
        ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->regs,
                           SOC_HW_VERSION_OFFS, &soc_info);
        if (ret)
                goto error_alloc_dev;

        mhi_cntrl->family_number = (soc_info & SOC_HW_VERSION_FAM_NUM_BMSK) >>
                                        SOC_HW_VERSION_FAM_NUM_SHFT;
        mhi_cntrl->device_number = (soc_info & SOC_HW_VERSION_DEV_NUM_BMSK) >>
                                        SOC_HW_VERSION_DEV_NUM_SHFT;
        mhi_cntrl->major_version = (soc_info & SOC_HW_VERSION_MAJOR_VER_BMSK) >>
                                        SOC_HW_VERSION_MAJOR_VER_SHFT;
        mhi_cntrl->minor_version = (soc_info & SOC_HW_VERSION_MINOR_VER_BMSK) >>
                                        SOC_HW_VERSION_MINOR_VER_SHFT;

        /* Register controller with MHI bus */
        mhi_dev = mhi_alloc_device(mhi_cntrl);
        if (IS_ERR(mhi_dev)) {
                dev_err(mhi_cntrl->cntrl_dev, "Failed to allocate MHI device\n");
                ret = PTR_ERR(mhi_dev);
                goto error_alloc_dev;
        }

        mhi_dev->dev_type = MHI_DEVICE_CONTROLLER;
        mhi_dev->mhi_cntrl = mhi_cntrl;
        dev_set_name(&mhi_dev->dev, "%s", dev_name(mhi_cntrl->cntrl_dev));

        /* Init wakeup source */
        device_init_wakeup(&mhi_dev->dev, true);

        ret = device_add(&mhi_dev->dev);
        if (ret)
                goto error_add_dev;

        mhi_cntrl->mhi_dev = mhi_dev;

        return 0;

error_add_dev:
        put_device(&mhi_dev->dev);

error_alloc_dev:
        kfree(mhi_cntrl->mhi_cmd);

error_alloc_cmd:
        vfree(mhi_cntrl->mhi_chan);
        kfree(mhi_cntrl->mhi_event);

        return ret;
}
EXPORT_SYMBOL_GPL(mhi_register_controller);

void mhi_unregister_controller(struct mhi_controller *mhi_cntrl)
{
        struct mhi_device *mhi_dev = mhi_cntrl->mhi_dev;
        struct mhi_chan *mhi_chan = mhi_cntrl->mhi_chan;
        unsigned int i;

        kfree(mhi_cntrl->mhi_cmd);
        kfree(mhi_cntrl->mhi_event);

        /* Drop the references to MHI devices created for channels */
        for (i = 0; i < mhi_cntrl->max_chan; i++, mhi_chan++) {
                if (!mhi_chan->mhi_dev)
                        continue;

                put_device(&mhi_chan->mhi_dev->dev);
        }
        vfree(mhi_cntrl->mhi_chan);

        device_del(&mhi_dev->dev);
        put_device(&mhi_dev->dev);
}
EXPORT_SYMBOL_GPL(mhi_unregister_controller);

int mhi_prepare_for_power_up(struct mhi_controller *mhi_cntrl)
{
        struct device *dev = &mhi_cntrl->mhi_dev->dev;
        u32 bhie_off;
        int ret;

        mutex_lock(&mhi_cntrl->pm_mutex);

        ret = mhi_init_dev_ctxt(mhi_cntrl);
        if (ret)
                goto error_dev_ctxt;

        /*
         * Allocate the RDDM table if specified; this table is used for
         * debugging purposes
         */
        if (mhi_cntrl->rddm_size) {
                mhi_alloc_bhie_table(mhi_cntrl, &mhi_cntrl->rddm_image,
                                     mhi_cntrl->rddm_size);

                /*
                 * This controller supports RDDM, so we need to manually clear
                 * BHIE RX registers since POR values are undefined.
                 */
                ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->regs, BHIEOFF,
                                   &bhie_off);
                if (ret) {
                        dev_err(dev, "Error getting BHIE offset\n");
                        goto bhie_error;
                }

                mhi_cntrl->bhie = mhi_cntrl->regs + bhie_off;
                memset_io(mhi_cntrl->bhie + BHIE_RXVECADDR_LOW_OFFS,
                          0, BHIE_RXVECSTATUS_OFFS - BHIE_RXVECADDR_LOW_OFFS +
                          4);

                if (mhi_cntrl->rddm_image)
                        mhi_rddm_prepare(mhi_cntrl, mhi_cntrl->rddm_image);
        }

        mhi_cntrl->pre_init = true;

        mutex_unlock(&mhi_cntrl->pm_mutex);

        return 0;

bhie_error:
        if (mhi_cntrl->rddm_image) {
                mhi_free_bhie_table(mhi_cntrl, mhi_cntrl->rddm_image);
                mhi_cntrl->rddm_image = NULL;
        }

error_dev_ctxt:
        mutex_unlock(&mhi_cntrl->pm_mutex);

        return ret;
}
EXPORT_SYMBOL_GPL(mhi_prepare_for_power_up);

void mhi_unprepare_after_power_down(struct mhi_controller *mhi_cntrl)
{
        if (mhi_cntrl->fbc_image) {
                mhi_free_bhie_table(mhi_cntrl, mhi_cntrl->fbc_image);
                mhi_cntrl->fbc_image = NULL;
        }

        if (mhi_cntrl->rddm_image) {
                mhi_free_bhie_table(mhi_cntrl, mhi_cntrl->rddm_image);
                mhi_cntrl->rddm_image = NULL;
        }

        mhi_deinit_dev_ctxt(mhi_cntrl);
        mhi_cntrl->pre_init = false;
}
EXPORT_SYMBOL_GPL(mhi_unprepare_after_power_down);

static void mhi_release_device(struct device *dev)
{
        struct mhi_device *mhi_dev = to_mhi_device(dev);

        /*
         * We need to set the mhi_chan->mhi_dev to NULL here since the MHI
         * devices for the channels will only get created if the mhi_dev
         * associated with them is NULL. This scenario will happen during the
         * controller suspend and resume.
         */
        if (mhi_dev->ul_chan)
                mhi_dev->ul_chan->mhi_dev = NULL;

        if (mhi_dev->dl_chan)
                mhi_dev->dl_chan->mhi_dev = NULL;

        kfree(mhi_dev);
}

struct mhi_device *mhi_alloc_device(struct mhi_controller *mhi_cntrl)
{
        struct mhi_device *mhi_dev;
        struct device *dev;

        mhi_dev = kzalloc(sizeof(*mhi_dev), GFP_KERNEL);
        if (!mhi_dev)
                return ERR_PTR(-ENOMEM);

        dev = &mhi_dev->dev;
        device_initialize(dev);
        dev->bus = &mhi_bus_type;
        dev->release = mhi_release_device;
        dev->parent = mhi_cntrl->cntrl_dev;
        mhi_dev->mhi_cntrl = mhi_cntrl;
        mhi_dev->dev_wake = 0;

        return mhi_dev;
}

static int mhi_driver_probe(struct device *dev)
{
        struct mhi_device *mhi_dev = to_mhi_device(dev);
        struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
        struct device_driver *drv = dev->driver;
        struct mhi_driver *mhi_drv = to_mhi_driver(drv);
        struct mhi_event *mhi_event;
        struct mhi_chan *ul_chan = mhi_dev->ul_chan;
        struct mhi_chan *dl_chan = mhi_dev->dl_chan;
        int ret;

        /* Bring device out of LPM */
        ret = mhi_device_get_sync(mhi_dev);
        if (ret)
                return ret;

        ret = -EINVAL;

        if (ul_chan) {
                /*
                 * If the channel supports LPM notifications, status_cb
                 * must be provided
                 */
                if (ul_chan->lpm_notify && !mhi_drv->status_cb)
                        goto exit_probe;

                /* For non-offload channels, xfer_cb must be provided */
                if (!ul_chan->offload_ch && !mhi_drv->ul_xfer_cb)
                        goto exit_probe;

                ul_chan->xfer_cb = mhi_drv->ul_xfer_cb;
                if (ul_chan->auto_start) {
                        ret = mhi_prepare_channel(mhi_cntrl, ul_chan);
                        if (ret)
                                goto exit_probe;
                }
        }

        ret = -EINVAL;
        if (dl_chan) {
                /*
                 * If the channel supports LPM notifications, status_cb
                 * must be provided
                 */
                if (dl_chan->lpm_notify && !mhi_drv->status_cb)
                        goto exit_probe;

                /* For non-offload channels, xfer_cb must be provided */
                if (!dl_chan->offload_ch && !mhi_drv->dl_xfer_cb)
                        goto exit_probe;

                mhi_event = &mhi_cntrl->mhi_event[dl_chan->er_index];

                /*
                 * If the channel event ring is managed by the client, then
                 * status_cb must be provided so that the framework can
                 * notify pending data
                 */
                if (mhi_event->cl_manage && !mhi_drv->status_cb)
                        goto exit_probe;

                dl_chan->xfer_cb = mhi_drv->dl_xfer_cb;
        }

        /* Call the user provided probe function */
        ret = mhi_drv->probe(mhi_dev, mhi_dev->id);
        if (ret)
                goto exit_probe;

        if (dl_chan && dl_chan->auto_start)
                mhi_prepare_channel(mhi_cntrl, dl_chan);

        mhi_device_put(mhi_dev);

        return ret;

exit_probe:
        mhi_unprepare_from_transfer(mhi_dev);

        mhi_device_put(mhi_dev);

        return ret;
}

static int mhi_driver_remove(struct device *dev)
{
        struct mhi_device *mhi_dev = to_mhi_device(dev);
        struct mhi_driver *mhi_drv = to_mhi_driver(dev->driver);
        struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
        struct mhi_chan *mhi_chan;
        enum mhi_ch_state ch_state[] = {
                MHI_CH_STATE_DISABLED,
                MHI_CH_STATE_DISABLED
        };
        int dir;

        /* Skip if it is a controller device */
        if (mhi_dev->dev_type == MHI_DEVICE_CONTROLLER)
                return 0;

        /* Reset both channels */
        for (dir = 0; dir < 2; dir++) {
                mhi_chan = dir ? mhi_dev->ul_chan : mhi_dev->dl_chan;

                if (!mhi_chan)
                        continue;

                /* Wake all threads waiting for completion */
                write_lock_irq(&mhi_chan->lock);
                mhi_chan->ccs = MHI_EV_CC_INVALID;
                complete_all(&mhi_chan->completion);
                write_unlock_irq(&mhi_chan->lock);

                /* Save the current state and set the channel to suspended */
                mutex_lock(&mhi_chan->mutex);
                write_lock_irq(&mhi_chan->lock);
                ch_state[dir] = mhi_chan->ch_state;
                mhi_chan->ch_state = MHI_CH_STATE_SUSPENDED;
                write_unlock_irq(&mhi_chan->lock);

                /* Reset the non-offload channel */
                if (!mhi_chan->offload_ch)
                        mhi_reset_chan(mhi_cntrl, mhi_chan);

                mutex_unlock(&mhi_chan->mutex);
        }

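        /* Let the client driver clean up before the channel contexts are torn down */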
        mhi_drv->remove(mhi_dev);

        /* De-init channel if it was enabled */
        for (dir = 0; dir < 2; dir++) {
                mhi_chan = dir ? mhi_dev->ul_chan : mhi_dev->dl_chan;

                if (!mhi_chan)
                        continue;

                mutex_lock(&mhi_chan->mutex);

                if (ch_state[dir] == MHI_CH_STATE_ENABLED &&
                    !mhi_chan->offload_ch)
                        mhi_deinit_chan_ctxt(mhi_cntrl, mhi_chan);

                mhi_chan->ch_state = MHI_CH_STATE_DISABLED;

                mutex_unlock(&mhi_chan->mutex);
        }

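        /* Drop any device wake references still held for this device */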
        read_lock_bh(&mhi_cntrl->pm_lock);
        while (mhi_dev->dev_wake)
                mhi_device_put(mhi_dev);
        read_unlock_bh(&mhi_cntrl->pm_lock);

        return 0;
}

int __mhi_driver_register(struct mhi_driver *mhi_drv, struct module *owner)
{
        struct device_driver *driver = &mhi_drv->driver;

        if (!mhi_drv->probe || !mhi_drv->remove)
                return -EINVAL;

        driver->bus = &mhi_bus_type;
        driver->owner = owner;
        driver->probe = mhi_driver_probe;
        driver->remove = mhi_driver_remove;

        return driver_register(driver);
}
EXPORT_SYMBOL_GPL(__mhi_driver_register);

void mhi_driver_unregister(struct mhi_driver *mhi_drv)
{
        driver_unregister(&mhi_drv->driver);
}
EXPORT_SYMBOL_GPL(mhi_driver_unregister);

static int mhi_uevent(struct device *dev, struct kobj_uevent_env *env)
{
        struct mhi_device *mhi_dev = to_mhi_device(dev);

        return add_uevent_var(env, "MODALIAS=" MHI_DEVICE_MODALIAS_FMT,
                                        mhi_dev->chan_name);
}

static int mhi_match(struct device *dev, struct device_driver *drv)
{
        struct mhi_device *mhi_dev = to_mhi_device(dev);
        struct mhi_driver *mhi_drv = to_mhi_driver(drv);
        const struct mhi_device_id *id;

        /*
         * If the device is a controller type then there is no client driver
         * associated with it
         */
        if (mhi_dev->dev_type == MHI_DEVICE_CONTROLLER)
                return 0;

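        /* The id_table is terminated by an entry with an empty channel name */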
        for (id = mhi_drv->id_table; id->chan[0]; id++)
                if (!strcmp(mhi_dev->chan_name, id->chan)) {
                        mhi_dev->id = id;
                        return 1;
                }

        return 0;
}

struct bus_type mhi_bus_type = {
        .name = "mhi",
        .dev_name = "mhi",
        .match = mhi_match,
        .uevent = mhi_uevent,
};

static int __init mhi_init(void)
{
        return bus_register(&mhi_bus_type);
}

static void __exit mhi_exit(void)
{
        bus_unregister(&mhi_bus_type);
}

postcore_initcall(mhi_init);
module_exit(mhi_exit);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("MHI Host Interface");