linux/drivers/bus/mhi/core/init.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
 *
 */

#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/dma-direction.h>
#include <linux/dma-mapping.h>
#include <linux/idr.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/mhi.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/wait.h>
#include "internal.h"

static DEFINE_IDA(mhi_controller_ida);

const char * const mhi_ee_str[MHI_EE_MAX] = {
        [MHI_EE_PBL] = "PBL",
        [MHI_EE_SBL] = "SBL",
        [MHI_EE_AMSS] = "AMSS",
        [MHI_EE_RDDM] = "RDDM",
        [MHI_EE_WFW] = "WFW",
        [MHI_EE_PTHRU] = "PASS THRU",
        [MHI_EE_EDL] = "EDL",
        [MHI_EE_DISABLE_TRANSITION] = "DISABLE",
        [MHI_EE_NOT_SUPPORTED] = "NOT SUPPORTED",
};

const char * const dev_state_tran_str[DEV_ST_TRANSITION_MAX] = {
        [DEV_ST_TRANSITION_PBL] = "PBL",
        [DEV_ST_TRANSITION_READY] = "READY",
        [DEV_ST_TRANSITION_SBL] = "SBL",
        [DEV_ST_TRANSITION_MISSION_MODE] = "MISSION_MODE",
        [DEV_ST_TRANSITION_SYS_ERR] = "SYS_ERR",
        [DEV_ST_TRANSITION_DISABLE] = "DISABLE",
};

const char * const mhi_state_str[MHI_STATE_MAX] = {
        [MHI_STATE_RESET] = "RESET",
        [MHI_STATE_READY] = "READY",
        [MHI_STATE_M0] = "M0",
        [MHI_STATE_M1] = "M1",
        [MHI_STATE_M2] = "M2",
        [MHI_STATE_M3] = "M3",
        [MHI_STATE_M3_FAST] = "M3_FAST",
        [MHI_STATE_BHI] = "BHI",
        [MHI_STATE_SYS_ERR] = "SYS_ERR",
};

static const char * const mhi_pm_state_str[] = {
        [MHI_PM_STATE_DISABLE] = "DISABLE",
        [MHI_PM_STATE_POR] = "POR",
        [MHI_PM_STATE_M0] = "M0",
        [MHI_PM_STATE_M2] = "M2",
        [MHI_PM_STATE_M3_ENTER] = "M?->M3",
        [MHI_PM_STATE_M3] = "M3",
        [MHI_PM_STATE_M3_EXIT] = "M3->M0",
        [MHI_PM_STATE_FW_DL_ERR] = "FW DL Error",
        [MHI_PM_STATE_SYS_ERR_DETECT] = "SYS_ERR Detect",
        [MHI_PM_STATE_SYS_ERR_PROCESS] = "SYS_ERR Process",
        [MHI_PM_STATE_SHUTDOWN_PROCESS] = "SHUTDOWN Process",
        [MHI_PM_STATE_LD_ERR_FATAL_DETECT] = "LD or Error Fatal Detect",
};

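/*
 * The caller passes a PM state encoded as a bit position; find_last_bit()
 * converts the highest set bit into an index into mhi_pm_state_str[] above.
 */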
const char *to_mhi_pm_state_str(enum mhi_pm_state state)
{
        int index = find_last_bit((unsigned long *)&state, 32);

        if (index >= ARRAY_SIZE(mhi_pm_state_str))
                return "Invalid State";

        return mhi_pm_state_str[index];
}

static ssize_t serial_number_show(struct device *dev,
                                  struct device_attribute *attr,
                                  char *buf)
{
        struct mhi_device *mhi_dev = to_mhi_device(dev);
        struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;

        return snprintf(buf, PAGE_SIZE, "Serial Number: %u\n",
                        mhi_cntrl->serial_number);
}
static DEVICE_ATTR_RO(serial_number);

static ssize_t oem_pk_hash_show(struct device *dev,
                                struct device_attribute *attr,
                                char *buf)
{
        struct mhi_device *mhi_dev = to_mhi_device(dev);
        struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
        int i, cnt = 0;

        for (i = 0; i < ARRAY_SIZE(mhi_cntrl->oem_pk_hash); i++)
                cnt += snprintf(buf + cnt, PAGE_SIZE - cnt,
                                "OEMPKHASH[%d]: 0x%x\n", i,
                                mhi_cntrl->oem_pk_hash[i]);

        return cnt;
}
static DEVICE_ATTR_RO(oem_pk_hash);

static struct attribute *mhi_dev_attrs[] = {
        &dev_attr_serial_number.attr,
        &dev_attr_oem_pk_hash.attr,
        NULL,
};
ATTRIBUTE_GROUPS(mhi_dev);

/* MHI protocol requires the transfer ring to be aligned with ring length */
static int mhi_alloc_aligned_ring(struct mhi_controller *mhi_cntrl,
                                  struct mhi_ring *ring,
                                  u64 len)
{
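        /*
         * Over-allocate by (len - 1) bytes so that a ring base aligned to the
         * ring length can always be carved out of the coherent buffer.
         */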
        ring->alloc_size = len + (len - 1);
        ring->pre_aligned = mhi_alloc_coherent(mhi_cntrl, ring->alloc_size,
                                               &ring->dma_handle, GFP_KERNEL);
        if (!ring->pre_aligned)
                return -ENOMEM;

        ring->iommu_base = (ring->dma_handle + (len - 1)) & ~(len - 1);
        ring->base = ring->pre_aligned + (ring->iommu_base - ring->dma_handle);

        return 0;
}

void mhi_deinit_free_irq(struct mhi_controller *mhi_cntrl)
{
        int i;
        struct mhi_event *mhi_event = mhi_cntrl->mhi_event;

        for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
                if (mhi_event->offload_ev)
                        continue;

                free_irq(mhi_cntrl->irq[mhi_event->irq], mhi_event);
        }

        free_irq(mhi_cntrl->irq[0], mhi_cntrl);
}

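/*
 * Request the BHI interrupt plus one IRQ per non-offloaded event ring. On
 * failure, release any IRQs that were already requested.
 */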
int mhi_init_irq_setup(struct mhi_controller *mhi_cntrl)
{
        struct mhi_event *mhi_event = mhi_cntrl->mhi_event;
        struct device *dev = &mhi_cntrl->mhi_dev->dev;
        int i, ret;

        /* Setup BHI_INTVEC IRQ */
        ret = request_threaded_irq(mhi_cntrl->irq[0], mhi_intvec_handler,
                                   mhi_intvec_threaded_handler,
                                   IRQF_SHARED | IRQF_NO_SUSPEND,
                                   "bhi", mhi_cntrl);
        if (ret)
                return ret;

        for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
                if (mhi_event->offload_ev)
                        continue;

                if (mhi_event->irq >= mhi_cntrl->nr_irqs) {
                        dev_err(dev, "irq %d not available for event ring\n",
                                mhi_event->irq);
                        ret = -EINVAL;
                        goto error_request;
                }

                ret = request_irq(mhi_cntrl->irq[mhi_event->irq],
                                  mhi_irq_handler,
                                  IRQF_SHARED | IRQF_NO_SUSPEND,
                                  "mhi", mhi_event);
                if (ret) {
                        dev_err(dev, "Error requesting irq:%d for ev:%d\n",
                                mhi_cntrl->irq[mhi_event->irq], i);
                        goto error_request;
                }
        }

        return 0;

error_request:
        for (--i, --mhi_event; i >= 0; i--, mhi_event--) {
                if (mhi_event->offload_ev)
                        continue;

                free_irq(mhi_cntrl->irq[mhi_event->irq], mhi_event);
        }
        free_irq(mhi_cntrl->irq[0], mhi_cntrl);

        return ret;
}

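/*
 * Free the command and event rings along with the command, event and channel
 * context arrays. Counterpart of mhi_init_dev_ctxt().
 */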
void mhi_deinit_dev_ctxt(struct mhi_controller *mhi_cntrl)
{
        int i;
        struct mhi_ctxt *mhi_ctxt = mhi_cntrl->mhi_ctxt;
        struct mhi_cmd *mhi_cmd;
        struct mhi_event *mhi_event;
        struct mhi_ring *ring;

        mhi_cmd = mhi_cntrl->mhi_cmd;
        for (i = 0; i < NR_OF_CMD_RINGS; i++, mhi_cmd++) {
                ring = &mhi_cmd->ring;
                mhi_free_coherent(mhi_cntrl, ring->alloc_size,
                                  ring->pre_aligned, ring->dma_handle);
                ring->base = NULL;
                ring->iommu_base = 0;
        }

        mhi_free_coherent(mhi_cntrl,
                          sizeof(*mhi_ctxt->cmd_ctxt) * NR_OF_CMD_RINGS,
                          mhi_ctxt->cmd_ctxt, mhi_ctxt->cmd_ctxt_addr);

        mhi_event = mhi_cntrl->mhi_event;
        for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
                if (mhi_event->offload_ev)
                        continue;

                ring = &mhi_event->ring;
                mhi_free_coherent(mhi_cntrl, ring->alloc_size,
                                  ring->pre_aligned, ring->dma_handle);
                ring->base = NULL;
                ring->iommu_base = 0;
        }

        mhi_free_coherent(mhi_cntrl, sizeof(*mhi_ctxt->er_ctxt) *
                          mhi_cntrl->total_ev_rings, mhi_ctxt->er_ctxt,
                          mhi_ctxt->er_ctxt_addr);

        mhi_free_coherent(mhi_cntrl, sizeof(*mhi_ctxt->chan_ctxt) *
                          mhi_cntrl->max_chan, mhi_ctxt->chan_ctxt,
                          mhi_ctxt->chan_ctxt_addr);

        kfree(mhi_ctxt);
        mhi_cntrl->mhi_ctxt = NULL;
}

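/*
 * Allocate the channel, event and command context arrays shared with the
 * device over DMA, and set up the rings backing the event and command
 * contexts.
 */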
int mhi_init_dev_ctxt(struct mhi_controller *mhi_cntrl)
{
        struct mhi_ctxt *mhi_ctxt;
        struct mhi_chan_ctxt *chan_ctxt;
        struct mhi_event_ctxt *er_ctxt;
        struct mhi_cmd_ctxt *cmd_ctxt;
        struct mhi_chan *mhi_chan;
        struct mhi_event *mhi_event;
        struct mhi_cmd *mhi_cmd;
        u32 tmp;
        int ret = -ENOMEM, i;

        atomic_set(&mhi_cntrl->dev_wake, 0);
        atomic_set(&mhi_cntrl->pending_pkts, 0);

        mhi_ctxt = kzalloc(sizeof(*mhi_ctxt), GFP_KERNEL);
        if (!mhi_ctxt)
                return -ENOMEM;

        /* Setup channel ctxt */
        mhi_ctxt->chan_ctxt = mhi_alloc_coherent(mhi_cntrl,
                                                 sizeof(*mhi_ctxt->chan_ctxt) *
                                                 mhi_cntrl->max_chan,
                                                 &mhi_ctxt->chan_ctxt_addr,
                                                 GFP_KERNEL);
        if (!mhi_ctxt->chan_ctxt)
                goto error_alloc_chan_ctxt;

        mhi_chan = mhi_cntrl->mhi_chan;
        chan_ctxt = mhi_ctxt->chan_ctxt;
        for (i = 0; i < mhi_cntrl->max_chan; i++, chan_ctxt++, mhi_chan++) {
                /* Skip if it is an offload channel */
                if (mhi_chan->offload_ch)
                        continue;

                tmp = chan_ctxt->chcfg;
                tmp &= ~CHAN_CTX_CHSTATE_MASK;
                tmp |= (MHI_CH_STATE_DISABLED << CHAN_CTX_CHSTATE_SHIFT);
                tmp &= ~CHAN_CTX_BRSTMODE_MASK;
                tmp |= (mhi_chan->db_cfg.brstmode << CHAN_CTX_BRSTMODE_SHIFT);
                tmp &= ~CHAN_CTX_POLLCFG_MASK;
                tmp |= (mhi_chan->db_cfg.pollcfg << CHAN_CTX_POLLCFG_SHIFT);
                chan_ctxt->chcfg = tmp;

                chan_ctxt->chtype = mhi_chan->type;
                chan_ctxt->erindex = mhi_chan->er_index;

                mhi_chan->ch_state = MHI_CH_STATE_DISABLED;
                mhi_chan->tre_ring.db_addr = (void __iomem *)&chan_ctxt->wp;
        }

        /* Setup event context */
        mhi_ctxt->er_ctxt = mhi_alloc_coherent(mhi_cntrl,
                                               sizeof(*mhi_ctxt->er_ctxt) *
                                               mhi_cntrl->total_ev_rings,
                                               &mhi_ctxt->er_ctxt_addr,
                                               GFP_KERNEL);
        if (!mhi_ctxt->er_ctxt)
                goto error_alloc_er_ctxt;

        er_ctxt = mhi_ctxt->er_ctxt;
        mhi_event = mhi_cntrl->mhi_event;
        for (i = 0; i < mhi_cntrl->total_ev_rings; i++, er_ctxt++,
                     mhi_event++) {
                struct mhi_ring *ring = &mhi_event->ring;

                /* Skip if it is an offload event */
                if (mhi_event->offload_ev)
                        continue;

                tmp = er_ctxt->intmod;
                tmp &= ~EV_CTX_INTMODC_MASK;
                tmp &= ~EV_CTX_INTMODT_MASK;
                tmp |= (mhi_event->intmod << EV_CTX_INTMODT_SHIFT);
                er_ctxt->intmod = tmp;

                er_ctxt->ertype = MHI_ER_TYPE_VALID;
                er_ctxt->msivec = mhi_event->irq;
                mhi_event->db_cfg.db_mode = true;

                ring->el_size = sizeof(struct mhi_tre);
                ring->len = ring->el_size * ring->elements;
                ret = mhi_alloc_aligned_ring(mhi_cntrl, ring, ring->len);
                if (ret)
                        goto error_alloc_er;

                /*
                 * If the read pointer equals the write pointer, then the
                 * ring is empty
                 */
                ring->rp = ring->wp = ring->base;
                er_ctxt->rbase = ring->iommu_base;
                er_ctxt->rp = er_ctxt->wp = er_ctxt->rbase;
                er_ctxt->rlen = ring->len;
                ring->ctxt_wp = &er_ctxt->wp;
        }

        /* Setup cmd context */
        ret = -ENOMEM;
        mhi_ctxt->cmd_ctxt = mhi_alloc_coherent(mhi_cntrl,
                                                sizeof(*mhi_ctxt->cmd_ctxt) *
                                                NR_OF_CMD_RINGS,
                                                &mhi_ctxt->cmd_ctxt_addr,
                                                GFP_KERNEL);
        if (!mhi_ctxt->cmd_ctxt)
                goto error_alloc_er;

        mhi_cmd = mhi_cntrl->mhi_cmd;
        cmd_ctxt = mhi_ctxt->cmd_ctxt;
        for (i = 0; i < NR_OF_CMD_RINGS; i++, mhi_cmd++, cmd_ctxt++) {
                struct mhi_ring *ring = &mhi_cmd->ring;

                ring->el_size = sizeof(struct mhi_tre);
                ring->elements = CMD_EL_PER_RING;
                ring->len = ring->el_size * ring->elements;
                ret = mhi_alloc_aligned_ring(mhi_cntrl, ring, ring->len);
                if (ret)
                        goto error_alloc_cmd;

                ring->rp = ring->wp = ring->base;
                cmd_ctxt->rbase = ring->iommu_base;
                cmd_ctxt->rp = cmd_ctxt->wp = cmd_ctxt->rbase;
                cmd_ctxt->rlen = ring->len;
                ring->ctxt_wp = &cmd_ctxt->wp;
        }

        mhi_cntrl->mhi_ctxt = mhi_ctxt;

        return 0;

error_alloc_cmd:
        for (--i, --mhi_cmd; i >= 0; i--, mhi_cmd--) {
                struct mhi_ring *ring = &mhi_cmd->ring;

                mhi_free_coherent(mhi_cntrl, ring->alloc_size,
                                  ring->pre_aligned, ring->dma_handle);
        }
        mhi_free_coherent(mhi_cntrl,
                          sizeof(*mhi_ctxt->cmd_ctxt) * NR_OF_CMD_RINGS,
                          mhi_ctxt->cmd_ctxt, mhi_ctxt->cmd_ctxt_addr);
        i = mhi_cntrl->total_ev_rings;
        mhi_event = mhi_cntrl->mhi_event + i;

error_alloc_er:
        for (--i, --mhi_event; i >= 0; i--, mhi_event--) {
                struct mhi_ring *ring = &mhi_event->ring;

                if (mhi_event->offload_ev)
                        continue;

                mhi_free_coherent(mhi_cntrl, ring->alloc_size,
                                  ring->pre_aligned, ring->dma_handle);
        }
        mhi_free_coherent(mhi_cntrl, sizeof(*mhi_ctxt->er_ctxt) *
                          mhi_cntrl->total_ev_rings, mhi_ctxt->er_ctxt,
                          mhi_ctxt->er_ctxt_addr);

error_alloc_er_ctxt:
        mhi_free_coherent(mhi_cntrl, sizeof(*mhi_ctxt->chan_ctxt) *
                          mhi_cntrl->max_chan, mhi_ctxt->chan_ctxt,
                          mhi_ctxt->chan_ctxt_addr);

error_alloc_chan_ctxt:
        kfree(mhi_ctxt);

        return ret;
}

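/*
 * Program the context array base addresses, the control/data address ranges
 * and the event ring counts into the MMIO registers, and compute the doorbell
 * addresses for channels, event rings and the primary command ring.
 */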
int mhi_init_mmio(struct mhi_controller *mhi_cntrl)
{
        u32 val;
        int i, ret;
        struct mhi_chan *mhi_chan;
        struct mhi_event *mhi_event;
        void __iomem *base = mhi_cntrl->regs;
        struct device *dev = &mhi_cntrl->mhi_dev->dev;
        struct {
                u32 offset;
                u32 mask;
                u32 shift;
                u32 val;
        } reg_info[] = {
                {
                        CCABAP_HIGHER, U32_MAX, 0,
                        upper_32_bits(mhi_cntrl->mhi_ctxt->chan_ctxt_addr),
                },
                {
                        CCABAP_LOWER, U32_MAX, 0,
                        lower_32_bits(mhi_cntrl->mhi_ctxt->chan_ctxt_addr),
                },
                {
                        ECABAP_HIGHER, U32_MAX, 0,
                        upper_32_bits(mhi_cntrl->mhi_ctxt->er_ctxt_addr),
                },
                {
                        ECABAP_LOWER, U32_MAX, 0,
                        lower_32_bits(mhi_cntrl->mhi_ctxt->er_ctxt_addr),
                },
                {
                        CRCBAP_HIGHER, U32_MAX, 0,
                        upper_32_bits(mhi_cntrl->mhi_ctxt->cmd_ctxt_addr),
                },
                {
                        CRCBAP_LOWER, U32_MAX, 0,
                        lower_32_bits(mhi_cntrl->mhi_ctxt->cmd_ctxt_addr),
                },
                {
                        MHICFG, MHICFG_NER_MASK, MHICFG_NER_SHIFT,
                        mhi_cntrl->total_ev_rings,
                },
                {
                        MHICFG, MHICFG_NHWER_MASK, MHICFG_NHWER_SHIFT,
                        mhi_cntrl->hw_ev_rings,
                },
                {
                        MHICTRLBASE_HIGHER, U32_MAX, 0,
                        upper_32_bits(mhi_cntrl->iova_start),
                },
                {
                        MHICTRLBASE_LOWER, U32_MAX, 0,
                        lower_32_bits(mhi_cntrl->iova_start),
                },
                {
                        MHIDATABASE_HIGHER, U32_MAX, 0,
                        upper_32_bits(mhi_cntrl->iova_start),
                },
                {
                        MHIDATABASE_LOWER, U32_MAX, 0,
                        lower_32_bits(mhi_cntrl->iova_start),
                },
                {
                        MHICTRLLIMIT_HIGHER, U32_MAX, 0,
                        upper_32_bits(mhi_cntrl->iova_stop),
                },
                {
                        MHICTRLLIMIT_LOWER, U32_MAX, 0,
                        lower_32_bits(mhi_cntrl->iova_stop),
                },
                {
                        MHIDATALIMIT_HIGHER, U32_MAX, 0,
                        upper_32_bits(mhi_cntrl->iova_stop),
                },
                {
                        MHIDATALIMIT_LOWER, U32_MAX, 0,
                        lower_32_bits(mhi_cntrl->iova_stop),
                },
                { 0, 0, 0 }
        };

        dev_dbg(dev, "Initializing MHI registers\n");

        /* Read channel db offset */
        ret = mhi_read_reg_field(mhi_cntrl, base, CHDBOFF, CHDBOFF_CHDBOFF_MASK,
                                 CHDBOFF_CHDBOFF_SHIFT, &val);
        if (ret) {
                dev_err(dev, "Unable to read CHDBOFF register\n");
                return -EIO;
        }

        /* Setup wake db */
        mhi_cntrl->wake_db = base + val + (8 * MHI_DEV_WAKE_DB);
        mhi_write_reg(mhi_cntrl, mhi_cntrl->wake_db, 4, 0);
        mhi_write_reg(mhi_cntrl, mhi_cntrl->wake_db, 0, 0);
        mhi_cntrl->wake_set = false;

        /* Setup channel db address for each channel in tre_ring */
        mhi_chan = mhi_cntrl->mhi_chan;
        for (i = 0; i < mhi_cntrl->max_chan; i++, val += 8, mhi_chan++)
                mhi_chan->tre_ring.db_addr = base + val;

        /* Read event ring db offset */
        ret = mhi_read_reg_field(mhi_cntrl, base, ERDBOFF, ERDBOFF_ERDBOFF_MASK,
                                 ERDBOFF_ERDBOFF_SHIFT, &val);
        if (ret) {
                dev_err(dev, "Unable to read ERDBOFF register\n");
                return -EIO;
        }

        /* Setup event db address for each ev_ring */
        mhi_event = mhi_cntrl->mhi_event;
        for (i = 0; i < mhi_cntrl->total_ev_rings; i++, val += 8, mhi_event++) {
                if (mhi_event->offload_ev)
                        continue;

                mhi_event->ring.db_addr = base + val;
        }

        /* Setup DB register for primary CMD rings */
        mhi_cntrl->mhi_cmd[PRIMARY_CMD_RING].ring.db_addr = base + CRDB_LOWER;

        /* Write to MMIO registers */
        for (i = 0; reg_info[i].offset; i++)
                mhi_write_reg_field(mhi_cntrl, base, reg_info[i].offset,
                                    reg_info[i].mask, reg_info[i].shift,
                                    reg_info[i].val);

        return 0;
}

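/*
 * Release the rings backing a single channel: the coherent transfer ring and
 * the vmalloc'ed buffer ring. Counterpart of mhi_init_chan_ctxt().
 */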
void mhi_deinit_chan_ctxt(struct mhi_controller *mhi_cntrl,
                          struct mhi_chan *mhi_chan)
{
        struct mhi_ring *buf_ring;
        struct mhi_ring *tre_ring;
        struct mhi_chan_ctxt *chan_ctxt;

        buf_ring = &mhi_chan->buf_ring;
        tre_ring = &mhi_chan->tre_ring;
        chan_ctxt = &mhi_cntrl->mhi_ctxt->chan_ctxt[mhi_chan->chan];

        mhi_free_coherent(mhi_cntrl, tre_ring->alloc_size,
                          tre_ring->pre_aligned, tre_ring->dma_handle);
        vfree(buf_ring->base);

        buf_ring->base = tre_ring->base = NULL;
        chan_ctxt->rbase = 0;
}

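/*
 * Allocate the transfer (TRE) ring and buffer ring for a channel and point
 * the channel context at the newly allocated ring.
 */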
int mhi_init_chan_ctxt(struct mhi_controller *mhi_cntrl,
                       struct mhi_chan *mhi_chan)
{
        struct mhi_ring *buf_ring;
        struct mhi_ring *tre_ring;
        struct mhi_chan_ctxt *chan_ctxt;
        u32 tmp;
        int ret;

        buf_ring = &mhi_chan->buf_ring;
        tre_ring = &mhi_chan->tre_ring;
        tre_ring->el_size = sizeof(struct mhi_tre);
        tre_ring->len = tre_ring->el_size * tre_ring->elements;
        chan_ctxt = &mhi_cntrl->mhi_ctxt->chan_ctxt[mhi_chan->chan];
        ret = mhi_alloc_aligned_ring(mhi_cntrl, tre_ring, tre_ring->len);
        if (ret)
                return -ENOMEM;

        buf_ring->el_size = sizeof(struct mhi_buf_info);
        buf_ring->len = buf_ring->el_size * buf_ring->elements;
        buf_ring->base = vzalloc(buf_ring->len);

        if (!buf_ring->base) {
                mhi_free_coherent(mhi_cntrl, tre_ring->alloc_size,
                                  tre_ring->pre_aligned, tre_ring->dma_handle);
                return -ENOMEM;
        }

        tmp = chan_ctxt->chcfg;
        tmp &= ~CHAN_CTX_CHSTATE_MASK;
        tmp |= (MHI_CH_STATE_ENABLED << CHAN_CTX_CHSTATE_SHIFT);
        chan_ctxt->chcfg = tmp;

        chan_ctxt->rbase = tre_ring->iommu_base;
        chan_ctxt->rp = chan_ctxt->wp = chan_ctxt->rbase;
        chan_ctxt->rlen = tre_ring->len;
        tre_ring->ctxt_wp = &chan_ctxt->wp;

        tre_ring->rp = tre_ring->wp = tre_ring->base;
        buf_ring->rp = buf_ring->wp = buf_ring->base;
        mhi_chan->db_cfg.db_mode = 1;

        /* Update to all cores */
        smp_wmb();

        return 0;
}

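/*
 * Translate the event ring entries of the controller configuration into the
 * mhi_event array, validating the channel association and doorbell mode of
 * each ring.
 */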
static int parse_ev_cfg(struct mhi_controller *mhi_cntrl,
                        const struct mhi_controller_config *config)
{
        struct mhi_event *mhi_event;
        const struct mhi_event_config *event_cfg;
        struct device *dev = mhi_cntrl->cntrl_dev;
        int i, num;

        num = config->num_events;
        mhi_cntrl->total_ev_rings = num;
        mhi_cntrl->mhi_event = kcalloc(num, sizeof(*mhi_cntrl->mhi_event),
                                       GFP_KERNEL);
        if (!mhi_cntrl->mhi_event)
                return -ENOMEM;

        /* Populate event ring */
        mhi_event = mhi_cntrl->mhi_event;
        for (i = 0; i < num; i++) {
                event_cfg = &config->event_cfg[i];

                mhi_event->er_index = i;
                mhi_event->ring.elements = event_cfg->num_elements;
                mhi_event->intmod = event_cfg->irq_moderation_ms;
                mhi_event->irq = event_cfg->irq;

                if (event_cfg->channel != U32_MAX) {
                        /* This event ring has a dedicated channel */
                        mhi_event->chan = event_cfg->channel;
                        if (mhi_event->chan >= mhi_cntrl->max_chan) {
                                dev_err(dev,
                                        "Event Ring channel not available\n");
                                goto error_ev_cfg;
                        }

                        mhi_event->mhi_chan =
                                &mhi_cntrl->mhi_chan[mhi_event->chan];
                }

                /* Priority is fixed to 1 for now */
                mhi_event->priority = 1;

                mhi_event->db_cfg.brstmode = event_cfg->mode;
                if (MHI_INVALID_BRSTMODE(mhi_event->db_cfg.brstmode))
                        goto error_ev_cfg;

                if (mhi_event->db_cfg.brstmode == MHI_DB_BRST_ENABLE)
                        mhi_event->db_cfg.process_db = mhi_db_brstmode;
                else
                        mhi_event->db_cfg.process_db = mhi_db_brstmode_disable;

                mhi_event->data_type = event_cfg->data_type;

                switch (mhi_event->data_type) {
                case MHI_ER_DATA:
                        mhi_event->process_event = mhi_process_data_event_ring;
                        break;
                case MHI_ER_CTRL:
                        mhi_event->process_event = mhi_process_ctrl_ev_ring;
                        break;
                default:
                        dev_err(dev, "Event Ring type not supported\n");
                        goto error_ev_cfg;
                }

                mhi_event->hw_ring = event_cfg->hardware_event;
                if (mhi_event->hw_ring)
                        mhi_cntrl->hw_ev_rings++;
                else
                        mhi_cntrl->sw_ev_rings++;

                mhi_event->cl_manage = event_cfg->client_managed;
                mhi_event->offload_ev = event_cfg->offload_channel;
                mhi_event++;
        }

        return 0;

error_ev_cfg:

        kfree(mhi_cntrl->mhi_event);
        return -EINVAL;
}

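/*
 * Translate the channel entries of the controller configuration into the
 * mhi_chan array, validating direction, doorbell mode and offload settings
 * for each channel.
 */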
static int parse_ch_cfg(struct mhi_controller *mhi_cntrl,
                        const struct mhi_controller_config *config)
{
        const struct mhi_channel_config *ch_cfg;
        struct device *dev = mhi_cntrl->cntrl_dev;
        int i;
        u32 chan;

        mhi_cntrl->max_chan = config->max_channels;

        /*
         * The allocation of MHI channels can exceed 32KB in some scenarios,
         * so to avoid any possible memory allocation failures, vzalloc is
         * used here
         */
        mhi_cntrl->mhi_chan = vzalloc(mhi_cntrl->max_chan *
                                      sizeof(*mhi_cntrl->mhi_chan));
        if (!mhi_cntrl->mhi_chan)
                return -ENOMEM;

        INIT_LIST_HEAD(&mhi_cntrl->lpm_chans);

        /* Populate channel configurations */
        for (i = 0; i < config->num_channels; i++) {
                struct mhi_chan *mhi_chan;

                ch_cfg = &config->ch_cfg[i];

                chan = ch_cfg->num;
                if (chan >= mhi_cntrl->max_chan) {
                        dev_err(dev, "Channel %d not available\n", chan);
                        goto error_chan_cfg;
                }

                mhi_chan = &mhi_cntrl->mhi_chan[chan];
                mhi_chan->name = ch_cfg->name;
                mhi_chan->chan = chan;

                mhi_chan->tre_ring.elements = ch_cfg->num_elements;
                if (!mhi_chan->tre_ring.elements)
                        goto error_chan_cfg;

                /*
                 * For some channels, the local ring length should be bigger
                 * than the transfer ring length due to internal logical
                 * channels in the device, so that the host can queue many
                 * more buffers than the transfer ring length allows. For
                 * example, RSC channels should have a larger local channel
                 * length than the transfer ring length.
                 */
                mhi_chan->buf_ring.elements = ch_cfg->local_elements;
                if (!mhi_chan->buf_ring.elements)
                        mhi_chan->buf_ring.elements = mhi_chan->tre_ring.elements;
                mhi_chan->er_index = ch_cfg->event_ring;
                mhi_chan->dir = ch_cfg->dir;

                /*
                 * For most channels, chtype is identical to the channel
                 * direction. So, if it is not defined then assign the channel
                 * direction to chtype.
                 */
                mhi_chan->type = ch_cfg->type;
                if (!mhi_chan->type)
                        mhi_chan->type = (enum mhi_ch_type)mhi_chan->dir;

                mhi_chan->ee_mask = ch_cfg->ee_mask;
                mhi_chan->db_cfg.pollcfg = ch_cfg->pollcfg;
                mhi_chan->lpm_notify = ch_cfg->lpm_notify;
                mhi_chan->offload_ch = ch_cfg->offload_channel;
                mhi_chan->db_cfg.reset_req = ch_cfg->doorbell_mode_switch;
                mhi_chan->pre_alloc = ch_cfg->auto_queue;

                /*
                 * If the MHI host allocates buffers, then the channel
                 * direction should be DMA_FROM_DEVICE
                 */
                if (mhi_chan->pre_alloc && mhi_chan->dir != DMA_FROM_DEVICE) {
                        dev_err(dev, "Invalid channel configuration\n");
                        goto error_chan_cfg;
                }

                /*
                 * Bi-directional and directionless channels must be offload
                 * channels
                 */
                if ((mhi_chan->dir == DMA_BIDIRECTIONAL ||
                     mhi_chan->dir == DMA_NONE) && !mhi_chan->offload_ch) {
                        dev_err(dev, "Invalid channel configuration\n");
                        goto error_chan_cfg;
                }

                if (!mhi_chan->offload_ch) {
                        mhi_chan->db_cfg.brstmode = ch_cfg->doorbell;
                        if (MHI_INVALID_BRSTMODE(mhi_chan->db_cfg.brstmode)) {
                                dev_err(dev, "Invalid Door bell mode\n");
                                goto error_chan_cfg;
                        }
                }

                if (mhi_chan->db_cfg.brstmode == MHI_DB_BRST_ENABLE)
                        mhi_chan->db_cfg.process_db = mhi_db_brstmode;
                else
                        mhi_chan->db_cfg.process_db = mhi_db_brstmode_disable;

                mhi_chan->configured = true;

                if (mhi_chan->lpm_notify)
                        list_add_tail(&mhi_chan->node, &mhi_cntrl->lpm_chans);
        }

        return 0;

error_chan_cfg:
        vfree(mhi_cntrl->mhi_chan);

        return -EINVAL;
}

static int parse_config(struct mhi_controller *mhi_cntrl,
                        const struct mhi_controller_config *config)
{
        int ret;

        /* Parse MHI channel configuration */
        ret = parse_ch_cfg(mhi_cntrl, config);
        if (ret)
                return ret;

        /* Parse MHI event configuration */
        ret = parse_ev_cfg(mhi_cntrl, config);
        if (ret)
                goto error_ev_cfg;

        mhi_cntrl->timeout_ms = config->timeout_ms;
        if (!mhi_cntrl->timeout_ms)
                mhi_cntrl->timeout_ms = MHI_TIMEOUT_MS;

        mhi_cntrl->bounce_buf = config->use_bounce_buf;
        mhi_cntrl->buffer_len = config->buf_len;
        if (!mhi_cntrl->buffer_len)
                mhi_cntrl->buffer_len = MHI_MAX_MTU;

        /* By default, host is allowed to ring DB in both M0 and M2 states */
        mhi_cntrl->db_access = MHI_PM_M0 | MHI_PM_M2;
        if (config->m2_no_db)
                mhi_cntrl->db_access &= ~MHI_PM_M2;

        return 0;

error_ev_cfg:
        vfree(mhi_cntrl->mhi_chan);

        return ret;
}

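/*
 * Register an MHI controller with the MHI bus: validate the required
 * controller callbacks, parse the channel/event configuration, set up the
 * command rings, locks and tasklets, read the SoC info registers and create
 * the controller device.
 */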
int mhi_register_controller(struct mhi_controller *mhi_cntrl,
                            const struct mhi_controller_config *config)
{
        struct mhi_event *mhi_event;
        struct mhi_chan *mhi_chan;
        struct mhi_cmd *mhi_cmd;
        struct mhi_device *mhi_dev;
        u32 soc_info;
        int ret, i;

        if (!mhi_cntrl)
                return -EINVAL;

        if (!mhi_cntrl->runtime_get || !mhi_cntrl->runtime_put ||
            !mhi_cntrl->status_cb || !mhi_cntrl->read_reg ||
            !mhi_cntrl->write_reg || !mhi_cntrl->nr_irqs)
                return -EINVAL;

        ret = parse_config(mhi_cntrl, config);
        if (ret)
                return -EINVAL;

        mhi_cntrl->mhi_cmd = kcalloc(NR_OF_CMD_RINGS,
                                     sizeof(*mhi_cntrl->mhi_cmd), GFP_KERNEL);
        if (!mhi_cntrl->mhi_cmd) {
                ret = -ENOMEM;
                goto err_free_event;
        }

        INIT_LIST_HEAD(&mhi_cntrl->transition_list);
        mutex_init(&mhi_cntrl->pm_mutex);
        rwlock_init(&mhi_cntrl->pm_lock);
        spin_lock_init(&mhi_cntrl->transition_lock);
        spin_lock_init(&mhi_cntrl->wlock);
        INIT_WORK(&mhi_cntrl->st_worker, mhi_pm_st_worker);
        init_waitqueue_head(&mhi_cntrl->state_event);

        mhi_cntrl->hiprio_wq = alloc_ordered_workqueue
                                ("mhi_hiprio_wq", WQ_MEM_RECLAIM | WQ_HIGHPRI);
        if (!mhi_cntrl->hiprio_wq) {
                dev_err(mhi_cntrl->cntrl_dev, "Failed to allocate workqueue\n");
                ret = -ENOMEM;
                goto err_free_cmd;
        }

        mhi_cmd = mhi_cntrl->mhi_cmd;
        for (i = 0; i < NR_OF_CMD_RINGS; i++, mhi_cmd++)
                spin_lock_init(&mhi_cmd->lock);

        mhi_event = mhi_cntrl->mhi_event;
        for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
                /* Skip for offload events */
                if (mhi_event->offload_ev)
                        continue;

                mhi_event->mhi_cntrl = mhi_cntrl;
                spin_lock_init(&mhi_event->lock);
                if (mhi_event->data_type == MHI_ER_CTRL)
                        tasklet_init(&mhi_event->task, mhi_ctrl_ev_task,
                                     (ulong)mhi_event);
                else
                        tasklet_init(&mhi_event->task, mhi_ev_task,
                                     (ulong)mhi_event);
        }

        mhi_chan = mhi_cntrl->mhi_chan;
        for (i = 0; i < mhi_cntrl->max_chan; i++, mhi_chan++) {
                mutex_init(&mhi_chan->mutex);
                init_completion(&mhi_chan->completion);
                rwlock_init(&mhi_chan->lock);

                /* used in setting bei field of TRE */
                mhi_event = &mhi_cntrl->mhi_event[mhi_chan->er_index];
                mhi_chan->intmod = mhi_event->intmod;
        }

        if (mhi_cntrl->bounce_buf) {
                mhi_cntrl->map_single = mhi_map_single_use_bb;
                mhi_cntrl->unmap_single = mhi_unmap_single_use_bb;
        } else {
                mhi_cntrl->map_single = mhi_map_single_no_bb;
                mhi_cntrl->unmap_single = mhi_unmap_single_no_bb;
        }

        /* Read the MHI device info */
        ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->regs,
                           SOC_HW_VERSION_OFFS, &soc_info);
        if (ret)
                goto err_destroy_wq;

        mhi_cntrl->family_number = (soc_info & SOC_HW_VERSION_FAM_NUM_BMSK) >>
                                        SOC_HW_VERSION_FAM_NUM_SHFT;
        mhi_cntrl->device_number = (soc_info & SOC_HW_VERSION_DEV_NUM_BMSK) >>
                                        SOC_HW_VERSION_DEV_NUM_SHFT;
        mhi_cntrl->major_version = (soc_info & SOC_HW_VERSION_MAJOR_VER_BMSK) >>
                                        SOC_HW_VERSION_MAJOR_VER_SHFT;
        mhi_cntrl->minor_version = (soc_info & SOC_HW_VERSION_MINOR_VER_BMSK) >>
                                        SOC_HW_VERSION_MINOR_VER_SHFT;

        mhi_cntrl->index = ida_alloc(&mhi_controller_ida, GFP_KERNEL);
        if (mhi_cntrl->index < 0) {
                ret = mhi_cntrl->index;
                goto err_destroy_wq;
        }

        /* Register controller with MHI bus */
        mhi_dev = mhi_alloc_device(mhi_cntrl);
        if (IS_ERR(mhi_dev)) {
                dev_err(mhi_cntrl->cntrl_dev, "Failed to allocate MHI device\n");
                ret = PTR_ERR(mhi_dev);
                goto err_ida_free;
        }

        mhi_dev->dev_type = MHI_DEVICE_CONTROLLER;
        mhi_dev->mhi_cntrl = mhi_cntrl;
        dev_set_name(&mhi_dev->dev, "mhi%d", mhi_cntrl->index);
        mhi_dev->name = dev_name(&mhi_dev->dev);

        /* Init wakeup source */
        device_init_wakeup(&mhi_dev->dev, true);

        ret = device_add(&mhi_dev->dev);
        if (ret)
                goto err_release_dev;

        mhi_cntrl->mhi_dev = mhi_dev;

        mhi_create_debugfs(mhi_cntrl);

        return 0;

err_release_dev:
        put_device(&mhi_dev->dev);
err_ida_free:
        ida_free(&mhi_controller_ida, mhi_cntrl->index);
err_destroy_wq:
        destroy_workqueue(mhi_cntrl->hiprio_wq);
err_free_cmd:
        kfree(mhi_cntrl->mhi_cmd);
err_free_event:
        kfree(mhi_cntrl->mhi_event);
        vfree(mhi_cntrl->mhi_chan);

        return ret;
}
EXPORT_SYMBOL_GPL(mhi_register_controller);

void mhi_unregister_controller(struct mhi_controller *mhi_cntrl)
{
        struct mhi_device *mhi_dev = mhi_cntrl->mhi_dev;
        struct mhi_chan *mhi_chan = mhi_cntrl->mhi_chan;
        unsigned int i;

        mhi_destroy_debugfs(mhi_cntrl);

        destroy_workqueue(mhi_cntrl->hiprio_wq);
        kfree(mhi_cntrl->mhi_cmd);
        kfree(mhi_cntrl->mhi_event);

        /* Drop the references to MHI devices created for channels */
        for (i = 0; i < mhi_cntrl->max_chan; i++, mhi_chan++) {
                if (!mhi_chan->mhi_dev)
                        continue;

                put_device(&mhi_chan->mhi_dev->dev);
        }
        vfree(mhi_cntrl->mhi_chan);

        device_del(&mhi_dev->dev);
        put_device(&mhi_dev->dev);

        ida_free(&mhi_controller_ida, mhi_cntrl->index);
}
EXPORT_SYMBOL_GPL(mhi_unregister_controller);

struct mhi_controller *mhi_alloc_controller(void)
{
        struct mhi_controller *mhi_cntrl;

        mhi_cntrl = kzalloc(sizeof(*mhi_cntrl), GFP_KERNEL);

        return mhi_cntrl;
}
EXPORT_SYMBOL_GPL(mhi_alloc_controller);

void mhi_free_controller(struct mhi_controller *mhi_cntrl)
{
        kfree(mhi_cntrl);
}
EXPORT_SYMBOL_GPL(mhi_free_controller);

int mhi_prepare_for_power_up(struct mhi_controller *mhi_cntrl)
{
        struct device *dev = &mhi_cntrl->mhi_dev->dev;
        u32 bhie_off;
        int ret;

        mutex_lock(&mhi_cntrl->pm_mutex);

        ret = mhi_init_dev_ctxt(mhi_cntrl);
        if (ret)
                goto error_dev_ctxt;

        /*
         * Allocate the RDDM table if specified; this table is used for
         * debugging purposes
         */
        if (mhi_cntrl->rddm_size) {
                mhi_alloc_bhie_table(mhi_cntrl, &mhi_cntrl->rddm_image,
                                     mhi_cntrl->rddm_size);

                /*
                 * This controller supports RDDM, so we need to manually clear
                 * BHIE RX registers since POR values are undefined.
                 */
                ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->regs, BHIEOFF,
                                   &bhie_off);
                if (ret) {
                        dev_err(dev, "Error getting BHIE offset\n");
                        goto bhie_error;
                }

                mhi_cntrl->bhie = mhi_cntrl->regs + bhie_off;
                memset_io(mhi_cntrl->bhie + BHIE_RXVECADDR_LOW_OFFS,
                          0, BHIE_RXVECSTATUS_OFFS - BHIE_RXVECADDR_LOW_OFFS +
                          4);

                if (mhi_cntrl->rddm_image)
                        mhi_rddm_prepare(mhi_cntrl, mhi_cntrl->rddm_image);
        }

        mhi_cntrl->pre_init = true;

        mutex_unlock(&mhi_cntrl->pm_mutex);

        return 0;

bhie_error:
        if (mhi_cntrl->rddm_image) {
                mhi_free_bhie_table(mhi_cntrl, mhi_cntrl->rddm_image);
                mhi_cntrl->rddm_image = NULL;
        }

error_dev_ctxt:
        mutex_unlock(&mhi_cntrl->pm_mutex);

        return ret;
}
EXPORT_SYMBOL_GPL(mhi_prepare_for_power_up);

void mhi_unprepare_after_power_down(struct mhi_controller *mhi_cntrl)
{
        if (mhi_cntrl->fbc_image) {
                mhi_free_bhie_table(mhi_cntrl, mhi_cntrl->fbc_image);
                mhi_cntrl->fbc_image = NULL;
        }

        if (mhi_cntrl->rddm_image) {
                mhi_free_bhie_table(mhi_cntrl, mhi_cntrl->rddm_image);
                mhi_cntrl->rddm_image = NULL;
        }

        mhi_deinit_dev_ctxt(mhi_cntrl);
        mhi_cntrl->pre_init = false;
}
EXPORT_SYMBOL_GPL(mhi_unprepare_after_power_down);

static void mhi_release_device(struct device *dev)
{
        struct mhi_device *mhi_dev = to_mhi_device(dev);

        /*
         * We need to set mhi_chan->mhi_dev to NULL here since the MHI
         * devices for the channels will only get created if the mhi_dev
         * associated with them is NULL. This scenario occurs during
         * controller suspend and resume.
         */
        if (mhi_dev->ul_chan)
                mhi_dev->ul_chan->mhi_dev = NULL;

        if (mhi_dev->dl_chan)
                mhi_dev->dl_chan->mhi_dev = NULL;

        kfree(mhi_dev);
}

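/*
 * Allocate and initialize a bare mhi_device. The caller is responsible for
 * naming the device and adding it to the bus with device_add().
 */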
struct mhi_device *mhi_alloc_device(struct mhi_controller *mhi_cntrl)
{
        struct mhi_device *mhi_dev;
        struct device *dev;

        mhi_dev = kzalloc(sizeof(*mhi_dev), GFP_KERNEL);
        if (!mhi_dev)
                return ERR_PTR(-ENOMEM);

        dev = &mhi_dev->dev;
        device_initialize(dev);
        dev->bus = &mhi_bus_type;
        dev->release = mhi_release_device;

        if (mhi_cntrl->mhi_dev) {
                /* for MHI client devices, parent is the MHI controller device */
                dev->parent = &mhi_cntrl->mhi_dev->dev;
        } else {
                /* for MHI controller device, parent is the bus device (e.g. pci device) */
                dev->parent = mhi_cntrl->cntrl_dev;
        }

        mhi_dev->mhi_cntrl = mhi_cntrl;
        mhi_dev->dev_wake = 0;

        return mhi_dev;
}

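/*
 * Bus probe callback: validate that the client driver provides the callbacks
 * required by the channels it binds to, wire up the transfer callbacks and
 * then call the client driver's probe().
 */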
static int mhi_driver_probe(struct device *dev)
{
        struct mhi_device *mhi_dev = to_mhi_device(dev);
        struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
        struct device_driver *drv = dev->driver;
        struct mhi_driver *mhi_drv = to_mhi_driver(drv);
        struct mhi_event *mhi_event;
        struct mhi_chan *ul_chan = mhi_dev->ul_chan;
        struct mhi_chan *dl_chan = mhi_dev->dl_chan;
        int ret;

        /* Bring device out of LPM */
        ret = mhi_device_get_sync(mhi_dev);
        if (ret)
                return ret;

        ret = -EINVAL;

        if (ul_chan) {
                /*
                 * If the channel supports LPM notifications then status_cb
                 * should be provided
                 */
                if (ul_chan->lpm_notify && !mhi_drv->status_cb)
                        goto exit_probe;

                /* For non-offload channels, xfer_cb should be provided */
                if (!ul_chan->offload_ch && !mhi_drv->ul_xfer_cb)
                        goto exit_probe;

                ul_chan->xfer_cb = mhi_drv->ul_xfer_cb;
        }

        ret = -EINVAL;
        if (dl_chan) {
                /*
                 * If the channel supports LPM notifications then status_cb
                 * should be provided
                 */
                if (dl_chan->lpm_notify && !mhi_drv->status_cb)
                        goto exit_probe;

                /* For non-offload channels, xfer_cb should be provided */
                if (!dl_chan->offload_ch && !mhi_drv->dl_xfer_cb)
                        goto exit_probe;

                mhi_event = &mhi_cntrl->mhi_event[dl_chan->er_index];

                /*
                 * If the channel event ring is managed by client, then
                 * status_cb must be provided so that the framework can
                 * notify pending data
                 */
                if (mhi_event->cl_manage && !mhi_drv->status_cb)
                        goto exit_probe;

                dl_chan->xfer_cb = mhi_drv->dl_xfer_cb;
        }

        /* Call the user provided probe function */
        ret = mhi_drv->probe(mhi_dev, mhi_dev->id);
        if (ret)
                goto exit_probe;

        mhi_device_put(mhi_dev);

        return ret;

exit_probe:
        mhi_unprepare_from_transfer(mhi_dev);

        mhi_device_put(mhi_dev);

        return ret;
}

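/*
 * Bus remove callback: quiesce both channels, call the client driver's
 * remove(), then tear down the channel contexts and drop any wakeup
 * references still held by the device.
 */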
static int mhi_driver_remove(struct device *dev)
{
        struct mhi_device *mhi_dev = to_mhi_device(dev);
        struct mhi_driver *mhi_drv = to_mhi_driver(dev->driver);
        struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
        struct mhi_chan *mhi_chan;
        enum mhi_ch_state ch_state[] = {
                MHI_CH_STATE_DISABLED,
                MHI_CH_STATE_DISABLED
        };
        int dir;

        /* Skip if it is a controller device */
        if (mhi_dev->dev_type == MHI_DEVICE_CONTROLLER)
                return 0;

        /* Reset both channels */
        for (dir = 0; dir < 2; dir++) {
                mhi_chan = dir ? mhi_dev->ul_chan : mhi_dev->dl_chan;

                if (!mhi_chan)
                        continue;

                /* Wake all threads waiting for completion */
                write_lock_irq(&mhi_chan->lock);
                mhi_chan->ccs = MHI_EV_CC_INVALID;
                complete_all(&mhi_chan->completion);
                write_unlock_irq(&mhi_chan->lock);

                /* Set the channel state to disabled */
                mutex_lock(&mhi_chan->mutex);
                write_lock_irq(&mhi_chan->lock);
                ch_state[dir] = mhi_chan->ch_state;
                mhi_chan->ch_state = MHI_CH_STATE_SUSPENDED;
                write_unlock_irq(&mhi_chan->lock);

                /* Reset the non-offload channel */
                if (!mhi_chan->offload_ch)
                        mhi_reset_chan(mhi_cntrl, mhi_chan);

                mutex_unlock(&mhi_chan->mutex);
        }

        mhi_drv->remove(mhi_dev);

        /* De-init channel if it was enabled */
        for (dir = 0; dir < 2; dir++) {
                mhi_chan = dir ? mhi_dev->ul_chan : mhi_dev->dl_chan;

                if (!mhi_chan)
                        continue;

                mutex_lock(&mhi_chan->mutex);

                if (ch_state[dir] == MHI_CH_STATE_ENABLED &&
                    !mhi_chan->offload_ch)
                        mhi_deinit_chan_ctxt(mhi_cntrl, mhi_chan);

                mhi_chan->ch_state = MHI_CH_STATE_DISABLED;

                mutex_unlock(&mhi_chan->mutex);
        }

        while (mhi_dev->dev_wake)
                mhi_device_put(mhi_dev);

        return 0;
}

int __mhi_driver_register(struct mhi_driver *mhi_drv, struct module *owner)
{
        struct device_driver *driver = &mhi_drv->driver;

        if (!mhi_drv->probe || !mhi_drv->remove)
                return -EINVAL;

        driver->bus = &mhi_bus_type;
        driver->owner = owner;
        driver->probe = mhi_driver_probe;
        driver->remove = mhi_driver_remove;

        return driver_register(driver);
}
EXPORT_SYMBOL_GPL(__mhi_driver_register);

void mhi_driver_unregister(struct mhi_driver *mhi_drv)
{
        driver_unregister(&mhi_drv->driver);
}
EXPORT_SYMBOL_GPL(mhi_driver_unregister);

static int mhi_uevent(struct device *dev, struct kobj_uevent_env *env)
{
        struct mhi_device *mhi_dev = to_mhi_device(dev);

        return add_uevent_var(env, "MODALIAS=" MHI_DEVICE_MODALIAS_FMT,
                                        mhi_dev->name);
}

static int mhi_match(struct device *dev, struct device_driver *drv)
{
        struct mhi_device *mhi_dev = to_mhi_device(dev);
        struct mhi_driver *mhi_drv = to_mhi_driver(drv);
        const struct mhi_device_id *id;

        /*
         * If the device is a controller type then there is no client driver
         * associated with it
         */
        if (mhi_dev->dev_type == MHI_DEVICE_CONTROLLER)
                return 0;

        for (id = mhi_drv->id_table; id->chan[0]; id++)
                if (!strcmp(mhi_dev->name, id->chan)) {
                        mhi_dev->id = id;
                        return 1;
                }

        return 0;
};

struct bus_type mhi_bus_type = {
        .name = "mhi",
        .dev_name = "mhi",
        .match = mhi_match,
        .uevent = mhi_uevent,
        .dev_groups = mhi_dev_groups,
};

static int __init mhi_init(void)
{
        mhi_debugfs_init();
        return bus_register(&mhi_bus_type);
}

static void __exit mhi_exit(void)
{
        mhi_debugfs_exit();
        bus_unregister(&mhi_bus_type);
}

postcore_initcall(mhi_init);
module_exit(mhi_exit);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("MHI Host Interface");