/* linux/drivers/bus/mhi/core/init.c */
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
   4 *
   5 */
   6
   7#include <linux/debugfs.h>
   8#include <linux/device.h>
   9#include <linux/dma-direction.h>
  10#include <linux/dma-mapping.h>
  11#include <linux/interrupt.h>
  12#include <linux/list.h>
  13#include <linux/mhi.h>
  14#include <linux/mod_devicetable.h>
  15#include <linux/module.h>
  16#include <linux/slab.h>
  17#include <linux/vmalloc.h>
  18#include <linux/wait.h>
  19#include "internal.h"
  20
/* Printable names for the MHI device execution environments (EE) */
const char * const mhi_ee_str[MHI_EE_MAX] = {
	[MHI_EE_PBL] = "PBL",
	[MHI_EE_SBL] = "SBL",
	[MHI_EE_AMSS] = "AMSS",
	[MHI_EE_RDDM] = "RDDM",
	[MHI_EE_WFW] = "WFW",
	[MHI_EE_PTHRU] = "PASS THRU",
	[MHI_EE_EDL] = "EDL",
	[MHI_EE_DISABLE_TRANSITION] = "DISABLE",
	[MHI_EE_NOT_SUPPORTED] = "NOT SUPPORTED",
};
  32
/* Printable names for the device state-transition work items */
const char * const dev_state_tran_str[DEV_ST_TRANSITION_MAX] = {
	[DEV_ST_TRANSITION_PBL] = "PBL",
	[DEV_ST_TRANSITION_READY] = "READY",
	[DEV_ST_TRANSITION_SBL] = "SBL",
	[DEV_ST_TRANSITION_MISSION_MODE] = "MISSION_MODE",
	[DEV_ST_TRANSITION_SYS_ERR] = "SYS_ERR",
	[DEV_ST_TRANSITION_DISABLE] = "DISABLE",
};
  41
/* Printable names for the MHI states as reported by the device */
const char * const mhi_state_str[MHI_STATE_MAX] = {
	[MHI_STATE_RESET] = "RESET",
	[MHI_STATE_READY] = "READY",
	[MHI_STATE_M0] = "M0",
	[MHI_STATE_M1] = "M1",
	[MHI_STATE_M2] = "M2",
	[MHI_STATE_M3] = "M3",
	[MHI_STATE_M3_FAST] = "M3_FAST",
	[MHI_STATE_BHI] = "BHI",
	[MHI_STATE_SYS_ERR] = "SYS_ERR",
};
  53
/*
 * Printable names for the host power-management states. Indexed by bit
 * position, see to_mhi_pm_state_str() which maps the one-hot MHI_PM_STATE_*
 * mask to an index here.
 */
static const char * const mhi_pm_state_str[] = {
	[MHI_PM_STATE_DISABLE] = "DISABLE",
	[MHI_PM_STATE_POR] = "POR",
	[MHI_PM_STATE_M0] = "M0",
	[MHI_PM_STATE_M2] = "M2",
	[MHI_PM_STATE_M3_ENTER] = "M?->M3",
	[MHI_PM_STATE_M3] = "M3",
	[MHI_PM_STATE_M3_EXIT] = "M3->M0",
	[MHI_PM_STATE_FW_DL_ERR] = "FW DL Error",
	[MHI_PM_STATE_SYS_ERR_DETECT] = "SYS_ERR Detect",
	[MHI_PM_STATE_SYS_ERR_PROCESS] = "SYS_ERR Process",
	[MHI_PM_STATE_SHUTDOWN_PROCESS] = "SHUTDOWN Process",
	[MHI_PM_STATE_LD_ERR_FATAL_DETECT] = "LD or Error Fatal Detect",
};
  68
  69const char *to_mhi_pm_state_str(enum mhi_pm_state state)
  70{
  71        int index = find_last_bit((unsigned long *)&state, 32);
  72
  73        if (index >= ARRAY_SIZE(mhi_pm_state_str))
  74                return "Invalid State";
  75
  76        return mhi_pm_state_str[index];
  77}
  78
/* sysfs read: report the device serial number captured from BHI */
static ssize_t serial_number_show(struct device *dev,
				  struct device_attribute *attr,
				  char *buf)
{
	struct mhi_device *mhi_dev = to_mhi_device(dev);
	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;

	/* buf is a full sysfs page; a single bounded print cannot overflow */
	return snprintf(buf, PAGE_SIZE, "Serial Number: %u\n",
			mhi_cntrl->serial_number);
}
static DEVICE_ATTR_RO(serial_number);
  90
  91static ssize_t oem_pk_hash_show(struct device *dev,
  92                                struct device_attribute *attr,
  93                                char *buf)
  94{
  95        struct mhi_device *mhi_dev = to_mhi_device(dev);
  96        struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
  97        int i, cnt = 0;
  98
  99        for (i = 0; i < ARRAY_SIZE(mhi_cntrl->oem_pk_hash); i++)
 100                cnt += snprintf(buf + cnt, PAGE_SIZE - cnt,
 101                                "OEMPKHASH[%d]: 0x%x\n", i,
 102                                mhi_cntrl->oem_pk_hash[i]);
 103
 104        return cnt;
 105}
 106static DEVICE_ATTR_RO(oem_pk_hash);
 107
/* sysfs attributes exposed on every MHI controller device */
static struct attribute *mhi_dev_attrs[] = {
	&dev_attr_serial_number.attr,
	&dev_attr_oem_pk_hash.attr,
	NULL,
};
ATTRIBUTE_GROUPS(mhi_dev);
 114
/* MHI protocol requires the transfer ring to be aligned with ring length */
static int mhi_alloc_aligned_ring(struct mhi_controller *mhi_cntrl,
				  struct mhi_ring *ring,
				  u64 len)
{
	/*
	 * Over-allocate by len - 1 bytes so a len-aligned address can
	 * always be carved out of the coherent buffer.
	 * NOTE(review): the mask arithmetic below assumes len is a power
	 * of two — confirm against the callers' element counts.
	 */
	ring->alloc_size = len + (len - 1);
	ring->pre_aligned = mhi_alloc_coherent(mhi_cntrl, ring->alloc_size,
					       &ring->dma_handle, GFP_KERNEL);
	if (!ring->pre_aligned)
		return -ENOMEM;

	/* Round the DMA address up to the next len boundary */
	ring->iommu_base = (ring->dma_handle + (len - 1)) & ~(len - 1);
	/* CPU-visible base carries the same offset as the aligned DMA address */
	ring->base = ring->pre_aligned + (ring->iommu_base - ring->dma_handle);

	return 0;
}
 131
 132void mhi_deinit_free_irq(struct mhi_controller *mhi_cntrl)
 133{
 134        int i;
 135        struct mhi_event *mhi_event = mhi_cntrl->mhi_event;
 136
 137        for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
 138                if (mhi_event->offload_ev)
 139                        continue;
 140
 141                free_irq(mhi_cntrl->irq[mhi_event->irq], mhi_event);
 142        }
 143
 144        free_irq(mhi_cntrl->irq[0], mhi_cntrl);
 145}
 146
/*
 * Request the BHI interrupt vector plus one IRQ per (non-offloaded) event
 * ring. On failure, unwinds exactly the IRQs requested so far.
 *
 * Return: 0 on success, negative errno on failure.
 */
int mhi_init_irq_setup(struct mhi_controller *mhi_cntrl)
{
	struct mhi_event *mhi_event = mhi_cntrl->mhi_event;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	int i, ret;

	/* Setup BHI_INTVEC IRQ */
	ret = request_threaded_irq(mhi_cntrl->irq[0], mhi_intvec_handler,
				   mhi_intvec_threaded_handler,
				   IRQF_SHARED | IRQF_NO_SUSPEND,
				   "bhi", mhi_cntrl);
	if (ret)
		return ret;

	for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
		/* Offloaded rings are serviced outside this driver */
		if (mhi_event->offload_ev)
			continue;

		/* Reject configs that point past the IRQ table */
		if (mhi_event->irq >= mhi_cntrl->nr_irqs) {
			dev_err(dev, "irq %d not available for event ring\n",
				mhi_event->irq);
			ret = -EINVAL;
			goto error_request;
		}

		ret = request_irq(mhi_cntrl->irq[mhi_event->irq],
				  mhi_irq_handler,
				  IRQF_SHARED | IRQF_NO_SUSPEND,
				  "mhi", mhi_event);
		if (ret) {
			dev_err(dev, "Error requesting irq:%d for ev:%d\n",
				mhi_cntrl->irq[mhi_event->irq], i);
			goto error_request;
		}
	}

	return 0;

error_request:
	/*
	 * Walk back over the rings already processed; the offload check
	 * mirrors the forward loop so only requested IRQs are freed.
	 */
	for (--i, --mhi_event; i >= 0; i--, mhi_event--) {
		if (mhi_event->offload_ev)
			continue;

		free_irq(mhi_cntrl->irq[mhi_event->irq], mhi_event);
	}
	free_irq(mhi_cntrl->irq[0], mhi_cntrl);

	return ret;
}
 196
/*
 * Free everything allocated by mhi_init_dev_ctxt(): command rings, event
 * rings, and the channel/event/command context arrays shared with the
 * device. Rings are freed before the context arrays that reference them.
 */
void mhi_deinit_dev_ctxt(struct mhi_controller *mhi_cntrl)
{
	int i;
	struct mhi_ctxt *mhi_ctxt = mhi_cntrl->mhi_ctxt;
	struct mhi_cmd *mhi_cmd;
	struct mhi_event *mhi_event;
	struct mhi_ring *ring;

	/* Command ring buffers */
	mhi_cmd = mhi_cntrl->mhi_cmd;
	for (i = 0; i < NR_OF_CMD_RINGS; i++, mhi_cmd++) {
		ring = &mhi_cmd->ring;
		mhi_free_coherent(mhi_cntrl, ring->alloc_size,
				  ring->pre_aligned, ring->dma_handle);
		ring->base = NULL;
		ring->iommu_base = 0;
	}

	/* Command context array */
	mhi_free_coherent(mhi_cntrl,
			  sizeof(*mhi_ctxt->cmd_ctxt) * NR_OF_CMD_RINGS,
			  mhi_ctxt->cmd_ctxt, mhi_ctxt->cmd_ctxt_addr);

	/* Event ring buffers; offloaded rings were never allocated here */
	mhi_event = mhi_cntrl->mhi_event;
	for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
		if (mhi_event->offload_ev)
			continue;

		ring = &mhi_event->ring;
		mhi_free_coherent(mhi_cntrl, ring->alloc_size,
				  ring->pre_aligned, ring->dma_handle);
		ring->base = NULL;
		ring->iommu_base = 0;
	}

	/* Event context array */
	mhi_free_coherent(mhi_cntrl, sizeof(*mhi_ctxt->er_ctxt) *
			  mhi_cntrl->total_ev_rings, mhi_ctxt->er_ctxt,
			  mhi_ctxt->er_ctxt_addr);

	/* Channel context array */
	mhi_free_coherent(mhi_cntrl, sizeof(*mhi_ctxt->chan_ctxt) *
			  mhi_cntrl->max_chan, mhi_ctxt->chan_ctxt,
			  mhi_ctxt->chan_ctxt_addr);

	kfree(mhi_ctxt);
	mhi_cntrl->mhi_ctxt = NULL;
}
 241
/*
 * Allocate and initialize the device context: the channel, event, and
 * command context arrays shared with the device over DMA, plus the host
 * rings backing the event and command contexts. On failure every
 * allocation made so far is unwound via the goto ladder at the bottom.
 *
 * Return: 0 on success, negative errno on failure.
 */
int mhi_init_dev_ctxt(struct mhi_controller *mhi_cntrl)
{
	struct mhi_ctxt *mhi_ctxt;
	struct mhi_chan_ctxt *chan_ctxt;
	struct mhi_event_ctxt *er_ctxt;
	struct mhi_cmd_ctxt *cmd_ctxt;
	struct mhi_chan *mhi_chan;
	struct mhi_event *mhi_event;
	struct mhi_cmd *mhi_cmd;
	u32 tmp;
	int ret = -ENOMEM, i;

	atomic_set(&mhi_cntrl->dev_wake, 0);
	atomic_set(&mhi_cntrl->pending_pkts, 0);

	mhi_ctxt = kzalloc(sizeof(*mhi_ctxt), GFP_KERNEL);
	if (!mhi_ctxt)
		return -ENOMEM;

	/* Setup channel ctxt */
	mhi_ctxt->chan_ctxt = mhi_alloc_coherent(mhi_cntrl,
						 sizeof(*mhi_ctxt->chan_ctxt) *
						 mhi_cntrl->max_chan,
						 &mhi_ctxt->chan_ctxt_addr,
						 GFP_KERNEL);
	if (!mhi_ctxt->chan_ctxt)
		goto error_alloc_chan_ctxt;

	mhi_chan = mhi_cntrl->mhi_chan;
	chan_ctxt = mhi_ctxt->chan_ctxt;
	for (i = 0; i < mhi_cntrl->max_chan; i++, chan_ctxt++, mhi_chan++) {
		/* Skip if it is an offload channel */
		if (mhi_chan->offload_ch)
			continue;

		/* Pack state, burst mode and poll config into chcfg */
		tmp = chan_ctxt->chcfg;
		tmp &= ~CHAN_CTX_CHSTATE_MASK;
		tmp |= (MHI_CH_STATE_DISABLED << CHAN_CTX_CHSTATE_SHIFT);
		tmp &= ~CHAN_CTX_BRSTMODE_MASK;
		tmp |= (mhi_chan->db_cfg.brstmode << CHAN_CTX_BRSTMODE_SHIFT);
		tmp &= ~CHAN_CTX_POLLCFG_MASK;
		tmp |= (mhi_chan->db_cfg.pollcfg << CHAN_CTX_POLLCFG_SHIFT);
		chan_ctxt->chcfg = tmp;

		chan_ctxt->chtype = mhi_chan->type;
		chan_ctxt->erindex = mhi_chan->er_index;

		mhi_chan->ch_state = MHI_CH_STATE_DISABLED;
		/* Channel doorbell is the wp field of its shared context */
		mhi_chan->tre_ring.db_addr = (void __iomem *)&chan_ctxt->wp;
	}

	/* Setup event context */
	mhi_ctxt->er_ctxt = mhi_alloc_coherent(mhi_cntrl,
					       sizeof(*mhi_ctxt->er_ctxt) *
					       mhi_cntrl->total_ev_rings,
					       &mhi_ctxt->er_ctxt_addr,
					       GFP_KERNEL);
	if (!mhi_ctxt->er_ctxt)
		goto error_alloc_er_ctxt;

	er_ctxt = mhi_ctxt->er_ctxt;
	mhi_event = mhi_cntrl->mhi_event;
	for (i = 0; i < mhi_cntrl->total_ev_rings; i++, er_ctxt++,
		     mhi_event++) {
		struct mhi_ring *ring = &mhi_event->ring;

		/* Skip if it is an offload event */
		if (mhi_event->offload_ev)
			continue;

		/* Clear the moderation count, program the moderation time */
		tmp = er_ctxt->intmod;
		tmp &= ~EV_CTX_INTMODC_MASK;
		tmp &= ~EV_CTX_INTMODT_MASK;
		tmp |= (mhi_event->intmod << EV_CTX_INTMODT_SHIFT);
		er_ctxt->intmod = tmp;

		er_ctxt->ertype = MHI_ER_TYPE_VALID;
		er_ctxt->msivec = mhi_event->irq;
		mhi_event->db_cfg.db_mode = true;

		ring->el_size = sizeof(struct mhi_tre);
		ring->len = ring->el_size * ring->elements;
		ret = mhi_alloc_aligned_ring(mhi_cntrl, ring, ring->len);
		if (ret)
			goto error_alloc_er;

		/*
		 * If the read pointer equals to the write pointer, then the
		 * ring is empty
		 */
		ring->rp = ring->wp = ring->base;
		er_ctxt->rbase = ring->iommu_base;
		er_ctxt->rp = er_ctxt->wp = er_ctxt->rbase;
		er_ctxt->rlen = ring->len;
		ring->ctxt_wp = &er_ctxt->wp;
	}

	/* Setup cmd context */
	ret = -ENOMEM;
	mhi_ctxt->cmd_ctxt = mhi_alloc_coherent(mhi_cntrl,
						sizeof(*mhi_ctxt->cmd_ctxt) *
						NR_OF_CMD_RINGS,
						&mhi_ctxt->cmd_ctxt_addr,
						GFP_KERNEL);
	if (!mhi_ctxt->cmd_ctxt)
		goto error_alloc_er;

	mhi_cmd = mhi_cntrl->mhi_cmd;
	cmd_ctxt = mhi_ctxt->cmd_ctxt;
	for (i = 0; i < NR_OF_CMD_RINGS; i++, mhi_cmd++, cmd_ctxt++) {
		struct mhi_ring *ring = &mhi_cmd->ring;

		ring->el_size = sizeof(struct mhi_tre);
		ring->elements = CMD_EL_PER_RING;
		ring->len = ring->el_size * ring->elements;
		ret = mhi_alloc_aligned_ring(mhi_cntrl, ring, ring->len);
		if (ret)
			goto error_alloc_cmd;

		/* Empty ring: read and write pointers coincide */
		ring->rp = ring->wp = ring->base;
		cmd_ctxt->rbase = ring->iommu_base;
		cmd_ctxt->rp = cmd_ctxt->wp = cmd_ctxt->rbase;
		cmd_ctxt->rlen = ring->len;
		ring->ctxt_wp = &cmd_ctxt->wp;
	}

	mhi_cntrl->mhi_ctxt = mhi_ctxt;

	return 0;

error_alloc_cmd:
	/* Free the command rings allocated before the failing one */
	for (--i, --mhi_cmd; i >= 0; i--, mhi_cmd--) {
		struct mhi_ring *ring = &mhi_cmd->ring;

		mhi_free_coherent(mhi_cntrl, ring->alloc_size,
				  ring->pre_aligned, ring->dma_handle);
	}
	mhi_free_coherent(mhi_cntrl,
			  sizeof(*mhi_ctxt->cmd_ctxt) * NR_OF_CMD_RINGS,
			  mhi_ctxt->cmd_ctxt, mhi_ctxt->cmd_ctxt_addr);
	/*
	 * All event rings were allocated successfully if we got here;
	 * reset the cursors so the fall-through below frees all of them.
	 */
	i = mhi_cntrl->total_ev_rings;
	mhi_event = mhi_cntrl->mhi_event + i;

error_alloc_er:
	/* Free the event rings allocated so far (offload rings have none) */
	for (--i, --mhi_event; i >= 0; i--, mhi_event--) {
		struct mhi_ring *ring = &mhi_event->ring;

		if (mhi_event->offload_ev)
			continue;

		mhi_free_coherent(mhi_cntrl, ring->alloc_size,
				  ring->pre_aligned, ring->dma_handle);
	}
	mhi_free_coherent(mhi_cntrl, sizeof(*mhi_ctxt->er_ctxt) *
			  mhi_cntrl->total_ev_rings, mhi_ctxt->er_ctxt,
			  mhi_ctxt->er_ctxt_addr);

error_alloc_er_ctxt:
	mhi_free_coherent(mhi_cntrl, sizeof(*mhi_ctxt->chan_ctxt) *
			  mhi_cntrl->max_chan, mhi_ctxt->chan_ctxt,
			  mhi_ctxt->chan_ctxt_addr);

error_alloc_chan_ctxt:
	kfree(mhi_ctxt);

	return ret;
}
 409
/*
 * Program the device MMIO registers: context array base addresses, ring
 * counts, addressing window limits, and the host-side doorbell addresses
 * derived from the CHDBOFF/ERDBOFF offsets.
 *
 * Return: 0 on success, -EIO if the doorbell offset registers can't be read.
 */
int mhi_init_mmio(struct mhi_controller *mhi_cntrl)
{
	u32 val;
	int i, ret;
	struct mhi_chan *mhi_chan;
	struct mhi_event *mhi_event;
	void __iomem *base = mhi_cntrl->regs;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	/* Table of register writes applied in order at the end; the
	 * all-zero entry terminates the table.
	 */
	struct {
		u32 offset;
		u32 mask;
		u32 shift;
		u32 val;
	} reg_info[] = {
		{
			CCABAP_HIGHER, U32_MAX, 0,
			upper_32_bits(mhi_cntrl->mhi_ctxt->chan_ctxt_addr),
		},
		{
			CCABAP_LOWER, U32_MAX, 0,
			lower_32_bits(mhi_cntrl->mhi_ctxt->chan_ctxt_addr),
		},
		{
			ECABAP_HIGHER, U32_MAX, 0,
			upper_32_bits(mhi_cntrl->mhi_ctxt->er_ctxt_addr),
		},
		{
			ECABAP_LOWER, U32_MAX, 0,
			lower_32_bits(mhi_cntrl->mhi_ctxt->er_ctxt_addr),
		},
		{
			CRCBAP_HIGHER, U32_MAX, 0,
			upper_32_bits(mhi_cntrl->mhi_ctxt->cmd_ctxt_addr),
		},
		{
			CRCBAP_LOWER, U32_MAX, 0,
			lower_32_bits(mhi_cntrl->mhi_ctxt->cmd_ctxt_addr),
		},
		{
			MHICFG, MHICFG_NER_MASK, MHICFG_NER_SHIFT,
			mhi_cntrl->total_ev_rings,
		},
		{
			MHICFG, MHICFG_NHWER_MASK, MHICFG_NHWER_SHIFT,
			mhi_cntrl->hw_ev_rings,
		},
		{
			MHICTRLBASE_HIGHER, U32_MAX, 0,
			upper_32_bits(mhi_cntrl->iova_start),
		},
		{
			MHICTRLBASE_LOWER, U32_MAX, 0,
			lower_32_bits(mhi_cntrl->iova_start),
		},
		{
			MHIDATABASE_HIGHER, U32_MAX, 0,
			upper_32_bits(mhi_cntrl->iova_start),
		},
		{
			MHIDATABASE_LOWER, U32_MAX, 0,
			lower_32_bits(mhi_cntrl->iova_start),
		},
		{
			MHICTRLLIMIT_HIGHER, U32_MAX, 0,
			upper_32_bits(mhi_cntrl->iova_stop),
		},
		{
			MHICTRLLIMIT_LOWER, U32_MAX, 0,
			lower_32_bits(mhi_cntrl->iova_stop),
		},
		{
			MHIDATALIMIT_HIGHER, U32_MAX, 0,
			upper_32_bits(mhi_cntrl->iova_stop),
		},
		{
			MHIDATALIMIT_LOWER, U32_MAX, 0,
			lower_32_bits(mhi_cntrl->iova_stop),
		},
		{ 0, 0, 0 }
	};

	dev_dbg(dev, "Initializing MHI registers\n");

	/* Read channel db offset */
	ret = mhi_read_reg_field(mhi_cntrl, base, CHDBOFF, CHDBOFF_CHDBOFF_MASK,
				 CHDBOFF_CHDBOFF_SHIFT, &val);
	if (ret) {
		dev_err(dev, "Unable to read CHDBOFF register\n");
		return -EIO;
	}

	/* Setup wake db; each doorbell is an 8-byte register pair */
	mhi_cntrl->wake_db = base + val + (8 * MHI_DEV_WAKE_DB);
	mhi_write_reg(mhi_cntrl, mhi_cntrl->wake_db, 4, 0);
	mhi_write_reg(mhi_cntrl, mhi_cntrl->wake_db, 0, 0);
	mhi_cntrl->wake_set = false;

	/* Setup channel db address for each channel in tre_ring */
	mhi_chan = mhi_cntrl->mhi_chan;
	for (i = 0; i < mhi_cntrl->max_chan; i++, val += 8, mhi_chan++)
		mhi_chan->tre_ring.db_addr = base + val;

	/* Read event ring db offset */
	ret = mhi_read_reg_field(mhi_cntrl, base, ERDBOFF, ERDBOFF_ERDBOFF_MASK,
				 ERDBOFF_ERDBOFF_SHIFT, &val);
	if (ret) {
		dev_err(dev, "Unable to read ERDBOFF register\n");
		return -EIO;
	}

	/* Setup event db address for each ev_ring */
	mhi_event = mhi_cntrl->mhi_event;
	for (i = 0; i < mhi_cntrl->total_ev_rings; i++, val += 8, mhi_event++) {
		if (mhi_event->offload_ev)
			continue;

		mhi_event->ring.db_addr = base + val;
	}

	/* Setup DB register for primary CMD rings */
	mhi_cntrl->mhi_cmd[PRIMARY_CMD_RING].ring.db_addr = base + CRDB_LOWER;

	/* Write to MMIO registers */
	for (i = 0; reg_info[i].offset; i++)
		mhi_write_reg_field(mhi_cntrl, base, reg_info[i].offset,
				    reg_info[i].mask, reg_info[i].shift,
				    reg_info[i].val);

	return 0;
}
 540
 541void mhi_deinit_chan_ctxt(struct mhi_controller *mhi_cntrl,
 542                          struct mhi_chan *mhi_chan)
 543{
 544        struct mhi_ring *buf_ring;
 545        struct mhi_ring *tre_ring;
 546        struct mhi_chan_ctxt *chan_ctxt;
 547
 548        buf_ring = &mhi_chan->buf_ring;
 549        tre_ring = &mhi_chan->tre_ring;
 550        chan_ctxt = &mhi_cntrl->mhi_ctxt->chan_ctxt[mhi_chan->chan];
 551
 552        mhi_free_coherent(mhi_cntrl, tre_ring->alloc_size,
 553                          tre_ring->pre_aligned, tre_ring->dma_handle);
 554        vfree(buf_ring->base);
 555
 556        buf_ring->base = tre_ring->base = NULL;
 557        chan_ctxt->rbase = 0;
 558}
 559
 560int mhi_init_chan_ctxt(struct mhi_controller *mhi_cntrl,
 561                       struct mhi_chan *mhi_chan)
 562{
 563        struct mhi_ring *buf_ring;
 564        struct mhi_ring *tre_ring;
 565        struct mhi_chan_ctxt *chan_ctxt;
 566        u32 tmp;
 567        int ret;
 568
 569        buf_ring = &mhi_chan->buf_ring;
 570        tre_ring = &mhi_chan->tre_ring;
 571        tre_ring->el_size = sizeof(struct mhi_tre);
 572        tre_ring->len = tre_ring->el_size * tre_ring->elements;
 573        chan_ctxt = &mhi_cntrl->mhi_ctxt->chan_ctxt[mhi_chan->chan];
 574        ret = mhi_alloc_aligned_ring(mhi_cntrl, tre_ring, tre_ring->len);
 575        if (ret)
 576                return -ENOMEM;
 577
 578        buf_ring->el_size = sizeof(struct mhi_buf_info);
 579        buf_ring->len = buf_ring->el_size * buf_ring->elements;
 580        buf_ring->base = vzalloc(buf_ring->len);
 581
 582        if (!buf_ring->base) {
 583                mhi_free_coherent(mhi_cntrl, tre_ring->alloc_size,
 584                                  tre_ring->pre_aligned, tre_ring->dma_handle);
 585                return -ENOMEM;
 586        }
 587
 588        tmp = chan_ctxt->chcfg;
 589        tmp &= ~CHAN_CTX_CHSTATE_MASK;
 590        tmp |= (MHI_CH_STATE_ENABLED << CHAN_CTX_CHSTATE_SHIFT);
 591        chan_ctxt->chcfg = tmp;
 592
 593        chan_ctxt->rbase = tre_ring->iommu_base;
 594        chan_ctxt->rp = chan_ctxt->wp = chan_ctxt->rbase;
 595        chan_ctxt->rlen = tre_ring->len;
 596        tre_ring->ctxt_wp = &chan_ctxt->wp;
 597
 598        tre_ring->rp = tre_ring->wp = tre_ring->base;
 599        buf_ring->rp = buf_ring->wp = buf_ring->base;
 600        mhi_chan->db_cfg.db_mode = 1;
 601
 602        /* Update to all cores */
 603        smp_wmb();
 604
 605        return 0;
 606}
 607
 608static int parse_ev_cfg(struct mhi_controller *mhi_cntrl,
 609                        const struct mhi_controller_config *config)
 610{
 611        struct mhi_event *mhi_event;
 612        const struct mhi_event_config *event_cfg;
 613        struct device *dev = &mhi_cntrl->mhi_dev->dev;
 614        int i, num;
 615
 616        num = config->num_events;
 617        mhi_cntrl->total_ev_rings = num;
 618        mhi_cntrl->mhi_event = kcalloc(num, sizeof(*mhi_cntrl->mhi_event),
 619                                       GFP_KERNEL);
 620        if (!mhi_cntrl->mhi_event)
 621                return -ENOMEM;
 622
 623        /* Populate event ring */
 624        mhi_event = mhi_cntrl->mhi_event;
 625        for (i = 0; i < num; i++) {
 626                event_cfg = &config->event_cfg[i];
 627
 628                mhi_event->er_index = i;
 629                mhi_event->ring.elements = event_cfg->num_elements;
 630                mhi_event->intmod = event_cfg->irq_moderation_ms;
 631                mhi_event->irq = event_cfg->irq;
 632
 633                if (event_cfg->channel != U32_MAX) {
 634                        /* This event ring has a dedicated channel */
 635                        mhi_event->chan = event_cfg->channel;
 636                        if (mhi_event->chan >= mhi_cntrl->max_chan) {
 637                                dev_err(dev,
 638                                        "Event Ring channel not available\n");
 639                                goto error_ev_cfg;
 640                        }
 641
 642                        mhi_event->mhi_chan =
 643                                &mhi_cntrl->mhi_chan[mhi_event->chan];
 644                }
 645
 646                /* Priority is fixed to 1 for now */
 647                mhi_event->priority = 1;
 648
 649                mhi_event->db_cfg.brstmode = event_cfg->mode;
 650                if (MHI_INVALID_BRSTMODE(mhi_event->db_cfg.brstmode))
 651                        goto error_ev_cfg;
 652
 653                if (mhi_event->db_cfg.brstmode == MHI_DB_BRST_ENABLE)
 654                        mhi_event->db_cfg.process_db = mhi_db_brstmode;
 655                else
 656                        mhi_event->db_cfg.process_db = mhi_db_brstmode_disable;
 657
 658                mhi_event->data_type = event_cfg->data_type;
 659
 660                switch (mhi_event->data_type) {
 661                case MHI_ER_DATA:
 662                        mhi_event->process_event = mhi_process_data_event_ring;
 663                        break;
 664                case MHI_ER_CTRL:
 665                        mhi_event->process_event = mhi_process_ctrl_ev_ring;
 666                        break;
 667                default:
 668                        dev_err(dev, "Event Ring type not supported\n");
 669                        goto error_ev_cfg;
 670                }
 671
 672                mhi_event->hw_ring = event_cfg->hardware_event;
 673                if (mhi_event->hw_ring)
 674                        mhi_cntrl->hw_ev_rings++;
 675                else
 676                        mhi_cntrl->sw_ev_rings++;
 677
 678                mhi_event->cl_manage = event_cfg->client_managed;
 679                mhi_event->offload_ev = event_cfg->offload_channel;
 680                mhi_event++;
 681        }
 682
 683        return 0;
 684
 685error_ev_cfg:
 686
 687        kfree(mhi_cntrl->mhi_event);
 688        return -EINVAL;
 689}
 690
/*
 * Translate the controller's channel configuration into the runtime
 * mhi_chan array and validate each entry.
 *
 * Return: 0 on success, -ENOMEM or -EINVAL on failure.
 */
static int parse_ch_cfg(struct mhi_controller *mhi_cntrl,
			const struct mhi_controller_config *config)
{
	const struct mhi_channel_config *ch_cfg;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	int i;
	u32 chan;

	mhi_cntrl->max_chan = config->max_channels;

	/*
	 * The allocation of MHI channels can exceed 32KB in some scenarios,
	 * so to avoid any memory possible allocation failures, vzalloc is
	 * used here
	 */
	mhi_cntrl->mhi_chan = vzalloc(mhi_cntrl->max_chan *
				      sizeof(*mhi_cntrl->mhi_chan));
	if (!mhi_cntrl->mhi_chan)
		return -ENOMEM;

	INIT_LIST_HEAD(&mhi_cntrl->lpm_chans);

	/* Populate channel configurations */
	for (i = 0; i < config->num_channels; i++) {
		struct mhi_chan *mhi_chan;

		ch_cfg = &config->ch_cfg[i];

		/* Channel number doubles as index into mhi_chan[] */
		chan = ch_cfg->num;
		if (chan >= mhi_cntrl->max_chan) {
			dev_err(dev, "Channel %d not available\n", chan);
			goto error_chan_cfg;
		}

		mhi_chan = &mhi_cntrl->mhi_chan[chan];
		mhi_chan->name = ch_cfg->name;
		mhi_chan->chan = chan;

		mhi_chan->tre_ring.elements = ch_cfg->num_elements;
		if (!mhi_chan->tre_ring.elements)
			goto error_chan_cfg;

		/*
		 * For some channels, local ring length should be bigger than
		 * the transfer ring length due to internal logical channels
		 * in device. So host can queue much more buffers than transfer
		 * ring length. Example, RSC channels should have a larger local
		 * channel length than transfer ring length.
		 */
		mhi_chan->buf_ring.elements = ch_cfg->local_elements;
		if (!mhi_chan->buf_ring.elements)
			mhi_chan->buf_ring.elements = mhi_chan->tre_ring.elements;
		mhi_chan->er_index = ch_cfg->event_ring;
		mhi_chan->dir = ch_cfg->dir;

		/*
		 * For most channels, chtype is identical to channel directions.
		 * So, if it is not defined then assign channel direction to
		 * chtype
		 */
		mhi_chan->type = ch_cfg->type;
		if (!mhi_chan->type)
			mhi_chan->type = (enum mhi_ch_type)mhi_chan->dir;

		mhi_chan->ee_mask = ch_cfg->ee_mask;
		mhi_chan->db_cfg.pollcfg = ch_cfg->pollcfg;
		mhi_chan->lpm_notify = ch_cfg->lpm_notify;
		mhi_chan->offload_ch = ch_cfg->offload_channel;
		mhi_chan->db_cfg.reset_req = ch_cfg->doorbell_mode_switch;
		mhi_chan->pre_alloc = ch_cfg->auto_queue;
		mhi_chan->auto_start = ch_cfg->auto_start;

		/*
		 * If MHI host allocates buffers, then the channel direction
		 * should be DMA_FROM_DEVICE
		 */
		if (mhi_chan->pre_alloc && mhi_chan->dir != DMA_FROM_DEVICE) {
			dev_err(dev, "Invalid channel configuration\n");
			goto error_chan_cfg;
		}

		/*
		 * Bi-directional and direction less channel must be an
		 * offload channel
		 */
		if ((mhi_chan->dir == DMA_BIDIRECTIONAL ||
		     mhi_chan->dir == DMA_NONE) && !mhi_chan->offload_ch) {
			dev_err(dev, "Invalid channel configuration\n");
			goto error_chan_cfg;
		}

		/* Offload channels never ring doorbells from this driver */
		if (!mhi_chan->offload_ch) {
			mhi_chan->db_cfg.brstmode = ch_cfg->doorbell;
			if (MHI_INVALID_BRSTMODE(mhi_chan->db_cfg.brstmode)) {
				dev_err(dev, "Invalid Door bell mode\n");
				goto error_chan_cfg;
			}
		}

		if (mhi_chan->db_cfg.brstmode == MHI_DB_BRST_ENABLE)
			mhi_chan->db_cfg.process_db = mhi_db_brstmode;
		else
			mhi_chan->db_cfg.process_db = mhi_db_brstmode_disable;

		mhi_chan->configured = true;

		/* Channels wanting LPM notifications join the lpm list */
		if (mhi_chan->lpm_notify)
			list_add_tail(&mhi_chan->node, &mhi_cntrl->lpm_chans);
	}

	return 0;

error_chan_cfg:
	vfree(mhi_cntrl->mhi_chan);

	return -EINVAL;
}
 808
 809static int parse_config(struct mhi_controller *mhi_cntrl,
 810                        const struct mhi_controller_config *config)
 811{
 812        int ret;
 813
 814        /* Parse MHI channel configuration */
 815        ret = parse_ch_cfg(mhi_cntrl, config);
 816        if (ret)
 817                return ret;
 818
 819        /* Parse MHI event configuration */
 820        ret = parse_ev_cfg(mhi_cntrl, config);
 821        if (ret)
 822                goto error_ev_cfg;
 823
 824        mhi_cntrl->timeout_ms = config->timeout_ms;
 825        if (!mhi_cntrl->timeout_ms)
 826                mhi_cntrl->timeout_ms = MHI_TIMEOUT_MS;
 827
 828        mhi_cntrl->bounce_buf = config->use_bounce_buf;
 829        mhi_cntrl->buffer_len = config->buf_len;
 830        if (!mhi_cntrl->buffer_len)
 831                mhi_cntrl->buffer_len = MHI_MAX_MTU;
 832
 833        /* By default, host is allowed to ring DB in both M0 and M2 states */
 834        mhi_cntrl->db_access = MHI_PM_M0 | MHI_PM_M2;
 835        if (config->m2_no_db)
 836                mhi_cntrl->db_access &= ~MHI_PM_M2;
 837
 838        return 0;
 839
 840error_ev_cfg:
 841        vfree(mhi_cntrl->mhi_chan);
 842
 843        return ret;
 844}
 845
 846int mhi_register_controller(struct mhi_controller *mhi_cntrl,
 847                            const struct mhi_controller_config *config)
 848{
 849        struct mhi_event *mhi_event;
 850        struct mhi_chan *mhi_chan;
 851        struct mhi_cmd *mhi_cmd;
 852        struct mhi_device *mhi_dev;
 853        u32 soc_info;
 854        int ret, i;
 855
 856        if (!mhi_cntrl)
 857                return -EINVAL;
 858
 859        if (!mhi_cntrl->runtime_get || !mhi_cntrl->runtime_put ||
 860            !mhi_cntrl->status_cb || !mhi_cntrl->read_reg ||
 861            !mhi_cntrl->write_reg)
 862                return -EINVAL;
 863
 864        ret = parse_config(mhi_cntrl, config);
 865        if (ret)
 866                return -EINVAL;
 867
 868        mhi_cntrl->mhi_cmd = kcalloc(NR_OF_CMD_RINGS,
 869                                     sizeof(*mhi_cntrl->mhi_cmd), GFP_KERNEL);
 870        if (!mhi_cntrl->mhi_cmd) {
 871                ret = -ENOMEM;
 872                goto error_alloc_cmd;
 873        }
 874
 875        INIT_LIST_HEAD(&mhi_cntrl->transition_list);
 876        mutex_init(&mhi_cntrl->pm_mutex);
 877        rwlock_init(&mhi_cntrl->pm_lock);
 878        spin_lock_init(&mhi_cntrl->transition_lock);
 879        spin_lock_init(&mhi_cntrl->wlock);
 880        INIT_WORK(&mhi_cntrl->st_worker, mhi_pm_st_worker);
 881        init_waitqueue_head(&mhi_cntrl->state_event);
 882
 883        mhi_cmd = mhi_cntrl->mhi_cmd;
 884        for (i = 0; i < NR_OF_CMD_RINGS; i++, mhi_cmd++)
 885                spin_lock_init(&mhi_cmd->lock);
 886
 887        mhi_event = mhi_cntrl->mhi_event;
 888        for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
 889                /* Skip for offload events */
 890                if (mhi_event->offload_ev)
 891                        continue;
 892
 893                mhi_event->mhi_cntrl = mhi_cntrl;
 894                spin_lock_init(&mhi_event->lock);
 895                if (mhi_event->data_type == MHI_ER_CTRL)
 896                        tasklet_init(&mhi_event->task, mhi_ctrl_ev_task,
 897                                     (ulong)mhi_event);
 898                else
 899                        tasklet_init(&mhi_event->task, mhi_ev_task,
 900                                     (ulong)mhi_event);
 901        }
 902
 903        mhi_chan = mhi_cntrl->mhi_chan;
 904        for (i = 0; i < mhi_cntrl->max_chan; i++, mhi_chan++) {
 905                mutex_init(&mhi_chan->mutex);
 906                init_completion(&mhi_chan->completion);
 907                rwlock_init(&mhi_chan->lock);
 908
 909                /* used in setting bei field of TRE */
 910                mhi_event = &mhi_cntrl->mhi_event[mhi_chan->er_index];
 911                mhi_chan->intmod = mhi_event->intmod;
 912        }
 913
 914        if (mhi_cntrl->bounce_buf) {
 915                mhi_cntrl->map_single = mhi_map_single_use_bb;
 916                mhi_cntrl->unmap_single = mhi_unmap_single_use_bb;
 917        } else {
 918                mhi_cntrl->map_single = mhi_map_single_no_bb;
 919                mhi_cntrl->unmap_single = mhi_unmap_single_no_bb;
 920        }
 921
 922        /* Read the MHI device info */
 923        ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->regs,
 924                           SOC_HW_VERSION_OFFS, &soc_info);
 925        if (ret)
 926                goto error_alloc_dev;
 927
 928        mhi_cntrl->family_number = (soc_info & SOC_HW_VERSION_FAM_NUM_BMSK) >>
 929                                        SOC_HW_VERSION_FAM_NUM_SHFT;
 930        mhi_cntrl->device_number = (soc_info & SOC_HW_VERSION_DEV_NUM_BMSK) >>
 931                                        SOC_HW_VERSION_DEV_NUM_SHFT;
 932        mhi_cntrl->major_version = (soc_info & SOC_HW_VERSION_MAJOR_VER_BMSK) >>
 933                                        SOC_HW_VERSION_MAJOR_VER_SHFT;
 934        mhi_cntrl->minor_version = (soc_info & SOC_HW_VERSION_MINOR_VER_BMSK) >>
 935                                        SOC_HW_VERSION_MINOR_VER_SHFT;
 936
 937        /* Register controller with MHI bus */
 938        mhi_dev = mhi_alloc_device(mhi_cntrl);
 939        if (IS_ERR(mhi_dev)) {
 940                dev_err(mhi_cntrl->cntrl_dev, "Failed to allocate MHI device\n");
 941                ret = PTR_ERR(mhi_dev);
 942                goto error_alloc_dev;
 943        }
 944
 945        mhi_dev->dev_type = MHI_DEVICE_CONTROLLER;
 946        mhi_dev->mhi_cntrl = mhi_cntrl;
 947        dev_set_name(&mhi_dev->dev, "%s", dev_name(mhi_cntrl->cntrl_dev));
 948        mhi_dev->name = dev_name(mhi_cntrl->cntrl_dev);
 949
 950        /* Init wakeup source */
 951        device_init_wakeup(&mhi_dev->dev, true);
 952
 953        ret = device_add(&mhi_dev->dev);
 954        if (ret)
 955                goto error_add_dev;
 956
 957        mhi_cntrl->mhi_dev = mhi_dev;
 958
 959        mhi_create_debugfs(mhi_cntrl);
 960
 961        return 0;
 962
 963error_add_dev:
 964        put_device(&mhi_dev->dev);
 965
 966error_alloc_dev:
 967        kfree(mhi_cntrl->mhi_cmd);
 968
 969error_alloc_cmd:
 970        vfree(mhi_cntrl->mhi_chan);
 971        kfree(mhi_cntrl->mhi_event);
 972
 973        return ret;
 974}
 975EXPORT_SYMBOL_GPL(mhi_register_controller);
 976
 977void mhi_unregister_controller(struct mhi_controller *mhi_cntrl)
 978{
 979        struct mhi_device *mhi_dev = mhi_cntrl->mhi_dev;
 980        struct mhi_chan *mhi_chan = mhi_cntrl->mhi_chan;
 981        unsigned int i;
 982
 983        mhi_destroy_debugfs(mhi_cntrl);
 984
 985        kfree(mhi_cntrl->mhi_cmd);
 986        kfree(mhi_cntrl->mhi_event);
 987
 988        /* Drop the references to MHI devices created for channels */
 989        for (i = 0; i < mhi_cntrl->max_chan; i++, mhi_chan++) {
 990                if (!mhi_chan->mhi_dev)
 991                        continue;
 992
 993                put_device(&mhi_chan->mhi_dev->dev);
 994        }
 995        vfree(mhi_cntrl->mhi_chan);
 996
 997        device_del(&mhi_dev->dev);
 998        put_device(&mhi_dev->dev);
 999}
1000EXPORT_SYMBOL_GPL(mhi_unregister_controller);
1001
1002struct mhi_controller *mhi_alloc_controller(void)
1003{
1004        struct mhi_controller *mhi_cntrl;
1005
1006        mhi_cntrl = kzalloc(sizeof(*mhi_cntrl), GFP_KERNEL);
1007
1008        return mhi_cntrl;
1009}
1010EXPORT_SYMBOL_GPL(mhi_alloc_controller);
1011
/* Counterpart of mhi_alloc_controller() */
void mhi_free_controller(struct mhi_controller *mhi_cntrl)
{
	kfree(mhi_cntrl);
}
EXPORT_SYMBOL_GPL(mhi_free_controller);
1017
/**
 * mhi_prepare_for_power_up - Allocate controller resources before power up
 * @mhi_cntrl: Controller to prepare
 *
 * Initializes the device context and, when the controller specifies a
 * rddm_size, allocates the RDDM (RAM dump mode) table used for debugging.
 * Serialized against other PM operations via pm_mutex.
 *
 * Return: 0 on success, a negative error code otherwise.
 */
int mhi_prepare_for_power_up(struct mhi_controller *mhi_cntrl)
{
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	u32 bhie_off;
	int ret;

	mutex_lock(&mhi_cntrl->pm_mutex);

	ret = mhi_init_dev_ctxt(mhi_cntrl);
	if (ret)
		goto error_dev_ctxt;

	/*
	 * Allocate RDDM table if specified, this table is for debugging purpose
	 */
	if (mhi_cntrl->rddm_size) {
		/*
		 * Best-effort allocation: on failure rddm_image stays NULL
		 * and the rddm_image checks below skip RDDM setup.
		 */
		mhi_alloc_bhie_table(mhi_cntrl, &mhi_cntrl->rddm_image,
				     mhi_cntrl->rddm_size);

		/*
		 * This controller supports RDDM, so we need to manually clear
		 * BHIE RX registers since POR values are undefined.
		 */
		ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->regs, BHIEOFF,
				   &bhie_off);
		if (ret) {
			dev_err(dev, "Error getting BHIE offset\n");
			goto bhie_error;
		}

		/*
		 * NOTE(review): bhie_off comes from the device and is not
		 * range-checked against the mapped register region —
		 * confirm the device is trusted to report a sane offset.
		 */
		mhi_cntrl->bhie = mhi_cntrl->regs + bhie_off;
		/* Zero the RX vector registers (ADDR_LOW through STATUS) */
		memset_io(mhi_cntrl->bhie + BHIE_RXVECADDR_LOW_OFFS,
			  0, BHIE_RXVECSTATUS_OFFS - BHIE_RXVECADDR_LOW_OFFS +
			  4);

		if (mhi_cntrl->rddm_image)
			mhi_rddm_prepare(mhi_cntrl, mhi_cntrl->rddm_image);
	}

	/* Record that pre-power-up init is done */
	mhi_cntrl->pre_init = true;

	mutex_unlock(&mhi_cntrl->pm_mutex);

	return 0;

bhie_error:
	/* Release the RDDM table allocated above, if any */
	if (mhi_cntrl->rddm_image) {
		mhi_free_bhie_table(mhi_cntrl, mhi_cntrl->rddm_image);
		mhi_cntrl->rddm_image = NULL;
	}

error_dev_ctxt:
	mutex_unlock(&mhi_cntrl->pm_mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(mhi_prepare_for_power_up);
1075
1076void mhi_unprepare_after_power_down(struct mhi_controller *mhi_cntrl)
1077{
1078        if (mhi_cntrl->fbc_image) {
1079                mhi_free_bhie_table(mhi_cntrl, mhi_cntrl->fbc_image);
1080                mhi_cntrl->fbc_image = NULL;
1081        }
1082
1083        if (mhi_cntrl->rddm_image) {
1084                mhi_free_bhie_table(mhi_cntrl, mhi_cntrl->rddm_image);
1085                mhi_cntrl->rddm_image = NULL;
1086        }
1087
1088        mhi_deinit_dev_ctxt(mhi_cntrl);
1089        mhi_cntrl->pre_init = false;
1090}
1091EXPORT_SYMBOL_GPL(mhi_unprepare_after_power_down);
1092
1093static void mhi_release_device(struct device *dev)
1094{
1095        struct mhi_device *mhi_dev = to_mhi_device(dev);
1096
1097        /*
1098         * We need to set the mhi_chan->mhi_dev to NULL here since the MHI
1099         * devices for the channels will only get created if the mhi_dev
1100         * associated with it is NULL. This scenario will happen during the
1101         * controller suspend and resume.
1102         */
1103        if (mhi_dev->ul_chan)
1104                mhi_dev->ul_chan->mhi_dev = NULL;
1105
1106        if (mhi_dev->dl_chan)
1107                mhi_dev->dl_chan->mhi_dev = NULL;
1108
1109        kfree(mhi_dev);
1110}
1111
1112struct mhi_device *mhi_alloc_device(struct mhi_controller *mhi_cntrl)
1113{
1114        struct mhi_device *mhi_dev;
1115        struct device *dev;
1116
1117        mhi_dev = kzalloc(sizeof(*mhi_dev), GFP_KERNEL);
1118        if (!mhi_dev)
1119                return ERR_PTR(-ENOMEM);
1120
1121        dev = &mhi_dev->dev;
1122        device_initialize(dev);
1123        dev->bus = &mhi_bus_type;
1124        dev->release = mhi_release_device;
1125        dev->parent = mhi_cntrl->cntrl_dev;
1126        mhi_dev->mhi_cntrl = mhi_cntrl;
1127        mhi_dev->dev_wake = 0;
1128
1129        return mhi_dev;
1130}
1131
/*
 * Bus probe callback: validate the client driver's callbacks against the
 * channel configuration, optionally auto-prepare channels, then invoke the
 * driver's probe. The device is voted out of low power mode for the
 * duration of probe.
 */
static int mhi_driver_probe(struct device *dev)
{
	struct mhi_device *mhi_dev = to_mhi_device(dev);
	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
	struct device_driver *drv = dev->driver;
	struct mhi_driver *mhi_drv = to_mhi_driver(drv);
	struct mhi_event *mhi_event;
	struct mhi_chan *ul_chan = mhi_dev->ul_chan;
	struct mhi_chan *dl_chan = mhi_dev->dl_chan;
	int ret;

	/* Bring device out of LPM */
	ret = mhi_device_get_sync(mhi_dev);
	if (ret)
		return ret;

	ret = -EINVAL;

	if (ul_chan) {
		/*
		 * If channel supports LPM notifications then status_cb should
		 * be provided
		 */
		if (ul_chan->lpm_notify && !mhi_drv->status_cb)
			goto exit_probe;

		/* For non-offload channels then xfer_cb should be provided */
		if (!ul_chan->offload_ch && !mhi_drv->ul_xfer_cb)
			goto exit_probe;

		ul_chan->xfer_cb = mhi_drv->ul_xfer_cb;
		if (ul_chan->auto_start) {
			ret = mhi_prepare_channel(mhi_cntrl, ul_chan);
			if (ret)
				goto exit_probe;
		}
	}

	/* Reset so the dl_chan validation failures below report -EINVAL */
	ret = -EINVAL;
	if (dl_chan) {
		/*
		 * If channel supports LPM notifications then status_cb should
		 * be provided
		 */
		if (dl_chan->lpm_notify && !mhi_drv->status_cb)
			goto exit_probe;

		/* For non-offload channels then xfer_cb should be provided */
		if (!dl_chan->offload_ch && !mhi_drv->dl_xfer_cb)
			goto exit_probe;

		mhi_event = &mhi_cntrl->mhi_event[dl_chan->er_index];

		/*
		 * If the channel event ring is managed by client, then
		 * status_cb must be provided so that the framework can
		 * notify pending data
		 */
		if (mhi_event->cl_manage && !mhi_drv->status_cb)
			goto exit_probe;

		dl_chan->xfer_cb = mhi_drv->dl_xfer_cb;
	}

	/* Call the user provided probe function */
	ret = mhi_drv->probe(mhi_dev, mhi_dev->id);
	if (ret)
		goto exit_probe;

	/*
	 * NOTE(review): the return value of this auto-start prepare is
	 * ignored; a failure here leaves the DL channel unprepared after a
	 * successful probe — confirm this best-effort behavior is intended.
	 */
	if (dl_chan && dl_chan->auto_start)
		mhi_prepare_channel(mhi_cntrl, dl_chan);

	/* Drop the LPM vote taken at the top of probe */
	mhi_device_put(mhi_dev);

	return ret;

exit_probe:
	/* Undo any channel preparation done above, then drop the LPM vote */
	mhi_unprepare_from_transfer(mhi_dev);

	mhi_device_put(mhi_dev);

	return ret;
}
1215
/*
 * Bus remove callback: quiesce and reset both channels of the device,
 * call the client driver's remove, then de-initialize channel contexts
 * and drop any remaining device wake votes.
 */
static int mhi_driver_remove(struct device *dev)
{
	struct mhi_device *mhi_dev = to_mhi_device(dev);
	struct mhi_driver *mhi_drv = to_mhi_driver(dev->driver);
	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
	struct mhi_chan *mhi_chan;
	/* Previous state of each direction, captured before the reset below */
	enum mhi_ch_state ch_state[] = {
		MHI_CH_STATE_DISABLED,
		MHI_CH_STATE_DISABLED
	};
	int dir;

	/* Skip if it is a controller device */
	if (mhi_dev->dev_type == MHI_DEVICE_CONTROLLER)
		return 0;

	/* Reset both channels */
	for (dir = 0; dir < 2; dir++) {
		mhi_chan = dir ? mhi_dev->ul_chan : mhi_dev->dl_chan;

		if (!mhi_chan)
			continue;

		/* Wake all threads waiting for completion */
		write_lock_irq(&mhi_chan->lock);
		mhi_chan->ccs = MHI_EV_CC_INVALID;
		complete_all(&mhi_chan->completion);
		write_unlock_irq(&mhi_chan->lock);

		/* Set the channel state to disabled */
		mutex_lock(&mhi_chan->mutex);
		write_lock_irq(&mhi_chan->lock);
		/* Remember the old state for the de-init pass below */
		ch_state[dir] = mhi_chan->ch_state;
		mhi_chan->ch_state = MHI_CH_STATE_SUSPENDED;
		write_unlock_irq(&mhi_chan->lock);

		/* Reset the non-offload channel */
		if (!mhi_chan->offload_ch)
			mhi_reset_chan(mhi_cntrl, mhi_chan);

		mutex_unlock(&mhi_chan->mutex);
	}

	/* Client driver cleanup runs after channels are quiesced */
	mhi_drv->remove(mhi_dev);

	/* De-init channel if it was enabled */
	for (dir = 0; dir < 2; dir++) {
		mhi_chan = dir ? mhi_dev->ul_chan : mhi_dev->dl_chan;

		if (!mhi_chan)
			continue;

		mutex_lock(&mhi_chan->mutex);

		if (ch_state[dir] == MHI_CH_STATE_ENABLED &&
		    !mhi_chan->offload_ch)
			mhi_deinit_chan_ctxt(mhi_cntrl, mhi_chan);

		mhi_chan->ch_state = MHI_CH_STATE_DISABLED;

		mutex_unlock(&mhi_chan->mutex);
	}

	/* Release every wake vote the client still holds on this device */
	read_lock_bh(&mhi_cntrl->pm_lock);
	while (mhi_dev->dev_wake)
		mhi_device_put(mhi_dev);
	read_unlock_bh(&mhi_cntrl->pm_lock);

	return 0;
}
1286
1287int __mhi_driver_register(struct mhi_driver *mhi_drv, struct module *owner)
1288{
1289        struct device_driver *driver = &mhi_drv->driver;
1290
1291        if (!mhi_drv->probe || !mhi_drv->remove)
1292                return -EINVAL;
1293
1294        driver->bus = &mhi_bus_type;
1295        driver->owner = owner;
1296        driver->probe = mhi_driver_probe;
1297        driver->remove = mhi_driver_remove;
1298
1299        return driver_register(driver);
1300}
1301EXPORT_SYMBOL_GPL(__mhi_driver_register);
1302
1303void mhi_driver_unregister(struct mhi_driver *mhi_drv)
1304{
1305        driver_unregister(&mhi_drv->driver);
1306}
1307EXPORT_SYMBOL_GPL(mhi_driver_unregister);
1308
1309static int mhi_uevent(struct device *dev, struct kobj_uevent_env *env)
1310{
1311        struct mhi_device *mhi_dev = to_mhi_device(dev);
1312
1313        return add_uevent_var(env, "MODALIAS=" MHI_DEVICE_MODALIAS_FMT,
1314                                        mhi_dev->name);
1315}
1316
1317static int mhi_match(struct device *dev, struct device_driver *drv)
1318{
1319        struct mhi_device *mhi_dev = to_mhi_device(dev);
1320        struct mhi_driver *mhi_drv = to_mhi_driver(drv);
1321        const struct mhi_device_id *id;
1322
1323        /*
1324         * If the device is a controller type then there is no client driver
1325         * associated with it
1326         */
1327        if (mhi_dev->dev_type == MHI_DEVICE_CONTROLLER)
1328                return 0;
1329
1330        for (id = mhi_drv->id_table; id->chan[0]; id++)
1331                if (!strcmp(mhi_dev->name, id->chan)) {
1332                        mhi_dev->id = id;
1333                        return 1;
1334                }
1335
1336        return 0;
1337};
1338
/* MHI bus: binds channel devices to client drivers by channel name */
struct bus_type mhi_bus_type = {
	.name = "mhi",
	.dev_name = "mhi",
	.match = mhi_match,	/* channel-name based matching */
	.uevent = mhi_uevent,	/* emits MODALIAS for module autoloading */
	.dev_groups = mhi_dev_groups,
};
1346
1347static int __init mhi_init(void)
1348{
1349        mhi_debugfs_init();
1350        return bus_register(&mhi_bus_type);
1351}
1352
/* Module exit: tear down debugfs entries, then unregister the MHI bus */
static void __exit mhi_exit(void)
{
	mhi_debugfs_exit();
	bus_unregister(&mhi_bus_type);
}
1358
1359postcore_initcall(mhi_init);
1360module_exit(mhi_exit);
1361
1362MODULE_LICENSE("GPL v2");
1363MODULE_DESCRIPTION("MHI Host Interface");
1364