linux/drivers/mmc/host/cqhci-core.c
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2015, The Linux Foundation. All rights reserved.
 */

#include <linux/delay.h>
#include <linux/highmem.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/scatterlist.h>
#include <linux/platform_device.h>
#include <linux/ktime.h>

#include <linux/mmc/mmc.h>
#include <linux/mmc/host.h>
#include <linux/mmc/card.h>

#include "cqhci.h"
#include "cqhci-crypto.h"

#define DCMD_SLOT 31
#define NUM_SLOTS 32

struct cqhci_slot {
        struct mmc_request *mrq;
        unsigned int flags;
#define CQHCI_EXTERNAL_TIMEOUT  BIT(0)
#define CQHCI_COMPLETED         BIT(1)
#define CQHCI_HOST_CRC          BIT(2)
#define CQHCI_HOST_TIMEOUT      BIT(3)
#define CQHCI_HOST_OTHER        BIT(4)
};

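/*
 * Each slot in the descriptor list holds a task descriptor followed
 * immediately by its link descriptor, so a task's descriptors live at
 * tag * slot_sz within desc_base.
 */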
static inline u8 *get_desc(struct cqhci_host *cq_host, u8 tag)
{
        return cq_host->desc_base + (tag * cq_host->slot_sz);
}

static inline u8 *get_link_desc(struct cqhci_host *cq_host, u8 tag)
{
        u8 *desc = get_desc(cq_host, tag);

        return desc + cq_host->task_desc_len;
}

static inline size_t get_trans_desc_offset(struct cqhci_host *cq_host, u8 tag)
{
        return cq_host->trans_desc_len * cq_host->mmc->max_segs * tag;
}

static inline dma_addr_t get_trans_desc_dma(struct cqhci_host *cq_host, u8 tag)
{
        size_t offset = get_trans_desc_offset(cq_host, tag);

        return cq_host->trans_desc_dma_base + offset;
}

static inline u8 *get_trans_desc(struct cqhci_host *cq_host, u8 tag)
{
        size_t offset = get_trans_desc_offset(cq_host, tag);

        return cq_host->trans_desc_base + offset;
}

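/*
 * Initialize a slot's link descriptor to point at that slot's chunk of the
 * transfer descriptor area. The DCMD slot instead gets an end marker
 * (VALID=0, END=1), since a direct command transfers no data.
 */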
static void setup_trans_desc(struct cqhci_host *cq_host, u8 tag)
{
        u8 *link_temp;
        dma_addr_t trans_temp;

        link_temp = get_link_desc(cq_host, tag);
        trans_temp = get_trans_desc_dma(cq_host, tag);

        memset(link_temp, 0, cq_host->link_desc_len);
        if (cq_host->link_desc_len > 8)
                *(link_temp + 8) = 0;

        if (tag == DCMD_SLOT && (cq_host->mmc->caps2 & MMC_CAP2_CQE_DCMD)) {
                *link_temp = CQHCI_VALID(0) | CQHCI_ACT(0) | CQHCI_END(1);
                return;
        }

        *link_temp = CQHCI_VALID(1) | CQHCI_ACT(0x6) | CQHCI_END(0);

        if (cq_host->dma64) {
                __le64 *data_addr = (__le64 __force *)(link_temp + 4);

                data_addr[0] = cpu_to_le64(trans_temp);
        } else {
                __le32 *data_addr = (__le32 __force *)(link_temp + 4);

                data_addr[0] = cpu_to_le32(trans_temp);
        }
}

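/* Mirror the same mask into both the status enable and signal enable regs */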
static void cqhci_set_irqs(struct cqhci_host *cq_host, u32 set)
{
        cqhci_writel(cq_host, set, CQHCI_ISTE);
        cqhci_writel(cq_host, set, CQHCI_ISGE);
}

#define DRV_NAME "cqhci"

#define CQHCI_DUMP(f, x...) \
        pr_err("%s: " DRV_NAME ": " f, mmc_hostname(mmc), ## x)

static void cqhci_dumpregs(struct cqhci_host *cq_host)
{
        struct mmc_host *mmc = cq_host->mmc;

        CQHCI_DUMP("============ CQHCI REGISTER DUMP ===========\n");

        CQHCI_DUMP("Caps:      0x%08x | Version:  0x%08x\n",
                   cqhci_readl(cq_host, CQHCI_CAP),
                   cqhci_readl(cq_host, CQHCI_VER));
        CQHCI_DUMP("Config:    0x%08x | Control:  0x%08x\n",
                   cqhci_readl(cq_host, CQHCI_CFG),
                   cqhci_readl(cq_host, CQHCI_CTL));
        CQHCI_DUMP("Int stat:  0x%08x | Int enab: 0x%08x\n",
                   cqhci_readl(cq_host, CQHCI_IS),
                   cqhci_readl(cq_host, CQHCI_ISTE));
        CQHCI_DUMP("Int sig:   0x%08x | Int Coal: 0x%08x\n",
                   cqhci_readl(cq_host, CQHCI_ISGE),
                   cqhci_readl(cq_host, CQHCI_IC));
        CQHCI_DUMP("TDL base:  0x%08x | TDL up32: 0x%08x\n",
                   cqhci_readl(cq_host, CQHCI_TDLBA),
                   cqhci_readl(cq_host, CQHCI_TDLBAU));
        CQHCI_DUMP("Doorbell:  0x%08x | TCN:      0x%08x\n",
                   cqhci_readl(cq_host, CQHCI_TDBR),
                   cqhci_readl(cq_host, CQHCI_TCN));
        CQHCI_DUMP("Dev queue: 0x%08x | Dev Pend: 0x%08x\n",
                   cqhci_readl(cq_host, CQHCI_DQS),
                   cqhci_readl(cq_host, CQHCI_DPT));
        CQHCI_DUMP("Task clr:  0x%08x | SSC1:     0x%08x\n",
                   cqhci_readl(cq_host, CQHCI_TCLR),
                   cqhci_readl(cq_host, CQHCI_SSC1));
        CQHCI_DUMP("SSC2:      0x%08x | DCMD rsp: 0x%08x\n",
                   cqhci_readl(cq_host, CQHCI_SSC2),
                   cqhci_readl(cq_host, CQHCI_CRDCT));
        CQHCI_DUMP("RED mask:  0x%08x | TERRI:    0x%08x\n",
                   cqhci_readl(cq_host, CQHCI_RMEM),
                   cqhci_readl(cq_host, CQHCI_TERRI));
        CQHCI_DUMP("Resp idx:  0x%08x | Resp arg: 0x%08x\n",
                   cqhci_readl(cq_host, CQHCI_CRI),
                   cqhci_readl(cq_host, CQHCI_CRA));

        if (cq_host->ops->dumpregs)
                cq_host->ops->dumpregs(mmc);
        else
                CQHCI_DUMP(": ===========================================\n");
}

/*
 * The allocated descriptor table for task, link & transfer descriptors
 * looks like:
 * |----------|
 * |task desc |  |->|----------|
 * |----------|  |  |trans desc|
 * |link desc-|->|  |----------|
 * |----------|          .
 *      .                .
 *  no. of slots      max-segs
 *      .           |----------|
 * |----------|
 * The idea here is to create the [task+trans] table and mark & point the
 * link desc to the transfer desc table on a per slot basis.
 */
static int cqhci_host_alloc_tdl(struct cqhci_host *cq_host)
{
        int i;

        /* task descriptor can be 64/128 bit irrespective of arch */
        if (cq_host->caps & CQHCI_TASK_DESC_SZ_128) {
                cqhci_writel(cq_host, cqhci_readl(cq_host, CQHCI_CFG) |
                               CQHCI_TASK_DESC_SZ, CQHCI_CFG);
                cq_host->task_desc_len = 16;
        } else {
                cq_host->task_desc_len = 8;
        }

        /*
         * The transfer descriptor can be 96 bits long instead of 128 bits,
         * in which case ADMA expects the next valid descriptor at the
         * 96-bit rather than the 128-bit boundary.
         */
        if (cq_host->dma64) {
                if (cq_host->quirks & CQHCI_QUIRK_SHORT_TXFR_DESC_SZ)
                        cq_host->trans_desc_len = 12;
                else
                        cq_host->trans_desc_len = 16;
                cq_host->link_desc_len = 16;
        } else {
                cq_host->trans_desc_len = 8;
                cq_host->link_desc_len = 8;
        }

        /* total size of a slot: 1 task & 1 transfer (link) */
        cq_host->slot_sz = cq_host->task_desc_len + cq_host->link_desc_len;

        cq_host->desc_size = cq_host->slot_sz * cq_host->num_slots;

        cq_host->data_size = get_trans_desc_offset(cq_host, cq_host->mmc->cqe_qdepth);

        pr_debug("%s: cqhci: desc_size: %zu data_sz: %zu slot-sz: %d\n",
                 mmc_hostname(cq_host->mmc), cq_host->desc_size, cq_host->data_size,
                 cq_host->slot_sz);

        /*
         * allocate a dma-mapped chunk of memory for the descriptors
         * allocate a dma-mapped chunk of memory for link descriptors
         * setup each link-desc memory offset per slot-number to
         * the descriptor table.
         */
        cq_host->desc_base = dmam_alloc_coherent(mmc_dev(cq_host->mmc),
                                                 cq_host->desc_size,
                                                 &cq_host->desc_dma_base,
                                                 GFP_KERNEL);
        if (!cq_host->desc_base)
                return -ENOMEM;

        cq_host->trans_desc_base = dmam_alloc_coherent(mmc_dev(cq_host->mmc),
                                              cq_host->data_size,
                                              &cq_host->trans_desc_dma_base,
                                              GFP_KERNEL);
        if (!cq_host->trans_desc_base) {
                dmam_free_coherent(mmc_dev(cq_host->mmc), cq_host->desc_size,
                                   cq_host->desc_base,
                                   cq_host->desc_dma_base);
                cq_host->desc_base = NULL;
                cq_host->desc_dma_base = 0;
                return -ENOMEM;
        }

        pr_debug("%s: cqhci: desc-base: 0x%p trans-base: 0x%p\n desc_dma 0x%llx trans_dma: 0x%llx\n",
                 mmc_hostname(cq_host->mmc), cq_host->desc_base, cq_host->trans_desc_base,
                (unsigned long long)cq_host->desc_dma_base,
                (unsigned long long)cq_host->trans_desc_dma_base);

        for (i = 0; i < cq_host->num_slots; i++)
                setup_trans_desc(cq_host, i);

        return 0;
}

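/*
 * Program CQHCI_CFG (DCMD support, task descriptor size, crypto), point the
 * controller at the task descriptor list, set the card's RCA for the status
 * commands the controller sends, then set the enable bit and release any
 * halt.
 */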
static void __cqhci_enable(struct cqhci_host *cq_host)
{
        struct mmc_host *mmc = cq_host->mmc;
        u32 cqcfg;

        cqcfg = cqhci_readl(cq_host, CQHCI_CFG);

        /* Configuration must not be changed while enabled */
        if (cqcfg & CQHCI_ENABLE) {
                cqcfg &= ~CQHCI_ENABLE;
                cqhci_writel(cq_host, cqcfg, CQHCI_CFG);
        }

        cqcfg &= ~(CQHCI_DCMD | CQHCI_TASK_DESC_SZ);

        if (mmc->caps2 & MMC_CAP2_CQE_DCMD)
                cqcfg |= CQHCI_DCMD;

        if (cq_host->caps & CQHCI_TASK_DESC_SZ_128)
                cqcfg |= CQHCI_TASK_DESC_SZ;

        if (mmc->caps2 & MMC_CAP2_CRYPTO)
                cqcfg |= CQHCI_CRYPTO_GENERAL_ENABLE;

        cqhci_writel(cq_host, cqcfg, CQHCI_CFG);

        cqhci_writel(cq_host, lower_32_bits(cq_host->desc_dma_base),
                     CQHCI_TDLBA);
        cqhci_writel(cq_host, upper_32_bits(cq_host->desc_dma_base),
                     CQHCI_TDLBAU);

        cqhci_writel(cq_host, cq_host->rca, CQHCI_SSC2);

        cqhci_set_irqs(cq_host, 0);

        cqcfg |= CQHCI_ENABLE;

        cqhci_writel(cq_host, cqcfg, CQHCI_CFG);

        if (cqhci_readl(cq_host, CQHCI_CTL) & CQHCI_HALT)
                cqhci_writel(cq_host, 0, CQHCI_CTL);

        mmc->cqe_on = true;

        if (cq_host->ops->enable)
                cq_host->ops->enable(mmc);

        /* Ensure all writes are done before interrupts are enabled */
        wmb();

        cqhci_set_irqs(cq_host, CQHCI_IS_MASK);

        cq_host->activated = true;
}

static void __cqhci_disable(struct cqhci_host *cq_host)
{
        u32 cqcfg;

        cqcfg = cqhci_readl(cq_host, CQHCI_CFG);
        cqcfg &= ~CQHCI_ENABLE;
        cqhci_writel(cq_host, cqcfg, CQHCI_CFG);

        cq_host->mmc->cqe_on = false;

        cq_host->activated = false;
}

int cqhci_deactivate(struct mmc_host *mmc)
{
        struct cqhci_host *cq_host = mmc->cqe_private;

        if (cq_host->enabled && cq_host->activated)
                __cqhci_disable(cq_host);

        return 0;
}
EXPORT_SYMBOL(cqhci_deactivate);

int cqhci_resume(struct mmc_host *mmc)
{
        /* Re-enable is done upon first request */
        return 0;
}
EXPORT_SYMBOL(cqhci_resume);

static int cqhci_enable(struct mmc_host *mmc, struct mmc_card *card)
{
        struct cqhci_host *cq_host = mmc->cqe_private;
        int err;

        if (!card->ext_csd.cmdq_en)
                return -EINVAL;

        if (cq_host->enabled)
                return 0;

        cq_host->rca = card->rca;

        err = cqhci_host_alloc_tdl(cq_host);
        if (err) {
                pr_err("%s: Failed to enable CQE, error %d\n",
                       mmc_hostname(mmc), err);
                return err;
        }

        __cqhci_enable(cq_host);

        cq_host->enabled = true;

#ifdef DEBUG
        cqhci_dumpregs(cq_host);
#endif
        return 0;
}

/* CQHCI is idle and should halt immediately, so set a small timeout */
#define CQHCI_OFF_TIMEOUT 100

static u32 cqhci_read_ctl(struct cqhci_host *cq_host)
{
        return cqhci_readl(cq_host, CQHCI_CTL);
}

static void cqhci_off(struct mmc_host *mmc)
{
        struct cqhci_host *cq_host = mmc->cqe_private;
        u32 reg;
        int err;

        if (!cq_host->enabled || !mmc->cqe_on || cq_host->recovery_halt)
                return;

        if (cq_host->ops->disable)
                cq_host->ops->disable(mmc, false);

        cqhci_writel(cq_host, CQHCI_HALT, CQHCI_CTL);

        err = readx_poll_timeout(cqhci_read_ctl, cq_host, reg,
                                 reg & CQHCI_HALT, 0, CQHCI_OFF_TIMEOUT);
        if (err < 0)
                pr_err("%s: cqhci: CQE stuck on\n", mmc_hostname(mmc));
        else
                pr_debug("%s: cqhci: CQE off\n", mmc_hostname(mmc));

        if (cq_host->ops->post_disable)
                cq_host->ops->post_disable(mmc);

        mmc->cqe_on = false;
}

static void cqhci_disable(struct mmc_host *mmc)
{
        struct cqhci_host *cq_host = mmc->cqe_private;

        if (!cq_host->enabled)
                return;

        cqhci_off(mmc);

        __cqhci_disable(cq_host);

        dmam_free_coherent(mmc_dev(mmc), cq_host->data_size,
                           cq_host->trans_desc_base,
                           cq_host->trans_desc_dma_base);

        dmam_free_coherent(mmc_dev(mmc), cq_host->desc_size,
                           cq_host->desc_base,
                           cq_host->desc_dma_base);

        cq_host->trans_desc_base = NULL;
        cq_host->desc_base = NULL;

        cq_host->enabled = false;
}

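/*
 * Build the 64-bit task descriptor for a data request: attribute bits
 * (valid/end/interrupt), the direction, priority and reliability flags
 * taken from the request, and the block count and start address.
 */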
static void cqhci_prep_task_desc(struct mmc_request *mrq,
                                 struct cqhci_host *cq_host, int tag)
{
        __le64 *task_desc = (__le64 __force *)get_desc(cq_host, tag);
        u32 req_flags = mrq->data->flags;
        u64 desc0;

        desc0 = CQHCI_VALID(1) |
                CQHCI_END(1) |
                CQHCI_INT(1) |
                CQHCI_ACT(0x5) |
                CQHCI_FORCED_PROG(!!(req_flags & MMC_DATA_FORCED_PRG)) |
                CQHCI_DATA_TAG(!!(req_flags & MMC_DATA_DAT_TAG)) |
                CQHCI_DATA_DIR(!!(req_flags & MMC_DATA_READ)) |
                CQHCI_PRIORITY(!!(req_flags & MMC_DATA_PRIO)) |
                CQHCI_QBAR(!!(req_flags & MMC_DATA_QBR)) |
                CQHCI_REL_WRITE(!!(req_flags & MMC_DATA_REL_WR)) |
                CQHCI_BLK_COUNT(mrq->data->blocks) |
                CQHCI_BLK_ADDR((u64)mrq->data->blk_addr);

        task_desc[0] = cpu_to_le64(desc0);

        if (cq_host->caps & CQHCI_TASK_DESC_SZ_128) {
                u64 desc1 = cqhci_crypto_prep_task_desc(mrq);

                task_desc[1] = cpu_to_le64(desc1);

                pr_debug("%s: cqhci: tag %d task descriptor 0x%016llx%016llx\n",
                         mmc_hostname(mrq->host), mrq->tag, desc1, desc0);
        } else {
                pr_debug("%s: cqhci: tag %d task descriptor 0x%016llx\n",
                         mmc_hostname(mrq->host), mrq->tag, desc0);
        }
}

static int cqhci_dma_map(struct mmc_host *host, struct mmc_request *mrq)
{
        int sg_count;
        struct mmc_data *data = mrq->data;

        if (!data)
                return -EINVAL;

        sg_count = dma_map_sg(mmc_dev(host), data->sg,
                              data->sg_len,
                              (data->flags & MMC_DATA_WRITE) ?
                              DMA_TO_DEVICE : DMA_FROM_DEVICE);
        if (!sg_count) {
                pr_err("%s: sg-len: %d\n", __func__, data->sg_len);
                return -ENOMEM;
        }

        return sg_count;
}

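/*
 * Fill in one transfer descriptor: 32 bits of attributes and data length,
 * followed by a 32-bit or 64-bit DMA address depending on the controller.
 */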
static void cqhci_set_tran_desc(u8 *desc, dma_addr_t addr, int len, bool end,
                                bool dma64)
{
        __le32 *attr = (__le32 __force *)desc;

        *attr = (CQHCI_VALID(1) |
                 CQHCI_END(end ? 1 : 0) |
                 CQHCI_INT(0) |
                 CQHCI_ACT(0x4) |
                 CQHCI_DAT_LENGTH(len));

        if (dma64) {
                __le64 *dataddr = (__le64 __force *)(desc + 4);

                dataddr[0] = cpu_to_le64(addr);
        } else {
                __le32 *dataddr = (__le32 __force *)(desc + 4);

                dataddr[0] = cpu_to_le32(addr);
        }
}

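/*
 * Map the request's scatterlist and write one transfer descriptor per
 * mapped segment, marking the last descriptor as the end of the chain.
 */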
static int cqhci_prep_tran_desc(struct mmc_request *mrq,
                               struct cqhci_host *cq_host, int tag)
{
        struct mmc_data *data = mrq->data;
        int i, sg_count, len;
        bool end = false;
        bool dma64 = cq_host->dma64;
        dma_addr_t addr;
        u8 *desc;
        struct scatterlist *sg;

        sg_count = cqhci_dma_map(mrq->host, mrq);
        if (sg_count < 0) {
                pr_err("%s: %s: unable to map sg lists, %d\n",
                                mmc_hostname(mrq->host), __func__, sg_count);
                return sg_count;
        }

        desc = get_trans_desc(cq_host, tag);

        for_each_sg(data->sg, sg, sg_count, i) {
                addr = sg_dma_address(sg);
                len = sg_dma_len(sg);

                if ((i + 1) == sg_count)
                        end = true;
                cqhci_set_tran_desc(desc, addr, len, end, dma64);
                desc += cq_host->trans_desc_len;
        }

        return 0;
}

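/*
 * Build the task descriptor for a direct command (DCMD) in the reserved
 * slot. QBAR marks it as a queue barrier so it is issued only after earlier
 * tasks complete, and the response type and timing bits are derived from
 * the command's response flags.
 */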
static void cqhci_prep_dcmd_desc(struct mmc_host *mmc,
                                   struct mmc_request *mrq)
{
        u64 *task_desc = NULL;
        u64 data = 0;
        u8 resp_type;
        u8 *desc;
        __le64 *dataddr;
        struct cqhci_host *cq_host = mmc->cqe_private;
        u8 timing;

        if (!(mrq->cmd->flags & MMC_RSP_PRESENT)) {
                resp_type = 0x0;
                timing = 0x1;
        } else {
                if (mrq->cmd->flags & MMC_RSP_R1B) {
                        resp_type = 0x3;
                        timing = 0x0;
                } else {
                        resp_type = 0x2;
                        timing = 0x1;
                }
        }

        task_desc = (__le64 __force *)get_desc(cq_host, cq_host->dcmd_slot);
        memset(task_desc, 0, cq_host->task_desc_len);
        data |= (CQHCI_VALID(1) |
                 CQHCI_END(1) |
                 CQHCI_INT(1) |
                 CQHCI_QBAR(1) |
                 CQHCI_ACT(0x5) |
                 CQHCI_CMD_INDEX(mrq->cmd->opcode) |
                 CQHCI_CMD_TIMING(timing) | CQHCI_RESP_TYPE(resp_type));
        if (cq_host->ops->update_dcmd_desc)
                cq_host->ops->update_dcmd_desc(mmc, mrq, &data);
        *task_desc |= cpu_to_le64(data);
        desc = (u8 *)task_desc;
        pr_debug("%s: cqhci: dcmd: cmd: %d timing: %d resp: %d\n",
                 mmc_hostname(mmc), mrq->cmd->opcode, timing, resp_type);
        dataddr = (__le64 __force *)(desc + 4);
        dataddr[0] = cpu_to_le64((u64)mrq->cmd->arg);
}

static void cqhci_post_req(struct mmc_host *host, struct mmc_request *mrq)
{
        struct mmc_data *data = mrq->data;

        if (data) {
                dma_unmap_sg(mmc_dev(host), data->sg, data->sg_len,
                             (data->flags & MMC_DATA_READ) ?
                             DMA_FROM_DEVICE : DMA_TO_DEVICE);
        }
}

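/* Only DCMDs have mrq->cmd set here; data requests carry a block layer tag */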
static inline int cqhci_tag(struct mmc_request *mrq)
{
        return mrq->cmd ? DCMD_SLOT : mrq->tag;
}

static int cqhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
        int err = 0;
        int tag = cqhci_tag(mrq);
        struct cqhci_host *cq_host = mmc->cqe_private;
        unsigned long flags;

        if (!cq_host->enabled) {
                pr_err("%s: cqhci: not enabled\n", mmc_hostname(mmc));
                return -EINVAL;
        }

        /* First request after resume has to re-enable */
        if (!cq_host->activated)
                __cqhci_enable(cq_host);

        if (!mmc->cqe_on) {
                if (cq_host->ops->pre_enable)
                        cq_host->ops->pre_enable(mmc);

                cqhci_writel(cq_host, 0, CQHCI_CTL);
                mmc->cqe_on = true;
                pr_debug("%s: cqhci: CQE on\n", mmc_hostname(mmc));
                if (cqhci_readl(cq_host, CQHCI_CTL) & CQHCI_HALT) {
                        pr_err("%s: cqhci: CQE failed to exit halt state\n",
                               mmc_hostname(mmc));
                }
                if (cq_host->ops->enable)
                        cq_host->ops->enable(mmc);
        }

        if (mrq->data) {
                cqhci_prep_task_desc(mrq, cq_host, tag);

                err = cqhci_prep_tran_desc(mrq, cq_host, tag);
                if (err) {
                        pr_err("%s: cqhci: failed to setup tx desc: %d\n",
                               mmc_hostname(mmc), err);
                        return err;
                }
        } else {
                cqhci_prep_dcmd_desc(mmc, mrq);
        }

        spin_lock_irqsave(&cq_host->lock, flags);

        if (cq_host->recovery_halt) {
                err = -EBUSY;
                goto out_unlock;
        }

        cq_host->slot[tag].mrq = mrq;
        cq_host->slot[tag].flags = 0;

        cq_host->qcnt += 1;
        /* Make sure descriptors are ready before ringing the doorbell */
        wmb();
        cqhci_writel(cq_host, 1 << tag, CQHCI_TDBR);
        if (!(cqhci_readl(cq_host, CQHCI_TDBR) & (1 << tag)))
                pr_debug("%s: cqhci: doorbell not set for tag %d\n",
                         mmc_hostname(mmc), tag);
out_unlock:
        spin_unlock_irqrestore(&cq_host->lock, flags);

        if (err)
                cqhci_post_req(mmc, mrq);

        return err;
}

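/* Called with cq_host->lock held */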
static void cqhci_recovery_needed(struct mmc_host *mmc, struct mmc_request *mrq,
                                  bool notify)
{
        struct cqhci_host *cq_host = mmc->cqe_private;

        if (!cq_host->recovery_halt) {
                cq_host->recovery_halt = true;
                pr_debug("%s: cqhci: recovery needed\n", mmc_hostname(mmc));
                wake_up(&cq_host->wait_queue);
                if (notify && mrq->recovery_notifier)
                        mrq->recovery_notifier(mrq);
        }
}

static unsigned int cqhci_error_flags(int error1, int error2)
{
        int error = error1 ? error1 : error2;

        switch (error) {
        case -EILSEQ:
                return CQHCI_HOST_CRC;
        case -ETIMEDOUT:
                return CQHCI_HOST_TIMEOUT;
        default:
                return CQHCI_HOST_OTHER;
        }
}

static void cqhci_error_irq(struct mmc_host *mmc, u32 status, int cmd_error,
                            int data_error)
{
        struct cqhci_host *cq_host = mmc->cqe_private;
        struct cqhci_slot *slot;
        u32 terri;
        u32 tdpe;
        int tag;

        spin_lock(&cq_host->lock);

        terri = cqhci_readl(cq_host, CQHCI_TERRI);

        pr_debug("%s: cqhci: error IRQ status: 0x%08x cmd error %d data error %d TERRI: 0x%08x\n",
                 mmc_hostname(mmc), status, cmd_error, data_error, terri);

        /* Forget about errors when recovery has already been triggered */
        if (cq_host->recovery_halt)
                goto out_unlock;

        if (!cq_host->qcnt) {
                WARN_ONCE(1, "%s: cqhci: error when idle. IRQ status: 0x%08x cmd error %d data error %d TERRI: 0x%08x\n",
                          mmc_hostname(mmc), status, cmd_error, data_error,
                          terri);
                goto out_unlock;
        }

        if (CQHCI_TERRI_C_VALID(terri)) {
                tag = CQHCI_TERRI_C_TASK(terri);
                slot = &cq_host->slot[tag];
                if (slot->mrq) {
                        slot->flags = cqhci_error_flags(cmd_error, data_error);
                        cqhci_recovery_needed(mmc, slot->mrq, true);
                }
        }

        if (CQHCI_TERRI_D_VALID(terri)) {
                tag = CQHCI_TERRI_D_TASK(terri);
                slot = &cq_host->slot[tag];
                if (slot->mrq) {
                        slot->flags = cqhci_error_flags(data_error, cmd_error);
                        cqhci_recovery_needed(mmc, slot->mrq, true);
                }
        }

        /*
         * Handle ICCE ("Invalid Crypto Configuration Error").  This should
         * never happen, since the block layer ensures that all crypto-enabled
         * I/O requests have a valid keyslot before they reach the driver.
         *
         * Note that GCE ("General Crypto Error") is different; it already got
         * handled above by checking TERRI.
         */
        if (status & CQHCI_IS_ICCE) {
                tdpe = cqhci_readl(cq_host, CQHCI_TDPE);
                WARN_ONCE(1,
                          "%s: cqhci: invalid crypto configuration error. IRQ status: 0x%08x TDPE: 0x%08x\n",
                          mmc_hostname(mmc), status, tdpe);
                while (tdpe != 0) {
                        tag = __ffs(tdpe);
                        tdpe &= ~(1 << tag);
                        slot = &cq_host->slot[tag];
                        if (!slot->mrq)
                                continue;
                        slot->flags = cqhci_error_flags(data_error, cmd_error);
                        cqhci_recovery_needed(mmc, slot->mrq, true);
                }
        }

        if (!cq_host->recovery_halt) {
                /*
                 * The only way to guarantee forward progress is to mark at
                 * least one task in error, so if none is indicated, pick one.
                 */
                for (tag = 0; tag < NUM_SLOTS; tag++) {
                        slot = &cq_host->slot[tag];
                        if (!slot->mrq)
                                continue;
                        slot->flags = cqhci_error_flags(data_error, cmd_error);
                        cqhci_recovery_needed(mmc, slot->mrq, true);
                        break;
                }
        }

out_unlock:
        spin_unlock(&cq_host->lock);
}

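/* Complete a request on task completion. Called with cq_host->lock held */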
static void cqhci_finish_mrq(struct mmc_host *mmc, unsigned int tag)
{
        struct cqhci_host *cq_host = mmc->cqe_private;
        struct cqhci_slot *slot = &cq_host->slot[tag];
        struct mmc_request *mrq = slot->mrq;
        struct mmc_data *data;

        if (!mrq) {
                WARN_ONCE(1, "%s: cqhci: spurious TCN for tag %d\n",
                          mmc_hostname(mmc), tag);
                return;
        }

        /* No completions allowed during recovery */
        if (cq_host->recovery_halt) {
                slot->flags |= CQHCI_COMPLETED;
                return;
        }

        slot->mrq = NULL;

        cq_host->qcnt -= 1;

        data = mrq->data;
        if (data) {
                if (data->error)
                        data->bytes_xfered = 0;
                else
                        data->bytes_xfered = data->blksz * data->blocks;
        }

        mmc_cqe_request_done(mmc, mrq);
}

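/*
 * Host controller drivers call this from their own interrupt handler,
 * passing along any command or data error they have already decoded, so
 * that CQHCI can acknowledge its interrupt status and complete or recover
 * the affected tasks.
 */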
irqreturn_t cqhci_irq(struct mmc_host *mmc, u32 intmask, int cmd_error,
                      int data_error)
{
        u32 status;
        unsigned long tag = 0, comp_status;
        struct cqhci_host *cq_host = mmc->cqe_private;

        status = cqhci_readl(cq_host, CQHCI_IS);
        cqhci_writel(cq_host, status, CQHCI_IS);

        pr_debug("%s: cqhci: IRQ status: 0x%08x\n", mmc_hostname(mmc), status);

        if ((status & (CQHCI_IS_RED | CQHCI_IS_GCE | CQHCI_IS_ICCE)) ||
            cmd_error || data_error)
                cqhci_error_irq(mmc, status, cmd_error, data_error);

        if (status & CQHCI_IS_TCC) {
                /* read TCN and complete the request */
                comp_status = cqhci_readl(cq_host, CQHCI_TCN);
                cqhci_writel(cq_host, comp_status, CQHCI_TCN);
                pr_debug("%s: cqhci: TCN: 0x%08lx\n",
                         mmc_hostname(mmc), comp_status);

                spin_lock(&cq_host->lock);

                for_each_set_bit(tag, &comp_status, cq_host->num_slots) {
                        /* complete the corresponding mrq */
                        pr_debug("%s: cqhci: completing tag %lu\n",
                                 mmc_hostname(mmc), tag);
                        cqhci_finish_mrq(mmc, tag);
                }

                if (cq_host->waiting_for_idle && !cq_host->qcnt) {
                        cq_host->waiting_for_idle = false;
                        wake_up(&cq_host->wait_queue);
                }

                spin_unlock(&cq_host->lock);
        }

        if (status & CQHCI_IS_TCL)
                wake_up(&cq_host->wait_queue);

        if (status & CQHCI_IS_HAC)
                wake_up(&cq_host->wait_queue);

        return IRQ_HANDLED;
}
EXPORT_SYMBOL(cqhci_irq);

static bool cqhci_is_idle(struct cqhci_host *cq_host, int *ret)
{
        unsigned long flags;
        bool is_idle;

        spin_lock_irqsave(&cq_host->lock, flags);
        is_idle = !cq_host->qcnt || cq_host->recovery_halt;
        *ret = cq_host->recovery_halt ? -EBUSY : 0;
        cq_host->waiting_for_idle = !is_idle;
        spin_unlock_irqrestore(&cq_host->lock, flags);

        return is_idle;
}

static int cqhci_wait_for_idle(struct mmc_host *mmc)
{
        struct cqhci_host *cq_host = mmc->cqe_private;
        int ret;

        wait_event(cq_host->wait_queue, cqhci_is_idle(cq_host, &ret));

        return ret;
}

static bool cqhci_timeout(struct mmc_host *mmc, struct mmc_request *mrq,
                          bool *recovery_needed)
{
        struct cqhci_host *cq_host = mmc->cqe_private;
        int tag = cqhci_tag(mrq);
        struct cqhci_slot *slot = &cq_host->slot[tag];
        unsigned long flags;
        bool timed_out;

        spin_lock_irqsave(&cq_host->lock, flags);
        timed_out = slot->mrq == mrq;
        if (timed_out) {
                slot->flags |= CQHCI_EXTERNAL_TIMEOUT;
                cqhci_recovery_needed(mmc, mrq, false);
                *recovery_needed = cq_host->recovery_halt;
        }
        spin_unlock_irqrestore(&cq_host->lock, flags);

        if (timed_out) {
                pr_err("%s: cqhci: timeout for tag %d, qcnt %d\n",
                       mmc_hostname(mmc), tag, cq_host->qcnt);
                cqhci_dumpregs(cq_host);
        }

        return timed_out;
}

static bool cqhci_tasks_cleared(struct cqhci_host *cq_host)
{
        return !(cqhci_readl(cq_host, CQHCI_CTL) & CQHCI_CLEAR_ALL_TASKS);
}

static bool cqhci_clear_all_tasks(struct mmc_host *mmc, unsigned int timeout)
{
        struct cqhci_host *cq_host = mmc->cqe_private;
        bool ret;
        u32 ctl;

        cqhci_set_irqs(cq_host, CQHCI_IS_TCL);

        ctl = cqhci_readl(cq_host, CQHCI_CTL);
        ctl |= CQHCI_CLEAR_ALL_TASKS;
        cqhci_writel(cq_host, ctl, CQHCI_CTL);

        wait_event_timeout(cq_host->wait_queue, cqhci_tasks_cleared(cq_host),
                           msecs_to_jiffies(timeout) + 1);

        cqhci_set_irqs(cq_host, 0);

        ret = cqhci_tasks_cleared(cq_host);

        if (!ret)
                pr_debug("%s: cqhci: Failed to clear tasks\n",
                         mmc_hostname(mmc));

        return ret;
}

static bool cqhci_halted(struct cqhci_host *cq_host)
{
        return cqhci_readl(cq_host, CQHCI_CTL) & CQHCI_HALT;
}

static bool cqhci_halt(struct mmc_host *mmc, unsigned int timeout)
{
        struct cqhci_host *cq_host = mmc->cqe_private;
        bool ret;
        u32 ctl;

        if (cqhci_halted(cq_host))
                return true;

        cqhci_set_irqs(cq_host, CQHCI_IS_HAC);

        ctl = cqhci_readl(cq_host, CQHCI_CTL);
        ctl |= CQHCI_HALT;
        cqhci_writel(cq_host, ctl, CQHCI_CTL);

        wait_event_timeout(cq_host->wait_queue, cqhci_halted(cq_host),
                           msecs_to_jiffies(timeout) + 1);

        cqhci_set_irqs(cq_host, 0);

        ret = cqhci_halted(cq_host);

        if (!ret)
                pr_debug("%s: cqhci: Failed to halt\n", mmc_hostname(mmc));

        return ret;
}

/*
 * After halting we expect to be able to use the command line. We interpret the
 * failure to halt to mean the data lines might still be in use (and the upper
 * layers will need to send a STOP command), so we set the timeout based on a
 * generous command timeout.
 */
#define CQHCI_START_HALT_TIMEOUT        5

static void cqhci_recovery_start(struct mmc_host *mmc)
{
        struct cqhci_host *cq_host = mmc->cqe_private;

        pr_debug("%s: cqhci: %s\n", mmc_hostname(mmc), __func__);

        WARN_ON(!cq_host->recovery_halt);

        cqhci_halt(mmc, CQHCI_START_HALT_TIMEOUT);

        if (cq_host->ops->disable)
                cq_host->ops->disable(mmc, true);

        mmc->cqe_on = false;
}

static int cqhci_error_from_flags(unsigned int flags)
{
        if (!flags)
                return 0;

        /* CRC errors might indicate re-tuning so prefer to report that */
        if (flags & CQHCI_HOST_CRC)
                return -EILSEQ;

        if (flags & (CQHCI_EXTERNAL_TIMEOUT | CQHCI_HOST_TIMEOUT))
                return -ETIMEDOUT;

        return -EIO;
}

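/* Complete an outstanding request with an error derived from the slot flags */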
static void cqhci_recover_mrq(struct cqhci_host *cq_host, unsigned int tag)
{
        struct cqhci_slot *slot = &cq_host->slot[tag];
        struct mmc_request *mrq = slot->mrq;
        struct mmc_data *data;

        if (!mrq)
                return;

        slot->mrq = NULL;

        cq_host->qcnt -= 1;

        data = mrq->data;
        if (data) {
                data->bytes_xfered = 0;
                data->error = cqhci_error_from_flags(slot->flags);
        } else {
                mrq->cmd->error = cqhci_error_from_flags(slot->flags);
        }

        mmc_cqe_request_done(cq_host->mmc, mrq);
}

static void cqhci_recover_mrqs(struct cqhci_host *cq_host)
{
        int i;

        for (i = 0; i < cq_host->num_slots; i++)
                cqhci_recover_mrq(cq_host, i);
}

/*
 * By now the command and data lines should be unused so there is no reason for
 * CQHCI to take a long time to halt, but if it doesn't halt there could be
 * problems clearing tasks, so be generous.
 */
#define CQHCI_FINISH_HALT_TIMEOUT       20

/* CQHCI could be expected to clear its internal state pretty quickly */
#define CQHCI_CLEAR_TIMEOUT             20

static void cqhci_recovery_finish(struct mmc_host *mmc)
{
        struct cqhci_host *cq_host = mmc->cqe_private;
        unsigned long flags;
        u32 cqcfg;
        bool ok;

        pr_debug("%s: cqhci: %s\n", mmc_hostname(mmc), __func__);

        WARN_ON(!cq_host->recovery_halt);

        ok = cqhci_halt(mmc, CQHCI_FINISH_HALT_TIMEOUT);

        if (!cqhci_clear_all_tasks(mmc, CQHCI_CLEAR_TIMEOUT))
                ok = false;

        /*
         * The specification contradicts itself: it says tasks cannot be
         * cleared if CQHCI does not halt, but also that if CQHCI does not
         * halt it should be disabled and re-enabled, and that CQHCI should
         * not be disabled before tasks are cleared. Have a go anyway.
         */
        if (!ok) {
                pr_debug("%s: cqhci: disable / re-enable\n", mmc_hostname(mmc));
                cqcfg = cqhci_readl(cq_host, CQHCI_CFG);
                cqcfg &= ~CQHCI_ENABLE;
                cqhci_writel(cq_host, cqcfg, CQHCI_CFG);
                cqcfg |= CQHCI_ENABLE;
                cqhci_writel(cq_host, cqcfg, CQHCI_CFG);
                /* Be sure that there are no tasks */
                ok = cqhci_halt(mmc, CQHCI_FINISH_HALT_TIMEOUT);
                if (!cqhci_clear_all_tasks(mmc, CQHCI_CLEAR_TIMEOUT))
                        ok = false;
                WARN_ON(!ok);
        }

        cqhci_recover_mrqs(cq_host);

        WARN_ON(cq_host->qcnt);

        spin_lock_irqsave(&cq_host->lock, flags);
        cq_host->qcnt = 0;
        cq_host->recovery_halt = false;
        mmc->cqe_on = false;
        spin_unlock_irqrestore(&cq_host->lock, flags);

        /* Ensure all writes are done before interrupts are re-enabled */
        wmb();

        cqhci_writel(cq_host, CQHCI_IS_HAC | CQHCI_IS_TCL, CQHCI_IS);

        cqhci_set_irqs(cq_host, CQHCI_IS_MASK);

        pr_debug("%s: cqhci: recovery done\n", mmc_hostname(mmc));
}

static const struct mmc_cqe_ops cqhci_cqe_ops = {
        .cqe_enable = cqhci_enable,
        .cqe_disable = cqhci_disable,
        .cqe_request = cqhci_request,
        .cqe_post_req = cqhci_post_req,
        .cqe_off = cqhci_off,
        .cqe_wait_for_idle = cqhci_wait_for_idle,
        .cqe_timeout = cqhci_timeout,
        .cqe_recovery_start = cqhci_recovery_start,
        .cqe_recovery_finish = cqhci_recovery_finish,
};

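/*
 * Helper for platform drivers: look up the "cqhci" memory resource, map it,
 * and return a newly allocated cqhci_host with ->mmio set up.
 */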
struct cqhci_host *cqhci_pltfm_init(struct platform_device *pdev)
{
        struct cqhci_host *cq_host;
        struct resource *cqhci_memres = NULL;

        /* check and setup CMDQ interface */
        cqhci_memres = platform_get_resource_byname(pdev, IORESOURCE_MEM,
                                                   "cqhci");
        if (!cqhci_memres) {
                dev_dbg(&pdev->dev, "CMDQ not supported\n");
                return ERR_PTR(-EINVAL);
        }

        cq_host = devm_kzalloc(&pdev->dev, sizeof(*cq_host), GFP_KERNEL);
        if (!cq_host)
                return ERR_PTR(-ENOMEM);
        cq_host->mmio = devm_ioremap(&pdev->dev,
                                     cqhci_memres->start,
                                     resource_size(cqhci_memres));
        if (!cq_host->mmio) {
                dev_err(&pdev->dev, "failed to remap cqhci regs\n");
                return ERR_PTR(-EBUSY);
        }
        dev_dbg(&pdev->dev, "CMDQ ioremap: done\n");

        return cq_host;
}
EXPORT_SYMBOL(cqhci_pltfm_init);

static unsigned int cqhci_ver_major(struct cqhci_host *cq_host)
{
        return CQHCI_VER_MAJOR(cqhci_readl(cq_host, CQHCI_VER));
}

static unsigned int cqhci_ver_minor(struct cqhci_host *cq_host)
{
        u32 ver = cqhci_readl(cq_host, CQHCI_VER);

        return CQHCI_VER_MINOR1(ver) * 10 + CQHCI_VER_MINOR2(ver);
}

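/*
 * Hook CQHCI up to an mmc_host: set the queue depth (one slot is reserved
 * for DCMD when supported), install the cqe_ops, allocate the slot array
 * and initialize crypto support. Typically called from the host driver's
 * probe path.
 */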
int cqhci_init(struct cqhci_host *cq_host, struct mmc_host *mmc,
              bool dma64)
{
        int err;

        cq_host->dma64 = dma64;
        cq_host->mmc = mmc;
        cq_host->mmc->cqe_private = cq_host;

        cq_host->num_slots = NUM_SLOTS;
        cq_host->dcmd_slot = DCMD_SLOT;

        mmc->cqe_ops = &cqhci_cqe_ops;

        mmc->cqe_qdepth = NUM_SLOTS;
        if (mmc->caps2 & MMC_CAP2_CQE_DCMD)
                mmc->cqe_qdepth -= 1;

        cq_host->slot = devm_kcalloc(mmc_dev(mmc), cq_host->num_slots,
                                     sizeof(*cq_host->slot), GFP_KERNEL);
        if (!cq_host->slot) {
                err = -ENOMEM;
                goto out_err;
        }

        err = cqhci_crypto_init(cq_host);
        if (err) {
                pr_err("%s: CQHCI crypto initialization failed\n",
                       mmc_hostname(mmc));
                goto out_err;
        }

        spin_lock_init(&cq_host->lock);

        init_completion(&cq_host->halt_comp);
        init_waitqueue_head(&cq_host->wait_queue);

        pr_info("%s: CQHCI version %u.%02u\n",
                mmc_hostname(mmc), cqhci_ver_major(cq_host),
                cqhci_ver_minor(cq_host));

        return 0;

out_err:
        pr_err("%s: CQHCI version %u.%02u failed to initialize, error %d\n",
               mmc_hostname(mmc), cqhci_ver_major(cq_host),
               cqhci_ver_minor(cq_host), err);
        return err;
}
EXPORT_SYMBOL(cqhci_init);

MODULE_AUTHOR("Venkat Gopalakrishnan <venkatg@codeaurora.org>");
MODULE_DESCRIPTION("Command Queue Host Controller Interface driver");
MODULE_LICENSE("GPL v2");