linux/drivers/mmc/host/cqhci.c
/* Copyright (c) 2015, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/delay.h>
#include <linux/highmem.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/scatterlist.h>
#include <linux/platform_device.h>
#include <linux/ktime.h>

#include <linux/mmc/mmc.h>
#include <linux/mmc/host.h>
#include <linux/mmc/card.h>

#include "cqhci.h"

#define DCMD_SLOT 31
#define NUM_SLOTS 32

struct cqhci_slot {
        struct mmc_request *mrq;
        unsigned int flags;
#define CQHCI_EXTERNAL_TIMEOUT  BIT(0)
#define CQHCI_COMPLETED         BIT(1)
#define CQHCI_HOST_CRC          BIT(2)
#define CQHCI_HOST_TIMEOUT      BIT(3)
#define CQHCI_HOST_OTHER        BIT(4)
};

static inline u8 *get_desc(struct cqhci_host *cq_host, u8 tag)
{
        return cq_host->desc_base + (tag * cq_host->slot_sz);
}

static inline u8 *get_link_desc(struct cqhci_host *cq_host, u8 tag)
{
        u8 *desc = get_desc(cq_host, tag);

        return desc + cq_host->task_desc_len;
}

static inline dma_addr_t get_trans_desc_dma(struct cqhci_host *cq_host, u8 tag)
{
        return cq_host->trans_desc_dma_base +
                (cq_host->mmc->max_segs * tag *
                 cq_host->trans_desc_len);
}

static inline u8 *get_trans_desc(struct cqhci_host *cq_host, u8 tag)
{
        return cq_host->trans_desc_base +
                (cq_host->trans_desc_len * cq_host->mmc->max_segs * tag);
}

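/*
 * Initialize the link descriptor for @tag so that it is valid and points at
 * that slot's transfer descriptor list. The DCMD slot carries no data, so
 * its link descriptor is simply marked as the end of the chain.
 */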
static void setup_trans_desc(struct cqhci_host *cq_host, u8 tag)
{
        u8 *link_temp;
        dma_addr_t trans_temp;

        link_temp = get_link_desc(cq_host, tag);
        trans_temp = get_trans_desc_dma(cq_host, tag);

        memset(link_temp, 0, cq_host->link_desc_len);
        if (cq_host->link_desc_len > 8)
                *(link_temp + 8) = 0;

        if (tag == DCMD_SLOT && (cq_host->mmc->caps2 & MMC_CAP2_CQE_DCMD)) {
                *link_temp = CQHCI_VALID(0) | CQHCI_ACT(0) | CQHCI_END(1);
                return;
        }

        *link_temp = CQHCI_VALID(1) | CQHCI_ACT(0x6) | CQHCI_END(0);

        if (cq_host->dma64) {
                __le64 *data_addr = (__le64 __force *)(link_temp + 4);

                data_addr[0] = cpu_to_le64(trans_temp);
        } else {
                __le32 *data_addr = (__le32 __force *)(link_temp + 4);

                data_addr[0] = cpu_to_le32(trans_temp);
        }
}

static void cqhci_set_irqs(struct cqhci_host *cq_host, u32 set)
{
        cqhci_writel(cq_host, set, CQHCI_ISTE);
        cqhci_writel(cq_host, set, CQHCI_ISGE);
}

#define DRV_NAME "cqhci"

#define CQHCI_DUMP(f, x...) \
        pr_err("%s: " DRV_NAME ": " f, mmc_hostname(mmc), ## x)

static void cqhci_dumpregs(struct cqhci_host *cq_host)
{
        struct mmc_host *mmc = cq_host->mmc;

        CQHCI_DUMP("============ CQHCI REGISTER DUMP ===========\n");

        CQHCI_DUMP("Caps:      0x%08x | Version:  0x%08x\n",
                   cqhci_readl(cq_host, CQHCI_CAP),
                   cqhci_readl(cq_host, CQHCI_VER));
        CQHCI_DUMP("Config:    0x%08x | Control:  0x%08x\n",
                   cqhci_readl(cq_host, CQHCI_CFG),
                   cqhci_readl(cq_host, CQHCI_CTL));
        CQHCI_DUMP("Int stat:  0x%08x | Int enab: 0x%08x\n",
                   cqhci_readl(cq_host, CQHCI_IS),
                   cqhci_readl(cq_host, CQHCI_ISTE));
        CQHCI_DUMP("Int sig:   0x%08x | Int Coal: 0x%08x\n",
                   cqhci_readl(cq_host, CQHCI_ISGE),
                   cqhci_readl(cq_host, CQHCI_IC));
        CQHCI_DUMP("TDL base:  0x%08x | TDL up32: 0x%08x\n",
                   cqhci_readl(cq_host, CQHCI_TDLBA),
                   cqhci_readl(cq_host, CQHCI_TDLBAU));
        CQHCI_DUMP("Doorbell:  0x%08x | TCN:      0x%08x\n",
                   cqhci_readl(cq_host, CQHCI_TDBR),
                   cqhci_readl(cq_host, CQHCI_TCN));
        CQHCI_DUMP("Dev queue: 0x%08x | Dev Pend: 0x%08x\n",
                   cqhci_readl(cq_host, CQHCI_DQS),
                   cqhci_readl(cq_host, CQHCI_DPT));
        CQHCI_DUMP("Task clr:  0x%08x | SSC1:     0x%08x\n",
                   cqhci_readl(cq_host, CQHCI_TCLR),
                   cqhci_readl(cq_host, CQHCI_SSC1));
        CQHCI_DUMP("SSC2:      0x%08x | DCMD rsp: 0x%08x\n",
                   cqhci_readl(cq_host, CQHCI_SSC2),
                   cqhci_readl(cq_host, CQHCI_CRDCT));
        CQHCI_DUMP("RED mask:  0x%08x | TERRI:    0x%08x\n",
                   cqhci_readl(cq_host, CQHCI_RMEM),
                   cqhci_readl(cq_host, CQHCI_TERRI));
        CQHCI_DUMP("Resp idx:  0x%08x | Resp arg: 0x%08x\n",
                   cqhci_readl(cq_host, CQHCI_CRI),
                   cqhci_readl(cq_host, CQHCI_CRA));

        if (cq_host->ops->dumpregs)
                cq_host->ops->dumpregs(mmc);
        else
                CQHCI_DUMP(": ===========================================\n");
}

/*
 * The allocated descriptor table for task, link & transfer descriptors
 * looks like:
 * |----------|
 * |task desc |  |->|----------|
 * |----------|  |  |trans desc|
 * |link desc-|->|  |----------|
 * |----------|          .
 *      .                .
 *  no. of slots      max-segs
 *      .           |----------|
 * |----------|
 * The idea here is to create the [task+trans] table and mark & point the
 * link desc to the transfer desc table on a per slot basis.
 */
static int cqhci_host_alloc_tdl(struct cqhci_host *cq_host)
{
        int i;

        /* task descriptor can be 64/128 bit irrespective of arch */
        if (cq_host->caps & CQHCI_TASK_DESC_SZ_128) {
                cqhci_writel(cq_host, cqhci_readl(cq_host, CQHCI_CFG) |
                               CQHCI_TASK_DESC_SZ, CQHCI_CFG);
                cq_host->task_desc_len = 16;
        } else {
                cq_host->task_desc_len = 8;
        }

        /*
         * 96 bits length of transfer desc instead of 128 bits which means
         * ADMA would expect next valid descriptor at the 96th bit
         * or 128th bit
         */
        if (cq_host->dma64) {
                if (cq_host->quirks & CQHCI_QUIRK_SHORT_TXFR_DESC_SZ)
                        cq_host->trans_desc_len = 12;
                else
                        cq_host->trans_desc_len = 16;
                cq_host->link_desc_len = 16;
        } else {
                cq_host->trans_desc_len = 8;
                cq_host->link_desc_len = 8;
        }

        /* total size of a slot: 1 task & 1 transfer (link) */
        cq_host->slot_sz = cq_host->task_desc_len + cq_host->link_desc_len;

        cq_host->desc_size = cq_host->slot_sz * cq_host->num_slots;

        cq_host->data_size = cq_host->trans_desc_len * cq_host->mmc->max_segs *
                (cq_host->num_slots - 1);

        pr_debug("%s: cqhci: desc_size: %zu data_sz: %zu slot-sz: %d\n",
                 mmc_hostname(cq_host->mmc), cq_host->desc_size, cq_host->data_size,
                 cq_host->slot_sz);

        /*
         * allocate a dma-mapped chunk of memory for the descriptors
         * allocate a dma-mapped chunk of memory for link descriptors
         * setup each link-desc memory offset per slot-number to
         * the descriptor table.
         */
        cq_host->desc_base = dmam_alloc_coherent(mmc_dev(cq_host->mmc),
                                                 cq_host->desc_size,
                                                 &cq_host->desc_dma_base,
                                                 GFP_KERNEL);
        cq_host->trans_desc_base = dmam_alloc_coherent(mmc_dev(cq_host->mmc),
                                              cq_host->data_size,
                                              &cq_host->trans_desc_dma_base,
                                              GFP_KERNEL);
        if (!cq_host->desc_base || !cq_host->trans_desc_base)
                return -ENOMEM;

        pr_debug("%s: cqhci: desc-base: 0x%p trans-base: 0x%p\n desc_dma 0x%llx trans_dma: 0x%llx\n",
                 mmc_hostname(cq_host->mmc), cq_host->desc_base, cq_host->trans_desc_base,
                (unsigned long long)cq_host->desc_dma_base,
                (unsigned long long)cq_host->trans_desc_dma_base);

        for (i = 0; i < cq_host->num_slots; i++)
                setup_trans_desc(cq_host, i);

        return 0;
}

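/*
 * Program the CQHCI configuration and the task descriptor list base address,
 * then set the enable bit. The CFG register must not be changed while the
 * engine is enabled, so CQHCI is disabled first if necessary.
 */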
static void __cqhci_enable(struct cqhci_host *cq_host)
{
        struct mmc_host *mmc = cq_host->mmc;
        u32 cqcfg;

        cqcfg = cqhci_readl(cq_host, CQHCI_CFG);

        /* Configuration must not be changed while enabled */
        if (cqcfg & CQHCI_ENABLE) {
                cqcfg &= ~CQHCI_ENABLE;
                cqhci_writel(cq_host, cqcfg, CQHCI_CFG);
        }

        cqcfg &= ~(CQHCI_DCMD | CQHCI_TASK_DESC_SZ);

        if (mmc->caps2 & MMC_CAP2_CQE_DCMD)
                cqcfg |= CQHCI_DCMD;

        if (cq_host->caps & CQHCI_TASK_DESC_SZ_128)
                cqcfg |= CQHCI_TASK_DESC_SZ;

        cqhci_writel(cq_host, cqcfg, CQHCI_CFG);

        cqhci_writel(cq_host, lower_32_bits(cq_host->desc_dma_base),
                     CQHCI_TDLBA);
        cqhci_writel(cq_host, upper_32_bits(cq_host->desc_dma_base),
                     CQHCI_TDLBAU);

        cqhci_writel(cq_host, cq_host->rca, CQHCI_SSC2);

        cqhci_set_irqs(cq_host, 0);

        cqcfg |= CQHCI_ENABLE;

        cqhci_writel(cq_host, cqcfg, CQHCI_CFG);

        mmc->cqe_on = true;

        if (cq_host->ops->enable)
                cq_host->ops->enable(mmc);

        /* Ensure all writes are done before interrupts are enabled */
        wmb();

        cqhci_set_irqs(cq_host, CQHCI_IS_MASK);

        cq_host->activated = true;
}

static void __cqhci_disable(struct cqhci_host *cq_host)
{
        u32 cqcfg;

        cqcfg = cqhci_readl(cq_host, CQHCI_CFG);
        cqcfg &= ~CQHCI_ENABLE;
        cqhci_writel(cq_host, cqcfg, CQHCI_CFG);

        cq_host->mmc->cqe_on = false;

        cq_host->activated = false;
}

int cqhci_suspend(struct mmc_host *mmc)
{
        struct cqhci_host *cq_host = mmc->cqe_private;

        if (cq_host->enabled)
                __cqhci_disable(cq_host);

        return 0;
}
EXPORT_SYMBOL(cqhci_suspend);

int cqhci_resume(struct mmc_host *mmc)
{
        /* Re-enable is done upon first request */
        return 0;
}
EXPORT_SYMBOL(cqhci_resume);

static int cqhci_enable(struct mmc_host *mmc, struct mmc_card *card)
{
        struct cqhci_host *cq_host = mmc->cqe_private;
        int err;

        if (cq_host->enabled)
                return 0;

        cq_host->rca = card->rca;

        err = cqhci_host_alloc_tdl(cq_host);
        if (err)
                return err;

        __cqhci_enable(cq_host);

        cq_host->enabled = true;

#ifdef DEBUG
        cqhci_dumpregs(cq_host);
#endif
        return 0;
}

/* CQHCI is idle and should halt immediately, so set a small timeout */
#define CQHCI_OFF_TIMEOUT 100

static void cqhci_off(struct mmc_host *mmc)
{
        struct cqhci_host *cq_host = mmc->cqe_private;
        ktime_t timeout;
        bool timed_out;
        u32 reg;

        if (!cq_host->enabled || !mmc->cqe_on || cq_host->recovery_halt)
                return;

        if (cq_host->ops->disable)
                cq_host->ops->disable(mmc, false);

        cqhci_writel(cq_host, CQHCI_HALT, CQHCI_CTL);

        timeout = ktime_add_us(ktime_get(), CQHCI_OFF_TIMEOUT);
        while (1) {
                timed_out = ktime_compare(ktime_get(), timeout) > 0;
                reg = cqhci_readl(cq_host, CQHCI_CTL);
                if ((reg & CQHCI_HALT) || timed_out)
                        break;
        }

        if (timed_out)
                pr_err("%s: cqhci: CQE stuck on\n", mmc_hostname(mmc));
        else
                pr_debug("%s: cqhci: CQE off\n", mmc_hostname(mmc));

        mmc->cqe_on = false;
}

static void cqhci_disable(struct mmc_host *mmc)
{
        struct cqhci_host *cq_host = mmc->cqe_private;

        if (!cq_host->enabled)
                return;

        cqhci_off(mmc);

        __cqhci_disable(cq_host);

        dmam_free_coherent(mmc_dev(mmc), cq_host->data_size,
                           cq_host->trans_desc_base,
                           cq_host->trans_desc_dma_base);

        dmam_free_coherent(mmc_dev(mmc), cq_host->desc_size,
                           cq_host->desc_base,
                           cq_host->desc_dma_base);

        cq_host->trans_desc_base = NULL;
        cq_host->desc_base = NULL;

        cq_host->enabled = false;
}

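/*
 * Build the 64-bit task descriptor attributes for a data request: block
 * count, block address, data direction, priority, and the reliable-write,
 * data-tag, forced-programming and QBR flags, all taken from the mmc_data
 * flags.
 */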
static void cqhci_prep_task_desc(struct mmc_request *mrq,
                                        u64 *data, bool intr)
{
        u32 req_flags = mrq->data->flags;

        *data = CQHCI_VALID(1) |
                CQHCI_END(1) |
                CQHCI_INT(intr) |
                CQHCI_ACT(0x5) |
                CQHCI_FORCED_PROG(!!(req_flags & MMC_DATA_FORCED_PRG)) |
                CQHCI_DATA_TAG(!!(req_flags & MMC_DATA_DAT_TAG)) |
                CQHCI_DATA_DIR(!!(req_flags & MMC_DATA_READ)) |
                CQHCI_PRIORITY(!!(req_flags & MMC_DATA_PRIO)) |
                CQHCI_QBAR(!!(req_flags & MMC_DATA_QBR)) |
                CQHCI_REL_WRITE(!!(req_flags & MMC_DATA_REL_WR)) |
                CQHCI_BLK_COUNT(mrq->data->blocks) |
                CQHCI_BLK_ADDR((u64)mrq->data->blk_addr);

        pr_debug("%s: cqhci: tag %d task descriptor 0x%016llx\n",
                 mmc_hostname(mrq->host), mrq->tag, (unsigned long long)*data);
}

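/*
 * DMA-map the request's scatterlist. Returns the number of mapped segments,
 * or a negative error code on failure.
 */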
static int cqhci_dma_map(struct mmc_host *host, struct mmc_request *mrq)
{
        int sg_count;
        struct mmc_data *data = mrq->data;

        if (!data)
                return -EINVAL;

        sg_count = dma_map_sg(mmc_dev(host), data->sg,
                              data->sg_len,
                              (data->flags & MMC_DATA_WRITE) ?
                              DMA_TO_DEVICE : DMA_FROM_DEVICE);
        if (!sg_count) {
                pr_err("%s: sg-len: %d\n", __func__, data->sg_len);
                return -ENOMEM;
        }

        return sg_count;
}

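/*
 * Fill in one transfer descriptor: the attributes (valid, end-of-list,
 * ACT = transfer, data length) go in the first word, followed by a 32-bit
 * or 64-bit DMA address depending on the addressing mode.
 */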
static void cqhci_set_tran_desc(u8 *desc, dma_addr_t addr, int len, bool end,
                                bool dma64)
{
        __le32 *attr = (__le32 __force *)desc;

        *attr = (CQHCI_VALID(1) |
                 CQHCI_END(end ? 1 : 0) |
                 CQHCI_INT(0) |
                 CQHCI_ACT(0x4) |
                 CQHCI_DAT_LENGTH(len));

        if (dma64) {
                __le64 *dataddr = (__le64 __force *)(desc + 4);

                dataddr[0] = cpu_to_le64(addr);
        } else {
                __le32 *dataddr = (__le32 __force *)(desc + 4);

                dataddr[0] = cpu_to_le32(addr);
        }
}

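/*
 * Map the request's scatterlist and build the transfer descriptor chain for
 * the given tag, marking the final descriptor as the end of the list.
 */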
static int cqhci_prep_tran_desc(struct mmc_request *mrq,
                               struct cqhci_host *cq_host, int tag)
{
        struct mmc_data *data = mrq->data;
        int i, sg_count, len;
        bool end = false;
        bool dma64 = cq_host->dma64;
        dma_addr_t addr;
        u8 *desc;
        struct scatterlist *sg;

        sg_count = cqhci_dma_map(mrq->host, mrq);
        if (sg_count < 0) {
                pr_err("%s: %s: unable to map sg lists, %d\n",
                                mmc_hostname(mrq->host), __func__, sg_count);
                return sg_count;
        }

        desc = get_trans_desc(cq_host, tag);

        for_each_sg(data->sg, sg, sg_count, i) {
                addr = sg_dma_address(sg);
                len = sg_dma_len(sg);

                if ((i+1) == sg_count)
                        end = true;
                cqhci_set_tran_desc(desc, addr, len, end, dma64);
                desc += cq_host->trans_desc_len;
        }

        return 0;
}

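/*
 * Build the task descriptor for a direct command (DCMD): the response type
 * and command timing are derived from the command's response flags, and the
 * command argument is placed in the second word of the descriptor.
 */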
static void cqhci_prep_dcmd_desc(struct mmc_host *mmc,
                                   struct mmc_request *mrq)
{
        u64 *task_desc = NULL;
        u64 data = 0;
        u8 resp_type;
        u8 *desc;
        __le64 *dataddr;
        struct cqhci_host *cq_host = mmc->cqe_private;
        u8 timing;

        if (!(mrq->cmd->flags & MMC_RSP_PRESENT)) {
                resp_type = 0x0;
                timing = 0x1;
        } else {
                if (mrq->cmd->flags & MMC_RSP_R1B) {
                        resp_type = 0x3;
                        timing = 0x0;
                } else {
                        resp_type = 0x2;
                        timing = 0x1;
                }
        }

        task_desc = (__le64 __force *)get_desc(cq_host, cq_host->dcmd_slot);
        memset(task_desc, 0, cq_host->task_desc_len);
        data |= (CQHCI_VALID(1) |
                 CQHCI_END(1) |
                 CQHCI_INT(1) |
                 CQHCI_QBAR(1) |
                 CQHCI_ACT(0x5) |
                 CQHCI_CMD_INDEX(mrq->cmd->opcode) |
                 CQHCI_CMD_TIMING(timing) | CQHCI_RESP_TYPE(resp_type));
        *task_desc |= data;
        desc = (u8 *)task_desc;
        pr_debug("%s: cqhci: dcmd: cmd: %d timing: %d resp: %d\n",
                 mmc_hostname(mmc), mrq->cmd->opcode, timing, resp_type);
        dataddr = (__le64 __force *)(desc + 4);
        dataddr[0] = cpu_to_le64((u64)mrq->cmd->arg);
}

static void cqhci_post_req(struct mmc_host *host, struct mmc_request *mrq)
{
        struct mmc_data *data = mrq->data;

        if (data) {
                dma_unmap_sg(mmc_dev(host), data->sg, data->sg_len,
                             (data->flags & MMC_DATA_READ) ?
                             DMA_FROM_DEVICE : DMA_TO_DEVICE);
        }
}

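/* DCMDs are sent on the reserved slot; all other requests use their tag */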
static inline int cqhci_tag(struct mmc_request *mrq)
{
        return mrq->cmd ? DCMD_SLOT : mrq->tag;
}

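/*
 * Queue a request: prepare the task (and, for data requests, transfer)
 * descriptors for the request's tag, then ring the doorbell by setting the
 * tag's bit in CQHCI_TDBR so the controller picks the task up.
 */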
static int cqhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
        int err = 0;
        u64 data = 0;
        u64 *task_desc = NULL;
        int tag = cqhci_tag(mrq);
        struct cqhci_host *cq_host = mmc->cqe_private;
        unsigned long flags;

        if (!cq_host->enabled) {
                pr_err("%s: cqhci: not enabled\n", mmc_hostname(mmc));
                return -EINVAL;
        }

        /* First request after resume has to re-enable */
        if (!cq_host->activated)
                __cqhci_enable(cq_host);

        if (!mmc->cqe_on) {
                cqhci_writel(cq_host, 0, CQHCI_CTL);
                mmc->cqe_on = true;
                pr_debug("%s: cqhci: CQE on\n", mmc_hostname(mmc));
                if (cqhci_readl(cq_host, CQHCI_CTL) & CQHCI_HALT) {
                        pr_err("%s: cqhci: CQE failed to exit halt state\n",
                               mmc_hostname(mmc));
                }
                if (cq_host->ops->enable)
                        cq_host->ops->enable(mmc);
        }

        if (mrq->data) {
                task_desc = (__le64 __force *)get_desc(cq_host, tag);
                cqhci_prep_task_desc(mrq, &data, 1);
                *task_desc = cpu_to_le64(data);
                err = cqhci_prep_tran_desc(mrq, cq_host, tag);
                if (err) {
                        pr_err("%s: cqhci: failed to setup tx desc: %d\n",
                               mmc_hostname(mmc), err);
                        return err;
                }
        } else {
                cqhci_prep_dcmd_desc(mmc, mrq);
        }

        spin_lock_irqsave(&cq_host->lock, flags);

        if (cq_host->recovery_halt) {
                err = -EBUSY;
                goto out_unlock;
        }

        cq_host->slot[tag].mrq = mrq;
        cq_host->slot[tag].flags = 0;

        cq_host->qcnt += 1;

        cqhci_writel(cq_host, 1 << tag, CQHCI_TDBR);
        if (!(cqhci_readl(cq_host, CQHCI_TDBR) & (1 << tag)))
                pr_debug("%s: cqhci: doorbell not set for tag %d\n",
                         mmc_hostname(mmc), tag);
out_unlock:
        spin_unlock_irqrestore(&cq_host->lock, flags);

        if (err)
                cqhci_post_req(mmc, mrq);

        return err;
}

static void cqhci_recovery_needed(struct mmc_host *mmc, struct mmc_request *mrq,
                                  bool notify)
{
        struct cqhci_host *cq_host = mmc->cqe_private;

        if (!cq_host->recovery_halt) {
                cq_host->recovery_halt = true;
                pr_debug("%s: cqhci: recovery needed\n", mmc_hostname(mmc));
                wake_up(&cq_host->wait_queue);
                if (notify && mrq->recovery_notifier)
                        mrq->recovery_notifier(mrq);
        }
}

static unsigned int cqhci_error_flags(int error1, int error2)
{
        int error = error1 ? error1 : error2;

        switch (error) {
        case -EILSEQ:
                return CQHCI_HOST_CRC;
        case -ETIMEDOUT:
                return CQHCI_HOST_TIMEOUT;
        default:
                return CQHCI_HOST_OTHER;
        }
}

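/*
 * Handle an error interrupt: consult the task error information register
 * (TERRI) to find which task(s) failed, record the error flags in the
 * affected slot(s) and trigger recovery. If TERRI identifies no task, mark
 * one outstanding task in error anyway to guarantee forward progress.
 */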
static void cqhci_error_irq(struct mmc_host *mmc, u32 status, int cmd_error,
                            int data_error)
{
        struct cqhci_host *cq_host = mmc->cqe_private;
        struct cqhci_slot *slot;
        u32 terri;
        int tag;

        spin_lock(&cq_host->lock);

        terri = cqhci_readl(cq_host, CQHCI_TERRI);

        pr_debug("%s: cqhci: error IRQ status: 0x%08x cmd error %d data error %d TERRI: 0x%08x\n",
                 mmc_hostname(mmc), status, cmd_error, data_error, terri);

        /* Forget about errors when recovery has already been triggered */
        if (cq_host->recovery_halt)
                goto out_unlock;

        if (!cq_host->qcnt) {
                WARN_ONCE(1, "%s: cqhci: error when idle. IRQ status: 0x%08x cmd error %d data error %d TERRI: 0x%08x\n",
                          mmc_hostname(mmc), status, cmd_error, data_error,
                          terri);
                goto out_unlock;
        }

        if (CQHCI_TERRI_C_VALID(terri)) {
                tag = CQHCI_TERRI_C_TASK(terri);
                slot = &cq_host->slot[tag];
                if (slot->mrq) {
                        slot->flags = cqhci_error_flags(cmd_error, data_error);
                        cqhci_recovery_needed(mmc, slot->mrq, true);
                }
        }

        if (CQHCI_TERRI_D_VALID(terri)) {
                tag = CQHCI_TERRI_D_TASK(terri);
                slot = &cq_host->slot[tag];
                if (slot->mrq) {
                        slot->flags = cqhci_error_flags(data_error, cmd_error);
                        cqhci_recovery_needed(mmc, slot->mrq, true);
                }
        }

        if (!cq_host->recovery_halt) {
                /*
                 * The only way to guarantee forward progress is to mark at
                 * least one task in error, so if none is indicated, pick one.
                 */
                for (tag = 0; tag < NUM_SLOTS; tag++) {
                        slot = &cq_host->slot[tag];
                        if (!slot->mrq)
                                continue;
                        slot->flags = cqhci_error_flags(data_error, cmd_error);
                        cqhci_recovery_needed(mmc, slot->mrq, true);
                        break;
                }
        }

out_unlock:
        spin_unlock(&cq_host->lock);
}

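/*
 * Complete the request occupying @tag following a task completion
 * notification. During recovery, completion is only recorded so that the
 * request can be finished later by cqhci_recover_mrqs().
 */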
static void cqhci_finish_mrq(struct mmc_host *mmc, unsigned int tag)
{
        struct cqhci_host *cq_host = mmc->cqe_private;
        struct cqhci_slot *slot = &cq_host->slot[tag];
        struct mmc_request *mrq = slot->mrq;
        struct mmc_data *data;

        if (!mrq) {
                WARN_ONCE(1, "%s: cqhci: spurious TCN for tag %d\n",
                          mmc_hostname(mmc), tag);
                return;
        }

        /* No completions allowed during recovery */
        if (cq_host->recovery_halt) {
                slot->flags |= CQHCI_COMPLETED;
                return;
        }

        slot->mrq = NULL;

        cq_host->qcnt -= 1;

        data = mrq->data;
        if (data) {
                if (data->error)
                        data->bytes_xfered = 0;
                else
                        data->bytes_xfered = data->blksz * data->blocks;
        }

        mmc_cqe_request_done(mmc, mrq);
}

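/*
 * Main interrupt handler, called from the host driver's own interrupt
 * handler with any translated command/data errors. Handles error
 * interrupts, task completion notifications (TCN), and the task-clear and
 * halt events that the recovery path waits on.
 */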
irqreturn_t cqhci_irq(struct mmc_host *mmc, u32 intmask, int cmd_error,
                      int data_error)
{
        u32 status;
        unsigned long tag = 0, comp_status;
        struct cqhci_host *cq_host = mmc->cqe_private;

        status = cqhci_readl(cq_host, CQHCI_IS);
        cqhci_writel(cq_host, status, CQHCI_IS);

        pr_debug("%s: cqhci: IRQ status: 0x%08x\n", mmc_hostname(mmc), status);

        if ((status & CQHCI_IS_RED) || cmd_error || data_error)
                cqhci_error_irq(mmc, status, cmd_error, data_error);

        if (status & CQHCI_IS_TCC) {
                /* read TCN and complete the request */
                comp_status = cqhci_readl(cq_host, CQHCI_TCN);
                cqhci_writel(cq_host, comp_status, CQHCI_TCN);
                pr_debug("%s: cqhci: TCN: 0x%08lx\n",
                         mmc_hostname(mmc), comp_status);

                spin_lock(&cq_host->lock);

                for_each_set_bit(tag, &comp_status, cq_host->num_slots) {
                        /* complete the corresponding mrq */
                        pr_debug("%s: cqhci: completing tag %lu\n",
                                 mmc_hostname(mmc), tag);
                        cqhci_finish_mrq(mmc, tag);
                }

                if (cq_host->waiting_for_idle && !cq_host->qcnt) {
                        cq_host->waiting_for_idle = false;
                        wake_up(&cq_host->wait_queue);
                }

                spin_unlock(&cq_host->lock);
        }

        if (status & CQHCI_IS_TCL)
                wake_up(&cq_host->wait_queue);

        if (status & CQHCI_IS_HAC)
                wake_up(&cq_host->wait_queue);

        return IRQ_HANDLED;
}
EXPORT_SYMBOL(cqhci_irq);

static bool cqhci_is_idle(struct cqhci_host *cq_host, int *ret)
{
        unsigned long flags;
        bool is_idle;

        spin_lock_irqsave(&cq_host->lock, flags);
        is_idle = !cq_host->qcnt || cq_host->recovery_halt;
        *ret = cq_host->recovery_halt ? -EBUSY : 0;
        cq_host->waiting_for_idle = !is_idle;
        spin_unlock_irqrestore(&cq_host->lock, flags);

        return is_idle;
}

static int cqhci_wait_for_idle(struct mmc_host *mmc)
{
        struct cqhci_host *cq_host = mmc->cqe_private;
        int ret;

        wait_event(cq_host->wait_queue, cqhci_is_idle(cq_host, &ret));

        return ret;
}

static bool cqhci_timeout(struct mmc_host *mmc, struct mmc_request *mrq,
                          bool *recovery_needed)
{
        struct cqhci_host *cq_host = mmc->cqe_private;
        int tag = cqhci_tag(mrq);
        struct cqhci_slot *slot = &cq_host->slot[tag];
        unsigned long flags;
        bool timed_out;

        spin_lock_irqsave(&cq_host->lock, flags);
        timed_out = slot->mrq == mrq;
        if (timed_out) {
                slot->flags |= CQHCI_EXTERNAL_TIMEOUT;
                cqhci_recovery_needed(mmc, mrq, false);
                *recovery_needed = cq_host->recovery_halt;
        }
        spin_unlock_irqrestore(&cq_host->lock, flags);

        if (timed_out) {
                pr_err("%s: cqhci: timeout for tag %d\n",
                       mmc_hostname(mmc), tag);
                cqhci_dumpregs(cq_host);
        }

        return timed_out;
}

static bool cqhci_tasks_cleared(struct cqhci_host *cq_host)
{
        return !(cqhci_readl(cq_host, CQHCI_CTL) & CQHCI_CLEAR_ALL_TASKS);
}

static bool cqhci_clear_all_tasks(struct mmc_host *mmc, unsigned int timeout)
{
        struct cqhci_host *cq_host = mmc->cqe_private;
        bool ret;
        u32 ctl;

        cqhci_set_irqs(cq_host, CQHCI_IS_TCL);

        ctl = cqhci_readl(cq_host, CQHCI_CTL);
        ctl |= CQHCI_CLEAR_ALL_TASKS;
        cqhci_writel(cq_host, ctl, CQHCI_CTL);

        wait_event_timeout(cq_host->wait_queue, cqhci_tasks_cleared(cq_host),
                           msecs_to_jiffies(timeout) + 1);

        cqhci_set_irqs(cq_host, 0);

        ret = cqhci_tasks_cleared(cq_host);

        if (!ret)
                pr_debug("%s: cqhci: Failed to clear tasks\n",
                         mmc_hostname(mmc));

        return ret;
}

static bool cqhci_halted(struct cqhci_host *cq_host)
{
        return cqhci_readl(cq_host, CQHCI_CTL) & CQHCI_HALT;
}

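/*
 * Request the controller to halt and wait up to @timeout milliseconds for
 * the halt to take effect. Returns true if CQHCI is halted.
 */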
static bool cqhci_halt(struct mmc_host *mmc, unsigned int timeout)
{
        struct cqhci_host *cq_host = mmc->cqe_private;
        bool ret;
        u32 ctl;

        if (cqhci_halted(cq_host))
                return true;

        cqhci_set_irqs(cq_host, CQHCI_IS_HAC);

        ctl = cqhci_readl(cq_host, CQHCI_CTL);
        ctl |= CQHCI_HALT;
        cqhci_writel(cq_host, ctl, CQHCI_CTL);

        wait_event_timeout(cq_host->wait_queue, cqhci_halted(cq_host),
                           msecs_to_jiffies(timeout) + 1);

        cqhci_set_irqs(cq_host, 0);

        ret = cqhci_halted(cq_host);

        if (!ret)
                pr_debug("%s: cqhci: Failed to halt\n", mmc_hostname(mmc));

        return ret;
}

/*
 * After halting we expect to be able to use the command line. We interpret the
 * failure to halt to mean the data lines might still be in use (and the upper
 * layers will need to send a STOP command), so we set the timeout based on a
 * generous command timeout.
 */
#define CQHCI_START_HALT_TIMEOUT        5

static void cqhci_recovery_start(struct mmc_host *mmc)
{
        struct cqhci_host *cq_host = mmc->cqe_private;

        pr_debug("%s: cqhci: %s\n", mmc_hostname(mmc), __func__);

        WARN_ON(!cq_host->recovery_halt);

        cqhci_halt(mmc, CQHCI_START_HALT_TIMEOUT);

        if (cq_host->ops->disable)
                cq_host->ops->disable(mmc, true);

        mmc->cqe_on = false;
}

static int cqhci_error_from_flags(unsigned int flags)
{
        if (!flags)
                return 0;

        /* CRC errors might indicate re-tuning so prefer to report that */
        if (flags & CQHCI_HOST_CRC)
                return -EILSEQ;

        if (flags & (CQHCI_EXTERNAL_TIMEOUT | CQHCI_HOST_TIMEOUT))
                return -ETIMEDOUT;

        return -EIO;
}

static void cqhci_recover_mrq(struct cqhci_host *cq_host, unsigned int tag)
{
        struct cqhci_slot *slot = &cq_host->slot[tag];
        struct mmc_request *mrq = slot->mrq;
        struct mmc_data *data;

        if (!mrq)
                return;

        slot->mrq = NULL;

        cq_host->qcnt -= 1;

        data = mrq->data;
        if (data) {
                data->bytes_xfered = 0;
                data->error = cqhci_error_from_flags(slot->flags);
        } else {
                mrq->cmd->error = cqhci_error_from_flags(slot->flags);
        }

        mmc_cqe_request_done(cq_host->mmc, mrq);
}

static void cqhci_recover_mrqs(struct cqhci_host *cq_host)
{
        int i;

        for (i = 0; i < cq_host->num_slots; i++)
                cqhci_recover_mrq(cq_host, i);
}

/*
 * By now the command and data lines should be unused so there is no reason for
 * CQHCI to take a long time to halt, but if it doesn't halt there could be
 * problems clearing tasks, so be generous.
 */
#define CQHCI_FINISH_HALT_TIMEOUT       20

/* CQHCI could be expected to clear its internal state pretty quickly */
#define CQHCI_CLEAR_TIMEOUT             20

static void cqhci_recovery_finish(struct mmc_host *mmc)
{
        struct cqhci_host *cq_host = mmc->cqe_private;
        unsigned long flags;
        u32 cqcfg;
        bool ok;

        pr_debug("%s: cqhci: %s\n", mmc_hostname(mmc), __func__);

        WARN_ON(!cq_host->recovery_halt);

        ok = cqhci_halt(mmc, CQHCI_FINISH_HALT_TIMEOUT);

        if (!cqhci_clear_all_tasks(mmc, CQHCI_CLEAR_TIMEOUT))
                ok = false;

        /*
         * The specification contradicts itself: it says tasks cannot be
         * cleared if CQHCI does not halt, and that CQHCI should be
         * disabled/re-enabled if it does not halt, but also that it must not
         * be disabled before the tasks are cleared. Have a go anyway.
         */
        if (!ok) {
                pr_debug("%s: cqhci: disable / re-enable\n", mmc_hostname(mmc));
                cqcfg = cqhci_readl(cq_host, CQHCI_CFG);
                cqcfg &= ~CQHCI_ENABLE;
                cqhci_writel(cq_host, cqcfg, CQHCI_CFG);
                cqcfg |= CQHCI_ENABLE;
                cqhci_writel(cq_host, cqcfg, CQHCI_CFG);
                /* Be sure that there are no tasks */
                ok = cqhci_halt(mmc, CQHCI_FINISH_HALT_TIMEOUT);
                if (!cqhci_clear_all_tasks(mmc, CQHCI_CLEAR_TIMEOUT))
                        ok = false;
                WARN_ON(!ok);
        }

        cqhci_recover_mrqs(cq_host);

        WARN_ON(cq_host->qcnt);

        spin_lock_irqsave(&cq_host->lock, flags);
        cq_host->qcnt = 0;
        cq_host->recovery_halt = false;
        mmc->cqe_on = false;
        spin_unlock_irqrestore(&cq_host->lock, flags);

        /* Ensure all writes are done before interrupts are re-enabled */
        wmb();

        cqhci_writel(cq_host, CQHCI_IS_HAC | CQHCI_IS_TCL, CQHCI_IS);

        cqhci_set_irqs(cq_host, CQHCI_IS_MASK);

        pr_debug("%s: cqhci: recovery done\n", mmc_hostname(mmc));
}

static const struct mmc_cqe_ops cqhci_cqe_ops = {
        .cqe_enable = cqhci_enable,
        .cqe_disable = cqhci_disable,
        .cqe_request = cqhci_request,
        .cqe_post_req = cqhci_post_req,
        .cqe_off = cqhci_off,
        .cqe_wait_for_idle = cqhci_wait_for_idle,
        .cqe_timeout = cqhci_timeout,
        .cqe_recovery_start = cqhci_recovery_start,
        .cqe_recovery_finish = cqhci_recovery_finish,
};

struct cqhci_host *cqhci_pltfm_init(struct platform_device *pdev)
{
        struct cqhci_host *cq_host;
        struct resource *cqhci_memres = NULL;

        /* check and setup CMDQ interface */
        cqhci_memres = platform_get_resource_byname(pdev, IORESOURCE_MEM,
                                                   "cqhci_mem");
        if (!cqhci_memres) {
                dev_dbg(&pdev->dev, "CMDQ not supported\n");
                return ERR_PTR(-EINVAL);
        }

        cq_host = devm_kzalloc(&pdev->dev, sizeof(*cq_host), GFP_KERNEL);
        if (!cq_host)
                return ERR_PTR(-ENOMEM);
        cq_host->mmio = devm_ioremap(&pdev->dev,
                                     cqhci_memres->start,
                                     resource_size(cqhci_memres));
        if (!cq_host->mmio) {
                dev_err(&pdev->dev, "failed to remap cqhci regs\n");
                return ERR_PTR(-EBUSY);
        }
        dev_dbg(&pdev->dev, "CMDQ ioremap: done\n");

        return cq_host;
}
EXPORT_SYMBOL(cqhci_pltfm_init);

static unsigned int cqhci_ver_major(struct cqhci_host *cq_host)
{
        return CQHCI_VER_MAJOR(cqhci_readl(cq_host, CQHCI_VER));
}

static unsigned int cqhci_ver_minor(struct cqhci_host *cq_host)
{
        u32 ver = cqhci_readl(cq_host, CQHCI_VER);

        return CQHCI_VER_MINOR1(ver) * 10 + CQHCI_VER_MINOR2(ver);
}

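/*
 * Initialize CQHCI support for @mmc. An illustrative usage sketch (not a
 * prescription for any particular host driver): the host driver maps the
 * CQHCI register region (e.g. with cqhci_pltfm_init()), calls cqhci_init()
 * during probe, and forwards interrupts to cqhci_irq() from its own
 * interrupt handler.
 */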
int cqhci_init(struct cqhci_host *cq_host, struct mmc_host *mmc,
              bool dma64)
{
        int err;

        cq_host->dma64 = dma64;
        cq_host->mmc = mmc;
        cq_host->mmc->cqe_private = cq_host;

        cq_host->num_slots = NUM_SLOTS;
        cq_host->dcmd_slot = DCMD_SLOT;

        mmc->cqe_ops = &cqhci_cqe_ops;

        mmc->cqe_qdepth = NUM_SLOTS;
        if (mmc->caps2 & MMC_CAP2_CQE_DCMD)
                mmc->cqe_qdepth -= 1;

        cq_host->slot = devm_kcalloc(mmc_dev(mmc), cq_host->num_slots,
                                     sizeof(*cq_host->slot), GFP_KERNEL);
        if (!cq_host->slot) {
                err = -ENOMEM;
                goto out_err;
        }

        spin_lock_init(&cq_host->lock);

        init_completion(&cq_host->halt_comp);
        init_waitqueue_head(&cq_host->wait_queue);

        pr_info("%s: CQHCI version %u.%02u\n",
                mmc_hostname(mmc), cqhci_ver_major(cq_host),
                cqhci_ver_minor(cq_host));

        return 0;

out_err:
        pr_err("%s: CQHCI version %u.%02u failed to initialize, error %d\n",
               mmc_hostname(mmc), cqhci_ver_major(cq_host),
               cqhci_ver_minor(cq_host), err);
        return err;
}
EXPORT_SYMBOL(cqhci_init);

MODULE_AUTHOR("Venkat Gopalakrishnan <venkatg@codeaurora.org>");
MODULE_DESCRIPTION("Command Queue Host Controller Interface driver");
MODULE_LICENSE("GPL v2");