/*
 * Shared part of driver for MMC/SDHC controller on Cavium OCTEON and
 * ThunderX SOCs.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2012-2017 Cavium Inc.
 * Authors:
 *   David Daney <david.daney@cavium.com>
 *   Peter Swain <pswain@cavium.com>
 *   Steven J. Hill <steven.hill@cavium.com>
 *   Jan Glauber <jglauber@cavium.com>
 */
#include <linux/bitfield.h>
#include <linux/delay.h>
#include <linux/dma-direction.h>
#include <linux/dma-mapping.h>
#include <linux/gpio/consumer.h>
#include <linux/interrupt.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/slot-gpio.h>
#include <linux/module.h>
#include <linux/regulator/consumer.h>
#include <linux/scatterlist.h>
#include <linux/time.h>

#include "cavium.h"

const char *cvm_mmc_irq_names[] = {
        "MMC Buffer",
        "MMC Command",
        "MMC DMA",
        "MMC Command Error",
        "MMC DMA Error",
        "MMC Switch",
        "MMC Switch Error",
        "MMC DMA int Fifo",
        "MMC DMA int",
};

/*
 * The Cavium MMC host hardware assumes that all commands have fixed
 * command and response types.  These are correct if MMC devices are
 * being used.  However, non-MMC devices like SD use command and
 * response types that are unexpected by the host hardware.
 *
 * The command and response types can be overridden by supplying an
 * XOR value that is applied to the type.  We calculate the XOR value
 * from the values in this table and the flags passed from the MMC
 * core.
 */
static struct cvm_mmc_cr_type cvm_mmc_cr_types[] = {
        {0, 0},         /* CMD0 */
        {0, 3},         /* CMD1 */
        {0, 2},         /* CMD2 */
        {0, 1},         /* CMD3 */
        {0, 0},         /* CMD4 */
        {0, 1},         /* CMD5 */
        {0, 1},         /* CMD6 */
        {0, 1},         /* CMD7 */
        {1, 1},         /* CMD8 */
        {0, 2},         /* CMD9 */
        {0, 2},         /* CMD10 */
        {1, 1},         /* CMD11 */
        {0, 1},         /* CMD12 */
        {0, 1},         /* CMD13 */
        {1, 1},         /* CMD14 */
        {0, 0},         /* CMD15 */
        {0, 1},         /* CMD16 */
        {1, 1},         /* CMD17 */
        {1, 1},         /* CMD18 */
        {3, 1},         /* CMD19 */
        {2, 1},         /* CMD20 */
        {0, 0},         /* CMD21 */
        {0, 0},         /* CMD22 */
        {0, 1},         /* CMD23 */
        {2, 1},         /* CMD24 */
        {2, 1},         /* CMD25 */
        {2, 1},         /* CMD26 */
        {2, 1},         /* CMD27 */
        {0, 1},         /* CMD28 */
        {0, 1},         /* CMD29 */
        {1, 1},         /* CMD30 */
        {1, 1},         /* CMD31 */
        {0, 0},         /* CMD32 */
        {0, 0},         /* CMD33 */
        {0, 0},         /* CMD34 */
        {0, 1},         /* CMD35 */
        {0, 1},         /* CMD36 */
        {0, 0},         /* CMD37 */
        {0, 1},         /* CMD38 */
        {0, 4},         /* CMD39 */
        {0, 5},         /* CMD40 */
        {0, 0},         /* CMD41 */
        {2, 1},         /* CMD42 */
        {0, 0},         /* CMD43 */
        {0, 0},         /* CMD44 */
        {0, 0},         /* CMD45 */
        {0, 0},         /* CMD46 */
        {0, 0},         /* CMD47 */
        {0, 0},         /* CMD48 */
        {0, 0},         /* CMD49 */
        {0, 0},         /* CMD50 */
        {0, 0},         /* CMD51 */
        {0, 0},         /* CMD52 */
        {0, 0},         /* CMD53 */
        {0, 0},         /* CMD54 */
        {0, 1},         /* CMD55 */
        {0xff, 0xff},   /* CMD56 */
        {0, 0},         /* CMD57 */
        {0, 0},         /* CMD58 */
        {0, 0},         /* CMD59 */
        {0, 0},         /* CMD60 */
        {0, 0},         /* CMD61 */
        {0, 0},         /* CMD62 */
        {0, 0}          /* CMD63 */
};

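/*
 * Worked example: on eMMC, CMD8 is SEND_EXT_CSD, a data read with an R1
 * response, which is what the table entry {1, 1} above encodes. On SD,
 * CMD8 is SEND_IF_COND, a broadcast command (bcr) with no data and an R7
 * response; the core then requests ctype 0 and rtype 1, so the function
 * below programs ctype_xor = 1 and rtype_xor = 0 to override the
 * hardware's eMMC assumption.
 */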
static struct cvm_mmc_cr_mods cvm_mmc_get_cr_mods(struct mmc_command *cmd)
{
        struct cvm_mmc_cr_type *cr;
        u8 hardware_ctype, hardware_rtype;
        u8 desired_ctype = 0, desired_rtype = 0;
        struct cvm_mmc_cr_mods r;

        cr = cvm_mmc_cr_types + (cmd->opcode & 0x3f);
        hardware_ctype = cr->ctype;
        hardware_rtype = cr->rtype;
        if (cmd->opcode == MMC_GEN_CMD)
                hardware_ctype = (cmd->arg & 1) ? 1 : 2;

        switch (mmc_cmd_type(cmd)) {
        case MMC_CMD_ADTC:
                desired_ctype = (cmd->data->flags & MMC_DATA_WRITE) ? 2 : 1;
                break;
        case MMC_CMD_AC:
        case MMC_CMD_BC:
        case MMC_CMD_BCR:
                desired_ctype = 0;
                break;
        }

        switch (mmc_resp_type(cmd)) {
        case MMC_RSP_NONE:
                desired_rtype = 0;
                break;
        case MMC_RSP_R1: /* MMC_RSP_R5, MMC_RSP_R6, MMC_RSP_R7 */
        case MMC_RSP_R1B:
                desired_rtype = 1;
                break;
        case MMC_RSP_R2:
                desired_rtype = 2;
                break;
        case MMC_RSP_R3: /* MMC_RSP_R4 */
                desired_rtype = 3;
                break;
        }
        r.ctype_xor = desired_ctype ^ hardware_ctype;
        r.rtype_xor = desired_rtype ^ hardware_rtype;
        return r;
}

static void check_switch_errors(struct cvm_mmc_host *host)
{
        u64 emm_switch;

        emm_switch = readq(host->base + MIO_EMM_SWITCH(host));
        if (emm_switch & MIO_EMM_SWITCH_ERR0)
                dev_err(host->dev, "Switch power class error\n");
        if (emm_switch & MIO_EMM_SWITCH_ERR1)
                dev_err(host->dev, "Switch hs timing error\n");
        if (emm_switch & MIO_EMM_SWITCH_ERR2)
                dev_err(host->dev, "Switch bus width error\n");
}

static void clear_bus_id(u64 *reg)
{
        u64 bus_id_mask = GENMASK_ULL(61, 60);

        *reg &= ~bus_id_mask;
}

static void set_bus_id(u64 *reg, int bus_id)
{
        clear_bus_id(reg);
        *reg |= FIELD_PREP(GENMASK_ULL(61, 60), bus_id);
}

static int get_bus_id(u64 reg)
{
        return FIELD_GET(GENMASK_ULL(61, 60), reg);
}

/*
 * We never set the switch_exe bit since that would interfere
 * with the commands sent by the MMC core.
 */
static void do_switch(struct cvm_mmc_host *host, u64 emm_switch)
{
        int retries = 100;
        u64 rsp_sts;
        int bus_id;

        /*
         * Mode settings are only taken from slot 0. Work around that
         * hardware issue by first switching to slot 0.
         */
        bus_id = get_bus_id(emm_switch);
        clear_bus_id(&emm_switch);
        writeq(emm_switch, host->base + MIO_EMM_SWITCH(host));

        set_bus_id(&emm_switch, bus_id);
        writeq(emm_switch, host->base + MIO_EMM_SWITCH(host));

        /* wait for the switch to finish */
        do {
                rsp_sts = readq(host->base + MIO_EMM_RSP_STS(host));
                if (!(rsp_sts & MIO_EMM_RSP_STS_SWITCH_VAL))
                        break;
                udelay(10);
        } while (--retries);

        check_switch_errors(host);
}
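/*
 * The poll in do_switch() above is bounded: 100 iterations with a 10 us
 * delay each, i.e. at most roughly 1 ms of busy-waiting per switch.
 */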

static bool switch_val_changed(struct cvm_mmc_slot *slot, u64 new_val)
{
        /* Match BUS_ID, HS_TIMING, BUS_WIDTH, POWER_CLASS, CLK_HI, CLK_LO */
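        /*
         * Per the MIO_EMM_SWITCH field layout in cavium.h, this mask
         * covers BUS_ID <61:60>, HS_TIMING <48>, BUS_WIDTH <42:40>,
         * POWER_CLASS <35:32>, CLK_HI <31:16> and CLK_LO <15:0>.
         */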
        u64 match = 0x3001070fffffffffull;

        return (slot->cached_switch & match) != (new_val & match);
}

static void set_wdog(struct cvm_mmc_slot *slot, unsigned int ns)
{
        u64 timeout;

        if (!slot->clock)
                return;

        if (ns)
                timeout = (slot->clock * ns) / NSEC_PER_SEC;
        else
                timeout = (slot->clock * 850ull) / 1000ull;
        writeq(timeout, slot->host->base + MIO_EMM_WDOG(slot->host));
}
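/*
 * set_wdog() example: at slot->clock = 52 MHz, a 1 ms timeout programs
 * 52000000 * 1000000 / NSEC_PER_SEC = 52000 clock cycles; with ns == 0
 * the default is 850 ms worth of cycles.
 */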

static void cvm_mmc_reset_bus(struct cvm_mmc_slot *slot)
{
        struct cvm_mmc_host *host = slot->host;
        u64 emm_switch, wdog;

        emm_switch = readq(host->base + MIO_EMM_SWITCH(host));
        emm_switch &= ~(MIO_EMM_SWITCH_EXE | MIO_EMM_SWITCH_ERR0 |
                        MIO_EMM_SWITCH_ERR1 | MIO_EMM_SWITCH_ERR2);
        set_bus_id(&emm_switch, slot->bus_id);

        wdog = readq(host->base + MIO_EMM_WDOG(host));
        do_switch(host, emm_switch);

        slot->cached_switch = emm_switch;

        msleep(20);

        writeq(wdog, host->base + MIO_EMM_WDOG(host));
}

/* Switch to another slot if needed */
static void cvm_mmc_switch_to(struct cvm_mmc_slot *slot)
{
        struct cvm_mmc_host *host = slot->host;
        struct cvm_mmc_slot *old_slot;
        u64 emm_sample, emm_switch;

        if (slot->bus_id == host->last_slot)
                return;

        if (host->last_slot >= 0 && host->slot[host->last_slot]) {
                old_slot = host->slot[host->last_slot];
                old_slot->cached_switch = readq(host->base + MIO_EMM_SWITCH(host));
                old_slot->cached_rca = readq(host->base + MIO_EMM_RCA(host));
        }

        writeq(slot->cached_rca, host->base + MIO_EMM_RCA(host));
        emm_switch = slot->cached_switch;
        set_bus_id(&emm_switch, slot->bus_id);
        do_switch(host, emm_switch);

        emm_sample = FIELD_PREP(MIO_EMM_SAMPLE_CMD_CNT, slot->cmd_cnt) |
                     FIELD_PREP(MIO_EMM_SAMPLE_DAT_CNT, slot->dat_cnt);
        writeq(emm_sample, host->base + MIO_EMM_SAMPLE(host));

        host->last_slot = slot->bus_id;
}

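/*
 * PIO transfers move data through MIO_EMM_BUF_DAT eight bytes at a time,
 * most-significant byte first: do_read() below shifts each 64-bit word
 * down from bit 56 in 8-bit steps, and do_write_request() packs bytes
 * into the word in the same order before issuing the command.
 */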
static void do_read(struct cvm_mmc_host *host, struct mmc_request *req,
                    u64 dbuf)
{
        struct sg_mapping_iter *smi = &host->smi;
        int data_len = req->data->blocks * req->data->blksz;
        int bytes_xfered, shift = -1;
        u64 dat = 0;

        /* Auto inc from offset zero */
        writeq((0x10000 | (dbuf << 6)), host->base + MIO_EMM_BUF_IDX(host));

        for (bytes_xfered = 0; bytes_xfered < data_len;) {
                if (smi->consumed >= smi->length) {
                        if (!sg_miter_next(smi))
                                break;
                        smi->consumed = 0;
                }

                if (shift < 0) {
                        dat = readq(host->base + MIO_EMM_BUF_DAT(host));
                        shift = 56;
                }

                while (smi->consumed < smi->length && shift >= 0) {
                        ((u8 *)smi->addr)[smi->consumed] = (dat >> shift) & 0xff;
                        bytes_xfered++;
                        smi->consumed++;
                        shift -= 8;
                }
        }

        sg_miter_stop(smi);
        req->data->bytes_xfered = bytes_xfered;
        req->data->error = 0;
}

static void do_write(struct mmc_request *req)
{
        req->data->bytes_xfered = req->data->blocks * req->data->blksz;
        req->data->error = 0;
}

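/*
 * Layout note: for the 48-bit response types (RSP_TYPE 1 and 3) the
 * hardware keeps the framing around the payload, so the 32-bit argument
 * sits in bits <39:8> of MIO_EMM_RSP_LO, above the CRC7/end bit. The
 * 136-bit R2 response spans both RSP_LO and RSP_HI.
 */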
static void set_cmd_response(struct cvm_mmc_host *host, struct mmc_request *req,
                             u64 rsp_sts)
{
        u64 rsp_hi, rsp_lo;

        if (!(rsp_sts & MIO_EMM_RSP_STS_RSP_VAL))
                return;

        rsp_lo = readq(host->base + MIO_EMM_RSP_LO(host));

        switch (FIELD_GET(MIO_EMM_RSP_STS_RSP_TYPE, rsp_sts)) {
        case 1:
        case 3:
                req->cmd->resp[0] = (rsp_lo >> 8) & 0xffffffff;
                req->cmd->resp[1] = 0;
                req->cmd->resp[2] = 0;
                req->cmd->resp[3] = 0;
                break;
        case 2:
                req->cmd->resp[3] = rsp_lo & 0xffffffff;
                req->cmd->resp[2] = (rsp_lo >> 32) & 0xffffffff;
                rsp_hi = readq(host->base + MIO_EMM_RSP_HI(host));
                req->cmd->resp[1] = rsp_hi & 0xffffffff;
                req->cmd->resp[0] = (rsp_hi >> 32) & 0xffffffff;
                break;
        }
}

static int get_dma_dir(struct mmc_data *data)
{
        return (data->flags & MMC_DATA_WRITE) ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
}

static int finish_dma_single(struct cvm_mmc_host *host, struct mmc_data *data)
{
        data->bytes_xfered = data->blocks * data->blksz;
        data->error = 0;
        return 1;
}

static int finish_dma_sg(struct cvm_mmc_host *host, struct mmc_data *data)
{
        u64 fifo_cfg;
        int count;

        /* Check if there are any pending requests left */
        fifo_cfg = readq(host->dma_base + MIO_EMM_DMA_FIFO_CFG(host));
        count = FIELD_GET(MIO_EMM_DMA_FIFO_CFG_COUNT, fifo_cfg);
        if (count)
                dev_err(host->dev, "%u requests still pending\n", count);

        data->bytes_xfered = data->blocks * data->blksz;
        data->error = 0;

        /* Clear and disable FIFO */
        writeq(BIT_ULL(16), host->dma_base + MIO_EMM_DMA_FIFO_CFG(host));
        dma_unmap_sg(host->dev, data->sg, data->sg_len, get_dma_dir(data));
        return 1;
}

static int finish_dma(struct cvm_mmc_host *host, struct mmc_data *data)
{
        if (host->use_sg && data->sg_len > 1)
                return finish_dma_sg(host, data);
        else
                return finish_dma_single(host, data);
}

static int check_status(u64 rsp_sts)
{
        if (rsp_sts & MIO_EMM_RSP_STS_RSP_BAD_STS ||
            rsp_sts & MIO_EMM_RSP_STS_RSP_CRC_ERR ||
            rsp_sts & MIO_EMM_RSP_STS_BLK_CRC_ERR)
                return -EILSEQ;
        if (rsp_sts & MIO_EMM_RSP_STS_RSP_TIMEOUT ||
            rsp_sts & MIO_EMM_RSP_STS_BLK_TIMEOUT)
                return -ETIMEDOUT;
        if (rsp_sts & MIO_EMM_RSP_STS_DBUF_ERR)
                return -EIO;
        return 0;
}

/* Try to clean up failed DMA. */
static void cleanup_dma(struct cvm_mmc_host *host, u64 rsp_sts)
{
        u64 emm_dma;

        emm_dma = readq(host->base + MIO_EMM_DMA(host));
        emm_dma |= FIELD_PREP(MIO_EMM_DMA_VAL, 1) |
                   FIELD_PREP(MIO_EMM_DMA_DAT_NULL, 1);
        set_bus_id(&emm_dma, get_bus_id(rsp_sts));
        writeq(emm_dma, host->base + MIO_EMM_DMA(host));
}

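/*
 * Overview of the interrupt path: acknowledge all pending MIO_EMM_INT
 * bits first (write-1-to-clear), report any switch errors, then look at
 * the current request. A buffer-done interrupt completes a PIO transfer;
 * command/DMA done or error bits finish the request, clean up a pending
 * DMA if required, and release the bus taken in cvm_mmc_request().
 */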
irqreturn_t cvm_mmc_interrupt(int irq, void *dev_id)
{
        struct cvm_mmc_host *host = dev_id;
        struct mmc_request *req;
        unsigned long flags = 0;
        u64 emm_int, rsp_sts;
        bool host_done;

        if (host->need_irq_handler_lock)
                spin_lock_irqsave(&host->irq_handler_lock, flags);
        else
                __acquire(&host->irq_handler_lock);

        /* Clear interrupt bits (write 1 clears). */
        emm_int = readq(host->base + MIO_EMM_INT(host));
        writeq(emm_int, host->base + MIO_EMM_INT(host));

        if (emm_int & MIO_EMM_INT_SWITCH_ERR)
                check_switch_errors(host);

        req = host->current_req;
        if (!req)
                goto out;

        rsp_sts = readq(host->base + MIO_EMM_RSP_STS(host));
        /*
         * dma_val set means DMA is still in progress. Don't touch
         * the request and wait for the interrupt indicating that
         * the DMA is finished.
         */
        if ((rsp_sts & MIO_EMM_RSP_STS_DMA_VAL) && host->dma_active)
                goto out;

        if (!host->dma_active && req->data &&
            (emm_int & MIO_EMM_INT_BUF_DONE)) {
                unsigned int type = (rsp_sts >> 7) & 3;

                if (type == 1)
                        do_read(host, req, rsp_sts & MIO_EMM_RSP_STS_DBUF);
                else if (type == 2)
                        do_write(req);
        }

        host_done = emm_int & MIO_EMM_INT_CMD_DONE ||
                    emm_int & MIO_EMM_INT_DMA_DONE ||
                    emm_int & MIO_EMM_INT_CMD_ERR  ||
                    emm_int & MIO_EMM_INT_DMA_ERR;

        if (!(host_done && req->done))
                goto no_req_done;

        req->cmd->error = check_status(rsp_sts);

        if (host->dma_active && req->data)
                if (!finish_dma(host, req->data))
                        goto no_req_done;

        set_cmd_response(host, req, rsp_sts);
        if ((emm_int & MIO_EMM_INT_DMA_ERR) &&
            (rsp_sts & MIO_EMM_RSP_STS_DMA_PEND))
                cleanup_dma(host, rsp_sts);

        host->current_req = NULL;
        req->done(req);

no_req_done:
        if (host->dmar_fixup_done)
                host->dmar_fixup_done(host);
        if (host_done)
                host->release_bus(host);
out:
        if (host->need_irq_handler_lock)
                spin_unlock_irqrestore(&host->irq_handler_lock, flags);
        else
                __release(&host->irq_handler_lock);
        return IRQ_RETVAL(emm_int != 0);
}

/*
 * Program DMA_CFG and, if needed, DMA_ADR.
 * Returns 0 on error, DMA address otherwise.
 */
static u64 prepare_dma_single(struct cvm_mmc_host *host, struct mmc_data *data)
{
        u64 dma_cfg, addr;
        int count, rw;

        count = dma_map_sg(host->dev, data->sg, data->sg_len,
                           get_dma_dir(data));
        if (!count)
                return 0;

        rw = (data->flags & MMC_DATA_WRITE) ? 1 : 0;
        dma_cfg = FIELD_PREP(MIO_EMM_DMA_CFG_EN, 1) |
                  FIELD_PREP(MIO_EMM_DMA_CFG_RW, rw);
#ifdef __LITTLE_ENDIAN
        dma_cfg |= FIELD_PREP(MIO_EMM_DMA_CFG_ENDIAN, 1);
#endif
        dma_cfg |= FIELD_PREP(MIO_EMM_DMA_CFG_SIZE,
                              (sg_dma_len(&data->sg[0]) / 8) - 1);

        addr = sg_dma_address(&data->sg[0]);
        if (!host->big_dma_addr)
                dma_cfg |= FIELD_PREP(MIO_EMM_DMA_CFG_ADR, addr);
        writeq(dma_cfg, host->dma_base + MIO_EMM_DMA_CFG(host));

        pr_debug("[%s] sg_dma_len: %u  total sg_elem: %d\n",
                 (rw) ? "W" : "R", sg_dma_len(&data->sg[0]), count);

        if (host->big_dma_addr)
                writeq(addr, host->dma_base + MIO_EMM_DMA_ADR(host));
        return addr;
}
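/*
 * Example: MIO_EMM_DMA_CFG_SIZE holds the transfer length in 64-bit words
 * minus one, so a single 512-byte block is programmed as 512 / 8 - 1 = 63.
 */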

/*
 * Queue complete sg list into the FIFO.
 * Returns 0 on error, 1 otherwise.
 */
static u64 prepare_dma_sg(struct cvm_mmc_host *host, struct mmc_data *data)
{
        struct scatterlist *sg;
        u64 fifo_cmd, addr;
        int count, i, rw;

        count = dma_map_sg(host->dev, data->sg, data->sg_len,
                           get_dma_dir(data));
        if (!count)
                return 0;
        if (count > 16)
                goto error;

        /* Enable FIFO by removing CLR bit */
        writeq(0, host->dma_base + MIO_EMM_DMA_FIFO_CFG(host));

        for_each_sg(data->sg, sg, count, i) {
                /* Program DMA address */
                addr = sg_dma_address(sg);
                if (addr & 7)
                        goto error;
                writeq(addr, host->dma_base + MIO_EMM_DMA_FIFO_ADR(host));

                /*
                 * If we have scatter-gather support we also have an extra
                 * register for the DMA addr, so no need to check
                 * host->big_dma_addr here.
                 */
                rw = (data->flags & MMC_DATA_WRITE) ? 1 : 0;
                fifo_cmd = FIELD_PREP(MIO_EMM_DMA_FIFO_CMD_RW, rw);

                /* Enable interrupts on the last element */
                fifo_cmd |= FIELD_PREP(MIO_EMM_DMA_FIFO_CMD_INTDIS,
                                       (i + 1 == count) ? 0 : 1);

#ifdef __LITTLE_ENDIAN
                fifo_cmd |= FIELD_PREP(MIO_EMM_DMA_FIFO_CMD_ENDIAN, 1);
#endif
                fifo_cmd |= FIELD_PREP(MIO_EMM_DMA_FIFO_CMD_SIZE,
                                       sg_dma_len(sg) / 8 - 1);
                /*
                 * The write copies the address and the command to the FIFO
                 * and increments the FIFO's COUNT field.
                 */
                writeq(fifo_cmd, host->dma_base + MIO_EMM_DMA_FIFO_CMD(host));
                pr_debug("[%s] sg_dma_len: %u  sg_elem: %d/%d\n",
                         (rw) ? "W" : "R", sg_dma_len(sg), i, count);
        }

        /*
         * Unlike prepare_dma_single, we don't return the address here,
         * as it would not make sense for scatter-gather. The DMA fixup
         * is only required on models that don't support scatter-gather,
         * so that is not a problem.
         */
        return 1;

error:
        WARN_ON_ONCE(1);
        dma_unmap_sg(host->dev, data->sg, data->sg_len, get_dma_dir(data));
        /* Disable FIFO */
        writeq(BIT_ULL(16), host->dma_base + MIO_EMM_DMA_FIFO_CFG(host));
        return 0;
}

static u64 prepare_dma(struct cvm_mmc_host *host, struct mmc_data *data)
{
        if (host->use_sg && data->sg_len > 1)
                return prepare_dma_sg(host, data);
        else
                return prepare_dma_single(host, data);
}

static u64 prepare_ext_dma(struct mmc_host *mmc, struct mmc_request *mrq)
{
        struct cvm_mmc_slot *slot = mmc_priv(mmc);
        u64 emm_dma;

        emm_dma = FIELD_PREP(MIO_EMM_DMA_VAL, 1) |
                  FIELD_PREP(MIO_EMM_DMA_SECTOR,
                             mmc_card_is_blockaddr(mmc->card) ? 1 : 0) |
                  FIELD_PREP(MIO_EMM_DMA_RW,
                             (mrq->data->flags & MMC_DATA_WRITE) ? 1 : 0) |
                  FIELD_PREP(MIO_EMM_DMA_BLOCK_CNT, mrq->data->blocks) |
                  FIELD_PREP(MIO_EMM_DMA_CARD_ADDR, mrq->cmd->arg);
        set_bus_id(&emm_dma, slot->bus_id);

        if (mmc_card_mmc(mmc->card) || (mmc_card_sd(mmc->card) &&
            (mmc->card->scr.cmds & SD_SCR_CMD23_SUPPORT)))
                emm_dma |= FIELD_PREP(MIO_EMM_DMA_MULTI, 1);

        pr_debug("[%s] blocks: %u  multi: %d\n",
                 (emm_dma & MIO_EMM_DMA_RW) ? "W" : "R",
                 mrq->data->blocks, (emm_dma & MIO_EMM_DMA_MULTI) ? 1 : 0);
        return emm_dma;
}

static void cvm_mmc_dma_request(struct mmc_host *mmc,
                                struct mmc_request *mrq)
{
        struct cvm_mmc_slot *slot = mmc_priv(mmc);
        struct cvm_mmc_host *host = slot->host;
        struct mmc_data *data;
        u64 emm_dma, addr;

        if (!mrq->data || !mrq->data->sg || !mrq->data->sg_len ||
            !mrq->stop || mrq->stop->opcode != MMC_STOP_TRANSMISSION) {
                dev_err(&mmc->card->dev,
                        "Error: cvm_mmc_dma_request no data\n");
                goto error;
        }

        cvm_mmc_switch_to(slot);

        data = mrq->data;
        pr_debug("DMA request  blocks: %d  block_size: %d  total_size: %d\n",
                 data->blocks, data->blksz, data->blocks * data->blksz);
        if (data->timeout_ns)
                set_wdog(slot, data->timeout_ns);

        WARN_ON(host->current_req);
        host->current_req = mrq;

        emm_dma = prepare_ext_dma(mmc, mrq);
        addr = prepare_dma(host, data);
        if (!addr) {
                dev_err(host->dev, "prepare_dma failed\n");
                goto error;
        }

        host->dma_active = true;
        host->int_enable(host, MIO_EMM_INT_CMD_ERR | MIO_EMM_INT_DMA_DONE |
                         MIO_EMM_INT_DMA_ERR);

        if (host->dmar_fixup)
                host->dmar_fixup(host, mrq->cmd, data, addr);

        /*
         * If we have a valid SD card in the slot, we set the response
         * bit mask to check for CRC errors and timeouts only.
         * Otherwise, use the default power reset value.
         */
        if (mmc_card_sd(mmc->card))
                writeq(0x00b00000ull, host->base + MIO_EMM_STS_MASK(host));
        else
                writeq(0xe4390080ull, host->base + MIO_EMM_STS_MASK(host));
        writeq(emm_dma, host->base + MIO_EMM_DMA(host));
        return;

error:
        mrq->cmd->error = -EINVAL;
        if (mrq->done)
                mrq->done(mrq);
        host->release_bus(host);
}

static void do_read_request(struct cvm_mmc_host *host, struct mmc_request *mrq)
{
        sg_miter_start(&host->smi, mrq->data->sg, mrq->data->sg_len,
                       SG_MITER_ATOMIC | SG_MITER_TO_SG);
}

static void do_write_request(struct cvm_mmc_host *host, struct mmc_request *mrq)
{
        unsigned int data_len = mrq->data->blocks * mrq->data->blksz;
        struct sg_mapping_iter *smi = &host->smi;
        unsigned int bytes_xfered;
        int shift = 56;
        u64 dat = 0;

        /* Copy data to the xmit buffer before issuing the command. */
        sg_miter_start(smi, mrq->data->sg, mrq->data->sg_len, SG_MITER_FROM_SG);

        /* Auto inc from offset zero, dbuf zero */
        writeq(0x10000ull, host->base + MIO_EMM_BUF_IDX(host));

        for (bytes_xfered = 0; bytes_xfered < data_len;) {
                if (smi->consumed >= smi->length) {
                        if (!sg_miter_next(smi))
                                break;
                        smi->consumed = 0;
                }

                while (smi->consumed < smi->length && shift >= 0) {
                        dat |= (u64)((u8 *)smi->addr)[smi->consumed] << shift;
                        bytes_xfered++;
                        smi->consumed++;
                        shift -= 8;
                }

                if (shift < 0) {
                        writeq(dat, host->base + MIO_EMM_BUF_DAT(host));
                        shift = 56;
                        dat = 0;
                }
        }
        sg_miter_stop(smi);
}

static void cvm_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
        struct cvm_mmc_slot *slot = mmc_priv(mmc);
        struct cvm_mmc_host *host = slot->host;
        struct mmc_command *cmd = mrq->cmd;
        struct cvm_mmc_cr_mods mods;
        u64 emm_cmd, rsp_sts;
        int retries = 100;

        /*
         * Note about locking:
         * All MMC devices share the same bus and controller. Allow only a
         * single user of the bootbus/MMC bus at a time. The lock is acquired
         * on all entry points from the MMC layer.
         *
         * For requests the lock is only released after the completion
         * interrupt!
         */
        host->acquire_bus(host);

        if (cmd->opcode == MMC_READ_MULTIPLE_BLOCK ||
            cmd->opcode == MMC_WRITE_MULTIPLE_BLOCK)
                return cvm_mmc_dma_request(mmc, mrq);

        cvm_mmc_switch_to(slot);

        mods = cvm_mmc_get_cr_mods(cmd);

        WARN_ON(host->current_req);
        host->current_req = mrq;

        if (cmd->data) {
                if (cmd->data->flags & MMC_DATA_READ)
                        do_read_request(host, mrq);
                else
                        do_write_request(host, mrq);

                if (cmd->data->timeout_ns)
                        set_wdog(slot, cmd->data->timeout_ns);
        } else {
                set_wdog(slot, 0);
        }

        host->dma_active = false;
        host->int_enable(host, MIO_EMM_INT_CMD_DONE | MIO_EMM_INT_CMD_ERR);

        emm_cmd = FIELD_PREP(MIO_EMM_CMD_VAL, 1) |
                  FIELD_PREP(MIO_EMM_CMD_CTYPE_XOR, mods.ctype_xor) |
                  FIELD_PREP(MIO_EMM_CMD_RTYPE_XOR, mods.rtype_xor) |
                  FIELD_PREP(MIO_EMM_CMD_IDX, cmd->opcode) |
                  FIELD_PREP(MIO_EMM_CMD_ARG, cmd->arg);
        set_bus_id(&emm_cmd, slot->bus_id);
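        /*
         * MIO_EMM_CMD_OFFSET appears to select where in the 64-word
         * (512-byte) dbuf the data starts: a full 512-byte block gives
         * 64 - 512 / 8 = 0, while e.g. an 8-byte transfer gives 63.
         */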
        if (cmd->data && mmc_cmd_type(cmd) == MMC_CMD_ADTC)
                emm_cmd |= FIELD_PREP(MIO_EMM_CMD_OFFSET,
                                64 - ((cmd->data->blocks * cmd->data->blksz) / 8));

        writeq(0, host->base + MIO_EMM_STS_MASK(host));

retry:
        rsp_sts = readq(host->base + MIO_EMM_RSP_STS(host));
        if (rsp_sts & MIO_EMM_RSP_STS_DMA_VAL ||
            rsp_sts & MIO_EMM_RSP_STS_CMD_VAL ||
            rsp_sts & MIO_EMM_RSP_STS_SWITCH_VAL ||
            rsp_sts & MIO_EMM_RSP_STS_DMA_PEND) {
                udelay(10);
                if (--retries)
                        goto retry;
        }
        if (!retries)
                dev_err(host->dev, "Bad status: %llx before command write\n", rsp_sts);
        writeq(emm_cmd, host->base + MIO_EMM_CMD(host));
}

static void cvm_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
        struct cvm_mmc_slot *slot = mmc_priv(mmc);
        struct cvm_mmc_host *host = slot->host;
        int clk_period = 0, power_class = 10, bus_width = 0;
        u64 clock, emm_switch;

        host->acquire_bus(host);
        cvm_mmc_switch_to(slot);

        /* Set the power state */
        switch (ios->power_mode) {
        case MMC_POWER_ON:
                break;

        case MMC_POWER_OFF:
                cvm_mmc_reset_bus(slot);
                if (host->global_pwr_gpiod)
                        host->set_shared_power(host, 0);
                else if (!IS_ERR(mmc->supply.vmmc))
                        mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);
                break;

        case MMC_POWER_UP:
                if (host->global_pwr_gpiod)
                        host->set_shared_power(host, 1);
                else if (!IS_ERR(mmc->supply.vmmc))
                        mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, ios->vdd);
                break;
        }

        /* Convert bus width to HW definition */
        switch (ios->bus_width) {
        case MMC_BUS_WIDTH_8:
                bus_width = 2;
                break;
        case MMC_BUS_WIDTH_4:
                bus_width = 1;
                break;
        case MMC_BUS_WIDTH_1:
                bus_width = 0;
                break;
        }

        /* DDR is available for 4/8 bit bus width */
        if (ios->bus_width && ios->timing == MMC_TIMING_MMC_DDR52)
                bus_width |= 4;

        /* Change the clock frequency. */
        clock = ios->clock;
        if (clock > 52000000)
                clock = 52000000;
        slot->clock = clock;

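        /*
         * clk_period is the half-period in sys_freq cycles, rounded up;
         * e.g. a (hypothetical) 800 MHz sys_freq with a 52 MHz bus clock
         * gives (800000000 + 51999999) / 104000000 = 8 cycles per half wave.
         */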
        if (clock)
                clk_period = (host->sys_freq + clock - 1) / (2 * clock);

        emm_switch = FIELD_PREP(MIO_EMM_SWITCH_HS_TIMING,
                                (ios->timing == MMC_TIMING_MMC_HS)) |
                     FIELD_PREP(MIO_EMM_SWITCH_BUS_WIDTH, bus_width) |
                     FIELD_PREP(MIO_EMM_SWITCH_POWER_CLASS, power_class) |
                     FIELD_PREP(MIO_EMM_SWITCH_CLK_HI, clk_period) |
                     FIELD_PREP(MIO_EMM_SWITCH_CLK_LO, clk_period);
        set_bus_id(&emm_switch, slot->bus_id);

        if (!switch_val_changed(slot, emm_switch))
                goto out;

        set_wdog(slot, 0);
        do_switch(host, emm_switch);
        slot->cached_switch = emm_switch;
out:
        host->release_bus(host);
}

static const struct mmc_host_ops cvm_mmc_ops = {
        .request        = cvm_mmc_request,
        .set_ios        = cvm_mmc_set_ios,
        .get_ro         = mmc_gpio_get_ro,
        .get_cd         = mmc_gpio_get_cd,
};

static void cvm_mmc_set_clock(struct cvm_mmc_slot *slot, unsigned int clock)
{
        struct mmc_host *mmc = slot->mmc;

        clock = min(clock, mmc->f_max);
        clock = max(clock, mmc->f_min);
        slot->clock = clock;
}

static int cvm_mmc_init_lowlevel(struct cvm_mmc_slot *slot)
{
        struct cvm_mmc_host *host = slot->host;
        u64 emm_switch;

        /* Enable this bus slot. */
        host->emm_cfg |= (1ull << slot->bus_id);
        writeq(host->emm_cfg, host->base + MIO_EMM_CFG(host));
        udelay(10);

        /* Program initial clock speed and power. */
        cvm_mmc_set_clock(slot, slot->mmc->f_min);
        emm_switch = FIELD_PREP(MIO_EMM_SWITCH_POWER_CLASS, 10);
        emm_switch |= FIELD_PREP(MIO_EMM_SWITCH_CLK_HI,
                                 (host->sys_freq / slot->clock) / 2);
        emm_switch |= FIELD_PREP(MIO_EMM_SWITCH_CLK_LO,
                                 (host->sys_freq / slot->clock) / 2);

        /* Make the changes take effect on this bus slot. */
        set_bus_id(&emm_switch, slot->bus_id);
        do_switch(host, emm_switch);

        slot->cached_switch = emm_switch;

        /*
         * Set watchdog timeout value and default reset value
         * for the mask register. Finally, set the CARD_RCA
         * bit so that we can get the card address relative
         * to the CMD register for CMD7 transactions.
         */
        set_wdog(slot, 0);
        writeq(0xe4390080ull, host->base + MIO_EMM_STS_MASK(host));
        writeq(1, host->base + MIO_EMM_RCA(host));
        return 0;
}

static int cvm_mmc_of_parse(struct device *dev, struct cvm_mmc_slot *slot)
{
        u32 id, cmd_skew = 0, dat_skew = 0, bus_width = 0;
        struct device_node *node = dev->of_node;
        struct mmc_host *mmc = slot->mmc;
        u64 clock_period;
        int ret;

        ret = of_property_read_u32(node, "reg", &id);
        if (ret) {
                dev_err(dev, "Missing or invalid reg property on %pOF\n", node);
                return ret;
        }

        if (id >= CAVIUM_MAX_MMC || slot->host->slot[id]) {
                dev_err(dev, "Invalid reg property on %pOF\n", node);
                return -EINVAL;
        }

        ret = mmc_regulator_get_supply(mmc);
        if (ret)
                return ret;
        /*
         * Legacy Octeon firmware has no regulator entry; fall back to
         * a hard-coded voltage to get a sane OCR.
         */
        if (IS_ERR(mmc->supply.vmmc))
                mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;

        /* Common MMC bindings */
        ret = mmc_of_parse(mmc);
        if (ret)
                return ret;

        /* Set bus width */
        if (!(mmc->caps & (MMC_CAP_8_BIT_DATA | MMC_CAP_4_BIT_DATA))) {
                of_property_read_u32(node, "cavium,bus-max-width", &bus_width);
                if (bus_width == 8)
                        mmc->caps |= MMC_CAP_8_BIT_DATA | MMC_CAP_4_BIT_DATA;
                else if (bus_width == 4)
                        mmc->caps |= MMC_CAP_4_BIT_DATA;
        }

        /* Set maximum and minimum frequency */
        if (!mmc->f_max)
                of_property_read_u32(node, "spi-max-frequency", &mmc->f_max);
        if (!mmc->f_max || mmc->f_max > 52000000)
                mmc->f_max = 52000000;
        mmc->f_min = 400000;

        /* Sampling register settings, period in picoseconds */
        clock_period = 1000000000000ull / slot->host->sys_freq;
        of_property_read_u32(node, "cavium,cmd-clk-skew", &cmd_skew);
        of_property_read_u32(node, "cavium,dat-clk-skew", &dat_skew);
        slot->cmd_cnt = (cmd_skew + clock_period / 2) / clock_period;
        slot->dat_cnt = (dat_skew + clock_period / 2) / clock_period;
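        /*
         * Example: a (hypothetical) 700 MHz sys_freq gives clock_period =
         * 1000000000000 / 700000000 = 1428 ps, so a 2500 ps dat-clk-skew
         * rounds to (2500 + 714) / 1428 = 2 sample cycles.
         */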

        return id;
}

int cvm_mmc_of_slot_probe(struct device *dev, struct cvm_mmc_host *host)
{
        struct cvm_mmc_slot *slot;
        struct mmc_host *mmc;
        int ret, id;

        mmc = mmc_alloc_host(sizeof(struct cvm_mmc_slot), dev);
        if (!mmc)
                return -ENOMEM;

        slot = mmc_priv(mmc);
        slot->mmc = mmc;
        slot->host = host;

        ret = cvm_mmc_of_parse(dev, slot);
        if (ret < 0)
                goto error;
        id = ret;

        /* Set up host parameters */
        mmc->ops = &cvm_mmc_ops;

        /*
         * We only have a 3.3V supply, so we cannot support any of the
         * UHS modes. We do support the high-speed DDR modes up to 52 MHz.
         *
         * Disable bounce buffers for max_segs = 1.
         */
        mmc->caps |= MMC_CAP_MMC_HIGHSPEED | MMC_CAP_SD_HIGHSPEED |
                     MMC_CAP_ERASE | MMC_CAP_CMD23 | MMC_CAP_POWER_OFF_CARD |
                     MMC_CAP_3_3V_DDR;

        if (host->use_sg)
                mmc->max_segs = 16;
        else
                mmc->max_segs = 1;

        /* DMA size field can address up to 8 MB */
        mmc->max_seg_size = 8 * 1024 * 1024;
        mmc->max_req_size = mmc->max_seg_size;
        /* External DMA is in 512 byte blocks */
        mmc->max_blk_size = 512;
        /* DMA block count field is 15 bits */
        mmc->max_blk_count = 32767;

        slot->clock = mmc->f_min;
        slot->bus_id = id;
        slot->cached_rca = 1;

        host->acquire_bus(host);
        host->slot[id] = slot;
        cvm_mmc_switch_to(slot);
        cvm_mmc_init_lowlevel(slot);
        host->release_bus(host);

        ret = mmc_add_host(mmc);
        if (ret) {
                dev_err(dev, "mmc_add_host() returned %d\n", ret);
                slot->host->slot[id] = NULL;
                goto error;
        }
        return 0;

error:
        mmc_free_host(slot->mmc);
        return ret;
}

int cvm_mmc_of_slot_remove(struct cvm_mmc_slot *slot)
{
        mmc_remove_host(slot->mmc);
        slot->host->slot[slot->bus_id] = NULL;
        mmc_free_host(slot->mmc);
        return 0;
}