linux/drivers/mmc/host/cavium.c
/*
 * Shared part of driver for MMC/SDHC controller on Cavium OCTEON and
 * ThunderX SOCs.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2012-2017 Cavium Inc.
 * Authors:
 *   David Daney <david.daney@cavium.com>
 *   Peter Swain <pswain@cavium.com>
 *   Steven J. Hill <steven.hill@cavium.com>
 *   Jan Glauber <jglauber@cavium.com>
 */
#include <linux/bitfield.h>
#include <linux/delay.h>
#include <linux/dma-direction.h>
#include <linux/dma-mapping.h>
#include <linux/gpio/consumer.h>
#include <linux/interrupt.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/slot-gpio.h>
#include <linux/module.h>
#include <linux/regulator/consumer.h>
#include <linux/scatterlist.h>
#include <linux/time.h>

#include "cavium.h"

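/*
 * Interrupt names, one per MIO_EMM_INT status bit; the platform glue
 * drivers are expected to use these when requesting the controller
 * interrupts.
 */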
const char *cvm_mmc_irq_names[] = {
        "MMC Buffer",
        "MMC Command",
        "MMC DMA",
        "MMC Command Error",
        "MMC DMA Error",
        "MMC Switch",
        "MMC Switch Error",
        "MMC DMA int Fifo",
        "MMC DMA int",
};

/*
 * The Cavium MMC host hardware assumes that all commands have fixed
 * command and response types.  These are correct if MMC devices are
 * being used.  However, non-MMC devices like SD use command and
 * response types that are unexpected by the host hardware.
 *
 * The command and response types can be overridden by supplying an
 * XOR value that is applied to the type.  We calculate the XOR value
 * from the values in this table and the flags passed from the MMC
 * core.
 */
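/*
 * Worked example (derived from the table and the switch statements in
 * cvm_mmc_get_cr_mods() below): for eMMC, CMD8 is SEND_EXT_CSD, a data
 * read with an R1 response, so the table entry is {1, 1}.  For SD,
 * CMD8 is SEND_IF_COND, which has no data and an R7 response, so the
 * core's flags yield a desired ctype of 0 and rtype of 1, giving
 * ctype_xor = 1 and rtype_xor = 0.
 */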
static struct cvm_mmc_cr_type cvm_mmc_cr_types[] = {
        {0, 0},         /* CMD0 */
        {0, 3},         /* CMD1 */
        {0, 2},         /* CMD2 */
        {0, 1},         /* CMD3 */
        {0, 0},         /* CMD4 */
        {0, 1},         /* CMD5 */
        {0, 1},         /* CMD6 */
        {0, 1},         /* CMD7 */
        {1, 1},         /* CMD8 */
        {0, 2},         /* CMD9 */
        {0, 2},         /* CMD10 */
        {1, 1},         /* CMD11 */
        {0, 1},         /* CMD12 */
        {0, 1},         /* CMD13 */
        {1, 1},         /* CMD14 */
        {0, 0},         /* CMD15 */
        {0, 1},         /* CMD16 */
        {1, 1},         /* CMD17 */
        {1, 1},         /* CMD18 */
        {3, 1},         /* CMD19 */
        {2, 1},         /* CMD20 */
        {0, 0},         /* CMD21 */
        {0, 0},         /* CMD22 */
        {0, 1},         /* CMD23 */
        {2, 1},         /* CMD24 */
        {2, 1},         /* CMD25 */
        {2, 1},         /* CMD26 */
        {2, 1},         /* CMD27 */
        {0, 1},         /* CMD28 */
        {0, 1},         /* CMD29 */
        {1, 1},         /* CMD30 */
        {1, 1},         /* CMD31 */
        {0, 0},         /* CMD32 */
        {0, 0},         /* CMD33 */
        {0, 0},         /* CMD34 */
        {0, 1},         /* CMD35 */
        {0, 1},         /* CMD36 */
        {0, 0},         /* CMD37 */
        {0, 1},         /* CMD38 */
        {0, 4},         /* CMD39 */
        {0, 5},         /* CMD40 */
        {0, 0},         /* CMD41 */
        {2, 1},         /* CMD42 */
        {0, 0},         /* CMD43 */
        {0, 0},         /* CMD44 */
        {0, 0},         /* CMD45 */
        {0, 0},         /* CMD46 */
        {0, 0},         /* CMD47 */
        {0, 0},         /* CMD48 */
        {0, 0},         /* CMD49 */
        {0, 0},         /* CMD50 */
        {0, 0},         /* CMD51 */
        {0, 0},         /* CMD52 */
        {0, 0},         /* CMD53 */
        {0, 0},         /* CMD54 */
        {0, 1},         /* CMD55 */
        {0xff, 0xff},   /* CMD56 */
        {0, 0},         /* CMD57 */
        {0, 0},         /* CMD58 */
        {0, 0},         /* CMD59 */
        {0, 0},         /* CMD60 */
        {0, 0},         /* CMD61 */
        {0, 0},         /* CMD62 */
        {0, 0}          /* CMD63 */
};

static struct cvm_mmc_cr_mods cvm_mmc_get_cr_mods(struct mmc_command *cmd)
{
        struct cvm_mmc_cr_type *cr;
        u8 hardware_ctype, hardware_rtype;
        u8 desired_ctype = 0, desired_rtype = 0;
        struct cvm_mmc_cr_mods r;

        cr = cvm_mmc_cr_types + (cmd->opcode & 0x3f);
        hardware_ctype = cr->ctype;
        hardware_rtype = cr->rtype;
        if (cmd->opcode == MMC_GEN_CMD)
                hardware_ctype = (cmd->arg & 1) ? 1 : 2;

        switch (mmc_cmd_type(cmd)) {
        case MMC_CMD_ADTC:
                desired_ctype = (cmd->data->flags & MMC_DATA_WRITE) ? 2 : 1;
                break;
        case MMC_CMD_AC:
        case MMC_CMD_BC:
        case MMC_CMD_BCR:
                desired_ctype = 0;
                break;
        }

        switch (mmc_resp_type(cmd)) {
        case MMC_RSP_NONE:
                desired_rtype = 0;
                break;
        case MMC_RSP_R1: /* MMC_RSP_R5, MMC_RSP_R6, MMC_RSP_R7 */
        case MMC_RSP_R1B:
                desired_rtype = 1;
                break;
        case MMC_RSP_R2:
                desired_rtype = 2;
                break;
        case MMC_RSP_R3: /* MMC_RSP_R4 */
                desired_rtype = 3;
                break;
        }
        r.ctype_xor = desired_ctype ^ hardware_ctype;
        r.rtype_xor = desired_rtype ^ hardware_rtype;
        return r;
}

static void check_switch_errors(struct cvm_mmc_host *host)
{
        u64 emm_switch;

        emm_switch = readq(host->base + MIO_EMM_SWITCH(host));
        if (emm_switch & MIO_EMM_SWITCH_ERR0)
                dev_err(host->dev, "Switch power class error\n");
        if (emm_switch & MIO_EMM_SWITCH_ERR1)
                dev_err(host->dev, "Switch hs timing error\n");
        if (emm_switch & MIO_EMM_SWITCH_ERR2)
                dev_err(host->dev, "Switch bus width error\n");
}

static void clear_bus_id(u64 *reg)
{
        u64 bus_id_mask = GENMASK_ULL(61, 60);

        *reg &= ~bus_id_mask;
}

static void set_bus_id(u64 *reg, int bus_id)
{
        clear_bus_id(reg);
        *reg |= FIELD_PREP(GENMASK_ULL(61, 60), bus_id);
}

static int get_bus_id(u64 reg)
{
        return FIELD_GET(GENMASK_ULL(61, 60), reg);
}

/*
 * We never set the switch_exe bit since that would interfere
 * with the commands sent by the MMC core.
 */
static void do_switch(struct cvm_mmc_host *host, u64 emm_switch)
{
        int retries = 100;
        u64 rsp_sts;
        int bus_id;

        /*
         * Mode settings are only taken from slot 0. Work around that
         * hardware issue by first switching to slot 0.
         */
        bus_id = get_bus_id(emm_switch);
        clear_bus_id(&emm_switch);
        writeq(emm_switch, host->base + MIO_EMM_SWITCH(host));

        set_bus_id(&emm_switch, bus_id);
        writeq(emm_switch, host->base + MIO_EMM_SWITCH(host));

        /* wait for the switch to finish */
        do {
                rsp_sts = readq(host->base + MIO_EMM_RSP_STS(host));
                if (!(rsp_sts & MIO_EMM_RSP_STS_SWITCH_VAL))
                        break;
                udelay(10);
        } while (--retries);

        check_switch_errors(host);
}

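/*
 * Decoding the match mask below against the MIO_EMM_SWITCH field
 * definitions in cavium.h: BUS_ID is bits [61:60], HS_TIMING bit [48],
 * BUS_WIDTH bits [42:40], POWER_CLASS bits [35:32], CLK_HI bits [31:16]
 * and CLK_LO bits [15:0].
 */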
static bool switch_val_changed(struct cvm_mmc_slot *slot, u64 new_val)
{
        /* Match BUS_ID, HS_TIMING, BUS_WIDTH, POWER_CLASS, CLK_HI, CLK_LO */
        u64 match = 0x3001070fffffffffull;

        return (slot->cached_switch & match) != (new_val & match);
}

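/*
 * Program the watchdog with the requested timeout, converted from
 * nanoseconds into MMC clock cycles. A timeout of zero selects the
 * default of 850 ms worth of cycles.
 */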
static void set_wdog(struct cvm_mmc_slot *slot, unsigned int ns)
{
        u64 timeout;

        if (!slot->clock)
                return;

        if (ns)
                timeout = (slot->clock * ns) / NSEC_PER_SEC;
        else
                timeout = (slot->clock * 850ull) / 1000ull;
        writeq(timeout, slot->host->base + MIO_EMM_WDOG(slot->host));
}

static void cvm_mmc_reset_bus(struct cvm_mmc_slot *slot)
{
        struct cvm_mmc_host *host = slot->host;
        u64 emm_switch, wdog;

        emm_switch = readq(slot->host->base + MIO_EMM_SWITCH(host));
        emm_switch &= ~(MIO_EMM_SWITCH_EXE | MIO_EMM_SWITCH_ERR0 |
                        MIO_EMM_SWITCH_ERR1 | MIO_EMM_SWITCH_ERR2);
        set_bus_id(&emm_switch, slot->bus_id);

        wdog = readq(slot->host->base + MIO_EMM_WDOG(host));
        do_switch(slot->host, emm_switch);

        slot->cached_switch = emm_switch;

        msleep(20);

        writeq(wdog, slot->host->base + MIO_EMM_WDOG(host));
}

/* Switch to another slot if needed */
static void cvm_mmc_switch_to(struct cvm_mmc_slot *slot)
{
        struct cvm_mmc_host *host = slot->host;
        struct cvm_mmc_slot *old_slot;
        u64 emm_sample, emm_switch;

        if (slot->bus_id == host->last_slot)
                return;

        if (host->last_slot >= 0 && host->slot[host->last_slot]) {
                old_slot = host->slot[host->last_slot];
                old_slot->cached_switch = readq(host->base + MIO_EMM_SWITCH(host));
                old_slot->cached_rca = readq(host->base + MIO_EMM_RCA(host));
        }

        writeq(slot->cached_rca, host->base + MIO_EMM_RCA(host));
        emm_switch = slot->cached_switch;
        set_bus_id(&emm_switch, slot->bus_id);
        do_switch(host, emm_switch);

        emm_sample = FIELD_PREP(MIO_EMM_SAMPLE_CMD_CNT, slot->cmd_cnt) |
                     FIELD_PREP(MIO_EMM_SAMPLE_DAT_CNT, slot->dat_cnt);
        writeq(emm_sample, host->base + MIO_EMM_SAMPLE(host));

        host->last_slot = slot->bus_id;
}

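/*
 * PIO reads go through the controller's internal data buffer: writing
 * MIO_EMM_BUF_IDX with bit 16 set selects auto-increment mode, after
 * which each read of MIO_EMM_BUF_DAT returns the next 64 bits of data,
 * most significant byte first (hence the shift starting at 56 below).
 */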
static void do_read(struct cvm_mmc_host *host, struct mmc_request *req,
                    u64 dbuf)
{
        struct sg_mapping_iter *smi = &host->smi;
        int data_len = req->data->blocks * req->data->blksz;
        int bytes_xfered, shift = -1;
        u64 dat = 0;

        /* Auto inc from offset zero */
        writeq((0x10000 | (dbuf << 6)), host->base + MIO_EMM_BUF_IDX(host));

        for (bytes_xfered = 0; bytes_xfered < data_len;) {
                if (smi->consumed >= smi->length) {
                        if (!sg_miter_next(smi))
                                break;
                        smi->consumed = 0;
                }

                if (shift < 0) {
                        dat = readq(host->base + MIO_EMM_BUF_DAT(host));
                        shift = 56;
                }

                while (smi->consumed < smi->length && shift >= 0) {
                        ((u8 *)smi->addr)[smi->consumed] = (dat >> shift) & 0xff;
                        bytes_xfered++;
                        smi->consumed++;
                        shift -= 8;
                }
        }

        sg_miter_stop(smi);
        req->data->bytes_xfered = bytes_xfered;
        req->data->error = 0;
}

static void do_write(struct mmc_request *req)
{
        req->data->bytes_xfered = req->data->blocks * req->data->blksz;
        req->data->error = 0;
}

static void set_cmd_response(struct cvm_mmc_host *host, struct mmc_request *req,
                             u64 rsp_sts)
{
        u64 rsp_hi, rsp_lo;

        if (!(rsp_sts & MIO_EMM_RSP_STS_RSP_VAL))
                return;

        rsp_lo = readq(host->base + MIO_EMM_RSP_LO(host));

        switch (FIELD_GET(MIO_EMM_RSP_STS_RSP_TYPE, rsp_sts)) {
        case 1:
        case 3:
                req->cmd->resp[0] = (rsp_lo >> 8) & 0xffffffff;
                req->cmd->resp[1] = 0;
                req->cmd->resp[2] = 0;
                req->cmd->resp[3] = 0;
                break;
        case 2:
                req->cmd->resp[3] = rsp_lo & 0xffffffff;
                req->cmd->resp[2] = (rsp_lo >> 32) & 0xffffffff;
                rsp_hi = readq(host->base + MIO_EMM_RSP_HI(host));
                req->cmd->resp[1] = rsp_hi & 0xffffffff;
                req->cmd->resp[0] = (rsp_hi >> 32) & 0xffffffff;
                break;
        }
}

static int get_dma_dir(struct mmc_data *data)
{
        return (data->flags & MMC_DATA_WRITE) ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
}

static int finish_dma_single(struct cvm_mmc_host *host, struct mmc_data *data)
{
        data->bytes_xfered = data->blocks * data->blksz;
        data->error = 0;
        dma_unmap_sg(host->dev, data->sg, data->sg_len, get_dma_dir(data));
        return 1;
}

static int finish_dma_sg(struct cvm_mmc_host *host, struct mmc_data *data)
{
        u64 fifo_cfg;
        int count;

        /* Check if there are any pending requests left */
        fifo_cfg = readq(host->dma_base + MIO_EMM_DMA_FIFO_CFG(host));
        count = FIELD_GET(MIO_EMM_DMA_FIFO_CFG_COUNT, fifo_cfg);
        if (count)
                dev_err(host->dev, "%u requests still pending\n", count);

        data->bytes_xfered = data->blocks * data->blksz;
        data->error = 0;

        /* Clear and disable FIFO */
        writeq(BIT_ULL(16), host->dma_base + MIO_EMM_DMA_FIFO_CFG(host));
        dma_unmap_sg(host->dev, data->sg, data->sg_len, get_dma_dir(data));
        return 1;
}

static int finish_dma(struct cvm_mmc_host *host, struct mmc_data *data)
{
        if (host->use_sg && data->sg_len > 1)
                return finish_dma_sg(host, data);
        else
                return finish_dma_single(host, data);
}

static int check_status(u64 rsp_sts)
{
        if (rsp_sts & MIO_EMM_RSP_STS_RSP_BAD_STS ||
            rsp_sts & MIO_EMM_RSP_STS_RSP_CRC_ERR ||
            rsp_sts & MIO_EMM_RSP_STS_BLK_CRC_ERR)
                return -EILSEQ;
        if (rsp_sts & MIO_EMM_RSP_STS_RSP_TIMEOUT ||
            rsp_sts & MIO_EMM_RSP_STS_BLK_TIMEOUT)
                return -ETIMEDOUT;
        if (rsp_sts & MIO_EMM_RSP_STS_DBUF_ERR)
                return -EIO;
        return 0;
}

/*
 * Try to clean up failed DMA by issuing a new DMA engine command with
 * DAT_NULL set.
 */
static void cleanup_dma(struct cvm_mmc_host *host, u64 rsp_sts)
{
        u64 emm_dma;

        emm_dma = readq(host->base + MIO_EMM_DMA(host));
        emm_dma |= FIELD_PREP(MIO_EMM_DMA_VAL, 1) |
                   FIELD_PREP(MIO_EMM_DMA_DAT_NULL, 1);
        set_bus_id(&emm_dma, get_bus_id(rsp_sts));
        writeq(emm_dma, host->base + MIO_EMM_DMA(host));
}

irqreturn_t cvm_mmc_interrupt(int irq, void *dev_id)
{
        struct cvm_mmc_host *host = dev_id;
        struct mmc_request *req;
        u64 emm_int, rsp_sts;
        bool host_done;

        if (host->need_irq_handler_lock)
                spin_lock(&host->irq_handler_lock);
        else
                __acquire(&host->irq_handler_lock);

        /* Clear interrupt bits (write 1 clears). */
        emm_int = readq(host->base + MIO_EMM_INT(host));
        writeq(emm_int, host->base + MIO_EMM_INT(host));

        if (emm_int & MIO_EMM_INT_SWITCH_ERR)
                check_switch_errors(host);

        req = host->current_req;
        if (!req)
                goto out;

        rsp_sts = readq(host->base + MIO_EMM_RSP_STS(host));
        /*
         * dma_val set means DMA is still in progress. Don't touch
         * the request and wait for the interrupt indicating that
         * the DMA is finished.
         */
        if ((rsp_sts & MIO_EMM_RSP_STS_DMA_VAL) && host->dma_active)
                goto out;

        if (!host->dma_active && req->data &&
            (emm_int & MIO_EMM_INT_BUF_DONE)) {
                unsigned int type = (rsp_sts >> 7) & 3;

                if (type == 1)
                        do_read(host, req, rsp_sts & MIO_EMM_RSP_STS_DBUF);
                else if (type == 2)
                        do_write(req);
        }

        host_done = emm_int & MIO_EMM_INT_CMD_DONE ||
                    emm_int & MIO_EMM_INT_DMA_DONE ||
                    emm_int & MIO_EMM_INT_CMD_ERR  ||
                    emm_int & MIO_EMM_INT_DMA_ERR;

        if (!(host_done && req->done))
                goto no_req_done;

        req->cmd->error = check_status(rsp_sts);

        if (host->dma_active && req->data)
                if (!finish_dma(host, req->data))
                        goto no_req_done;

        set_cmd_response(host, req, rsp_sts);
        if ((emm_int & MIO_EMM_INT_DMA_ERR) &&
            (rsp_sts & MIO_EMM_RSP_STS_DMA_PEND))
                cleanup_dma(host, rsp_sts);

        host->current_req = NULL;
        req->done(req);

no_req_done:
        if (host->dmar_fixup_done)
                host->dmar_fixup_done(host);
        if (host_done)
                host->release_bus(host);
out:
        if (host->need_irq_handler_lock)
                spin_unlock(&host->irq_handler_lock);
        else
                __release(&host->irq_handler_lock);
        return IRQ_RETVAL(emm_int != 0);
}

/*
 * Program DMA_CFG and if needed DMA_ADR.
 * Returns 0 on error, DMA address otherwise.
 */
static u64 prepare_dma_single(struct cvm_mmc_host *host, struct mmc_data *data)
{
        u64 dma_cfg, addr;
        int count, rw;

        count = dma_map_sg(host->dev, data->sg, data->sg_len,
                           get_dma_dir(data));
        if (!count)
                return 0;

        rw = (data->flags & MMC_DATA_WRITE) ? 1 : 0;
        dma_cfg = FIELD_PREP(MIO_EMM_DMA_CFG_EN, 1) |
                  FIELD_PREP(MIO_EMM_DMA_CFG_RW, rw);
#ifdef __LITTLE_ENDIAN
        dma_cfg |= FIELD_PREP(MIO_EMM_DMA_CFG_ENDIAN, 1);
#endif
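        /*
         * Judging by the computation below, the SIZE field is expressed
         * in 64-bit words minus one, so the buffer length must be a
         * multiple of 8 bytes.
         */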
        dma_cfg |= FIELD_PREP(MIO_EMM_DMA_CFG_SIZE,
                              (sg_dma_len(&data->sg[0]) / 8) - 1);

        addr = sg_dma_address(&data->sg[0]);
        if (!host->big_dma_addr)
                dma_cfg |= FIELD_PREP(MIO_EMM_DMA_CFG_ADR, addr);
        writeq(dma_cfg, host->dma_base + MIO_EMM_DMA_CFG(host));

        pr_debug("[%s] sg_dma_len: %u  total sg_elem: %d\n",
                 (rw) ? "W" : "R", sg_dma_len(&data->sg[0]), count);

        if (host->big_dma_addr)
                writeq(addr, host->dma_base + MIO_EMM_DMA_ADR(host));
        return addr;
}

/*
 * Queue complete sg list into the FIFO.
 * Returns 0 on error, 1 otherwise.
 */
static u64 prepare_dma_sg(struct cvm_mmc_host *host, struct mmc_data *data)
{
        struct scatterlist *sg;
        u64 fifo_cmd, addr;
        int count, i, rw;

        count = dma_map_sg(host->dev, data->sg, data->sg_len,
                           get_dma_dir(data));
        if (!count)
                return 0;
        if (count > 16)
                goto error;

        /* Enable FIFO by removing CLR bit */
        writeq(0, host->dma_base + MIO_EMM_DMA_FIFO_CFG(host));

        for_each_sg(data->sg, sg, count, i) {
                /* Program DMA address */
                addr = sg_dma_address(sg);
                if (addr & 7)
                        goto error;
                writeq(addr, host->dma_base + MIO_EMM_DMA_FIFO_ADR(host));

                /*
                 * If we have scatter-gather support we also have an extra
                 * register for the DMA addr, so no need to check
                 * host->big_dma_addr here.
                 */
                rw = (data->flags & MMC_DATA_WRITE) ? 1 : 0;
                fifo_cmd = FIELD_PREP(MIO_EMM_DMA_FIFO_CMD_RW, rw);

                /* enable interrupts on the last element */
                fifo_cmd |= FIELD_PREP(MIO_EMM_DMA_FIFO_CMD_INTDIS,
                                       (i + 1 == count) ? 0 : 1);

#ifdef __LITTLE_ENDIAN
                fifo_cmd |= FIELD_PREP(MIO_EMM_DMA_FIFO_CMD_ENDIAN, 1);
#endif
                fifo_cmd |= FIELD_PREP(MIO_EMM_DMA_FIFO_CMD_SIZE,
                                       sg_dma_len(sg) / 8 - 1);
                /*
                 * The write copies the address and the command to the FIFO
                 * and increments the FIFO's COUNT field.
                 */
                writeq(fifo_cmd, host->dma_base + MIO_EMM_DMA_FIFO_CMD(host));
                pr_debug("[%s] sg_dma_len: %u  sg_elem: %d/%d\n",
                         (rw) ? "W" : "R", sg_dma_len(sg), i, count);
        }

        /*
         * Unlike prepare_dma_single we don't return the address here,
         * as it would not make sense for scatter-gather. The dma fixup
         * is only required on models that don't support scatter-gather,
         * so that is not a problem.
         */
        return 1;

error:
        WARN_ON_ONCE(1);
        dma_unmap_sg(host->dev, data->sg, data->sg_len, get_dma_dir(data));
        /* Disable FIFO */
        writeq(BIT_ULL(16), host->dma_base + MIO_EMM_DMA_FIFO_CFG(host));
        return 0;
}

static u64 prepare_dma(struct cvm_mmc_host *host, struct mmc_data *data)
{
        if (host->use_sg && data->sg_len > 1)
                return prepare_dma_sg(host, data);
        else
                return prepare_dma_single(host, data);
}

static u64 prepare_ext_dma(struct mmc_host *mmc, struct mmc_request *mrq)
{
        struct cvm_mmc_slot *slot = mmc_priv(mmc);
        u64 emm_dma;

        emm_dma = FIELD_PREP(MIO_EMM_DMA_VAL, 1) |
                  FIELD_PREP(MIO_EMM_DMA_SECTOR,
                             mmc_card_is_blockaddr(mmc->card) ? 1 : 0) |
                  FIELD_PREP(MIO_EMM_DMA_RW,
                             (mrq->data->flags & MMC_DATA_WRITE) ? 1 : 0) |
                  FIELD_PREP(MIO_EMM_DMA_BLOCK_CNT, mrq->data->blocks) |
                  FIELD_PREP(MIO_EMM_DMA_CARD_ADDR, mrq->cmd->arg);
        set_bus_id(&emm_dma, slot->bus_id);

        if (mmc_card_mmc(mmc->card) || (mmc_card_sd(mmc->card) &&
            (mmc->card->scr.cmds & SD_SCR_CMD23_SUPPORT)))
                emm_dma |= FIELD_PREP(MIO_EMM_DMA_MULTI, 1);

        pr_debug("[%s] blocks: %u  multi: %d\n",
                 (emm_dma & MIO_EMM_DMA_RW) ? "W" : "R",
                 mrq->data->blocks, (emm_dma & MIO_EMM_DMA_MULTI) ? 1 : 0);
        return emm_dma;
}

static void cvm_mmc_dma_request(struct mmc_host *mmc,
                                struct mmc_request *mrq)
{
        struct cvm_mmc_slot *slot = mmc_priv(mmc);
        struct cvm_mmc_host *host = slot->host;
        struct mmc_data *data;
        u64 emm_dma, addr;

        if (!mrq->data || !mrq->data->sg || !mrq->data->sg_len ||
            !mrq->stop || mrq->stop->opcode != MMC_STOP_TRANSMISSION) {
                dev_err(&mmc->card->dev, "Error: %s no data\n", __func__);
                goto error;
        }

        cvm_mmc_switch_to(slot);

        data = mrq->data;
        pr_debug("DMA request  blocks: %d  block_size: %d  total_size: %d\n",
                 data->blocks, data->blksz, data->blocks * data->blksz);
        if (data->timeout_ns)
                set_wdog(slot, data->timeout_ns);

        WARN_ON(host->current_req);
        host->current_req = mrq;

        emm_dma = prepare_ext_dma(mmc, mrq);
        addr = prepare_dma(host, data);
        if (!addr) {
                dev_err(host->dev, "prepare_dma failed\n");
                goto error;
        }

        host->dma_active = true;
        host->int_enable(host, MIO_EMM_INT_CMD_ERR | MIO_EMM_INT_DMA_DONE |
                         MIO_EMM_INT_DMA_ERR);

        if (host->dmar_fixup)
                host->dmar_fixup(host, mrq->cmd, data, addr);

        /*
         * If we have a valid SD card in the slot, we set the response
         * bit mask to check for CRC errors and timeouts only.
         * Otherwise, use the default power reset value.
         */
        if (mmc_card_sd(mmc->card))
                writeq(0x00b00000ull, host->base + MIO_EMM_STS_MASK(host));
        else
                writeq(0xe4390080ull, host->base + MIO_EMM_STS_MASK(host));
        writeq(emm_dma, host->base + MIO_EMM_DMA(host));
        return;

error:
        mrq->cmd->error = -EINVAL;
        if (mrq->done)
                mrq->done(mrq);
        host->release_bus(host);
}

static void do_read_request(struct cvm_mmc_host *host, struct mmc_request *mrq)
{
        sg_miter_start(&host->smi, mrq->data->sg, mrq->data->sg_len,
                       SG_MITER_ATOMIC | SG_MITER_TO_SG);
}

static void do_write_request(struct cvm_mmc_host *host, struct mmc_request *mrq)
{
        unsigned int data_len = mrq->data->blocks * mrq->data->blksz;
        struct sg_mapping_iter *smi = &host->smi;
        unsigned int bytes_xfered;
        int shift = 56;
        u64 dat = 0;

        /* Copy data to the xmit buffer before issuing the command. */
        sg_miter_start(smi, mrq->data->sg, mrq->data->sg_len, SG_MITER_FROM_SG);

        /* Auto inc from offset zero, dbuf zero */
        writeq(0x10000ull, host->base + MIO_EMM_BUF_IDX(host));

        for (bytes_xfered = 0; bytes_xfered < data_len;) {
                if (smi->consumed >= smi->length) {
                        if (!sg_miter_next(smi))
                                break;
                        smi->consumed = 0;
                }

                while (smi->consumed < smi->length && shift >= 0) {
                        dat |= (u64)((u8 *)smi->addr)[smi->consumed] << shift;
                        bytes_xfered++;
                        smi->consumed++;
                        shift -= 8;
                }

                if (shift < 0) {
                        writeq(dat, host->base + MIO_EMM_BUF_DAT(host));
                        shift = 56;
                        dat = 0;
                }
        }
        sg_miter_stop(smi);
}

static void cvm_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
        struct cvm_mmc_slot *slot = mmc_priv(mmc);
        struct cvm_mmc_host *host = slot->host;
        struct mmc_command *cmd = mrq->cmd;
        struct cvm_mmc_cr_mods mods;
        u64 emm_cmd, rsp_sts;
        int retries = 100;

        /*
         * Note about locking:
         * All MMC devices share the same bus and controller. Allow only a
         * single user of the bootbus/MMC bus at a time. The lock is acquired
         * on all entry points from the MMC layer.
         *
         * For requests the lock is only released after the completion
         * interrupt!
         */
        host->acquire_bus(host);

        if (cmd->opcode == MMC_READ_MULTIPLE_BLOCK ||
            cmd->opcode == MMC_WRITE_MULTIPLE_BLOCK)
                return cvm_mmc_dma_request(mmc, mrq);

        cvm_mmc_switch_to(slot);

        mods = cvm_mmc_get_cr_mods(cmd);

        WARN_ON(host->current_req);
        host->current_req = mrq;

        if (cmd->data) {
                if (cmd->data->flags & MMC_DATA_READ)
                        do_read_request(host, mrq);
                else
                        do_write_request(host, mrq);

                if (cmd->data->timeout_ns)
                        set_wdog(slot, cmd->data->timeout_ns);
        } else
                set_wdog(slot, 0);

        host->dma_active = false;
        host->int_enable(host, MIO_EMM_INT_CMD_DONE | MIO_EMM_INT_CMD_ERR);

        emm_cmd = FIELD_PREP(MIO_EMM_CMD_VAL, 1) |
                  FIELD_PREP(MIO_EMM_CMD_CTYPE_XOR, mods.ctype_xor) |
                  FIELD_PREP(MIO_EMM_CMD_RTYPE_XOR, mods.rtype_xor) |
                  FIELD_PREP(MIO_EMM_CMD_IDX, cmd->opcode) |
                  FIELD_PREP(MIO_EMM_CMD_ARG, cmd->arg);
        set_bus_id(&emm_cmd, slot->bus_id);
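        /*
         * Judging by the computation below, CMD_OFFSET is in 8-byte
         * words and places a short PIO transfer at the end of the
         * 512-byte (64-word) internal data buffer.
         */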
        if (cmd->data && mmc_cmd_type(cmd) == MMC_CMD_ADTC)
                emm_cmd |= FIELD_PREP(MIO_EMM_CMD_OFFSET,
                                64 - ((cmd->data->blocks * cmd->data->blksz) / 8));

        writeq(0, host->base + MIO_EMM_STS_MASK(host));

retry:
        rsp_sts = readq(host->base + MIO_EMM_RSP_STS(host));
        if (rsp_sts & MIO_EMM_RSP_STS_DMA_VAL ||
            rsp_sts & MIO_EMM_RSP_STS_CMD_VAL ||
            rsp_sts & MIO_EMM_RSP_STS_SWITCH_VAL ||
            rsp_sts & MIO_EMM_RSP_STS_DMA_PEND) {
                udelay(10);
                if (--retries)
                        goto retry;
        }
        if (!retries)
                dev_err(host->dev, "Bad status: %llx before command write\n", rsp_sts);
        writeq(emm_cmd, host->base + MIO_EMM_CMD(host));
}

static void cvm_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
        struct cvm_mmc_slot *slot = mmc_priv(mmc);
        struct cvm_mmc_host *host = slot->host;
        int clk_period = 0, power_class = 10, bus_width = 0;
        u64 clock, emm_switch;

        host->acquire_bus(host);
        cvm_mmc_switch_to(slot);

        /* Set the power state */
        switch (ios->power_mode) {
        case MMC_POWER_ON:
                break;

        case MMC_POWER_OFF:
                cvm_mmc_reset_bus(slot);
                if (host->global_pwr_gpiod)
                        host->set_shared_power(host, 0);
                else if (!IS_ERR(mmc->supply.vmmc))
                        mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);
                break;

        case MMC_POWER_UP:
                if (host->global_pwr_gpiod)
                        host->set_shared_power(host, 1);
                else if (!IS_ERR(mmc->supply.vmmc))
                        mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, ios->vdd);
                break;
        }

        /* Convert bus width to HW definition */
        switch (ios->bus_width) {
        case MMC_BUS_WIDTH_8:
                bus_width = 2;
                break;
        case MMC_BUS_WIDTH_4:
                bus_width = 1;
                break;
        case MMC_BUS_WIDTH_1:
                bus_width = 0;
                break;
        }

        /* DDR is available for 4/8 bit bus width */
        if (ios->bus_width && ios->timing == MMC_TIMING_MMC_DDR52)
                bus_width |= 4;

        /* Change the clock frequency. */
        clock = ios->clock;
        if (clock > 52000000)
                clock = 52000000;
        slot->clock = clock;

        if (clock)
                clk_period = (host->sys_freq + clock - 1) / (2 * clock);

        emm_switch = FIELD_PREP(MIO_EMM_SWITCH_HS_TIMING,
                                (ios->timing == MMC_TIMING_MMC_HS)) |
                     FIELD_PREP(MIO_EMM_SWITCH_BUS_WIDTH, bus_width) |
                     FIELD_PREP(MIO_EMM_SWITCH_POWER_CLASS, power_class) |
                     FIELD_PREP(MIO_EMM_SWITCH_CLK_HI, clk_period) |
                     FIELD_PREP(MIO_EMM_SWITCH_CLK_LO, clk_period);
        set_bus_id(&emm_switch, slot->bus_id);

        if (!switch_val_changed(slot, emm_switch))
                goto out;

        set_wdog(slot, 0);
        do_switch(host, emm_switch);
        slot->cached_switch = emm_switch;
out:
        host->release_bus(host);
}

static const struct mmc_host_ops cvm_mmc_ops = {
        .request        = cvm_mmc_request,
        .set_ios        = cvm_mmc_set_ios,
        .get_ro         = mmc_gpio_get_ro,
        .get_cd         = mmc_gpio_get_cd,
};

static void cvm_mmc_set_clock(struct cvm_mmc_slot *slot, unsigned int clock)
{
        struct mmc_host *mmc = slot->mmc;

        clock = min(clock, mmc->f_max);
        clock = max(clock, mmc->f_min);
        slot->clock = clock;
}

static int cvm_mmc_init_lowlevel(struct cvm_mmc_slot *slot)
{
        struct cvm_mmc_host *host = slot->host;
        u64 emm_switch;

        /* Enable this bus slot. */
        host->emm_cfg |= (1ull << slot->bus_id);
        writeq(host->emm_cfg, slot->host->base + MIO_EMM_CFG(host));
        udelay(10);

        /* Program initial clock speed and power. */
        cvm_mmc_set_clock(slot, slot->mmc->f_min);
        emm_switch = FIELD_PREP(MIO_EMM_SWITCH_POWER_CLASS, 10);
        emm_switch |= FIELD_PREP(MIO_EMM_SWITCH_CLK_HI,
                                 (host->sys_freq / slot->clock) / 2);
        emm_switch |= FIELD_PREP(MIO_EMM_SWITCH_CLK_LO,
                                 (host->sys_freq / slot->clock) / 2);

        /* Make the changes take effect on this bus slot. */
        set_bus_id(&emm_switch, slot->bus_id);
        do_switch(host, emm_switch);

        slot->cached_switch = emm_switch;

        /*
         * Set watchdog timeout value and default reset value
         * for the mask register. Finally, set the CARD_RCA
         * bit so that we can get the card address relative
         * to the CMD register for CMD7 transactions.
         */
        set_wdog(slot, 0);
        writeq(0xe4390080ull, host->base + MIO_EMM_STS_MASK(host));
        writeq(1, host->base + MIO_EMM_RCA(host));
        return 0;
}

static int cvm_mmc_of_parse(struct device *dev, struct cvm_mmc_slot *slot)
{
        u32 id, cmd_skew = 0, dat_skew = 0, bus_width = 0;
        struct device_node *node = dev->of_node;
        struct mmc_host *mmc = slot->mmc;
        u64 clock_period;
        int ret;

        ret = of_property_read_u32(node, "reg", &id);
        if (ret) {
                dev_err(dev, "Missing or invalid reg property on %pOF\n", node);
                return ret;
        }

        if (id >= CAVIUM_MAX_MMC || slot->host->slot[id]) {
                dev_err(dev, "Invalid reg property on %pOF\n", node);
                return -EINVAL;
        }

        ret = mmc_regulator_get_supply(mmc);
        if (ret)
                return ret;
        /*
         * Legacy Octeon firmware has no regulator entry; fall back to
         * a hard-coded voltage to get a sane OCR.
         */
        if (IS_ERR(mmc->supply.vmmc))
                mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;

        /* Common MMC bindings */
        ret = mmc_of_parse(mmc);
        if (ret)
                return ret;

        /* Set bus width */
        if (!(mmc->caps & (MMC_CAP_8_BIT_DATA | MMC_CAP_4_BIT_DATA))) {
                of_property_read_u32(node, "cavium,bus-max-width", &bus_width);
                if (bus_width == 8)
                        mmc->caps |= MMC_CAP_8_BIT_DATA | MMC_CAP_4_BIT_DATA;
                else if (bus_width == 4)
                        mmc->caps |= MMC_CAP_4_BIT_DATA;
        }

        /* Set maximum and minimum frequency */
        if (!mmc->f_max)
                of_property_read_u32(node, "spi-max-frequency", &mmc->f_max);
        if (!mmc->f_max || mmc->f_max > 52000000)
                mmc->f_max = 52000000;
        mmc->f_min = 400000;

        /* Sampling register settings, period in picoseconds */
        clock_period = 1000000000000ull / slot->host->sys_freq;
        of_property_read_u32(node, "cavium,cmd-clk-skew", &cmd_skew);
        of_property_read_u32(node, "cavium,dat-clk-skew", &dat_skew);
        slot->cmd_cnt = (cmd_skew + clock_period / 2) / clock_period;
        slot->dat_cnt = (dat_skew + clock_period / 2) / clock_period;

        return id;
}

int cvm_mmc_of_slot_probe(struct device *dev, struct cvm_mmc_host *host)
{
        struct cvm_mmc_slot *slot;
        struct mmc_host *mmc;
        int ret, id;

        mmc = mmc_alloc_host(sizeof(struct cvm_mmc_slot), dev);
        if (!mmc)
                return -ENOMEM;

        slot = mmc_priv(mmc);
        slot->mmc = mmc;
        slot->host = host;

        ret = cvm_mmc_of_parse(dev, slot);
        if (ret < 0)
                goto error;
        id = ret;

        /* Set up host parameters */
        mmc->ops = &cvm_mmc_ops;

        /*
         * We have only a 3.3v supply, so we cannot support any of the
         * UHS modes. We do support the high speed DDR modes up to
         * 52MHz.
         *
         * Disable bounce buffers for max_segs = 1.
         */
        mmc->caps |= MMC_CAP_MMC_HIGHSPEED | MMC_CAP_SD_HIGHSPEED |
                     MMC_CAP_CMD23 | MMC_CAP_POWER_OFF_CARD | MMC_CAP_3_3V_DDR;

        if (host->use_sg)
                mmc->max_segs = 16;
        else
                mmc->max_segs = 1;

        /* DMA size field can address up to 8 MB */
        mmc->max_seg_size = min_t(unsigned int, 8 * 1024 * 1024,
                                  dma_get_max_seg_size(host->dev));
        mmc->max_req_size = mmc->max_seg_size;
        /* External DMA is in 512 byte blocks */
        mmc->max_blk_size = 512;
        /* DMA block count field is 15 bits */
        mmc->max_blk_count = 32767;

        slot->clock = mmc->f_min;
        slot->bus_id = id;
        slot->cached_rca = 1;

        host->acquire_bus(host);
        host->slot[id] = slot;
        cvm_mmc_switch_to(slot);
        cvm_mmc_init_lowlevel(slot);
        host->release_bus(host);

        ret = mmc_add_host(mmc);
        if (ret) {
                dev_err(dev, "mmc_add_host() returned %d\n", ret);
                slot->host->slot[id] = NULL;
                goto error;
        }
        return 0;

error:
        mmc_free_host(slot->mmc);
        return ret;
}

int cvm_mmc_of_slot_remove(struct cvm_mmc_slot *slot)
{
        mmc_remove_host(slot->mmc);
        slot->host->slot[slot->bus_id] = NULL;
        mmc_free_host(slot->mmc);
        return 0;
}