linux/drivers/mmc/core/mmc_ops.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *  linux/drivers/mmc/core/mmc_ops.c
 *
 *  Copyright 2006-2007 Pierre Ossman
 */

#include <linux/slab.h>
#include <linux/export.h>
#include <linux/types.h>
#include <linux/scatterlist.h>

#include <linux/mmc/host.h>
#include <linux/mmc/card.h>
#include <linux/mmc/mmc.h>

#include "core.h"
#include "card.h"
#include "host.h"
#include "mmc_ops.h"

#define MMC_BKOPS_TIMEOUT_MS            (120 * 1000) /* 120s */
#define MMC_SANITIZE_TIMEOUT_MS         (240 * 1000) /* 240s */

static const u8 tuning_blk_pattern_4bit[] = {
        0xff, 0x0f, 0xff, 0x00, 0xff, 0xcc, 0xc3, 0xcc,
        0xc3, 0x3c, 0xcc, 0xff, 0xfe, 0xff, 0xfe, 0xef,
        0xff, 0xdf, 0xff, 0xdd, 0xff, 0xfb, 0xff, 0xfb,
        0xbf, 0xff, 0x7f, 0xff, 0x77, 0xf7, 0xbd, 0xef,
        0xff, 0xf0, 0xff, 0xf0, 0x0f, 0xfc, 0xcc, 0x3c,
        0xcc, 0x33, 0xcc, 0xcf, 0xff, 0xef, 0xff, 0xee,
        0xff, 0xfd, 0xff, 0xfd, 0xdf, 0xff, 0xbf, 0xff,
        0xbb, 0xff, 0xf7, 0xff, 0xf7, 0x7f, 0x7b, 0xde,
};

static const u8 tuning_blk_pattern_8bit[] = {
        0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00, 0x00,
        0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc, 0xcc,
        0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff, 0xff,
        0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee, 0xff,
        0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd, 0xdd,
        0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff, 0xbb,
        0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff, 0xff,
        0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee, 0xff,
        0xff, 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00,
        0x00, 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc,
        0xcc, 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff,
        0xff, 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee,
        0xff, 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd,
        0xdd, 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff,
        0xbb, 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff,
        0xff, 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee,
};

struct mmc_busy_data {
        struct mmc_card *card;
        bool retry_crc_err;
        enum mmc_busy_cmd busy_cmd;
};

int __mmc_send_status(struct mmc_card *card, u32 *status, unsigned int retries)
{
        int err;
        struct mmc_command cmd = {};

        cmd.opcode = MMC_SEND_STATUS;
        if (!mmc_host_is_spi(card->host))
                cmd.arg = card->rca << 16;
        cmd.flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC;

        err = mmc_wait_for_cmd(card->host, &cmd, retries);
        if (err)
                return err;

        /* NOTE: callers are required to understand the difference
         * between "native" and SPI format status words!
         */
        if (status)
                *status = cmd.resp[0];

        return 0;
}
EXPORT_SYMBOL_GPL(__mmc_send_status);

int mmc_send_status(struct mmc_card *card, u32 *status)
{
        return __mmc_send_status(card, status, MMC_CMD_RETRIES);
}
EXPORT_SYMBOL_GPL(mmc_send_status);
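
/*
 * Illustrative sketch (editorial, not part of the driver): a caller usually
 * decodes the returned R1 status with the R1_CURRENT_STATE() helper, e.g. to
 * check that the card is back in the transfer state:
 *
 *      u32 status;
 *      int err = mmc_send_status(card, &status);
 *
 *      if (!err && R1_CURRENT_STATE(status) == R1_STATE_TRAN)
 *              (the card is ready for the next data command)
 */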

static int _mmc_select_card(struct mmc_host *host, struct mmc_card *card)
{
        struct mmc_command cmd = {};

        cmd.opcode = MMC_SELECT_CARD;

        if (card) {
                cmd.arg = card->rca << 16;
                cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
        } else {
                cmd.arg = 0;
                cmd.flags = MMC_RSP_NONE | MMC_CMD_AC;
        }

        return mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
}

int mmc_select_card(struct mmc_card *card)
{
        return _mmc_select_card(card->host, card);
}

int mmc_deselect_cards(struct mmc_host *host)
{
        return _mmc_select_card(host, NULL);
}

/*
 * Write the value specified in the device tree or board code into the optional
 * 16 bit Driver Stage Register. This can be used to tune rise/fall times and
 * drive strength of the DAT and CMD outputs. The actual meaning of a given
 * value is hardware dependent.
 * The presence of the DSR register can be determined from the CSD register,
 * bit 76.
 */
int mmc_set_dsr(struct mmc_host *host)
{
        struct mmc_command cmd = {};

        cmd.opcode = MMC_SET_DSR;

        cmd.arg = (host->dsr << 16) | 0xffff;
        cmd.flags = MMC_RSP_NONE | MMC_CMD_AC;

        return mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
}
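
/*
 * Illustrative sketch (editorial): the DSR value normally comes from the
 * standard "dsr" device tree property, which mmc_of_parse() stores in
 * host->dsr and marks valid via host->dsr_req; the core then calls
 * mmc_set_dsr() during card initialization. A board might specify e.g.:
 *
 *      &mmc0 {
 *              dsr = <0x0404>;
 *      };
 */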

int mmc_go_idle(struct mmc_host *host)
{
        int err;
        struct mmc_command cmd = {};

        /*
         * Non-SPI hosts need to prevent chipselect going active during
         * GO_IDLE; that would put chips into SPI mode.  Remind them of
         * that in case of hardware that won't pull up DAT3/nCS otherwise.
         *
         * SPI hosts ignore ios.chip_select; it's managed according to
         * rules that must accommodate non-MMC slaves which this layer
         * won't even know about.
         */
        if (!mmc_host_is_spi(host)) {
                mmc_set_chip_select(host, MMC_CS_HIGH);
                mmc_delay(1);
        }

        cmd.opcode = MMC_GO_IDLE_STATE;
        cmd.arg = 0;
        cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_NONE | MMC_CMD_BC;

        err = mmc_wait_for_cmd(host, &cmd, 0);

        mmc_delay(1);

        if (!mmc_host_is_spi(host)) {
                mmc_set_chip_select(host, MMC_CS_DONTCARE);
                mmc_delay(1);
        }

        host->use_spi_crc = 0;

        return err;
}

int mmc_send_op_cond(struct mmc_host *host, u32 ocr, u32 *rocr)
{
        struct mmc_command cmd = {};
        int i, err = 0;

        cmd.opcode = MMC_SEND_OP_COND;
        cmd.arg = mmc_host_is_spi(host) ? 0 : ocr;
        cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R3 | MMC_CMD_BCR;

        for (i = 100; i; i--) {
                err = mmc_wait_for_cmd(host, &cmd, 0);
                if (err)
                        break;

                /* wait until reset completes */
                if (mmc_host_is_spi(host)) {
                        if (!(cmd.resp[0] & R1_SPI_IDLE))
                                break;
                } else {
                        if (cmd.resp[0] & MMC_CARD_BUSY)
                                break;
                }

                err = -ETIMEDOUT;

                mmc_delay(10);

                /*
                 * According to eMMC specification v5.1 section 6.4.3, we
                 * should issue CMD1 repeatedly in the idle state until
                 * the eMMC is ready. Otherwise some eMMC devices seem to enter
                 * the inactive mode after mmc_init_card() issued CMD0 when
                 * the eMMC device is busy.
                 */
                if (!ocr && !mmc_host_is_spi(host))
                        cmd.arg = cmd.resp[0] | BIT(30);
        }

        if (rocr && !mmc_host_is_spi(host))
                *rocr = cmd.resp[0];

        return err;
}
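
/*
 * Illustrative sketch (editorial): during attach, the core typically probes
 * the OCR with a zero argument first and only then starts initialization
 * with a negotiated voltage window, roughly:
 *
 *      u32 ocr, rocr;
 *
 *      mmc_send_op_cond(host, 0, &ocr);                  (query only, no init)
 *      ocr = mmc_select_voltage(host, ocr);
 *      mmc_send_op_cond(host, ocr | (1 << 30), &rocr);   (init, sector mode)
 */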

int mmc_set_relative_addr(struct mmc_card *card)
{
        struct mmc_command cmd = {};

        cmd.opcode = MMC_SET_RELATIVE_ADDR;
        cmd.arg = card->rca << 16;
        cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;

        return mmc_wait_for_cmd(card->host, &cmd, MMC_CMD_RETRIES);
}

static int
mmc_send_cxd_native(struct mmc_host *host, u32 arg, u32 *cxd, int opcode)
{
        int err;
        struct mmc_command cmd = {};

        cmd.opcode = opcode;
        cmd.arg = arg;
        cmd.flags = MMC_RSP_R2 | MMC_CMD_AC;

        err = mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
        if (err)
                return err;

        memcpy(cxd, cmd.resp, sizeof(u32) * 4);

        return 0;
}

/*
 * NOTE: the caller must provide either a DMA-capable buffer for @buf or an
 * on-stack buffer (the latter incurs some bounce-buffering overhead in the
 * callee).
 */
int mmc_send_adtc_data(struct mmc_card *card, struct mmc_host *host, u32 opcode,
                       u32 args, void *buf, unsigned len)
{
        struct mmc_request mrq = {};
        struct mmc_command cmd = {};
        struct mmc_data data = {};
        struct scatterlist sg;

        mrq.cmd = &cmd;
        mrq.data = &data;

        cmd.opcode = opcode;
        cmd.arg = args;

        /* NOTE HACK:  the MMC_RSP_SPI_R1 is always correct here, but we
         * rely on callers to never use this with "native" calls for reading
         * CSD or CID.  Native versions of those commands use the R2 type,
         * not R1 plus a data block.
         */
        cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;

        data.blksz = len;
        data.blocks = 1;
        data.flags = MMC_DATA_READ;
        data.sg = &sg;
        data.sg_len = 1;

        sg_init_one(&sg, buf, len);

        if (opcode == MMC_SEND_CSD || opcode == MMC_SEND_CID) {
                /*
                 * The spec states that CSD and CID accesses have a timeout
                 * of 64 clock cycles.
                 */
                data.timeout_ns = 0;
                data.timeout_clks = 64;
        } else {
                mmc_set_data_timeout(&data, card);
        }

        mmc_wait_for_req(host, &mrq);

        if (cmd.error)
                return cmd.error;
        if (data.error)
                return data.error;

        return 0;
}

static int mmc_spi_send_cxd(struct mmc_host *host, u32 *cxd, u32 opcode)
{
        int ret, i;
        __be32 *cxd_tmp;

        cxd_tmp = kzalloc(16, GFP_KERNEL);
        if (!cxd_tmp)
                return -ENOMEM;

        ret = mmc_send_adtc_data(NULL, host, opcode, 0, cxd_tmp, 16);
        if (ret)
                goto err;

        for (i = 0; i < 4; i++)
                cxd[i] = be32_to_cpu(cxd_tmp[i]);

err:
        kfree(cxd_tmp);
        return ret;
}

int mmc_send_csd(struct mmc_card *card, u32 *csd)
{
        if (mmc_host_is_spi(card->host))
                return mmc_spi_send_cxd(card->host, csd, MMC_SEND_CSD);

        return mmc_send_cxd_native(card->host, card->rca << 16, csd,
                                MMC_SEND_CSD);
}

int mmc_send_cid(struct mmc_host *host, u32 *cid)
{
        if (mmc_host_is_spi(host))
                return mmc_spi_send_cxd(host, cid, MMC_SEND_CID);

        return mmc_send_cxd_native(host, 0, cid, MMC_ALL_SEND_CID);
}

int mmc_get_ext_csd(struct mmc_card *card, u8 **new_ext_csd)
{
        int err;
        u8 *ext_csd;

        if (!card || !new_ext_csd)
                return -EINVAL;

        if (!mmc_can_ext_csd(card))
                return -EOPNOTSUPP;

        /*
         * As the ext_csd is so large and mostly unused, we don't store the
         * raw block in mmc_card.
         */
        ext_csd = kzalloc(512, GFP_KERNEL);
        if (!ext_csd)
                return -ENOMEM;

        err = mmc_send_adtc_data(card, card->host, MMC_SEND_EXT_CSD, 0, ext_csd,
                                512);
        if (err)
                kfree(ext_csd);
        else
                *new_ext_csd = ext_csd;

        return err;
}
EXPORT_SYMBOL_GPL(mmc_get_ext_csd);
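
/*
 * Illustrative sketch (editorial): callers receive a freshly allocated
 * 512-byte EXT_CSD image and own the allocation, e.g.:
 *
 *      u8 *ext_csd;
 *      int err = mmc_get_ext_csd(card, &ext_csd);
 *
 *      if (!err) {
 *              u8 rev = ext_csd[EXT_CSD_REV];
 *              kfree(ext_csd);
 *      }
 */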

int mmc_spi_read_ocr(struct mmc_host *host, int highcap, u32 *ocrp)
{
        struct mmc_command cmd = {};
        int err;

        cmd.opcode = MMC_SPI_READ_OCR;
        cmd.arg = highcap ? (1 << 30) : 0;
        cmd.flags = MMC_RSP_SPI_R3;

        err = mmc_wait_for_cmd(host, &cmd, 0);

        *ocrp = cmd.resp[1];
        return err;
}

int mmc_spi_set_crc(struct mmc_host *host, int use_crc)
{
        struct mmc_command cmd = {};
        int err;

        cmd.opcode = MMC_SPI_CRC_ON_OFF;
        cmd.flags = MMC_RSP_SPI_R1;
        cmd.arg = use_crc;

        err = mmc_wait_for_cmd(host, &cmd, 0);
        if (!err)
                host->use_spi_crc = use_crc;
        return err;
}

static int mmc_switch_status_error(struct mmc_host *host, u32 status)
{
        if (mmc_host_is_spi(host)) {
                if (status & R1_SPI_ILLEGAL_COMMAND)
                        return -EBADMSG;
        } else {
                if (R1_STATUS(status))
                        pr_warn("%s: unexpected status %#x after switch\n",
                                mmc_hostname(host), status);
                if (status & R1_SWITCH_ERROR)
                        return -EBADMSG;
        }
        return 0;
}

/* Caller must hold re-tuning */
int mmc_switch_status(struct mmc_card *card, bool crc_err_fatal)
{
        u32 status;
        int err;

        err = mmc_send_status(card, &status);
        if (!crc_err_fatal && err == -EILSEQ)
                return 0;
        if (err)
                return err;

        return mmc_switch_status_error(card->host, status);
}

static int mmc_busy_cb(void *cb_data, bool *busy)
{
        struct mmc_busy_data *data = cb_data;
        struct mmc_host *host = data->card->host;
        u32 status = 0;
        int err;

        if (data->busy_cmd != MMC_BUSY_IO && host->ops->card_busy) {
                *busy = host->ops->card_busy(host);
                return 0;
        }

        err = mmc_send_status(data->card, &status);
        if (data->retry_crc_err && err == -EILSEQ) {
                *busy = true;
                return 0;
        }
        if (err)
                return err;

        switch (data->busy_cmd) {
        case MMC_BUSY_CMD6:
                err = mmc_switch_status_error(host, status);
                break;
        case MMC_BUSY_ERASE:
                err = R1_STATUS(status) ? -EIO : 0;
                break;
        case MMC_BUSY_HPI:
        case MMC_BUSY_EXTR_SINGLE:
        case MMC_BUSY_IO:
                break;
        default:
                err = -EINVAL;
        }

        if (err)
                return err;

        *busy = !mmc_ready_for_data(status);
        return 0;
}

int __mmc_poll_for_busy(struct mmc_card *card, unsigned int timeout_ms,
                        int (*busy_cb)(void *cb_data, bool *busy),
                        void *cb_data)
{
        struct mmc_host *host = card->host;
        int err;
        unsigned long timeout;
        unsigned int udelay = 32, udelay_max = 32768;
        bool expired = false;
        bool busy = false;

        timeout = jiffies + msecs_to_jiffies(timeout_ms) + 1;
        do {
                /*
                 * Due to the possibility of being preempted while polling,
                 * check the expiration time first.
                 */
                expired = time_after(jiffies, timeout);

                err = (*busy_cb)(cb_data, &busy);
                if (err)
                        return err;

                /* Timeout if the device still remains busy. */
                if (expired && busy) {
                        pr_err("%s: Card stuck being busy! %s\n",
                                mmc_hostname(host), __func__);
                        return -ETIMEDOUT;
                }

                /* Throttle the polling rate to avoid hogging the CPU. */
                if (busy) {
                        usleep_range(udelay, udelay * 2);
                        if (udelay < udelay_max)
                                udelay *= 2;
                }
        } while (busy);

        return 0;
}
EXPORT_SYMBOL_GPL(__mmc_poll_for_busy);
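
/*
 * Illustrative sketch (editorial): callers with special completion semantics
 * may pass their own callback instead of the default CMD13-based one. The
 * callback sets *busy and returns 0, or returns a negative errno to abort
 * the polling, e.g. (my_ctx and my_device_ready are hypothetical):
 *
 *      static int my_busy_cb(void *cb_data, bool *busy)
 *      {
 *              struct my_ctx *ctx = cb_data;
 *
 *              *busy = !my_device_ready(ctx);
 *              return 0;
 *      }
 *
 *      err = __mmc_poll_for_busy(card, timeout_ms, &my_busy_cb, &ctx);
 */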

int mmc_poll_for_busy(struct mmc_card *card, unsigned int timeout_ms,
                      bool retry_crc_err, enum mmc_busy_cmd busy_cmd)
{
        struct mmc_busy_data cb_data;

        cb_data.card = card;
        cb_data.retry_crc_err = retry_crc_err;
        cb_data.busy_cmd = busy_cmd;

        return __mmc_poll_for_busy(card, timeout_ms, &mmc_busy_cb, &cb_data);
}
EXPORT_SYMBOL_GPL(mmc_poll_for_busy);

bool mmc_prepare_busy_cmd(struct mmc_host *host, struct mmc_command *cmd,
                          unsigned int timeout_ms)
{
        /*
         * If the max_busy_timeout of the host is specified, make sure it's
         * large enough to fit the used timeout_ms. In case it's not, let's
         * instruct the host to avoid HW busy detection, by converting to a R1
         * response instead of a R1B. Note, some hosts require R1B, which also
         * means they are on their own when it comes to dealing with the busy
         * timeout.
         */
        if (!(host->caps & MMC_CAP_NEED_RSP_BUSY) && host->max_busy_timeout &&
            (timeout_ms > host->max_busy_timeout)) {
                cmd->flags = MMC_CMD_AC | MMC_RSP_SPI_R1 | MMC_RSP_R1;
                return false;
        }

        cmd->flags = MMC_CMD_AC | MMC_RSP_SPI_R1B | MMC_RSP_R1B;
        cmd->busy_timeout = timeout_ms;
        return true;
}

/**
 *      __mmc_switch - modify EXT_CSD register
 *      @card: the MMC card associated with the data transfer
 *      @set: cmd set values
 *      @index: EXT_CSD register index
 *      @value: value to program into EXT_CSD register
 *      @timeout_ms: timeout (ms) for operation performed by register write,
 *                   timeout of zero implies maximum possible timeout
 *      @timing: new timing to change to
 *      @send_status: send status cmd to poll for busy
 *      @retry_crc_err: retry on CRC errors when polling with CMD13 for busy
 *      @retries: number of retries
 *
 *      Modifies the EXT_CSD register for selected card.
 */
int __mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
                unsigned int timeout_ms, unsigned char timing,
                bool send_status, bool retry_crc_err, unsigned int retries)
{
        struct mmc_host *host = card->host;
        int err;
        struct mmc_command cmd = {};
        bool use_r1b_resp;
        unsigned char old_timing = host->ios.timing;

        mmc_retune_hold(host);

        if (!timeout_ms) {
                pr_warn("%s: unspecified timeout for CMD6 - use generic\n",
                        mmc_hostname(host));
                timeout_ms = card->ext_csd.generic_cmd6_time;
        }

        cmd.opcode = MMC_SWITCH;
        cmd.arg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) |
                  (index << 16) |
                  (value << 8) |
                  set;
        use_r1b_resp = mmc_prepare_busy_cmd(host, &cmd, timeout_ms);

        err = mmc_wait_for_cmd(host, &cmd, retries);
        if (err)
                goto out;

        /* If SPI, or if HW busy detection was used above, we don't need to poll. */
        if (((host->caps & MMC_CAP_WAIT_WHILE_BUSY) && use_r1b_resp) ||
                mmc_host_is_spi(host))
                goto out_tim;

        /*
         * If the host doesn't support HW polling via the ->card_busy() op and
         * polling with CMD13 isn't allowed, we must rely on waiting out the
         * stated timeout.
         */
        if (!send_status && !host->ops->card_busy) {
                mmc_delay(timeout_ms);
                goto out_tim;
        }

        /* Let's try to poll to find out when the command is completed. */
        err = mmc_poll_for_busy(card, timeout_ms, retry_crc_err, MMC_BUSY_CMD6);
        if (err)
                goto out;

out_tim:
        /* Switch to the new timing before checking the switch status. */
        if (timing)
                mmc_set_timing(host, timing);

        if (send_status) {
                err = mmc_switch_status(card, true);
                if (err && timing)
                        mmc_set_timing(host, old_timing);
        }
out:
        mmc_retune_release(host);

        return err;
}

int mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
                unsigned int timeout_ms)
{
        return __mmc_switch(card, set, index, value, timeout_ms, 0,
                            true, false, MMC_CMD_RETRIES);
}
EXPORT_SYMBOL_GPL(mmc_switch);
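
/*
 * Illustrative sketch (editorial): a typical EXT_CSD byte write, e.g.
 * enabling high speed timing, looks like:
 *
 *      err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_HS_TIMING,
 *                       EXT_CSD_TIMING_HS, card->ext_csd.generic_cmd6_time);
 */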

int mmc_send_tuning(struct mmc_host *host, u32 opcode, int *cmd_error)
{
        struct mmc_request mrq = {};
        struct mmc_command cmd = {};
        struct mmc_data data = {};
        struct scatterlist sg;
        struct mmc_ios *ios = &host->ios;
        const u8 *tuning_block_pattern;
        int size, err = 0;
        u8 *data_buf;

        if (ios->bus_width == MMC_BUS_WIDTH_8) {
                tuning_block_pattern = tuning_blk_pattern_8bit;
                size = sizeof(tuning_blk_pattern_8bit);
        } else if (ios->bus_width == MMC_BUS_WIDTH_4) {
                tuning_block_pattern = tuning_blk_pattern_4bit;
                size = sizeof(tuning_blk_pattern_4bit);
        } else {
                return -EINVAL;
        }

        data_buf = kzalloc(size, GFP_KERNEL);
        if (!data_buf)
                return -ENOMEM;

        mrq.cmd = &cmd;
        mrq.data = &data;

        cmd.opcode = opcode;
        cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;

        data.blksz = size;
        data.blocks = 1;
        data.flags = MMC_DATA_READ;

        /*
         * According to the tuning specs, the tuning process normally takes
         * fewer than 40 executions of CMD19, and the timeout value should be
         * shorter than 150 ms.
         */
        data.timeout_ns = 150 * NSEC_PER_MSEC;

        data.sg = &sg;
        data.sg_len = 1;
        sg_init_one(&sg, data_buf, size);

        mmc_wait_for_req(host, &mrq);

        if (cmd_error)
                *cmd_error = cmd.error;

        if (cmd.error) {
                err = cmd.error;
                goto out;
        }

        if (data.error) {
                err = data.error;
                goto out;
        }

        if (memcmp(data_buf, tuning_block_pattern, size))
                err = -EIO;

out:
        kfree(data_buf);
        return err;
}
EXPORT_SYMBOL_GPL(mmc_send_tuning);
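
/*
 * Illustrative sketch (editorial): host drivers typically call this from
 * their ->execute_tuning() hook after adjusting a sampling delay, keeping
 * the settings for which the pattern reads back intact, e.g.:
 *
 *      static int my_execute_tuning(struct mmc_host *mmc, u32 opcode)
 *      {
 *              (hypothetical host driver: sweep delay taps, keep the best)
 *              return mmc_send_tuning(mmc, opcode, NULL);
 *      }
 */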

int mmc_send_abort_tuning(struct mmc_host *host, u32 opcode)
{
        struct mmc_command cmd = {};

        /*
         * The eMMC specification allows CMD12 to stop a tuning command, but
         * the SD specification does not, so do nothing unless it is eMMC.
         */
        if (opcode != MMC_SEND_TUNING_BLOCK_HS200)
                return 0;

        cmd.opcode = MMC_STOP_TRANSMISSION;
        cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;

        /*
         * For drivers that override R1 to R1b, set an arbitrary timeout based
         * on the tuning timeout, i.e. 150ms.
         */
        cmd.busy_timeout = 150;

        return mmc_wait_for_cmd(host, &cmd, 0);
}
EXPORT_SYMBOL_GPL(mmc_send_abort_tuning);

static int
mmc_send_bus_test(struct mmc_card *card, struct mmc_host *host, u8 opcode,
                  u8 len)
{
        struct mmc_request mrq = {};
        struct mmc_command cmd = {};
        struct mmc_data data = {};
        struct scatterlist sg;
        u8 *data_buf;
        u8 *test_buf;
        int i, err;
        static u8 testdata_8bit[8] = { 0x55, 0xaa, 0, 0, 0, 0, 0, 0 };
        static u8 testdata_4bit[4] = { 0x5a, 0, 0, 0 };

        /*
         * DMA onto the stack is unsafe/nonportable, so use a kmalloc'd
         * bounce buffer for the data transfer instead.
         */
        data_buf = kmalloc(len, GFP_KERNEL);
        if (!data_buf)
                return -ENOMEM;

        if (len == 8) {
                test_buf = testdata_8bit;
        } else if (len == 4) {
                test_buf = testdata_4bit;
        } else {
                pr_err("%s: Invalid bus_width %d\n",
                       mmc_hostname(host), len);
                kfree(data_buf);
                return -EINVAL;
        }

        if (opcode == MMC_BUS_TEST_W)
                memcpy(data_buf, test_buf, len);

        mrq.cmd = &cmd;
        mrq.data = &data;
        cmd.opcode = opcode;
        cmd.arg = 0;

        /* NOTE: MMC_RSP_SPI_R1 is always correct here; the bus test
         * commands use an R1 response plus a data block, never R2.
         */
        cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;

        data.blksz = len;
        data.blocks = 1;
        if (opcode == MMC_BUS_TEST_R)
                data.flags = MMC_DATA_READ;
        else
                data.flags = MMC_DATA_WRITE;

        data.sg = &sg;
        data.sg_len = 1;
        mmc_set_data_timeout(&data, card);
        sg_init_one(&sg, data_buf, len);
        mmc_wait_for_req(host, &mrq);
        err = 0;
        if (opcode == MMC_BUS_TEST_R) {
                for (i = 0; i < len / 4; i++)
                        if ((test_buf[i] ^ data_buf[i]) != 0xff) {
                                err = -EIO;
                                break;
                        }
        }
        kfree(data_buf);

        if (cmd.error)
                return cmd.error;
        if (data.error)
                return data.error;

        return err;
}

int mmc_bus_test(struct mmc_card *card, u8 bus_width)
{
        int width;

        if (bus_width == MMC_BUS_WIDTH_8)
                width = 8;
        else if (bus_width == MMC_BUS_WIDTH_4)
                width = 4;
        else if (bus_width == MMC_BUS_WIDTH_1)
                return 0; /* no need for test */
        else
                return -EINVAL;

        /*
         * Ignore errors from BUS_TEST_W.  BUS_TEST_R will fail if there
         * is a problem.  This improves chances that the test will work.
         */
        mmc_send_bus_test(card, card->host, MMC_BUS_TEST_W, width);
        return mmc_send_bus_test(card, card->host, MMC_BUS_TEST_R, width);
}
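
/*
 * Illustrative sketch (editorial): the core can use this during card
 * initialization to validate a freshly selected bus width before trusting
 * it, falling back on failure, e.g.:
 *
 *      err = mmc_bus_test(card, MMC_BUS_WIDTH_8);
 *      if (err)
 *              (retry with MMC_BUS_WIDTH_4, then MMC_BUS_WIDTH_1)
 */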

static int mmc_send_hpi_cmd(struct mmc_card *card)
{
        unsigned int busy_timeout_ms = card->ext_csd.out_of_int_time;
        struct mmc_host *host = card->host;
        bool use_r1b_resp = false;
        struct mmc_command cmd = {};
        int err;

        cmd.opcode = card->ext_csd.hpi_cmd;
        cmd.arg = card->rca << 16 | 1;
        cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;

        if (cmd.opcode == MMC_STOP_TRANSMISSION)
                use_r1b_resp = mmc_prepare_busy_cmd(host, &cmd,
                                                    busy_timeout_ms);

        err = mmc_wait_for_cmd(host, &cmd, 0);
        if (err) {
                pr_warn("%s: HPI error %d. Command response %#x\n",
                        mmc_hostname(host), err, cmd.resp[0]);
                return err;
        }

        /* No need to poll when using HW busy detection. */
        if (host->caps & MMC_CAP_WAIT_WHILE_BUSY && use_r1b_resp)
                return 0;

        /* Let's poll to find out when the HPI request completes. */
        return mmc_poll_for_busy(card, busy_timeout_ms, false, MMC_BUSY_HPI);
}

/**
 *      mmc_interrupt_hpi - issue a High Priority Interrupt
 *      @card: the MMC card associated with the HPI transfer
 *
 *      Issues a High Priority Interrupt (HPI) and polls the card status
 *      until the card leaves the programming state.
 */
static int mmc_interrupt_hpi(struct mmc_card *card)
{
        int err;
        u32 status;

        if (!card->ext_csd.hpi_en) {
                pr_info("%s: HPI enable bit unset\n", mmc_hostname(card->host));
                return 1;
        }

        err = mmc_send_status(card, &status);
        if (err) {
                pr_err("%s: Failed to get card status\n", mmc_hostname(card->host));
                goto out;
        }

        switch (R1_CURRENT_STATE(status)) {
        case R1_STATE_IDLE:
        case R1_STATE_READY:
        case R1_STATE_STBY:
        case R1_STATE_TRAN:
                /*
                 * In idle, ready, stand-by and transfer states, HPI is not
                 * needed and the caller can issue the next intended command
                 * immediately.
                 */
                goto out;
        case R1_STATE_PRG:
                break;
        default:
                /* In all other states, it's illegal to issue HPI */
                pr_debug("%s: HPI cannot be sent. Card state=%d\n",
                        mmc_hostname(card->host), R1_CURRENT_STATE(status));
                err = -EINVAL;
                goto out;
        }

        err = mmc_send_hpi_cmd(card);
out:
        return err;
}

int mmc_can_ext_csd(struct mmc_card *card)
{
        return (card && card->csd.mmca_vsn > CSD_SPEC_VER_3);
}

static int mmc_read_bkops_status(struct mmc_card *card)
{
        int err;
        u8 *ext_csd;

        err = mmc_get_ext_csd(card, &ext_csd);
        if (err)
                return err;

        card->ext_csd.raw_bkops_status = ext_csd[EXT_CSD_BKOPS_STATUS];
        card->ext_csd.raw_exception_status = ext_csd[EXT_CSD_EXP_EVENTS_STATUS];
        kfree(ext_csd);
        return 0;
}

/**
 *      mmc_run_bkops - Run BKOPS for supported cards
 *      @card: MMC card to run BKOPS for
 *
 *      Run background operations synchronously for cards that have manual
 *      BKOPS enabled, in case the card reports an urgent BKOPS level.
 */
void mmc_run_bkops(struct mmc_card *card)
{
        int err;

        if (!card->ext_csd.man_bkops_en)
                return;

        err = mmc_read_bkops_status(card);
        if (err) {
                pr_err("%s: Failed to read bkops status: %d\n",
                       mmc_hostname(card->host), err);
                return;
        }

        if (!card->ext_csd.raw_bkops_status ||
            card->ext_csd.raw_bkops_status < EXT_CSD_BKOPS_LEVEL_2)
                return;

        mmc_retune_hold(card->host);

        /*
         * For urgent BKOPS status, LEVEL_2 and higher, let's execute
         * synchronously. In the future, we may consider starting BKOPS for
         * less urgent levels in an asynchronous background task, when idle.
         */
        err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
                         EXT_CSD_BKOPS_START, 1, MMC_BKOPS_TIMEOUT_MS);
        /*
         * If the BKOPS timed out, the card is probably still busy in the
         * R1_STATE_PRG. Rather than continue to wait, let's try to abort
         * it with a HPI command to get back into R1_STATE_TRAN.
         */
        if (err == -ETIMEDOUT && !mmc_interrupt_hpi(card))
                pr_warn("%s: BKOPS aborted\n", mmc_hostname(card->host));
        else if (err)
                pr_warn("%s: Error %d running bkops\n",
                        mmc_hostname(card->host), err);

        mmc_retune_release(card->host);
}
EXPORT_SYMBOL(mmc_run_bkops);

static int mmc_cmdq_switch(struct mmc_card *card, bool enable)
{
        u8 val = enable ? EXT_CSD_CMDQ_MODE_ENABLED : 0;
        int err;

        if (!card->ext_csd.cmdq_support)
                return -EOPNOTSUPP;

        err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_CMDQ_MODE_EN,
                         val, card->ext_csd.generic_cmd6_time);
        if (!err)
                card->ext_csd.cmdq_en = enable;

        return err;
}

int mmc_cmdq_enable(struct mmc_card *card)
{
        return mmc_cmdq_switch(card, true);
}
EXPORT_SYMBOL_GPL(mmc_cmdq_enable);

int mmc_cmdq_disable(struct mmc_card *card)
{
        return mmc_cmdq_switch(card, false);
}
EXPORT_SYMBOL_GPL(mmc_cmdq_disable);
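
/*
 * Illustrative sketch (editorial): command queuing must be turned off around
 * operations that are not CQE-aware, and restored afterwards, e.g.:
 *
 *      if (card->ext_csd.cmdq_en) {
 *              err = mmc_cmdq_disable(card);
 *              (issue the non-queued operation)
 *              err = mmc_cmdq_enable(card);
 *      }
 */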

int mmc_sanitize(struct mmc_card *card, unsigned int timeout_ms)
{
        struct mmc_host *host = card->host;
        int err;

        if (!mmc_can_sanitize(card)) {
                pr_warn("%s: Sanitize not supported\n", mmc_hostname(host));
                return -EOPNOTSUPP;
        }

        if (!timeout_ms)
                timeout_ms = MMC_SANITIZE_TIMEOUT_MS;

        pr_debug("%s: Sanitize in progress...\n", mmc_hostname(host));

        mmc_retune_hold(host);

        err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_SANITIZE_START,
                           1, timeout_ms, 0, true, false, 0);
        if (err)
                pr_err("%s: Sanitize failed err=%d\n", mmc_hostname(host), err);

        /*
         * If the sanitize operation timed out, the card is probably still busy
         * in the R1_STATE_PRG. Rather than continue to wait, let's try to abort
         * it with a HPI command to get back into R1_STATE_TRAN.
         */
        if (err == -ETIMEDOUT && !mmc_interrupt_hpi(card))
                pr_warn("%s: Sanitize aborted\n", mmc_hostname(host));

        mmc_retune_release(host);

        pr_debug("%s: Sanitize completed\n", mmc_hostname(host));
        return err;
}
EXPORT_SYMBOL_GPL(mmc_sanitize);