linux/drivers/mmc/core/mmc_ops.c
/*
 *  linux/drivers/mmc/core/mmc_ops.c
 *
 *  Copyright 2006-2007 Pierre Ossman
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or (at
 * your option) any later version.
 */

#include <linux/slab.h>
#include <linux/export.h>
#include <linux/types.h>
#include <linux/scatterlist.h>

#include <linux/mmc/host.h>
#include <linux/mmc/card.h>
#include <linux/mmc/mmc.h>

#include "core.h"
#include "host.h"
#include "mmc_ops.h"

#define MMC_OPS_TIMEOUT_MS      (10 * 60 * 1000) /* 10 minute timeout */

static const u8 tuning_blk_pattern_4bit[] = {
        0xff, 0x0f, 0xff, 0x00, 0xff, 0xcc, 0xc3, 0xcc,
        0xc3, 0x3c, 0xcc, 0xff, 0xfe, 0xff, 0xfe, 0xef,
        0xff, 0xdf, 0xff, 0xdd, 0xff, 0xfb, 0xff, 0xfb,
        0xbf, 0xff, 0x7f, 0xff, 0x77, 0xf7, 0xbd, 0xef,
        0xff, 0xf0, 0xff, 0xf0, 0x0f, 0xfc, 0xcc, 0x3c,
        0xcc, 0x33, 0xcc, 0xcf, 0xff, 0xef, 0xff, 0xee,
        0xff, 0xfd, 0xff, 0xfd, 0xdf, 0xff, 0xbf, 0xff,
        0xbb, 0xff, 0xf7, 0xff, 0xf7, 0x7f, 0x7b, 0xde,
};

static const u8 tuning_blk_pattern_8bit[] = {
        0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00, 0x00,
        0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc, 0xcc,
        0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff, 0xff,
        0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee, 0xff,
        0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd, 0xdd,
        0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff, 0xbb,
        0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff, 0xff,
        0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee, 0xff,
        0xff, 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00,
        0x00, 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc,
        0xcc, 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff,
        0xff, 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee,
        0xff, 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd,
        0xdd, 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff,
        0xbb, 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff,
        0xff, 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee,
};

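/*
 * Send CMD13 (SEND_STATUS) and, on success, return the raw status word in
 * *status.  When @ignore_crc is set, the CRC check on the response is
 * skipped; this is used while polling for busy after a switch command.
 */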
static inline int __mmc_send_status(struct mmc_card *card, u32 *status,
                                    bool ignore_crc)
{
        int err;
        struct mmc_command cmd = {0};

        BUG_ON(!card);
        BUG_ON(!card->host);

        cmd.opcode = MMC_SEND_STATUS;
        if (!mmc_host_is_spi(card->host))
                cmd.arg = card->rca << 16;
        cmd.flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC;
        if (ignore_crc)
                cmd.flags &= ~MMC_RSP_CRC;

        err = mmc_wait_for_cmd(card->host, &cmd, MMC_CMD_RETRIES);
        if (err)
                return err;

        /* NOTE: callers are required to understand the difference
         * between "native" and SPI format status words!
         */
        if (status)
                *status = cmd.resp[0];

        return 0;
}

int mmc_send_status(struct mmc_card *card, u32 *status)
{
        return __mmc_send_status(card, status, false);
}

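/*
 * CMD7 (SELECT/DESELECT_CARD): select @card when it is non-NULL, or deselect
 * all cards by sending the command with RCA 0 and no response expected.
 */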
static int _mmc_select_card(struct mmc_host *host, struct mmc_card *card)
{
        int err;
        struct mmc_command cmd = {0};

        BUG_ON(!host);

        cmd.opcode = MMC_SELECT_CARD;

        if (card) {
                cmd.arg = card->rca << 16;
                cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
        } else {
                cmd.arg = 0;
                cmd.flags = MMC_RSP_NONE | MMC_CMD_AC;
        }

        err = mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
        if (err)
                return err;

        return 0;
}

int mmc_select_card(struct mmc_card *card)
{
        BUG_ON(!card);

        return _mmc_select_card(card->host, card);
}

int mmc_deselect_cards(struct mmc_host *host)
{
        return _mmc_select_card(host, NULL);
}

/*
 * Write the value specified in the device tree or board code into the optional
 * 16 bit Driver Stage Register. This can be used to tune rise/fall times and
 * drive strength of the DAT and CMD outputs. The actual meaning of a given
 * value is hardware dependent.
 * The presence of the DSR register can be determined from the CSD register,
 * bit 76.
 */
int mmc_set_dsr(struct mmc_host *host)
{
        struct mmc_command cmd = {0};

        cmd.opcode = MMC_SET_DSR;

        cmd.arg = (host->dsr << 16) | 0xffff;
        cmd.flags = MMC_RSP_NONE | MMC_CMD_AC;

        return mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
}

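/*
 * CMD0 (GO_IDLE_STATE): reset the card to idle state.  For non-SPI hosts the
 * chip select is forced high around the command so the card is not switched
 * into SPI mode by accident.
 */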
int mmc_go_idle(struct mmc_host *host)
{
        int err;
        struct mmc_command cmd = {0};

        /*
         * Non-SPI hosts need to prevent chipselect going active during
         * GO_IDLE; that would put chips into SPI mode.  Remind them of
         * that in case of hardware that won't pull up DAT3/nCS otherwise.
         *
         * SPI hosts ignore ios.chip_select; it's managed according to
         * rules that must accommodate non-MMC slaves which this layer
         * won't even know about.
         */
        if (!mmc_host_is_spi(host)) {
                mmc_set_chip_select(host, MMC_CS_HIGH);
                mmc_delay(1);
        }

        cmd.opcode = MMC_GO_IDLE_STATE;
        cmd.arg = 0;
        cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_NONE | MMC_CMD_BC;

        err = mmc_wait_for_cmd(host, &cmd, 0);

        mmc_delay(1);

        if (!mmc_host_is_spi(host)) {
                mmc_set_chip_select(host, MMC_CS_DONTCARE);
                mmc_delay(1);
        }

        host->use_spi_crc = 0;

        return err;
}

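/*
 * CMD1 (SEND_OP_COND): negotiate the operating voltage range.  The command is
 * retried for up to a second (100 iterations with a 10 ms delay) until the
 * card reports that its power-up sequence has completed; passing @ocr == 0
 * only probes for a card without waiting.
 */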
int mmc_send_op_cond(struct mmc_host *host, u32 ocr, u32 *rocr)
{
        struct mmc_command cmd = {0};
        int i, err = 0;

        BUG_ON(!host);

        cmd.opcode = MMC_SEND_OP_COND;
        cmd.arg = mmc_host_is_spi(host) ? 0 : ocr;
        cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R3 | MMC_CMD_BCR;

        for (i = 100; i; i--) {
                err = mmc_wait_for_cmd(host, &cmd, 0);
                if (err)
                        break;

                /* if we're just probing, do a single pass */
                if (ocr == 0)
                        break;

                /* otherwise wait until reset completes */
                if (mmc_host_is_spi(host)) {
                        if (!(cmd.resp[0] & R1_SPI_IDLE))
                                break;
                } else {
                        if (cmd.resp[0] & MMC_CARD_BUSY)
                                break;
                }

                err = -ETIMEDOUT;

                mmc_delay(10);
        }

        if (rocr && !mmc_host_is_spi(host))
                *rocr = cmd.resp[0];

        return err;
}

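/*
 * CMD2 (ALL_SEND_CID): ask the cards on the bus to send their CID; the four
 * 32-bit response words are copied into @cid.
 */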
int mmc_all_send_cid(struct mmc_host *host, u32 *cid)
{
        int err;
        struct mmc_command cmd = {0};

        BUG_ON(!host);
        BUG_ON(!cid);

        cmd.opcode = MMC_ALL_SEND_CID;
        cmd.arg = 0;
        cmd.flags = MMC_RSP_R2 | MMC_CMD_BCR;

        err = mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
        if (err)
                return err;

        memcpy(cid, cmd.resp, sizeof(u32) * 4);

        return 0;
}

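/*
 * CMD3 (SET_RELATIVE_ADDR): assign the relative card address stored in
 * card->rca to the card.
 */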
int mmc_set_relative_addr(struct mmc_card *card)
{
        int err;
        struct mmc_command cmd = {0};

        BUG_ON(!card);
        BUG_ON(!card->host);

        cmd.opcode = MMC_SET_RELATIVE_ADDR;
        cmd.arg = card->rca << 16;
        cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;

        err = mmc_wait_for_cmd(card->host, &cmd, MMC_CMD_RETRIES);
        if (err)
                return err;

        return 0;
}

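/*
 * Read a 128-bit CSD or CID register over the native (non-SPI) interface,
 * where the register is returned in an R2 response rather than a data block.
 */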
static int
mmc_send_cxd_native(struct mmc_host *host, u32 arg, u32 *cxd, int opcode)
{
        int err;
        struct mmc_command cmd = {0};

        BUG_ON(!host);
        BUG_ON(!cxd);

        cmd.opcode = opcode;
        cmd.arg = arg;
        cmd.flags = MMC_RSP_R2 | MMC_CMD_AC;

        err = mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
        if (err)
                return err;

        memcpy(cxd, cmd.resp, sizeof(u32) * 4);

        return 0;
}

/*
 * NOTE: the caller is required to pass a DMA-capable buffer in @buf, or an
 * on-stack buffer (with some overhead in the callee).
 */
static int
mmc_send_cxd_data(struct mmc_card *card, struct mmc_host *host,
                u32 opcode, void *buf, unsigned len)
{
        struct mmc_request mrq = {NULL};
        struct mmc_command cmd = {0};
        struct mmc_data data = {0};
        struct scatterlist sg;

        mrq.cmd = &cmd;
        mrq.data = &data;

        cmd.opcode = opcode;
        cmd.arg = 0;

        /* NOTE HACK:  the MMC_RSP_SPI_R1 is always correct here, but we
         * rely on callers to never use this with "native" calls for reading
         * CSD or CID.  Native versions of those commands use the R2 type,
         * not R1 plus a data block.
         */
        cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;

        data.blksz = len;
        data.blocks = 1;
        data.flags = MMC_DATA_READ;
        data.sg = &sg;
        data.sg_len = 1;

        sg_init_one(&sg, buf, len);

        if (opcode == MMC_SEND_CSD || opcode == MMC_SEND_CID) {
                /*
                 * The spec states that CSD and CID accesses have a timeout
                 * of 64 clock cycles.
                 */
                data.timeout_ns = 0;
                data.timeout_clks = 64;
        } else
                mmc_set_data_timeout(&data, card);

        mmc_wait_for_req(host, &mrq);

        if (cmd.error)
                return cmd.error;
        if (data.error)
                return data.error;

        return 0;
}

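/*
 * Read the card's CSD register.  Native hosts use CMD9 with an R2 response;
 * SPI hosts read the CSD as a 16-byte data block and convert it from
 * big-endian to host byte order.
 */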
int mmc_send_csd(struct mmc_card *card, u32 *csd)
{
        int ret, i;
        u32 *csd_tmp;

        if (!mmc_host_is_spi(card->host))
                return mmc_send_cxd_native(card->host, card->rca << 16,
                                csd, MMC_SEND_CSD);

        csd_tmp = kzalloc(16, GFP_KERNEL);
        if (!csd_tmp)
                return -ENOMEM;

        ret = mmc_send_cxd_data(card, card->host, MMC_SEND_CSD, csd_tmp, 16);
        if (ret)
                goto err;

        for (i = 0; i < 4; i++)
                csd[i] = be32_to_cpu(csd_tmp[i]);

err:
        kfree(csd_tmp);
        return ret;
}

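/*
 * Read the card's CID register.  Native hosts use CMD10 with an R2 response;
 * SPI hosts read the CID as a 16-byte data block and convert it from
 * big-endian to host byte order.
 */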
int mmc_send_cid(struct mmc_host *host, u32 *cid)
{
        int ret, i;
        u32 *cid_tmp;

        if (!mmc_host_is_spi(host)) {
                if (!host->card)
                        return -EINVAL;
                return mmc_send_cxd_native(host, host->card->rca << 16,
                                cid, MMC_SEND_CID);
        }

        cid_tmp = kzalloc(16, GFP_KERNEL);
        if (!cid_tmp)
                return -ENOMEM;

        ret = mmc_send_cxd_data(NULL, host, MMC_SEND_CID, cid_tmp, 16);
        if (ret)
                goto err;

        for (i = 0; i < 4; i++)
                cid[i] = be32_to_cpu(cid_tmp[i]);

err:
        kfree(cid_tmp);
        return ret;
}

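/*
 * Allocate a 512-byte buffer and read the card's EXT_CSD register
 * (MMC_SEND_EXT_CSD) into it.  On success, ownership of the buffer passes to
 * the caller via @new_ext_csd; on failure the buffer is freed here.
 */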
int mmc_get_ext_csd(struct mmc_card *card, u8 **new_ext_csd)
{
        int err;
        u8 *ext_csd;

        if (!card || !new_ext_csd)
                return -EINVAL;

        if (!mmc_can_ext_csd(card))
                return -EOPNOTSUPP;

        /*
         * As the ext_csd is so large and mostly unused, we don't store the
         * raw block in mmc_card.
         */
        ext_csd = kzalloc(512, GFP_KERNEL);
        if (!ext_csd)
                return -ENOMEM;

        err = mmc_send_cxd_data(card, card->host, MMC_SEND_EXT_CSD, ext_csd,
                                512);
        if (err)
                kfree(ext_csd);
        else
                *new_ext_csd = ext_csd;

        return err;
}
EXPORT_SYMBOL_GPL(mmc_get_ext_csd);

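/*
 * CMD58 (READ_OCR, SPI mode only): read the OCR register into *ocrp.  Bit 30
 * of the argument is set when @highcap is true, i.e. for high-capacity cards.
 */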
int mmc_spi_read_ocr(struct mmc_host *host, int highcap, u32 *ocrp)
{
        struct mmc_command cmd = {0};
        int err;

        cmd.opcode = MMC_SPI_READ_OCR;
        cmd.arg = highcap ? (1 << 30) : 0;
        cmd.flags = MMC_RSP_SPI_R3;

        err = mmc_wait_for_cmd(host, &cmd, 0);

        *ocrp = cmd.resp[1];
        return err;
}

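/*
 * CMD59 (CRC_ON_OFF, SPI mode only): enable or disable CRC checking on the
 * SPI bus and mirror the new setting in host->use_spi_crc.
 */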
int mmc_spi_set_crc(struct mmc_host *host, int use_crc)
{
        struct mmc_command cmd = {0};
        int err;

        cmd.opcode = MMC_SPI_CRC_ON_OFF;
        cmd.flags = MMC_RSP_SPI_R1;
        cmd.arg = use_crc;

        err = mmc_wait_for_cmd(host, &cmd, 0);
        if (!err)
                host->use_spi_crc = use_crc;
        return err;
}

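/*
 * Translate a card status word returned after a SWITCH command into an error
 * code.  On native hosts, any unexpected status bits are reported with a
 * warning.
 */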
int mmc_switch_status_error(struct mmc_host *host, u32 status)
{
        if (mmc_host_is_spi(host)) {
                if (status & R1_SPI_ILLEGAL_COMMAND)
                        return -EBADMSG;
        } else {
                if (status & 0xFDFFA000)
                        pr_warn("%s: unexpected status %#x after switch\n",
                                mmc_hostname(host), status);
                if (status & R1_SWITCH_ERROR)
                        return -EBADMSG;
        }
        return 0;
}

/**
 *      __mmc_switch - modify EXT_CSD register
 *      @card: the MMC card associated with the data transfer
 *      @set: cmd set values
 *      @index: EXT_CSD register index
 *      @value: value to program into EXT_CSD register
 *      @timeout_ms: timeout (ms) for operation performed by register write,
 *                   timeout of zero implies maximum possible timeout
 *      @use_busy_signal: use the busy signal as response type
 *      @send_status: send status cmd to poll for busy
 *      @ignore_crc: ignore CRC errors when sending status cmd to poll for busy
 *
 *      Modifies the EXT_CSD register for selected card.
 */
int __mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
                unsigned int timeout_ms, bool use_busy_signal, bool send_status,
                bool ignore_crc)
{
        struct mmc_host *host = card->host;
        int err;
        struct mmc_command cmd = {0};
        unsigned long timeout;
        u32 status = 0;
        bool use_r1b_resp = use_busy_signal;

        mmc_retune_hold(host);

        /*
         * If the cmd timeout and the max_busy_timeout of the host are both
         * specified, let's validate them. A failure means we need to prevent
         * the host from doing hw busy detection, which is done by converting
         * to an R1 response instead of an R1B.
         */
        if (timeout_ms && host->max_busy_timeout &&
                (timeout_ms > host->max_busy_timeout))
                use_r1b_resp = false;

        cmd.opcode = MMC_SWITCH;
        cmd.arg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) |
                  (index << 16) |
                  (value << 8) |
                  set;
        cmd.flags = MMC_CMD_AC;
        if (use_r1b_resp) {
                cmd.flags |= MMC_RSP_SPI_R1B | MMC_RSP_R1B;
                /*
                 * A busy_timeout of zero means the host can decide to use
                 * whatever value it finds suitable.
                 */
                cmd.busy_timeout = timeout_ms;
        } else {
                cmd.flags |= MMC_RSP_SPI_R1 | MMC_RSP_R1;
        }

        if (index == EXT_CSD_SANITIZE_START)
                cmd.sanitize_busy = true;

        err = mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
        if (err)
                goto out;

        /* No need to check card status in case of unblocking command */
        if (!use_busy_signal)
                goto out;

        /*
         * CRC errors shall only be ignored in cases where CMD13 is used to
         * poll to detect busy completion.
         */
        if ((host->caps & MMC_CAP_WAIT_WHILE_BUSY) && use_r1b_resp)
                ignore_crc = false;

        /* We have an unspecified cmd timeout, use the fallback value. */
        if (!timeout_ms)
                timeout_ms = MMC_OPS_TIMEOUT_MS;

        /* Must check status to be sure of no errors. */
        timeout = jiffies + msecs_to_jiffies(timeout_ms);
        do {
                if (send_status) {
                        err = __mmc_send_status(card, &status, ignore_crc);
                        if (err)
                                goto out;
                }
                if ((host->caps & MMC_CAP_WAIT_WHILE_BUSY) && use_r1b_resp)
                        break;
                if (mmc_host_is_spi(host))
                        break;

                /*
                 * If we are not allowed to issue a status command and the
                 * host doesn't support MMC_CAP_WAIT_WHILE_BUSY, we can only
                 * rely on waiting for the stated timeout to be sufficient.
                 */
                if (!send_status) {
                        mmc_delay(timeout_ms);
                        goto out;
                }

                /* Timeout if the device never leaves the program state. */
                if (time_after(jiffies, timeout)) {
                        pr_err("%s: Card stuck in programming state! %s\n",
                                mmc_hostname(host), __func__);
                        err = -ETIMEDOUT;
                        goto out;
                }
        } while (R1_CURRENT_STATE(status) == R1_STATE_PRG);

        err = mmc_switch_status_error(host, status);
out:
        mmc_retune_release(host);

        return err;
}
EXPORT_SYMBOL_GPL(__mmc_switch);

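/*
 * Convenience wrapper around __mmc_switch() that waits for the busy signal,
 * polls the card status and does not ignore CRC errors.
 */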
int mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
                unsigned int timeout_ms)
{
        return __mmc_switch(card, set, index, value, timeout_ms, true, true,
                                false);
}
EXPORT_SYMBOL_GPL(mmc_switch);

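/*
 * Issue the tuning command (CMD21 on an 8-bit bus, CMD19 on a 4-bit bus),
 * read back the block and compare it against the expected tuning pattern for
 * the current bus width.  Returns -EIO when the received block does not
 * match.
 */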
int mmc_send_tuning(struct mmc_host *host)
{
        struct mmc_request mrq = {NULL};
        struct mmc_command cmd = {0};
        struct mmc_data data = {0};
        struct scatterlist sg;
        struct mmc_ios *ios = &host->ios;
        const u8 *tuning_block_pattern;
        int size, err = 0;
        u8 *data_buf;
        u32 opcode;

        if (ios->bus_width == MMC_BUS_WIDTH_8) {
                tuning_block_pattern = tuning_blk_pattern_8bit;
                size = sizeof(tuning_blk_pattern_8bit);
                opcode = MMC_SEND_TUNING_BLOCK_HS200;
        } else if (ios->bus_width == MMC_BUS_WIDTH_4) {
                tuning_block_pattern = tuning_blk_pattern_4bit;
                size = sizeof(tuning_blk_pattern_4bit);
                opcode = MMC_SEND_TUNING_BLOCK;
        } else
                return -EINVAL;

        data_buf = kzalloc(size, GFP_KERNEL);
        if (!data_buf)
                return -ENOMEM;

        mrq.cmd = &cmd;
        mrq.data = &data;

        cmd.opcode = opcode;
        cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;

        data.blksz = size;
        data.blocks = 1;
        data.flags = MMC_DATA_READ;

        /*
         * According to the tuning specs, the tuning process normally takes
         * fewer than 40 executions of CMD19, and the timeout value should be
         * shorter than 150 ms.
         */
        data.timeout_ns = 150 * NSEC_PER_MSEC;

        data.sg = &sg;
        data.sg_len = 1;
        sg_init_one(&sg, data_buf, size);

        mmc_wait_for_req(host, &mrq);

        if (cmd.error) {
                err = cmd.error;
                goto out;
        }

        if (data.error) {
                err = data.error;
                goto out;
        }

        if (memcmp(data_buf, tuning_block_pattern, size))
                err = -EIO;

out:
        kfree(data_buf);
        return err;
}
EXPORT_SYMBOL_GPL(mmc_send_tuning);

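/*
 * Send a bus test pattern (CMD19, BUS_TEST_W) or read it back (CMD14,
 * BUS_TEST_R).  For reads, the received bytes are expected to be the
 * bit-wise inverse of the written test pattern.
 */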
static int
mmc_send_bus_test(struct mmc_card *card, struct mmc_host *host, u8 opcode,
                  u8 len)
{
        struct mmc_request mrq = {NULL};
        struct mmc_command cmd = {0};
        struct mmc_data data = {0};
        struct scatterlist sg;
        u8 *data_buf;
        u8 *test_buf;
        int i, err;
        static u8 testdata_8bit[8] = { 0x55, 0xaa, 0, 0, 0, 0, 0, 0 };
        static u8 testdata_4bit[4] = { 0x5a, 0, 0, 0 };

        /* DMA onto the stack is unsafe/nonportable, so use a kmalloc'd
         * bounce buffer for the transfer instead.
         */
        data_buf = kmalloc(len, GFP_KERNEL);
        if (!data_buf)
                return -ENOMEM;

        if (len == 8)
                test_buf = testdata_8bit;
        else if (len == 4)
                test_buf = testdata_4bit;
        else {
                pr_err("%s: Invalid bus_width %d\n",
                       mmc_hostname(host), len);
                kfree(data_buf);
                return -EINVAL;
        }

        if (opcode == MMC_BUS_TEST_W)
                memcpy(data_buf, test_buf, len);

        mrq.cmd = &cmd;
        mrq.data = &data;
        cmd.opcode = opcode;
        cmd.arg = 0;

        /* NOTE HACK:  the MMC_RSP_SPI_R1 is always correct here, but we
         * rely on callers to never use this with "native" calls for reading
         * CSD or CID.  Native versions of those commands use the R2 type,
         * not R1 plus a data block.
         */
        cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;

        data.blksz = len;
        data.blocks = 1;
        if (opcode == MMC_BUS_TEST_R)
                data.flags = MMC_DATA_READ;
        else
                data.flags = MMC_DATA_WRITE;

        data.sg = &sg;
        data.sg_len = 1;
        mmc_set_data_timeout(&data, card);
        sg_init_one(&sg, data_buf, len);
        mmc_wait_for_req(host, &mrq);
        err = 0;
        if (opcode == MMC_BUS_TEST_R) {
                for (i = 0; i < len / 4; i++)
                        if ((test_buf[i] ^ data_buf[i]) != 0xff) {
                                err = -EIO;
                                break;
                        }
        }
        kfree(data_buf);

        if (cmd.error)
                return cmd.error;
        if (data.error)
                return data.error;

        return err;
}

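/*
 * Verify the data bus width by writing a test pattern with BUS_TEST_W and
 * reading it back with BUS_TEST_R.  A 1-bit bus needs no test.
 */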
int mmc_bus_test(struct mmc_card *card, u8 bus_width)
{
        int err, width;

        if (bus_width == MMC_BUS_WIDTH_8)
                width = 8;
        else if (bus_width == MMC_BUS_WIDTH_4)
                width = 4;
        else if (bus_width == MMC_BUS_WIDTH_1)
                return 0; /* no need for test */
        else
                return -EINVAL;

        /*
         * Ignore errors from BUS_TEST_W.  BUS_TEST_R will fail if there
         * is a problem.  This improves chances that the test will work.
         */
        mmc_send_bus_test(card, card->host, MMC_BUS_TEST_W, width);
        err = mmc_send_bus_test(card, card->host, MMC_BUS_TEST_R, width);
        return err;
}

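/*
 * Issue a High Priority Interrupt (HPI) command to abort an ongoing
 * operation.  The actual opcode (CMD12 or CMD13, with the HPI bit set in the
 * argument) is taken from the card's EXT_CSD.
 */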
int mmc_send_hpi_cmd(struct mmc_card *card, u32 *status)
{
        struct mmc_command cmd = {0};
        unsigned int opcode;
        int err;

        if (!card->ext_csd.hpi) {
                pr_warn("%s: Card doesn't support HPI command\n",
                        mmc_hostname(card->host));
                return -EINVAL;
        }

        opcode = card->ext_csd.hpi_cmd;
        if (opcode == MMC_STOP_TRANSMISSION)
                cmd.flags = MMC_RSP_R1B | MMC_CMD_AC;
        else if (opcode == MMC_SEND_STATUS)
                cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;

        cmd.opcode = opcode;
        cmd.arg = card->rca << 16 | 1;

        err = mmc_wait_for_cmd(card->host, &cmd, 0);
        if (err) {
                pr_warn("%s: error %d interrupting operation. "
                        "HPI command response %#x\n", mmc_hostname(card->host),
                        err, cmd.resp[0]);
                return err;
        }
        if (status)
                *status = cmd.resp[0];

        return 0;
}

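/*
 * The EXT_CSD register is only present on cards whose CSD reports a spec
 * version greater than CSD_SPEC_VER_3 (i.e. MMC v4.0 or later).
 */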
int mmc_can_ext_csd(struct mmc_card *card)
{
        return (card && card->csd.mmca_vsn > CSD_SPEC_VER_3);
}