linux/drivers/mmc/core/core.c
   1/*
   2 *  linux/drivers/mmc/core/core.c
   3 *
   4 *  Copyright (C) 2003-2004 Russell King, All Rights Reserved.
   5 *  SD support Copyright (C) 2004 Ian Molton, All Rights Reserved.
   6 *  Copyright (C) 2005-2008 Pierre Ossman, All Rights Reserved.
   7 *  MMCv4 support Copyright (C) 2006 Philip Langdale, All Rights Reserved.
   8 *
   9 * This program is free software; you can redistribute it and/or modify
  10 * it under the terms of the GNU General Public License version 2 as
  11 * published by the Free Software Foundation.
  12 */
  13#include <linux/module.h>
  14#include <linux/init.h>
  15#include <linux/interrupt.h>
  16#include <linux/completion.h>
  17#include <linux/device.h>
  18#include <linux/delay.h>
  19#include <linux/pagemap.h>
  20#include <linux/err.h>
  21#include <linux/leds.h>
  22#include <linux/scatterlist.h>
  23#include <linux/log2.h>
  24#include <linux/regulator/consumer.h>
  25#include <linux/pm_runtime.h>
  26#include <linux/pm_wakeup.h>
  27#include <linux/suspend.h>
  28#include <linux/fault-inject.h>
  29#include <linux/random.h>
  30#include <linux/slab.h>
  31#include <linux/of.h>
  32
  33#include <linux/mmc/card.h>
  34#include <linux/mmc/host.h>
  35#include <linux/mmc/mmc.h>
  36#include <linux/mmc/sd.h>
  37#include <linux/mmc/slot-gpio.h>
  38
  39#define CREATE_TRACE_POINTS
  40#include <trace/events/mmc.h>
  41
  42#include "core.h"
  43#include "card.h"
  44#include "bus.h"
  45#include "host.h"
  46#include "sdio_bus.h"
  47
  48#include "mmc_ops.h"
  49#include "sd_ops.h"
  50#include "sdio_ops.h"
  51
  52/* If the device is not responding */
  53#define MMC_CORE_TIMEOUT_MS     (10 * 60 * 1000) /* 10 minute timeout */
  54
  55/* The max erase timeout, used when host->max_busy_timeout isn't specified */
  56#define MMC_ERASE_TIMEOUT_MS    (60 * 1000) /* 60 s */
  57
  58static const unsigned freqs[] = { 400000, 300000, 200000, 100000 };
  59
  60/*
   61 * Enabling software CRCs on the data blocks can impose a significant (30%)
   62 * performance cost, and for other reasons may not always be desired.
   63 * So we allow it to be disabled.
  64 */
   65bool use_spi_crc = true;
  66module_param(use_spi_crc, bool, 0);
  67
  68static int mmc_schedule_delayed_work(struct delayed_work *work,
  69                                     unsigned long delay)
  70{
  71        /*
   72         * We use the system_freezable_wq for two reasons. First, it
   73         * allows several work items (though not the same work item) to be
   74         * executed simultaneously. Second, the queue becomes frozen when
   75         * userspace is frozen during system PM.
  76         */
  77        return queue_delayed_work(system_freezable_wq, work, delay);
  78}
  79
  80#ifdef CONFIG_FAIL_MMC_REQUEST
  81
  82/*
  83 * Internal function. Inject random data errors.
  84 * If mmc_data is NULL no errors are injected.
  85 */
  86static void mmc_should_fail_request(struct mmc_host *host,
  87                                    struct mmc_request *mrq)
  88{
  89        struct mmc_command *cmd = mrq->cmd;
  90        struct mmc_data *data = mrq->data;
  91        static const int data_errors[] = {
  92                -ETIMEDOUT,
  93                -EILSEQ,
  94                -EIO,
  95        };
  96
  97        if (!data)
  98                return;
  99
 100        if (cmd->error || data->error ||
 101            !should_fail(&host->fail_mmc_request, data->blksz * data->blocks))
 102                return;
 103
 104        data->error = data_errors[prandom_u32() % ARRAY_SIZE(data_errors)];
 105        data->bytes_xfered = (prandom_u32() % (data->bytes_xfered >> 9)) << 9;
 106}
 107
 108#else /* CONFIG_FAIL_MMC_REQUEST */
 109
 110static inline void mmc_should_fail_request(struct mmc_host *host,
 111                                           struct mmc_request *mrq)
 112{
 113}
 114
 115#endif /* CONFIG_FAIL_MMC_REQUEST */
 116
 117static inline void mmc_complete_cmd(struct mmc_request *mrq)
 118{
 119        if (mrq->cap_cmd_during_tfr && !completion_done(&mrq->cmd_completion))
 120                complete_all(&mrq->cmd_completion);
 121}
 122
 123void mmc_command_done(struct mmc_host *host, struct mmc_request *mrq)
 124{
 125        if (!mrq->cap_cmd_during_tfr)
 126                return;
 127
 128        mmc_complete_cmd(mrq);
 129
 130        pr_debug("%s: cmd done, tfr ongoing (CMD%u)\n",
 131                 mmc_hostname(host), mrq->cmd->opcode);
 132}
 133EXPORT_SYMBOL(mmc_command_done);
 134
 135/**
 136 *      mmc_request_done - finish processing an MMC request
 137 *      @host: MMC host which completed request
  138 *      @mrq: MMC request which completed
 139 *
 140 *      MMC drivers should call this function when they have completed
 141 *      their processing of a request.
 142 */
 143void mmc_request_done(struct mmc_host *host, struct mmc_request *mrq)
 144{
 145        struct mmc_command *cmd = mrq->cmd;
 146        int err = cmd->error;
 147
 148        /* Flag re-tuning needed on CRC errors */
 149        if ((cmd->opcode != MMC_SEND_TUNING_BLOCK &&
 150            cmd->opcode != MMC_SEND_TUNING_BLOCK_HS200) &&
 151            (err == -EILSEQ || (mrq->sbc && mrq->sbc->error == -EILSEQ) ||
 152            (mrq->data && mrq->data->error == -EILSEQ) ||
 153            (mrq->stop && mrq->stop->error == -EILSEQ)))
 154                mmc_retune_needed(host);
 155
 156        if (err && cmd->retries && mmc_host_is_spi(host)) {
 157                if (cmd->resp[0] & R1_SPI_ILLEGAL_COMMAND)
 158                        cmd->retries = 0;
 159        }
 160
 161        if (host->ongoing_mrq == mrq)
 162                host->ongoing_mrq = NULL;
 163
 164        mmc_complete_cmd(mrq);
 165
 166        trace_mmc_request_done(host, mrq);
 167
 168        /*
 169         * We list various conditions for the command to be considered
 170         * properly done:
 171         *
 172         * - There was no error, OK fine then
 173         * - We are not doing some kind of retry
 174         * - The card was removed (...so just complete everything no matter
 175         *   if there are errors or retries)
 176         */
 177        if (!err || !cmd->retries || mmc_card_removed(host->card)) {
 178                mmc_should_fail_request(host, mrq);
 179
 180                if (!host->ongoing_mrq)
 181                        led_trigger_event(host->led, LED_OFF);
 182
 183                if (mrq->sbc) {
 184                        pr_debug("%s: req done <CMD%u>: %d: %08x %08x %08x %08x\n",
 185                                mmc_hostname(host), mrq->sbc->opcode,
 186                                mrq->sbc->error,
 187                                mrq->sbc->resp[0], mrq->sbc->resp[1],
 188                                mrq->sbc->resp[2], mrq->sbc->resp[3]);
 189                }
 190
 191                pr_debug("%s: req done (CMD%u): %d: %08x %08x %08x %08x\n",
 192                        mmc_hostname(host), cmd->opcode, err,
 193                        cmd->resp[0], cmd->resp[1],
 194                        cmd->resp[2], cmd->resp[3]);
 195
 196                if (mrq->data) {
 197                        pr_debug("%s:     %d bytes transferred: %d\n",
 198                                mmc_hostname(host),
 199                                mrq->data->bytes_xfered, mrq->data->error);
 200                }
 201
 202                if (mrq->stop) {
 203                        pr_debug("%s:     (CMD%u): %d: %08x %08x %08x %08x\n",
 204                                mmc_hostname(host), mrq->stop->opcode,
 205                                mrq->stop->error,
 206                                mrq->stop->resp[0], mrq->stop->resp[1],
 207                                mrq->stop->resp[2], mrq->stop->resp[3]);
 208                }
 209        }
 210        /*
 211         * Request starter must handle retries - see
 212         * mmc_wait_for_req_done().
 213         */
 214        if (mrq->done)
 215                mrq->done(mrq);
 216}
 217
 218EXPORT_SYMBOL(mmc_request_done);
 219
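/*
 * Illustrative sketch, not part of the core: roughly how a host driver's
 * completion path (for example its interrupt handler) hands a finished
 * request back via mmc_request_done(). The function name and the way the
 * error is plumbed through are assumptions, not taken from a real driver.
 */
static void example_host_complete_request(struct mmc_host *host,
                                          struct mmc_request *mrq, int err)
{
        mrq->cmd->error = err;
        if (mrq->data)
                mrq->data->bytes_xfered =
                        err ? 0 : mrq->data->blksz * mrq->data->blocks;

        mmc_request_done(host, mrq);
}
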
 220static void __mmc_start_request(struct mmc_host *host, struct mmc_request *mrq)
 221{
 222        int err;
 223
 224        /* Assumes host controller has been runtime resumed by mmc_claim_host */
 225        err = mmc_retune(host);
 226        if (err) {
 227                mrq->cmd->error = err;
 228                mmc_request_done(host, mrq);
 229                return;
 230        }
 231
 232        /*
  233         * For SDIO read/write commands, we must wait while the card
  234         * signals busy, otherwise some SDIO devices won't work properly.
  235         * I/O abort, reset and bus suspend operations are exempt from this.
 236         */
 237        if (sdio_is_io_busy(mrq->cmd->opcode, mrq->cmd->arg) &&
 238            host->ops->card_busy) {
  239                 int tries = 500; /* Wait approx 500ms at maximum */
 240
 241                while (host->ops->card_busy(host) && --tries)
 242                        mmc_delay(1);
 243
 244                if (tries == 0) {
 245                        mrq->cmd->error = -EBUSY;
 246                        mmc_request_done(host, mrq);
 247                        return;
 248                }
 249        }
 250
 251        if (mrq->cap_cmd_during_tfr) {
 252                host->ongoing_mrq = mrq;
 253                /*
  254                 * The retry path could come through here without having waited on
 255                 * cmd_completion, so ensure it is reinitialised.
 256                 */
 257                reinit_completion(&mrq->cmd_completion);
 258        }
 259
 260        trace_mmc_request_start(host, mrq);
 261
 262        if (host->cqe_on)
 263                host->cqe_ops->cqe_off(host);
 264
 265        host->ops->request(host, mrq);
 266}
 267
 268static void mmc_mrq_pr_debug(struct mmc_host *host, struct mmc_request *mrq,
 269                             bool cqe)
 270{
 271        if (mrq->sbc) {
 272                pr_debug("<%s: starting CMD%u arg %08x flags %08x>\n",
 273                         mmc_hostname(host), mrq->sbc->opcode,
 274                         mrq->sbc->arg, mrq->sbc->flags);
 275        }
 276
 277        if (mrq->cmd) {
 278                pr_debug("%s: starting %sCMD%u arg %08x flags %08x\n",
 279                         mmc_hostname(host), cqe ? "CQE direct " : "",
 280                         mrq->cmd->opcode, mrq->cmd->arg, mrq->cmd->flags);
 281        } else if (cqe) {
 282                pr_debug("%s: starting CQE transfer for tag %d blkaddr %u\n",
 283                         mmc_hostname(host), mrq->tag, mrq->data->blk_addr);
 284        }
 285
 286        if (mrq->data) {
 287                pr_debug("%s:     blksz %d blocks %d flags %08x "
 288                        "tsac %d ms nsac %d\n",
 289                        mmc_hostname(host), mrq->data->blksz,
 290                        mrq->data->blocks, mrq->data->flags,
 291                        mrq->data->timeout_ns / 1000000,
 292                        mrq->data->timeout_clks);
 293        }
 294
 295        if (mrq->stop) {
 296                pr_debug("%s:     CMD%u arg %08x flags %08x\n",
 297                         mmc_hostname(host), mrq->stop->opcode,
 298                         mrq->stop->arg, mrq->stop->flags);
 299        }
 300}
 301
 302static int mmc_mrq_prep(struct mmc_host *host, struct mmc_request *mrq)
 303{
 304        unsigned int i, sz = 0;
 305        struct scatterlist *sg;
 306
 307        if (mrq->cmd) {
 308                mrq->cmd->error = 0;
 309                mrq->cmd->mrq = mrq;
 310                mrq->cmd->data = mrq->data;
 311        }
 312        if (mrq->sbc) {
 313                mrq->sbc->error = 0;
 314                mrq->sbc->mrq = mrq;
 315        }
 316        if (mrq->data) {
 317                if (mrq->data->blksz > host->max_blk_size ||
 318                    mrq->data->blocks > host->max_blk_count ||
 319                    mrq->data->blocks * mrq->data->blksz > host->max_req_size)
 320                        return -EINVAL;
 321
 322                for_each_sg(mrq->data->sg, sg, mrq->data->sg_len, i)
 323                        sz += sg->length;
 324                if (sz != mrq->data->blocks * mrq->data->blksz)
 325                        return -EINVAL;
 326
 327                mrq->data->error = 0;
 328                mrq->data->mrq = mrq;
 329                if (mrq->stop) {
 330                        mrq->data->stop = mrq->stop;
 331                        mrq->stop->error = 0;
 332                        mrq->stop->mrq = mrq;
 333                }
 334        }
 335
 336        return 0;
 337}
 338
 339int mmc_start_request(struct mmc_host *host, struct mmc_request *mrq)
 340{
 341        int err;
 342
 343        init_completion(&mrq->cmd_completion);
 344
 345        mmc_retune_hold(host);
 346
 347        if (mmc_card_removed(host->card))
 348                return -ENOMEDIUM;
 349
 350        mmc_mrq_pr_debug(host, mrq, false);
 351
 352        WARN_ON(!host->claimed);
 353
 354        err = mmc_mrq_prep(host, mrq);
 355        if (err)
 356                return err;
 357
 358        led_trigger_event(host->led, LED_FULL);
 359        __mmc_start_request(host, mrq);
 360
 361        return 0;
 362}
 363EXPORT_SYMBOL(mmc_start_request);
 364
 365static void mmc_wait_done(struct mmc_request *mrq)
 366{
 367        complete(&mrq->completion);
 368}
 369
 370static inline void mmc_wait_ongoing_tfr_cmd(struct mmc_host *host)
 371{
 372        struct mmc_request *ongoing_mrq = READ_ONCE(host->ongoing_mrq);
 373
 374        /*
 375         * If there is an ongoing transfer, wait for the command line to become
 376         * available.
 377         */
 378        if (ongoing_mrq && !completion_done(&ongoing_mrq->cmd_completion))
 379                wait_for_completion(&ongoing_mrq->cmd_completion);
 380}
 381
 382static int __mmc_start_req(struct mmc_host *host, struct mmc_request *mrq)
 383{
 384        int err;
 385
 386        mmc_wait_ongoing_tfr_cmd(host);
 387
 388        init_completion(&mrq->completion);
 389        mrq->done = mmc_wait_done;
 390
 391        err = mmc_start_request(host, mrq);
 392        if (err) {
 393                mrq->cmd->error = err;
 394                mmc_complete_cmd(mrq);
 395                complete(&mrq->completion);
 396        }
 397
 398        return err;
 399}
 400
 401void mmc_wait_for_req_done(struct mmc_host *host, struct mmc_request *mrq)
 402{
 403        struct mmc_command *cmd;
 404
 405        while (1) {
 406                wait_for_completion(&mrq->completion);
 407
 408                cmd = mrq->cmd;
 409
 410                /*
  411                 * If the host has timed out waiting for the sanitize
  412                 * to complete, the card might still be in programming
  413                 * state, so let's try to bring the card out of
  414                 * programming state.
 415                 */
 416                if (cmd->sanitize_busy && cmd->error == -ETIMEDOUT) {
 417                        if (!mmc_interrupt_hpi(host->card)) {
 418                                pr_warn("%s: %s: Interrupted sanitize\n",
 419                                        mmc_hostname(host), __func__);
 420                                cmd->error = 0;
 421                                break;
 422                        } else {
 423                                pr_err("%s: %s: Failed to interrupt sanitize\n",
 424                                       mmc_hostname(host), __func__);
 425                        }
 426                }
 427                if (!cmd->error || !cmd->retries ||
 428                    mmc_card_removed(host->card))
 429                        break;
 430
 431                mmc_retune_recheck(host);
 432
 433                pr_debug("%s: req failed (CMD%u): %d, retrying...\n",
 434                         mmc_hostname(host), cmd->opcode, cmd->error);
 435                cmd->retries--;
 436                cmd->error = 0;
 437                __mmc_start_request(host, mrq);
 438        }
 439
 440        mmc_retune_release(host);
 441}
 442EXPORT_SYMBOL(mmc_wait_for_req_done);
 443
 444/*
 445 * mmc_cqe_start_req - Start a CQE request.
 446 * @host: MMC host to start the request
 447 * @mrq: request to start
 448 *
  449 * Start the request, re-tuning first if needed and possible. Returns an error
 450 * code if the request fails to start or -EBUSY if CQE is busy.
 451 */
 452int mmc_cqe_start_req(struct mmc_host *host, struct mmc_request *mrq)
 453{
 454        int err;
 455
 456        /*
 457         * CQE cannot process re-tuning commands. Caller must hold retuning
 458         * while CQE is in use.  Re-tuning can happen here only when CQE has no
 459         * active requests i.e. this is the first.  Note, re-tuning will call
 460         * ->cqe_off().
 461         */
 462        err = mmc_retune(host);
 463        if (err)
 464                goto out_err;
 465
 466        mrq->host = host;
 467
 468        mmc_mrq_pr_debug(host, mrq, true);
 469
 470        err = mmc_mrq_prep(host, mrq);
 471        if (err)
 472                goto out_err;
 473
 474        err = host->cqe_ops->cqe_request(host, mrq);
 475        if (err)
 476                goto out_err;
 477
 478        trace_mmc_request_start(host, mrq);
 479
 480        return 0;
 481
 482out_err:
 483        if (mrq->cmd) {
 484                pr_debug("%s: failed to start CQE direct CMD%u, error %d\n",
 485                         mmc_hostname(host), mrq->cmd->opcode, err);
 486        } else {
 487                pr_debug("%s: failed to start CQE transfer for tag %d, error %d\n",
 488                         mmc_hostname(host), mrq->tag, err);
 489        }
 490        return err;
 491}
 492EXPORT_SYMBOL(mmc_cqe_start_req);
 493
 494/**
 495 *      mmc_cqe_request_done - CQE has finished processing an MMC request
 496 *      @host: MMC host which completed request
 497 *      @mrq: MMC request which completed
 498 *
 499 *      CQE drivers should call this function when they have completed
 500 *      their processing of a request.
 501 */
 502void mmc_cqe_request_done(struct mmc_host *host, struct mmc_request *mrq)
 503{
 504        mmc_should_fail_request(host, mrq);
 505
 506        /* Flag re-tuning needed on CRC errors */
 507        if ((mrq->cmd && mrq->cmd->error == -EILSEQ) ||
 508            (mrq->data && mrq->data->error == -EILSEQ))
 509                mmc_retune_needed(host);
 510
 511        trace_mmc_request_done(host, mrq);
 512
 513        if (mrq->cmd) {
 514                pr_debug("%s: CQE req done (direct CMD%u): %d\n",
 515                         mmc_hostname(host), mrq->cmd->opcode, mrq->cmd->error);
 516        } else {
 517                pr_debug("%s: CQE transfer done tag %d\n",
 518                         mmc_hostname(host), mrq->tag);
 519        }
 520
 521        if (mrq->data) {
 522                pr_debug("%s:     %d bytes transferred: %d\n",
 523                         mmc_hostname(host),
 524                         mrq->data->bytes_xfered, mrq->data->error);
 525        }
 526
 527        mrq->done(mrq);
 528}
 529EXPORT_SYMBOL(mmc_cqe_request_done);
 530
 531/**
 532 *      mmc_cqe_post_req - CQE post process of a completed MMC request
 533 *      @host: MMC host
 534 *      @mrq: MMC request to be processed
 535 */
 536void mmc_cqe_post_req(struct mmc_host *host, struct mmc_request *mrq)
 537{
 538        if (host->cqe_ops->cqe_post_req)
 539                host->cqe_ops->cqe_post_req(host, mrq);
 540}
 541EXPORT_SYMBOL(mmc_cqe_post_req);
 542
 543/* Arbitrary 1 second timeout */
 544#define MMC_CQE_RECOVERY_TIMEOUT        1000
 545
 546/*
 547 * mmc_cqe_recovery - Recover from CQE errors.
 548 * @host: MMC host to recover
 549 *
  550 * Recovery consists of stopping CQE, stopping eMMC, discarding the queue
  551 * in eMMC, and discarding the queue in CQE. CQE must call
 552 * mmc_cqe_request_done() on all requests. An error is returned if the eMMC
 553 * fails to discard its queue.
 554 */
 555int mmc_cqe_recovery(struct mmc_host *host)
 556{
 557        struct mmc_command cmd;
 558        int err;
 559
 560        mmc_retune_hold_now(host);
 561
 562        /*
 563         * Recovery is expected seldom, if at all, but it reduces performance,
 564         * so make sure it is not completely silent.
 565         */
 566        pr_warn("%s: running CQE recovery\n", mmc_hostname(host));
 567
 568        host->cqe_ops->cqe_recovery_start(host);
 569
 570        memset(&cmd, 0, sizeof(cmd));
  571        cmd.opcode       = MMC_STOP_TRANSMISSION;
  572        cmd.flags        = MMC_RSP_R1B | MMC_CMD_AC;
  573        cmd.flags       &= ~MMC_RSP_CRC; /* Ignore CRC */
  574        cmd.busy_timeout = MMC_CQE_RECOVERY_TIMEOUT;
 575        mmc_wait_for_cmd(host, &cmd, 0);
 576
 577        memset(&cmd, 0, sizeof(cmd));
 578        cmd.opcode       = MMC_CMDQ_TASK_MGMT;
 579        cmd.arg          = 1; /* Discard entire queue */
 580        cmd.flags        = MMC_RSP_R1B | MMC_CMD_AC;
 581        cmd.flags       &= ~MMC_RSP_CRC; /* Ignore CRC */
  582        cmd.busy_timeout = MMC_CQE_RECOVERY_TIMEOUT;
 583        err = mmc_wait_for_cmd(host, &cmd, 0);
 584
 585        host->cqe_ops->cqe_recovery_finish(host);
 586
 587        mmc_retune_release(host);
 588
 589        return err;
 590}
 591EXPORT_SYMBOL(mmc_cqe_recovery);
 592
 593/**
 594 *      mmc_is_req_done - Determine if a 'cap_cmd_during_tfr' request is done
 595 *      @host: MMC host
 596 *      @mrq: MMC request
 597 *
 598 *      mmc_is_req_done() is used with requests that have
 599 *      mrq->cap_cmd_during_tfr = true. mmc_is_req_done() must be called after
 600 *      starting a request and before waiting for it to complete. That is,
 601 *      either in between calls to mmc_start_req(), or after mmc_wait_for_req()
 602 *      and before mmc_wait_for_req_done(). If it is called at other times the
 603 *      result is not meaningful.
 604 */
 605bool mmc_is_req_done(struct mmc_host *host, struct mmc_request *mrq)
 606{
 607        return completion_done(&mrq->completion);
 608}
 609EXPORT_SYMBOL(mmc_is_req_done);
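
/*
 * Illustrative sketch of the 'cap_cmd_during_tfr' flow described above.
 * example_send_non_data_cmd() is a hypothetical stand-in for whatever
 * command-only work the caller wants to do while the transfer is running:
 *
 *      mrq->cap_cmd_during_tfr = true;
 *      mmc_wait_for_req(host, mrq);
 *      if (!mmc_is_req_done(host, mrq))
 *              example_send_non_data_cmd(host);
 *      mmc_wait_for_req_done(host, mrq);
 *
 * mmc_wait_for_req() returns once the command is done; the extra commands
 * must not use the DAT lines; mmc_wait_for_req_done() then waits for the
 * whole transfer to finish.
 */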
 610
 611/**
 612 *      mmc_wait_for_req - start a request and wait for completion
 613 *      @host: MMC host to start command
 614 *      @mrq: MMC request to start
 615 *
 616 *      Start a new MMC custom command request for a host, and wait
 617 *      for the command to complete. In the case of 'cap_cmd_during_tfr'
 618 *      requests, the transfer is ongoing and the caller can issue further
 619 *      commands that do not use the data lines, and then wait by calling
 620 *      mmc_wait_for_req_done().
 621 *      Does not attempt to parse the response.
 622 */
 623void mmc_wait_for_req(struct mmc_host *host, struct mmc_request *mrq)
 624{
 625        __mmc_start_req(host, mrq);
 626
 627        if (!mrq->cap_cmd_during_tfr)
 628                mmc_wait_for_req_done(host, mrq);
 629}
 630EXPORT_SYMBOL(mmc_wait_for_req);
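
/*
 * Illustrative sketch, loosely mirroring mmc_test.c: how a single-block
 * read is assembled and handed to mmc_wait_for_req() on an already-claimed
 * host. The function name is hypothetical and the argument is assumed to
 * be a block address (i.e. a high-capacity card); only the core calls are
 * real.
 */
static int example_read_single_block(struct mmc_card *card,
                                     struct scatterlist *sg,
                                     unsigned int blk_addr)
{
        struct mmc_request mrq = {};
        struct mmc_command cmd = {};
        struct mmc_data data = {};

        cmd.opcode = MMC_READ_SINGLE_BLOCK;
        cmd.arg = blk_addr;
        cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;

        data.blksz = 512;
        data.blocks = 1;
        data.flags = MMC_DATA_READ;
        data.sg = sg;
        data.sg_len = 1;

        mrq.cmd = &cmd;
        mrq.data = &data;

        mmc_set_data_timeout(&data, card);
        mmc_wait_for_req(card->host, &mrq);

        return cmd.error ? cmd.error : data.error;
}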
 631
 632/**
 633 *      mmc_wait_for_cmd - start a command and wait for completion
 634 *      @host: MMC host to start command
 635 *      @cmd: MMC command to start
 636 *      @retries: maximum number of retries
 637 *
 638 *      Start a new MMC command for a host, and wait for the command
 639 *      to complete.  Return any error that occurred while the command
 640 *      was executing.  Do not attempt to parse the response.
 641 */
 642int mmc_wait_for_cmd(struct mmc_host *host, struct mmc_command *cmd, int retries)
 643{
 644        struct mmc_request mrq = {};
 645
 646        WARN_ON(!host->claimed);
 647
 648        memset(cmd->resp, 0, sizeof(cmd->resp));
 649        cmd->retries = retries;
 650
 651        mrq.cmd = cmd;
 652        cmd->data = NULL;
 653
 654        mmc_wait_for_req(host, &mrq);
 655
 656        return cmd->error;
 657}
 658
 659EXPORT_SYMBOL(mmc_wait_for_cmd);
 660
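/*
 * Illustrative sketch, mirroring helpers in mmc_ops.c: issue CMD13
 * (SEND_STATUS) with mmc_wait_for_cmd() on an already-claimed, non-SPI
 * host. The wrapper name is hypothetical; the core calls and flags are
 * real.
 */
static int example_send_status(struct mmc_card *card, u32 *status)
{
        struct mmc_command cmd = {};
        int err;

        cmd.opcode = MMC_SEND_STATUS;
        cmd.arg = card->rca << 16;
        cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;

        err = mmc_wait_for_cmd(card->host, &cmd, 3);
        if (err)
                return err;

        if (status)
                *status = cmd.resp[0];

        return 0;
}
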
 661/**
 662 *      mmc_set_data_timeout - set the timeout for a data command
 663 *      @data: data phase for command
 664 *      @card: the MMC card associated with the data transfer
 665 *
 666 *      Computes the data timeout parameters according to the
 667 *      correct algorithm given the card type.
 668 */
 669void mmc_set_data_timeout(struct mmc_data *data, const struct mmc_card *card)
 670{
 671        unsigned int mult;
 672
 673        /*
 674         * SDIO cards only define an upper 1 s limit on access.
 675         */
 676        if (mmc_card_sdio(card)) {
 677                data->timeout_ns = 1000000000;
 678                data->timeout_clks = 0;
 679                return;
 680        }
 681
 682        /*
 683         * SD cards use a 100 multiplier rather than 10
 684         */
 685        mult = mmc_card_sd(card) ? 100 : 10;
 686
 687        /*
 688         * Scale up the multiplier (and therefore the timeout) by
 689         * the r2w factor for writes.
 690         */
 691        if (data->flags & MMC_DATA_WRITE)
 692                mult <<= card->csd.r2w_factor;
 693
 694        data->timeout_ns = card->csd.taac_ns * mult;
 695        data->timeout_clks = card->csd.taac_clks * mult;
 696
 697        /*
 698         * SD cards also have an upper limit on the timeout.
 699         */
 700        if (mmc_card_sd(card)) {
 701                unsigned int timeout_us, limit_us;
 702
 703                timeout_us = data->timeout_ns / 1000;
 704                if (card->host->ios.clock)
 705                        timeout_us += data->timeout_clks * 1000 /
 706                                (card->host->ios.clock / 1000);
 707
 708                if (data->flags & MMC_DATA_WRITE)
 709                        /*
  710                         * The MMC spec says "It is strongly recommended
 711                         * for hosts to implement more than 500ms
 712                         * timeout value even if the card indicates
 713                         * the 250ms maximum busy length."  Even the
 714                         * previous value of 300ms is known to be
 715                         * insufficient for some cards.
 716                         */
 717                        limit_us = 3000000;
 718                else
 719                        limit_us = 100000;
 720
 721                /*
 722                 * SDHC cards always use these fixed values.
 723                 */
 724                if (timeout_us > limit_us) {
 725                        data->timeout_ns = limit_us * 1000;
 726                        data->timeout_clks = 0;
 727                }
 728
 729                /* assign limit value if invalid */
 730                if (timeout_us == 0)
 731                        data->timeout_ns = limit_us * 1000;
 732        }
 733
 734        /*
 735         * Some cards require longer data read timeout than indicated in CSD.
 736         * Address this by setting the read timeout to a "reasonably high"
 737         * value. For the cards tested, 600ms has proven enough. If necessary,
  738 * this value can be increased further for other problematic cards.
 739         */
 740        if (mmc_card_long_read_time(card) && data->flags & MMC_DATA_READ) {
 741                data->timeout_ns = 600000000;
 742                data->timeout_clks = 0;
 743        }
 744
 745        /*
 746         * Some cards need very high timeouts if driven in SPI mode.
 747         * The worst observed timeout was 900ms after writing a
 748         * continuous stream of data until the internal logic
 749         * overflowed.
 750         */
 751        if (mmc_host_is_spi(card->host)) {
 752                if (data->flags & MMC_DATA_WRITE) {
 753                        if (data->timeout_ns < 1000000000)
 754                                data->timeout_ns = 1000000000;  /* 1s */
 755                } else {
 756                        if (data->timeout_ns < 100000000)
 757                                data->timeout_ns =  100000000;  /* 100ms */
 758                }
 759        }
 760}
 761EXPORT_SYMBOL(mmc_set_data_timeout);
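
/*
 * Worked example for the calculation above (illustrative CSD values,
 * assuming a plain SD card with TAAC = 1 ms, NSAC = 0 and R2W_FACTOR = 2):
 * reads get 1 ms * 100 = 100 ms, which is exactly the 100 ms read limit,
 * while writes get 1 ms * (100 << 2) = 400 ms, well under the 3 s write
 * limit, so neither value is clamped.
 */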
 762
 763/**
 764 *      mmc_align_data_size - pads a transfer size to a more optimal value
 765 *      @card: the MMC card associated with the data transfer
 766 *      @sz: original transfer size
 767 *
 768 *      Pads the original data size with a number of extra bytes in
 769 *      order to avoid controller bugs and/or performance hits
 770 *      (e.g. some controllers revert to PIO for certain sizes).
 771 *
 772 *      Returns the improved size, which might be unmodified.
 773 *
 774 *      Note that this function is only relevant when issuing a
 775 *      single scatter gather entry.
 776 */
 777unsigned int mmc_align_data_size(struct mmc_card *card, unsigned int sz)
 778{
 779        /*
 780         * FIXME: We don't have a system for the controller to tell
 781         * the core about its problems yet, so for now we just 32-bit
 782         * align the size.
 783         */
 784        sz = ((sz + 3) / 4) * 4;
 785
 786        return sz;
 787}
 788EXPORT_SYMBOL(mmc_align_data_size);
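
/*
 * Worked example: with the current 32-bit alignment rule,
 * mmc_align_data_size(card, 61) returns 64, while an already aligned
 * size such as 64 is returned unchanged.
 */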
 789
 790/*
 791 * Allow claiming an already claimed host if the context is the same or there is
 792 * no context but the task is the same.
 793 */
 794static inline bool mmc_ctx_matches(struct mmc_host *host, struct mmc_ctx *ctx,
 795                                   struct task_struct *task)
 796{
 797        return host->claimer == ctx ||
 798               (!ctx && task && host->claimer->task == task);
 799}
 800
 801static inline void mmc_ctx_set_claimer(struct mmc_host *host,
 802                                       struct mmc_ctx *ctx,
 803                                       struct task_struct *task)
 804{
 805        if (!host->claimer) {
 806                if (ctx)
 807                        host->claimer = ctx;
 808                else
 809                        host->claimer = &host->default_ctx;
 810        }
 811        if (task)
 812                host->claimer->task = task;
 813}
 814
 815/**
 816 *      __mmc_claim_host - exclusively claim a host
 817 *      @host: mmc host to claim
 818 *      @ctx: context that claims the host or NULL in which case the default
 819 *      context will be used
 820 *      @abort: whether or not the operation should be aborted
 821 *
  822 *      Claim a host for a set of operations.  If @abort is non-NULL and
  823 *      dereferences to a non-zero value, this will return prematurely with
  824 *      that non-zero value without acquiring the lock.  Otherwise it
  825 *      returns zero with the lock held.
 826 */
 827int __mmc_claim_host(struct mmc_host *host, struct mmc_ctx *ctx,
 828                     atomic_t *abort)
 829{
 830        struct task_struct *task = ctx ? NULL : current;
 831        DECLARE_WAITQUEUE(wait, current);
 832        unsigned long flags;
 833        int stop;
 834        bool pm = false;
 835
 836        might_sleep();
 837
 838        add_wait_queue(&host->wq, &wait);
 839        spin_lock_irqsave(&host->lock, flags);
 840        while (1) {
 841                set_current_state(TASK_UNINTERRUPTIBLE);
 842                stop = abort ? atomic_read(abort) : 0;
 843                if (stop || !host->claimed || mmc_ctx_matches(host, ctx, task))
 844                        break;
 845                spin_unlock_irqrestore(&host->lock, flags);
 846                schedule();
 847                spin_lock_irqsave(&host->lock, flags);
 848        }
 849        set_current_state(TASK_RUNNING);
 850        if (!stop) {
 851                host->claimed = 1;
 852                mmc_ctx_set_claimer(host, ctx, task);
 853                host->claim_cnt += 1;
 854                if (host->claim_cnt == 1)
 855                        pm = true;
 856        } else
 857                wake_up(&host->wq);
 858        spin_unlock_irqrestore(&host->lock, flags);
 859        remove_wait_queue(&host->wq, &wait);
 860
 861        if (pm)
 862                pm_runtime_get_sync(mmc_dev(host));
 863
 864        return stop;
 865}
 866EXPORT_SYMBOL(__mmc_claim_host);
 867
 868/**
 869 *      mmc_release_host - release a host
 870 *      @host: mmc host to release
 871 *
  872 *      Release an MMC host, allowing others to claim the host
 873 *      for their operations.
 874 */
 875void mmc_release_host(struct mmc_host *host)
 876{
 877        unsigned long flags;
 878
 879        WARN_ON(!host->claimed);
 880
 881        spin_lock_irqsave(&host->lock, flags);
 882        if (--host->claim_cnt) {
 883                /* Release for nested claim */
 884                spin_unlock_irqrestore(&host->lock, flags);
 885        } else {
 886                host->claimed = 0;
 887                host->claimer->task = NULL;
 888                host->claimer = NULL;
 889                spin_unlock_irqrestore(&host->lock, flags);
 890                wake_up(&host->wq);
 891                pm_runtime_mark_last_busy(mmc_dev(host));
 892                pm_runtime_put_autosuspend(mmc_dev(host));
 893        }
 894}
 895EXPORT_SYMBOL(mmc_release_host);
 896
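/*
 * Illustrative sketch of the usual claim/release bracket around card
 * accesses. mmc_claim_host() is the core.h wrapper around
 * __mmc_claim_host(host, NULL, NULL); the function below is a sketch, not
 * an in-tree user.
 */
static void example_with_host_claimed(struct mmc_card *card)
{
        mmc_claim_host(card->host);

        /*
         * Card and bus operations go here; the claim guarantees exclusive
         * access to the controller until the matching release.
         */

        mmc_release_host(card->host);
}
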
 897/*
 898 * This is a helper function, which fetches a runtime pm reference for the
 899 * card device and also claims the host.
 900 */
 901void mmc_get_card(struct mmc_card *card, struct mmc_ctx *ctx)
 902{
 903        pm_runtime_get_sync(&card->dev);
 904        __mmc_claim_host(card->host, ctx, NULL);
 905}
 906EXPORT_SYMBOL(mmc_get_card);
 907
 908/*
 909 * This is a helper function, which releases the host and drops the runtime
 910 * pm reference for the card device.
 911 */
 912void mmc_put_card(struct mmc_card *card, struct mmc_ctx *ctx)
 913{
 914        struct mmc_host *host = card->host;
 915
 916        WARN_ON(ctx && host->claimer != ctx);
 917
 918        mmc_release_host(host);
 919        pm_runtime_mark_last_busy(&card->dev);
 920        pm_runtime_put_autosuspend(&card->dev);
 921}
 922EXPORT_SYMBOL(mmc_put_card);
 923
 924/*
 925 * Internal function that does the actual ios call to the host driver,
 926 * optionally printing some debug output.
 927 */
 928static inline void mmc_set_ios(struct mmc_host *host)
 929{
 930        struct mmc_ios *ios = &host->ios;
 931
 932        pr_debug("%s: clock %uHz busmode %u powermode %u cs %u Vdd %u "
 933                "width %u timing %u\n",
 934                 mmc_hostname(host), ios->clock, ios->bus_mode,
 935                 ios->power_mode, ios->chip_select, ios->vdd,
 936                 1 << ios->bus_width, ios->timing);
 937
 938        host->ops->set_ios(host, ios);
 939}
 940
 941/*
 942 * Control chip select pin on a host.
 943 */
 944void mmc_set_chip_select(struct mmc_host *host, int mode)
 945{
 946        host->ios.chip_select = mode;
 947        mmc_set_ios(host);
 948}
 949
 950/*
  951 * Set the host clock to "hz", clamped to the host's maximum
  952 * supported frequency.
 953 */
 954void mmc_set_clock(struct mmc_host *host, unsigned int hz)
 955{
 956        WARN_ON(hz && hz < host->f_min);
 957
 958        if (hz > host->f_max)
 959                hz = host->f_max;
 960
 961        host->ios.clock = hz;
 962        mmc_set_ios(host);
 963}
 964
 965int mmc_execute_tuning(struct mmc_card *card)
 966{
 967        struct mmc_host *host = card->host;
 968        u32 opcode;
 969        int err;
 970
 971        if (!host->ops->execute_tuning)
 972                return 0;
 973
 974        if (host->cqe_on)
 975                host->cqe_ops->cqe_off(host);
 976
 977        if (mmc_card_mmc(card))
 978                opcode = MMC_SEND_TUNING_BLOCK_HS200;
 979        else
 980                opcode = MMC_SEND_TUNING_BLOCK;
 981
 982        err = host->ops->execute_tuning(host, opcode);
 983
 984        if (err)
 985                pr_err("%s: tuning execution failed: %d\n",
 986                        mmc_hostname(host), err);
 987        else
 988                mmc_retune_enable(host);
 989
 990        return err;
 991}
 992
 993/*
 994 * Change the bus mode (open drain/push-pull) of a host.
 995 */
 996void mmc_set_bus_mode(struct mmc_host *host, unsigned int mode)
 997{
 998        host->ios.bus_mode = mode;
 999        mmc_set_ios(host);
1000}
1001
1002/*
1003 * Change data bus width of a host.
1004 */
1005void mmc_set_bus_width(struct mmc_host *host, unsigned int width)
1006{
1007        host->ios.bus_width = width;
1008        mmc_set_ios(host);
1009}
1010
1011/*
1012 * Set initial state after a power cycle or a hw_reset.
1013 */
1014void mmc_set_initial_state(struct mmc_host *host)
1015{
1016        if (host->cqe_on)
1017                host->cqe_ops->cqe_off(host);
1018
1019        mmc_retune_disable(host);
1020
1021        if (mmc_host_is_spi(host))
1022                host->ios.chip_select = MMC_CS_HIGH;
1023        else
1024                host->ios.chip_select = MMC_CS_DONTCARE;
1025        host->ios.bus_mode = MMC_BUSMODE_PUSHPULL;
1026        host->ios.bus_width = MMC_BUS_WIDTH_1;
1027        host->ios.timing = MMC_TIMING_LEGACY;
1028        host->ios.drv_type = 0;
1029        host->ios.enhanced_strobe = false;
1030
1031        /*
1032         * Make sure we are in non-enhanced strobe mode before we
1033         * actually enable it in ext_csd.
1034         */
1035        if ((host->caps2 & MMC_CAP2_HS400_ES) &&
1036             host->ops->hs400_enhanced_strobe)
1037                host->ops->hs400_enhanced_strobe(host, &host->ios);
1038
1039        mmc_set_ios(host);
1040}
1041
1042/**
1043 * mmc_vdd_to_ocrbitnum - Convert a voltage to the OCR bit number
1044 * @vdd:        voltage (mV)
1045 * @low_bits:   prefer low bits in boundary cases
1046 *
1047 * This function returns the OCR bit number according to the provided @vdd
 1048 * value. If conversion is not possible, a negative errno value is returned.
1049 *
1050 * Depending on the @low_bits flag the function prefers low or high OCR bits
1051 * on boundary voltages. For example,
1052 * with @low_bits = true, 3300 mV translates to ilog2(MMC_VDD_32_33);
1053 * with @low_bits = false, 3300 mV translates to ilog2(MMC_VDD_33_34);
1054 *
 1055 * Any value in the [1951:1999] range translates to ilog2(MMC_VDD_20_21).
1056 */
1057static int mmc_vdd_to_ocrbitnum(int vdd, bool low_bits)
1058{
1059        const int max_bit = ilog2(MMC_VDD_35_36);
1060        int bit;
1061
1062        if (vdd < 1650 || vdd > 3600)
1063                return -EINVAL;
1064
1065        if (vdd >= 1650 && vdd <= 1950)
1066                return ilog2(MMC_VDD_165_195);
1067
1068        if (low_bits)
1069                vdd -= 1;
1070
1071        /* Base 2000 mV, step 100 mV, bit's base 8. */
1072        bit = (vdd - 2000) / 100 + 8;
1073        if (bit > max_bit)
1074                return max_bit;
1075        return bit;
1076}
1077
1078/**
1079 * mmc_vddrange_to_ocrmask - Convert a voltage range to the OCR mask
1080 * @vdd_min:    minimum voltage value (mV)
1081 * @vdd_max:    maximum voltage value (mV)
1082 *
1083 * This function returns the OCR mask bits according to the provided @vdd_min
1084 * and @vdd_max values. If conversion is not possible the function returns 0.
1085 *
1086 * Notes wrt boundary cases:
1087 * This function sets the OCR bits for all boundary voltages, for example
1088 * [3300:3400] range is translated to MMC_VDD_32_33 | MMC_VDD_33_34 |
1089 * MMC_VDD_34_35 mask.
1090 */
1091u32 mmc_vddrange_to_ocrmask(int vdd_min, int vdd_max)
1092{
1093        u32 mask = 0;
1094
1095        if (vdd_max < vdd_min)
1096                return 0;
1097
1098        /* Prefer high bits for the boundary vdd_max values. */
1099        vdd_max = mmc_vdd_to_ocrbitnum(vdd_max, false);
1100        if (vdd_max < 0)
1101                return 0;
1102
1103        /* Prefer low bits for the boundary vdd_min values. */
1104        vdd_min = mmc_vdd_to_ocrbitnum(vdd_min, true);
1105        if (vdd_min < 0)
1106                return 0;
1107
1108        /* Fill the mask, from max bit to min bit. */
1109        while (vdd_max >= vdd_min)
1110                mask |= 1 << vdd_max--;
1111
1112        return mask;
1113}
1114EXPORT_SYMBOL(mmc_vddrange_to_ocrmask);
1115
1116#ifdef CONFIG_OF
1117
1118/**
1119 * mmc_of_parse_voltage - return mask of supported voltages
 1120 * @np: The device node to be parsed.
1121 * @mask: mask of voltages available for MMC/SD/SDIO
1122 *
1123 * Parse the "voltage-ranges" DT property, returning zero if it is not
1124 * found, negative errno if the voltage-range specification is invalid,
1125 * or one if the voltage-range is specified and successfully parsed.
1126 */
1127int mmc_of_parse_voltage(struct device_node *np, u32 *mask)
1128{
1129        const u32 *voltage_ranges;
1130        int num_ranges, i;
1131
1132        voltage_ranges = of_get_property(np, "voltage-ranges", &num_ranges);
 1133        if (!voltage_ranges) {
 1134                pr_debug("%pOF: voltage-ranges unspecified\n", np);
 1135                return 0;
 1136        }
 1137        num_ranges = num_ranges / sizeof(*voltage_ranges) / 2;
 1138        if (!num_ranges) {
1139                pr_err("%pOF: voltage-ranges empty\n", np);
1140                return -EINVAL;
1141        }
1142
1143        for (i = 0; i < num_ranges; i++) {
1144                const int j = i * 2;
1145                u32 ocr_mask;
1146
1147                ocr_mask = mmc_vddrange_to_ocrmask(
1148                                be32_to_cpu(voltage_ranges[j]),
1149                                be32_to_cpu(voltage_ranges[j + 1]));
1150                if (!ocr_mask) {
1151                        pr_err("%pOF: voltage-range #%d is invalid\n",
1152                                np, i);
1153                        return -EINVAL;
1154                }
1155                *mask |= ocr_mask;
1156        }
1157
1158        return 1;
1159}
1160EXPORT_SYMBOL(mmc_of_parse_voltage);
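
/*
 * Illustrative device-tree fragment for the parser above (values are an
 * assumed example); each <min max> pair is in millivolts:
 *
 *      voltage-ranges = <3300 3400>, <1800 1800>;
 *
 * which yields MMC_VDD_32_33 | MMC_VDD_33_34 | MMC_VDD_34_35 for the first
 * pair (boundary voltages included) plus MMC_VDD_165_195 for the second.
 */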
1161
1162#endif /* CONFIG_OF */
1163
1164static int mmc_of_get_func_num(struct device_node *node)
1165{
1166        u32 reg;
1167        int ret;
1168
1169        ret = of_property_read_u32(node, "reg", &reg);
1170        if (ret < 0)
1171                return ret;
1172
1173        return reg;
1174}
1175
1176struct device_node *mmc_of_find_child_device(struct mmc_host *host,
1177                unsigned func_num)
1178{
1179        struct device_node *node;
1180
1181        if (!host->parent || !host->parent->of_node)
1182                return NULL;
1183
1184        for_each_child_of_node(host->parent->of_node, node) {
1185                if (mmc_of_get_func_num(node) == func_num)
1186                        return node;
1187        }
1188
1189        return NULL;
1190}
1191
1192#ifdef CONFIG_REGULATOR
1193
1194/**
 1195 * mmc_ocrbitnum_to_vdd - Convert an OCR bit number to its voltage
 1196 * @vdd_bit:    OCR bit number
 1197 * @min_uV:     minimum voltage value (uV)
 1198 * @max_uV:     maximum voltage value (uV)
 1199 *
 1200 * This function returns the voltage range according to the provided OCR
 1201 * bit number. If conversion is not possible, a negative errno value is returned.
1202 */
1203static int mmc_ocrbitnum_to_vdd(int vdd_bit, int *min_uV, int *max_uV)
1204{
1205        int             tmp;
1206
1207        if (!vdd_bit)
1208                return -EINVAL;
1209
1210        /*
1211         * REVISIT mmc_vddrange_to_ocrmask() may have set some
1212         * bits this regulator doesn't quite support ... don't
1213         * be too picky, most cards and regulators are OK with
1214         * a 0.1V range goof (it's a small error percentage).
1215         */
1216        tmp = vdd_bit - ilog2(MMC_VDD_165_195);
1217        if (tmp == 0) {
1218                *min_uV = 1650 * 1000;
1219                *max_uV = 1950 * 1000;
1220        } else {
1221                *min_uV = 1900 * 1000 + tmp * 100 * 1000;
1222                *max_uV = *min_uV + 100 * 1000;
1223        }
1224
1225        return 0;
1226}
1227
1228/**
1229 * mmc_regulator_get_ocrmask - return mask of supported voltages
1230 * @supply: regulator to use
1231 *
1232 * This returns either a negative errno, or a mask of voltages that
1233 * can be provided to MMC/SD/SDIO devices using the specified voltage
1234 * regulator.  This would normally be called before registering the
1235 * MMC host adapter.
1236 */
1237int mmc_regulator_get_ocrmask(struct regulator *supply)
1238{
1239        int                     result = 0;
1240        int                     count;
1241        int                     i;
1242        int                     vdd_uV;
1243        int                     vdd_mV;
1244
1245        count = regulator_count_voltages(supply);
1246        if (count < 0)
1247                return count;
1248
1249        for (i = 0; i < count; i++) {
1250                vdd_uV = regulator_list_voltage(supply, i);
1251                if (vdd_uV <= 0)
1252                        continue;
1253
1254                vdd_mV = vdd_uV / 1000;
1255                result |= mmc_vddrange_to_ocrmask(vdd_mV, vdd_mV);
1256        }
1257
1258        if (!result) {
1259                vdd_uV = regulator_get_voltage(supply);
1260                if (vdd_uV <= 0)
1261                        return vdd_uV;
1262
1263                vdd_mV = vdd_uV / 1000;
1264                result = mmc_vddrange_to_ocrmask(vdd_mV, vdd_mV);
1265        }
1266
1267        return result;
1268}
1269EXPORT_SYMBOL_GPL(mmc_regulator_get_ocrmask);
1270
1271/**
1272 * mmc_regulator_set_ocr - set regulator to match host->ios voltage
1273 * @mmc: the host to regulate
1274 * @supply: regulator to use
1275 * @vdd_bit: zero for power off, else a bit number (host->ios.vdd)
1276 *
1277 * Returns zero on success, else negative errno.
1278 *
1279 * MMC host drivers may use this to enable or disable a regulator using
1280 * a particular supply voltage.  This would normally be called from the
1281 * set_ios() method.
1282 */
1283int mmc_regulator_set_ocr(struct mmc_host *mmc,
1284                        struct regulator *supply,
1285                        unsigned short vdd_bit)
1286{
1287        int                     result = 0;
1288        int                     min_uV, max_uV;
1289
1290        if (vdd_bit) {
1291                mmc_ocrbitnum_to_vdd(vdd_bit, &min_uV, &max_uV);
1292
1293                result = regulator_set_voltage(supply, min_uV, max_uV);
1294                if (result == 0 && !mmc->regulator_enabled) {
1295                        result = regulator_enable(supply);
1296                        if (!result)
1297                                mmc->regulator_enabled = true;
1298                }
1299        } else if (mmc->regulator_enabled) {
1300                result = regulator_disable(supply);
1301                if (result == 0)
1302                        mmc->regulator_enabled = false;
1303        }
1304
1305        if (result)
1306                dev_err(mmc_dev(mmc),
1307                        "could not set regulator OCR (%d)\n", result);
1308        return result;
1309}
1310EXPORT_SYMBOL_GPL(mmc_regulator_set_ocr);
1311
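/*
 * Illustrative sketch of the set_ios() usage mentioned above (the function
 * name is hypothetical): follow ios->power_mode and ios->vdd with the vmmc
 * supply obtained via mmc_regulator_get_supply().
 */
static void example_set_ios_power(struct mmc_host *mmc, struct mmc_ios *ios)
{
        if (IS_ERR(mmc->supply.vmmc))
                return;

        mmc_regulator_set_ocr(mmc, mmc->supply.vmmc,
                              ios->power_mode == MMC_POWER_OFF ? 0 : ios->vdd);
}
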
1312static int mmc_regulator_set_voltage_if_supported(struct regulator *regulator,
1313                                                  int min_uV, int target_uV,
1314                                                  int max_uV)
1315{
1316        /*
1317         * Check if supported first to avoid errors since we may try several
1318         * signal levels during power up and don't want to show errors.
1319         */
1320        if (!regulator_is_supported_voltage(regulator, min_uV, max_uV))
1321                return -EINVAL;
1322
1323        return regulator_set_voltage_triplet(regulator, min_uV, target_uV,
1324                                             max_uV);
1325}
1326
1327/**
1328 * mmc_regulator_set_vqmmc - Set VQMMC as per the ios
1329 *
1330 * For 3.3V signaling, we try to match VQMMC to VMMC as closely as possible.
1331 * That will match the behavior of old boards where VQMMC and VMMC were supplied
1332 * by the same supply.  The Bus Operating conditions for 3.3V signaling in the
1333 * SD card spec also define VQMMC in terms of VMMC.
1334 * If this is not possible we'll try the full 2.7-3.6V of the spec.
1335 *
1336 * For 1.2V and 1.8V signaling we'll try to get as close as possible to the
1337 * requested voltage.  This is definitely a good idea for UHS where there's a
1338 * separate regulator on the card that's trying to make 1.8V and it's best if
1339 * we match.
1340 *
1341 * This function is expected to be used by a controller's
1342 * start_signal_voltage_switch() function.
1343 */
1344int mmc_regulator_set_vqmmc(struct mmc_host *mmc, struct mmc_ios *ios)
1345{
1346        struct device *dev = mmc_dev(mmc);
1347        int ret, volt, min_uV, max_uV;
1348
1349        /* If no vqmmc supply then we can't change the voltage */
1350        if (IS_ERR(mmc->supply.vqmmc))
1351                return -EINVAL;
1352
1353        switch (ios->signal_voltage) {
1354        case MMC_SIGNAL_VOLTAGE_120:
1355                return mmc_regulator_set_voltage_if_supported(mmc->supply.vqmmc,
1356                                                1100000, 1200000, 1300000);
1357        case MMC_SIGNAL_VOLTAGE_180:
1358                return mmc_regulator_set_voltage_if_supported(mmc->supply.vqmmc,
1359                                                1700000, 1800000, 1950000);
1360        case MMC_SIGNAL_VOLTAGE_330:
1361                ret = mmc_ocrbitnum_to_vdd(mmc->ios.vdd, &volt, &max_uV);
1362                if (ret < 0)
1363                        return ret;
1364
1365                dev_dbg(dev, "%s: found vmmc voltage range of %d-%duV\n",
1366                        __func__, volt, max_uV);
1367
1368                min_uV = max(volt - 300000, 2700000);
1369                max_uV = min(max_uV + 200000, 3600000);
1370
1371                /*
1372                 * Due to a limitation in the current implementation of
1373                 * regulator_set_voltage_triplet() which is taking the lowest
1374                 * voltage possible if below the target, search for a suitable
1375                 * voltage in two steps and try to stay close to vmmc
1376                 * with a 0.3V tolerance at first.
1377                 */
1378                if (!mmc_regulator_set_voltage_if_supported(mmc->supply.vqmmc,
1379                                                min_uV, volt, max_uV))
1380                        return 0;
1381
1382                return mmc_regulator_set_voltage_if_supported(mmc->supply.vqmmc,
1383                                                2700000, volt, 3600000);
1384        default:
1385                return -EINVAL;
1386        }
1387}
1388EXPORT_SYMBOL_GPL(mmc_regulator_set_vqmmc);
1389
1390#endif /* CONFIG_REGULATOR */
1391
1392int mmc_regulator_get_supply(struct mmc_host *mmc)
1393{
1394        struct device *dev = mmc_dev(mmc);
1395        int ret;
1396
1397        mmc->supply.vmmc = devm_regulator_get_optional(dev, "vmmc");
1398        mmc->supply.vqmmc = devm_regulator_get_optional(dev, "vqmmc");
1399
1400        if (IS_ERR(mmc->supply.vmmc)) {
1401                if (PTR_ERR(mmc->supply.vmmc) == -EPROBE_DEFER)
1402                        return -EPROBE_DEFER;
1403                dev_info(dev, "No vmmc regulator found\n");
1404        } else {
1405                ret = mmc_regulator_get_ocrmask(mmc->supply.vmmc);
1406                if (ret > 0)
1407                        mmc->ocr_avail = ret;
1408                else
1409                        dev_warn(dev, "Failed getting OCR mask: %d\n", ret);
1410        }
1411
1412        if (IS_ERR(mmc->supply.vqmmc)) {
1413                if (PTR_ERR(mmc->supply.vqmmc) == -EPROBE_DEFER)
1414                        return -EPROBE_DEFER;
1415                dev_info(dev, "No vqmmc regulator found\n");
1416        }
1417
1418        return 0;
1419}
1420EXPORT_SYMBOL_GPL(mmc_regulator_get_supply);
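
/*
 * Typical probe-time usage of the helper above (illustrative): a host
 * driver calls mmc_regulator_get_supply(mmc) before mmc_add_host() and
 * propagates a non-zero return (usually -EPROBE_DEFER); on success,
 * mmc->ocr_avail already reflects the vmmc regulator whenever an OCR mask
 * could be derived from it.
 */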
1421
1422/*
1423 * Mask off any voltages we don't support and select
1424 * the lowest voltage
1425 */
1426u32 mmc_select_voltage(struct mmc_host *host, u32 ocr)
1427{
1428        int bit;
1429
1430        /*
1431         * Sanity check the voltages that the card claims to
1432         * support.
1433         */
1434        if (ocr & 0x7F) {
1435                dev_warn(mmc_dev(host),
1436                "card claims to support voltages below defined range\n");
1437                ocr &= ~0x7F;
1438        }
1439
1440        ocr &= host->ocr_avail;
1441        if (!ocr) {
1442                dev_warn(mmc_dev(host), "no support for card's volts\n");
1443                return 0;
1444        }
1445
1446        if (host->caps2 & MMC_CAP2_FULL_PWR_CYCLE) {
1447                bit = ffs(ocr) - 1;
1448                ocr &= 3 << bit;
1449                mmc_power_cycle(host, ocr);
1450        } else {
1451                bit = fls(ocr) - 1;
1452                ocr &= 3 << bit;
1453                if (bit != host->ios.vdd)
1454                        dev_warn(mmc_dev(host), "exceeding card's volts\n");
1455        }
1456
1457        return ocr;
1458}
1459
1460int mmc_set_signal_voltage(struct mmc_host *host, int signal_voltage)
1461{
1462        int err = 0;
1463        int old_signal_voltage = host->ios.signal_voltage;
1464
1465        host->ios.signal_voltage = signal_voltage;
1466        if (host->ops->start_signal_voltage_switch)
1467                err = host->ops->start_signal_voltage_switch(host, &host->ios);
1468
1469        if (err)
1470                host->ios.signal_voltage = old_signal_voltage;
1471
1472        return err;
1473
1474}
1475
1476int mmc_host_set_uhs_voltage(struct mmc_host *host)
1477{
1478        u32 clock;
1479
1480        /*
1481         * During a signal voltage level switch, the clock must be gated
1482         * for 5 ms according to the SD spec
1483         */
1484        clock = host->ios.clock;
1485        host->ios.clock = 0;
1486        mmc_set_ios(host);
1487
1488        if (mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_180))
1489                return -EAGAIN;
1490
1491        /* Keep clock gated for at least 10 ms, though spec only says 5 ms */
1492        mmc_delay(10);
1493        host->ios.clock = clock;
1494        mmc_set_ios(host);
1495
1496        return 0;
1497}
1498
1499int mmc_set_uhs_voltage(struct mmc_host *host, u32 ocr)
1500{
1501        struct mmc_command cmd = {};
1502        int err = 0;
1503
1504        /*
1505         * If we cannot switch voltages, return failure so the caller
1506         * can continue without UHS mode
1507         */
1508        if (!host->ops->start_signal_voltage_switch)
1509                return -EPERM;
1510        if (!host->ops->card_busy)
1511                pr_warn("%s: cannot verify signal voltage switch\n",
1512                        mmc_hostname(host));
1513
1514        cmd.opcode = SD_SWITCH_VOLTAGE;
1515        cmd.arg = 0;
1516        cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
1517
1518        err = mmc_wait_for_cmd(host, &cmd, 0);
1519        if (err)
1520                return err;
1521
1522        if (!mmc_host_is_spi(host) && (cmd.resp[0] & R1_ERROR))
1523                return -EIO;
1524
1525        /*
1526         * The card should drive cmd and dat[0:3] low immediately
1527         * after the response of cmd11, but wait 1 ms to be sure
1528         */
1529        mmc_delay(1);
1530        if (host->ops->card_busy && !host->ops->card_busy(host)) {
1531                err = -EAGAIN;
1532                goto power_cycle;
1533        }
1534
1535        if (mmc_host_set_uhs_voltage(host)) {
1536                /*
1537                 * Voltages may not have been switched, but we've already
1538                 * sent CMD11, so a power cycle is required anyway
1539                 */
1540                err = -EAGAIN;
1541                goto power_cycle;
1542        }
1543
1544        /* Wait for at least 1 ms according to spec */
1545        mmc_delay(1);
1546
1547        /*
1548         * Failure to switch is indicated by the card holding
1549         * dat[0:3] low
1550         */
1551        if (host->ops->card_busy && host->ops->card_busy(host))
1552                err = -EAGAIN;
1553
1554power_cycle:
1555        if (err) {
1556                pr_debug("%s: Signal voltage switch failed, "
1557                        "power cycling card\n", mmc_hostname(host));
1558                mmc_power_cycle(host, ocr);
1559        }
1560
1561        return err;
1562}
1563
1564/*
1565 * Select timing parameters for host.
1566 */
1567void mmc_set_timing(struct mmc_host *host, unsigned int timing)
1568{
1569        host->ios.timing = timing;
1570        mmc_set_ios(host);
1571}
1572
1573/*
1574 * Select appropriate driver type for host.
1575 */
1576void mmc_set_driver_type(struct mmc_host *host, unsigned int drv_type)
1577{
1578        host->ios.drv_type = drv_type;
1579        mmc_set_ios(host);
1580}
1581
1582int mmc_select_drive_strength(struct mmc_card *card, unsigned int max_dtr,
1583                              int card_drv_type, int *drv_type)
1584{
1585        struct mmc_host *host = card->host;
1586        int host_drv_type = SD_DRIVER_TYPE_B;
1587
1588        *drv_type = 0;
1589
1590        if (!host->ops->select_drive_strength)
1591                return 0;
1592
1593        /* Use SD definition of driver strength for hosts */
1594        if (host->caps & MMC_CAP_DRIVER_TYPE_A)
1595                host_drv_type |= SD_DRIVER_TYPE_A;
1596
1597        if (host->caps & MMC_CAP_DRIVER_TYPE_C)
1598                host_drv_type |= SD_DRIVER_TYPE_C;
1599
1600        if (host->caps & MMC_CAP_DRIVER_TYPE_D)
1601                host_drv_type |= SD_DRIVER_TYPE_D;
1602
1603        /*
1604         * The drive strength that the hardware can support
1605         * depends on the board design.  Pass the appropriate
1606         * information and let the hardware specific code
1607         * return what is possible given the options
1608         */
1609        return host->ops->select_drive_strength(card, max_dtr,
1610                                                host_drv_type,
1611                                                card_drv_type,
1612                                                drv_type);
1613}
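
/*
 * Worked example for the negotiation above (illustrative): a host whose
 * board also supports driver type A sets MMC_CAP_DRIVER_TYPE_A, so
 * host_drv_type is passed as SD_DRIVER_TYPE_B | SD_DRIVER_TYPE_A. The
 * ->select_drive_strength() callback is then expected to pick a strength
 * present in both host_drv_type and card_drv_type (type B being the
 * mandatory default), returning the strength to program into the card and
 * reporting the host-side choice through *drv_type, which the core can
 * apply with mmc_set_driver_type().
 */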
1614
1615/*
1616 * Apply power to the MMC stack.  This is a two-stage process.
1617 * First, we enable power to the card without the clock running.
1618 * We then wait a bit for the power to stabilise.  Finally,
1619 * enable the bus drivers and clock to the card.
1620 *
1621 * We must _NOT_ enable the clock prior to power stabilising.
1622 *
1623 * If a host does all the power sequencing itself, ignore the
1624 * initial MMC_POWER_UP stage.
1625 */
1626void mmc_power_up(struct mmc_host *host, u32 ocr)
1627{
1628        if (host->ios.power_mode == MMC_POWER_ON)
1629                return;
1630
1631        host->ios.vdd = fls(ocr) - 1;
1632        host->ios.power_mode = MMC_POWER_UP;
1633        /* Set initial state and call mmc_set_ios */
1634        mmc_set_initial_state(host);
1635
1636        /* Try to set signal voltage to 3.3V but fall back to 1.8v or 1.2v */
1637        if (!mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_330))
1638                dev_dbg(mmc_dev(host), "Initial signal voltage of 3.3v\n");
1639        else if (!mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_180))
1640                dev_dbg(mmc_dev(host), "Initial signal voltage of 1.8v\n");
1641        else if (!mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_120))
1642                dev_dbg(mmc_dev(host), "Initial signal voltage of 1.2v\n");
1643
1644        /*
1645         * This delay should be sufficient to allow the power supply
1646         * to reach the minimum voltage.
1647         */
1648        mmc_delay(10);
1649
1650        host->ios.clock = host->f_init;
1651
1652        host->ios.power_mode = MMC_POWER_ON;
1653        mmc_set_ios(host);
1654
1655        /*
1656         * This delay must be at least 74 clock cycles, or 1 ms, or the
1657         * time required to reach a stable voltage.
1658         */
1659        mmc_delay(10);
1660}
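
/*
 * Worked example for the vdd selection above (illustrative): with
 * ocr = 0x00ff8000, the common 2.7V-3.6V window, fls(ocr) is 24, so
 * host->ios.vdd becomes 23, i.e. the highest voltage range bit that is set
 * in the OCR mask.
 */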
1661
1662void mmc_power_off(struct mmc_host *host)
1663{
1664        if (host->ios.power_mode == MMC_POWER_OFF)
1665                return;
1666
1667        host->ios.clock = 0;
1668        host->ios.vdd = 0;
1669
1670        host->ios.power_mode = MMC_POWER_OFF;
1671        /* Set initial state and call mmc_set_ios */
1672        mmc_set_initial_state(host);
1673
1674        /*
1675         * Some configurations, such as the 802.11 SDIO card in the OLPC
1676         * XO-1.5, require a short delay after poweroff before the card
1677         * can be successfully turned on again.
1678         */
1679        mmc_delay(1);
1680}
1681
1682void mmc_power_cycle(struct mmc_host *host, u32 ocr)
1683{
1684        mmc_power_off(host);
1685        /* Wait at least 1 ms according to SD spec */
1686        mmc_delay(1);
1687        mmc_power_up(host, ocr);
1688}
1689
1690/*
1691 * Cleanup when the last reference to the bus operator is dropped.
1692 */
1693static void __mmc_release_bus(struct mmc_host *host)
1694{
1695        WARN_ON(!host->bus_dead);
1696
1697        host->bus_ops = NULL;
1698}
1699
1700/*
1701 * Increase reference count of bus operator
1702 */
1703static inline void mmc_bus_get(struct mmc_host *host)
1704{
1705        unsigned long flags;
1706
1707        spin_lock_irqsave(&host->lock, flags);
1708        host->bus_refs++;
1709        spin_unlock_irqrestore(&host->lock, flags);
1710}
1711
1712/*
1713 * Decrease reference count of bus operator and free it if
1714 * it is the last reference.
1715 */
1716static inline void mmc_bus_put(struct mmc_host *host)
1717{
1718        unsigned long flags;
1719
1720        spin_lock_irqsave(&host->lock, flags);
1721        host->bus_refs--;
1722        if ((host->bus_refs == 0) && host->bus_ops)
1723                __mmc_release_bus(host);
1724        spin_unlock_irqrestore(&host->lock, flags);
1725}
1726
1727/*
1728 * Assign a mmc bus handler to a host. Only one bus handler may control a
1729 * host at any given time.
1730 */
1731void mmc_attach_bus(struct mmc_host *host, const struct mmc_bus_ops *ops)
1732{
1733        unsigned long flags;
1734
1735        WARN_ON(!host->claimed);
1736
1737        spin_lock_irqsave(&host->lock, flags);
1738
1739        WARN_ON(host->bus_ops);
1740        WARN_ON(host->bus_refs);
1741
1742        host->bus_ops = ops;
1743        host->bus_refs = 1;
1744        host->bus_dead = 0;
1745
1746        spin_unlock_irqrestore(&host->lock, flags);
1747}
1748
1749/*
1750 * Remove the current bus handler from a host.
1751 */
1752void mmc_detach_bus(struct mmc_host *host)
1753{
1754        unsigned long flags;
1755
1756        WARN_ON(!host->claimed);
1757        WARN_ON(!host->bus_ops);
1758
1759        spin_lock_irqsave(&host->lock, flags);
1760
1761        host->bus_dead = 1;
1762
1763        spin_unlock_irqrestore(&host->lock, flags);
1764
1765        mmc_bus_put(host);
1766}
1767
1768static void _mmc_detect_change(struct mmc_host *host, unsigned long delay,
1769                                bool cd_irq)
1770{
1771        /*
1772         * If the device is configured as a wakeup source, we prevent a new
1773         * sleep for 5 s to give user space a chance to consume the event.
1774         */
1775        if (cd_irq && !(host->caps & MMC_CAP_NEEDS_POLL) &&
1776                device_can_wakeup(mmc_dev(host)))
1777                pm_wakeup_event(mmc_dev(host), 5000);
1778
1779        host->detect_change = 1;
1780        mmc_schedule_delayed_work(&host->detect, delay);
1781}
1782
1783/**
1784 *      mmc_detect_change - process change of state on an MMC socket
1785 *      @host: host which changed state.
1786 *      @delay: optional delay to wait before detection (jiffies)
1787 *
1788 *      MMC drivers should call this when they detect a card has been
1789 *      inserted or removed. The MMC layer will confirm that any
1790 *      present card is still functional, and initialize any newly
1791 *      inserted.
1792 */
1793void mmc_detect_change(struct mmc_host *host, unsigned long delay)
1794{
1795        _mmc_detect_change(host, delay, true);
1796}
1797EXPORT_SYMBOL(mmc_detect_change);
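
/*
 * Illustrative sketch (hypothetical host driver code, not part of the core):
 * a host controller driver would typically call mmc_detect_change() from its
 * card-detect interrupt handler, adding a small debounce delay so the slot
 * contacts can settle before the rescan work runs.
 */
static irqreturn_t __maybe_unused mmc_example_cd_irq(int irq, void *dev_id)
{
	struct mmc_host *host = dev_id;

	/* Debounce for ~200 ms before probing the slot */
	mmc_detect_change(host, msecs_to_jiffies(200));

	return IRQ_HANDLED;
}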
1798
1799void mmc_init_erase(struct mmc_card *card)
1800{
1801        unsigned int sz;
1802
1803        if (is_power_of_2(card->erase_size))
1804                card->erase_shift = ffs(card->erase_size) - 1;
1805        else
1806                card->erase_shift = 0;
1807
1808        /*
1809         * It is possible to erase an arbitrarily large area of an SD or MMC
1810         * card.  That is not desirable because it can take a long time
1811         * (minutes), potentially delaying more important I/O, and also the
1812         * timeout calculations become increasingly over-estimated.
1813         * Consequently, 'pref_erase' is defined as a guide to limit erases
1814         * to that size and alignment.
1815         *
1816         * For SD cards that define Allocation Unit size, limit erases to one
1817         * Allocation Unit at a time.
1818         * For MMC, have a stab at a good value and for modern cards it will
1819         * end up being 4MiB. Note that if the value is too small, it can end
1820         * up taking longer to erase. Also note, erase_size is already set to
1821         * High Capacity Erase Size if available when this function is called.
1822         */
1823        if (mmc_card_sd(card) && card->ssr.au) {
1824                card->pref_erase = card->ssr.au;
1825                card->erase_shift = ffs(card->ssr.au) - 1;
1826        } else if (card->erase_size) {
1827                sz = (card->csd.capacity << (card->csd.read_blkbits - 9)) >> 11;
1828                if (sz < 128)
1829                        card->pref_erase = 512 * 1024 / 512;
1830                else if (sz < 512)
1831                        card->pref_erase = 1024 * 1024 / 512;
1832                else if (sz < 1024)
1833                        card->pref_erase = 2 * 1024 * 1024 / 512;
1834                else
1835                        card->pref_erase = 4 * 1024 * 1024 / 512;
1836                if (card->pref_erase < card->erase_size)
1837                        card->pref_erase = card->erase_size;
1838                else {
1839                        sz = card->pref_erase % card->erase_size;
1842                        if (sz)
1843                                card->pref_erase += card->erase_size - sz;
1844                }
1845        } else
1846                card->pref_erase = 0;
1847}
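
/*
 * Worked example for the heuristic above (illustrative numbers): csd.capacity
 * counts blocks of 2^read_blkbits bytes, so shifting left by
 * (read_blkbits - 9) converts it to 512-byte sectors and the >> 11 then
 * yields the card size in MiB. A 4 GiB card gives sz = 4096, which lands in
 * the last branch, so pref_erase = 4 * 1024 * 1024 / 512 = 8192 sectors
 * (4 MiB), rounded up to a multiple of erase_size afterwards if necessary.
 */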
1848
1849static unsigned int mmc_mmc_erase_timeout(struct mmc_card *card,
1850                                          unsigned int arg, unsigned int qty)
1851{
1852        unsigned int erase_timeout;
1853
1854        if (arg == MMC_DISCARD_ARG ||
1855            (arg == MMC_TRIM_ARG && card->ext_csd.rev >= 6)) {
1856                erase_timeout = card->ext_csd.trim_timeout;
1857        } else if (card->ext_csd.erase_group_def & 1) {
1858                /* High Capacity Erase Group Size uses HC timeouts */
1859                if (arg == MMC_TRIM_ARG)
1860                        erase_timeout = card->ext_csd.trim_timeout;
1861                else
1862                        erase_timeout = card->ext_csd.hc_erase_timeout;
1863        } else {
1864                /* CSD Erase Group Size uses write timeout */
1865                unsigned int mult = (10 << card->csd.r2w_factor);
1866                unsigned int timeout_clks = card->csd.taac_clks * mult;
1867                unsigned int timeout_us;
1868
1869                /* Avoid overflow: e.g. taac_ns=80000000 mult=1280 */
1870                if (card->csd.taac_ns < 1000000)
1871                        timeout_us = (card->csd.taac_ns * mult) / 1000;
1872                else
1873                        timeout_us = (card->csd.taac_ns / 1000) * mult;
1874
1875                /*
1876                 * ios.clock is only a target.  The real clock rate might be
1877                 * less but not that much less, so fudge it by multiplying by 2.
1878                 */
1879                timeout_clks <<= 1;
1880                timeout_us += (timeout_clks * 1000) /
1881                              (card->host->ios.clock / 1000);
1882
1883                erase_timeout = timeout_us / 1000;
1884
1885                /*
1886                 * Theoretically, the calculation could underflow so round up
1887                 * to 1ms in that case.
1888                 */
1889                if (!erase_timeout)
1890                        erase_timeout = 1;
1891        }
1892
1893        /* Multiplier for secure operations */
1894        if (arg & MMC_SECURE_ARGS) {
1895                if (arg == MMC_SECURE_ERASE_ARG)
1896                        erase_timeout *= card->ext_csd.sec_erase_mult;
1897                else
1898                        erase_timeout *= card->ext_csd.sec_trim_mult;
1899        }
1900
1901        erase_timeout *= qty;
1902
1903        /*
1904         * Ensure at least a 1 second timeout for SPI as per
1905         * 'mmc_set_data_timeout()'
1906         */
1907        if (mmc_host_is_spi(card->host) && erase_timeout < 1000)
1908                erase_timeout = 1000;
1909
1910        return erase_timeout;
1911}
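
/*
 * Worked example for the CSD-based branch above (illustrative values):
 * with taac_ns = 1000000, taac_clks = 100 and r2w_factor = 4, we get
 * mult = 10 << 4 = 160, timeout_us = (1000000 / 1000) * 160 = 160000 and
 * timeout_clks = 100 * 160 = 16000, doubled to 32000. At a 25 MHz bus clock
 * that adds 32000 * 1000 / 25000 = 1280 us, so the per-group erase_timeout
 * is roughly 161 ms before the qty and secure multipliers are applied.
 */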
1912
1913static unsigned int mmc_sd_erase_timeout(struct mmc_card *card,
1914                                         unsigned int arg,
1915                                         unsigned int qty)
1916{
1917        unsigned int erase_timeout;
1918
1919        if (card->ssr.erase_timeout) {
1920                /* Erase timeout specified in SD Status Register (SSR) */
1921                erase_timeout = card->ssr.erase_timeout * qty +
1922                                card->ssr.erase_offset;
1923        } else {
1924                /*
1925                 * Erase timeout not specified in SD Status Register (SSR) so
1926                 * use 250ms per write block.
1927                 */
1928                erase_timeout = 250 * qty;
1929        }
1930
1931        /* Must not be less than 1 second */
1932        if (erase_timeout < 1000)
1933                erase_timeout = 1000;
1934
1935        return erase_timeout;
1936}
1937
1938static unsigned int mmc_erase_timeout(struct mmc_card *card,
1939                                      unsigned int arg,
1940                                      unsigned int qty)
1941{
1942        if (mmc_card_sd(card))
1943                return mmc_sd_erase_timeout(card, arg, qty);
1944        else
1945                return mmc_mmc_erase_timeout(card, arg, qty);
1946}
1947
1948static int mmc_do_erase(struct mmc_card *card, unsigned int from,
1949                        unsigned int to, unsigned int arg)
1950{
1951        struct mmc_command cmd = {};
1952        unsigned int qty = 0, busy_timeout = 0;
1953        bool use_r1b_resp = false;
1954        unsigned long timeout;
1955        int err;
1956
1957        mmc_retune_hold(card->host);
1958
1959        /*
1960         * qty is used to calculate the erase timeout which depends on how many
1961         * erase groups (or allocation units in SD terminology) are affected.
1962         * We count erasing part of an erase group as one erase group.
1963         * For SD, the allocation units are always a power of 2.  For MMC, the
1964         * erase group size is almost certainly also a power of 2, but the JEDEC
1965         * standard does not seem to insist on that, so we fall back to
1966         * division in that case.  SD may not specify an allocation unit size,
1967         * in which case the timeout is based on the number of write blocks.
1968         *
1969         * Note that the timeout for secure trim 2 will only be correct if the
1970         * number of erase groups specified is the same as the total of all
1971         * preceding secure trim 1 commands.  Since the power may have been
1972         * lost since the secure trim 1 commands occurred, it is generally
1973         * impossible to calculate the secure trim 2 timeout correctly.
1974         */
1975        if (card->erase_shift)
1976                qty += ((to >> card->erase_shift) -
1977                        (from >> card->erase_shift)) + 1;
1978        else if (mmc_card_sd(card))
1979                qty += to - from + 1;
1980        else
1981                qty += ((to / card->erase_size) -
1982                        (from / card->erase_size)) + 1;
1983
1984        if (!mmc_card_blockaddr(card)) {
1985                from <<= 9;
1986                to <<= 9;
1987        }
1988
1989        if (mmc_card_sd(card))
1990                cmd.opcode = SD_ERASE_WR_BLK_START;
1991        else
1992                cmd.opcode = MMC_ERASE_GROUP_START;
1993        cmd.arg = from;
1994        cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
1995        err = mmc_wait_for_cmd(card->host, &cmd, 0);
1996        if (err) {
1997                pr_err("mmc_erase: group start error %d, status %#x\n",
1998                       err, cmd.resp[0]);
1999                err = -EIO;
2000                goto out;
2001        }
2002
2003        memset(&cmd, 0, sizeof(struct mmc_command));
2004        if (mmc_card_sd(card))
2005                cmd.opcode = SD_ERASE_WR_BLK_END;
2006        else
2007                cmd.opcode = MMC_ERASE_GROUP_END;
2008        cmd.arg = to;
2009        cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
2010        err = mmc_wait_for_cmd(card->host, &cmd, 0);
2011        if (err) {
2012                pr_err("mmc_erase: group end error %d, status %#x\n",
2013                       err, cmd.resp[0]);
2014                err = -EIO;
2015                goto out;
2016        }
2017
2018        memset(&cmd, 0, sizeof(struct mmc_command));
2019        cmd.opcode = MMC_ERASE;
2020        cmd.arg = arg;
2021        busy_timeout = mmc_erase_timeout(card, arg, qty);
2022        /*
2023         * If the host controller supports busy signalling and the timeout for
2024         * the erase operation does not exceed the max_busy_timeout, we should
2025         * use an R1B response. Otherwise we need to prevent the host from doing
2026         * hw busy detection, which is done by converting to an R1 response instead.
2027         */
2028        if (card->host->max_busy_timeout &&
2029            busy_timeout > card->host->max_busy_timeout) {
2030                cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
2031        } else {
2032                cmd.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
2033                cmd.busy_timeout = busy_timeout;
2034                use_r1b_resp = true;
2035        }
2036
2037        err = mmc_wait_for_cmd(card->host, &cmd, 0);
2038        if (err) {
2039                pr_err("mmc_erase: erase error %d, status %#x\n",
2040                       err, cmd.resp[0]);
2041                err = -EIO;
2042                goto out;
2043        }
2044
2045        if (mmc_host_is_spi(card->host))
2046                goto out;
2047
2048        /*
2049         * When an R1B response and MMC_CAP_WAIT_WHILE_BUSY are used, the
2050         * polling shall be avoided.
2051         */
2052        if ((card->host->caps & MMC_CAP_WAIT_WHILE_BUSY) && use_r1b_resp)
2053                goto out;
2054
2055        timeout = jiffies + msecs_to_jiffies(busy_timeout);
2056        do {
2057                memset(&cmd, 0, sizeof(struct mmc_command));
2058                cmd.opcode = MMC_SEND_STATUS;
2059                cmd.arg = card->rca << 16;
2060                cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
2061                /* Do not retry else we can't see errors */
2062                err = mmc_wait_for_cmd(card->host, &cmd, 0);
2063                if (err || (cmd.resp[0] & 0xFDF92000)) {
2064                        pr_err("error %d requesting status %#x\n",
2065                                err, cmd.resp[0]);
2066                        err = -EIO;
2067                        goto out;
2068                }
2069
2070                /* Timeout if the device never becomes ready for data and
2071                 * never leaves the program state.
2072                 */
2073                if (time_after(jiffies, timeout)) {
2074                        pr_err("%s: Card stuck in programming state! %s\n",
2075                                mmc_hostname(card->host), __func__);
2076                        err = -EIO;
2077                        goto out;
2078                }
2079
2080        } while (!(cmd.resp[0] & R1_READY_FOR_DATA) ||
2081                 (R1_CURRENT_STATE(cmd.resp[0]) == R1_STATE_PRG));
2082out:
2083        mmc_retune_release(card->host);
2084        return err;
2085}
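
/*
 * Worked example for the qty computation above (illustrative): with an
 * erase_shift of 10 (erase groups of 1024 sectors, i.e. 512 KiB), erasing
 * from = 1000 to to = 3000 gives (3000 >> 10) - (1000 >> 10) + 1 =
 * 2 - 0 + 1 = 3 erase groups, because the range touches groups 0, 1 and 2
 * even though only part of the first and last group is covered.
 */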
2086
2087static unsigned int mmc_align_erase_size(struct mmc_card *card,
2088                                         unsigned int *from,
2089                                         unsigned int *to,
2090                                         unsigned int nr)
2091{
2092        unsigned int from_new = *from, nr_new = nr, rem;
2093
2094        /*
2095         * When the 'card->erase_size' is a power of 2, we can use round_up/down()
2096         * to align the erase size efficiently.
2097         */
2098        if (is_power_of_2(card->erase_size)) {
2099                unsigned int temp = from_new;
2100
2101                from_new = round_up(temp, card->erase_size);
2102                rem = from_new - temp;
2103
2104                if (nr_new > rem)
2105                        nr_new -= rem;
2106                else
2107                        return 0;
2108
2109                nr_new = round_down(nr_new, card->erase_size);
2110        } else {
2111                rem = from_new % card->erase_size;
2112                if (rem) {
2113                        rem = card->erase_size - rem;
2114                        from_new += rem;
2115                        if (nr_new > rem)
2116                                nr_new -= rem;
2117                        else
2118                                return 0;
2119                }
2120
2121                rem = nr_new % card->erase_size;
2122                if (rem)
2123                        nr_new -= rem;
2124        }
2125
2126        if (nr_new == 0)
2127                return 0;
2128
2129        *to = from_new + nr_new;
2130        *from = from_new;
2131
2132        return nr_new;
2133}
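
/*
 * Worked example for the alignment above (illustrative): with a power-of-2
 * erase_size of 1024 sectors, a request of *from = 1000, nr = 5000 is first
 * rounded up to from_new = 1024 (rem = 24, nr_new = 4976) and then rounded
 * down to nr_new = 4096, so the aligned range becomes sectors 1024..5119 and
 * the function returns 4096 as the number of sectors actually erased.
 */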
2134
2135/**
2136 * mmc_erase - erase sectors.
2137 * @card: card to erase
2138 * @from: first sector to erase
2139 * @nr: number of sectors to erase
2140 * @arg: erase command argument (SD supports only %MMC_ERASE_ARG)
2141 *
2142 * Caller must claim host before calling this function.
2143 */
2144int mmc_erase(struct mmc_card *card, unsigned int from, unsigned int nr,
2145              unsigned int arg)
2146{
2147        unsigned int rem, to = from + nr;
2148        int err;
2149
2150        if (!(card->host->caps & MMC_CAP_ERASE) ||
2151            !(card->csd.cmdclass & CCC_ERASE))
2152                return -EOPNOTSUPP;
2153
2154        if (!card->erase_size)
2155                return -EOPNOTSUPP;
2156
2157        if (mmc_card_sd(card) && arg != MMC_ERASE_ARG)
2158                return -EOPNOTSUPP;
2159
2160        if ((arg & MMC_SECURE_ARGS) &&
2161            !(card->ext_csd.sec_feature_support & EXT_CSD_SEC_ER_EN))
2162                return -EOPNOTSUPP;
2163
2164        if ((arg & MMC_TRIM_ARGS) &&
2165            !(card->ext_csd.sec_feature_support & EXT_CSD_SEC_GB_CL_EN))
2166                return -EOPNOTSUPP;
2167
2168        if (arg == MMC_SECURE_ERASE_ARG) {
2169                if (from % card->erase_size || nr % card->erase_size)
2170                        return -EINVAL;
2171        }
2172
2173        if (arg == MMC_ERASE_ARG)
2174                nr = mmc_align_erase_size(card, &from, &to, nr);
2175
2176        if (nr == 0)
2177                return 0;
2178
2179        if (to <= from)
2180                return -EINVAL;
2181
2182        /* 'from' and 'to' are inclusive */
2183        to -= 1;
2184
2185        /*
2186         * Special case where only one erase-group fits in the timeout budget:
2187         * If the region crosses an erase-group boundary in this particular
2188         * case, we will be trimming more than one erase-group, which does not
2189         * fit in the timeout budget of the controller, so we need to split it
2190         * and call mmc_do_erase() twice if necessary. This special case is
2191         * identified by the card->eg_boundary flag.
2192         */
2193        rem = card->erase_size - (from % card->erase_size);
2194        if ((arg & MMC_TRIM_ARGS) && (card->eg_boundary) && (nr > rem)) {
2195                err = mmc_do_erase(card, from, from + rem - 1, arg);
2196                from += rem;
2197                if ((err) || (to <= from))
2198                        return err;
2199        }
2200
2201        return mmc_do_erase(card, from, to, arg);
2202}
2203EXPORT_SYMBOL(mmc_erase);
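
/*
 * Illustrative sketch (hypothetical caller, not part of the core): a block
 * driver deciding which erase argument to use for a discard request would
 * typically prefer DISCARD, then TRIM, and fall back to a plain erase, much
 * like this. The caller must have claimed the host, and mmc_erase() itself
 * rejects argument/card combinations that are not supported.
 */
static int __maybe_unused mmc_example_discard_range(struct mmc_card *card,
						    unsigned int from,
						    unsigned int nr)
{
	unsigned int arg;

	if (mmc_can_discard(card))
		arg = MMC_DISCARD_ARG;
	else if (mmc_can_trim(card))
		arg = MMC_TRIM_ARG;
	else
		arg = MMC_ERASE_ARG;

	return mmc_erase(card, from, nr, arg);
}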
2204
2205int mmc_can_erase(struct mmc_card *card)
2206{
2207        if ((card->host->caps & MMC_CAP_ERASE) &&
2208            (card->csd.cmdclass & CCC_ERASE) && card->erase_size)
2209                return 1;
2210        return 0;
2211}
2212EXPORT_SYMBOL(mmc_can_erase);
2213
2214int mmc_can_trim(struct mmc_card *card)
2215{
2216        if ((card->ext_csd.sec_feature_support & EXT_CSD_SEC_GB_CL_EN) &&
2217            (!(card->quirks & MMC_QUIRK_TRIM_BROKEN)))
2218                return 1;
2219        return 0;
2220}
2221EXPORT_SYMBOL(mmc_can_trim);
2222
2223int mmc_can_discard(struct mmc_card *card)
2224{
2225        /*
2226         * As there's no way to detect the discard support bit at v4.5,
2227         * use the s/w feature support field.
2228         */
2229        if (card->ext_csd.feature_support & MMC_DISCARD_FEATURE)
2230                return 1;
2231        return 0;
2232}
2233EXPORT_SYMBOL(mmc_can_discard);
2234
2235int mmc_can_sanitize(struct mmc_card *card)
2236{
2237        if (!mmc_can_trim(card) && !mmc_can_erase(card))
2238                return 0;
2239        if (card->ext_csd.sec_feature_support & EXT_CSD_SEC_SANITIZE)
2240                return 1;
2241        return 0;
2242}
2243EXPORT_SYMBOL(mmc_can_sanitize);
2244
2245int mmc_can_secure_erase_trim(struct mmc_card *card)
2246{
2247        if ((card->ext_csd.sec_feature_support & EXT_CSD_SEC_ER_EN) &&
2248            !(card->quirks & MMC_QUIRK_SEC_ERASE_TRIM_BROKEN))
2249                return 1;
2250        return 0;
2251}
2252EXPORT_SYMBOL(mmc_can_secure_erase_trim);
2253
2254int mmc_erase_group_aligned(struct mmc_card *card, unsigned int from,
2255                            unsigned int nr)
2256{
2257        if (!card->erase_size)
2258                return 0;
2259        if (from % card->erase_size || nr % card->erase_size)
2260                return 0;
2261        return 1;
2262}
2263EXPORT_SYMBOL(mmc_erase_group_aligned);
2264
2265static unsigned int mmc_do_calc_max_discard(struct mmc_card *card,
2266                                            unsigned int arg)
2267{
2268        struct mmc_host *host = card->host;
2269        unsigned int max_discard, x, y, qty = 0, max_qty, min_qty, timeout;
2270        unsigned int last_timeout = 0;
2271        unsigned int max_busy_timeout = host->max_busy_timeout ?
2272                        host->max_busy_timeout : MMC_ERASE_TIMEOUT_MS;
2273
2274        if (card->erase_shift) {
2275                max_qty = UINT_MAX >> card->erase_shift;
2276                min_qty = card->pref_erase >> card->erase_shift;
2277        } else if (mmc_card_sd(card)) {
2278                max_qty = UINT_MAX;
2279                min_qty = card->pref_erase;
2280        } else {
2281                max_qty = UINT_MAX / card->erase_size;
2282                min_qty = card->pref_erase / card->erase_size;
2283        }
2284
2285        /*
2286         * We should not use 'host->max_busy_timeout' as the only limit when
2287         * deciding the max discard sectors. We should strike a balance that
2288         * improves the erase speed without letting the timeout grow too long.
2289         *
2290         * Here we set 'card->pref_erase' as the minimum discard sectors,
2291         * regardless of the size of 'host->max_busy_timeout', but if the
2293         * 'host->max_busy_timeout' is large enough for more discard sectors,
2294         * then we can continue to increase the max discard sectors until we
2295         * get a balance value. In cases when the 'host->max_busy_timeout'
2296         * isn't specified, use the default max erase timeout.
2297         */
2298        do {
2299                y = 0;
2300                for (x = 1; x && x <= max_qty && max_qty - x >= qty; x <<= 1) {
2301                        timeout = mmc_erase_timeout(card, arg, qty + x);
2302
2303                        if (qty + x > min_qty && timeout > max_busy_timeout)
2304                                break;
2305
2306                        if (timeout < last_timeout)
2307                                break;
2308                        last_timeout = timeout;
2309                        y = x;
2310                }
2311                qty += y;
2312        } while (y);
2313
2314        if (!qty)
2315                return 0;
2316
2317        /*
2318         * When specifying a sector range to trim, chances are we might cross
2319         * an erase-group boundary even if the number of sectors is less than
2320         * one erase-group.
2321         * If we can only fit one erase-group in the controller timeout budget,
2322         * we have to care that erase-group boundaries are not crossed by a
2323         * single trim operation. We flag that special case with "eg_boundary".
2324         * In all other cases we can just decrement qty and pretend that we
2325         * always touch (qty + 1) erase-groups as a simple optimization.
2326         */
2327        if (qty == 1)
2328                card->eg_boundary = 1;
2329        else
2330                qty--;
2331
2332        /* Convert qty to sectors */
2333        if (card->erase_shift)
2334                max_discard = qty << card->erase_shift;
2335        else if (mmc_card_sd(card))
2336                max_discard = qty + 1;
2337        else
2338                max_discard = qty * card->erase_size;
2339
2340        return max_discard;
2341}
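
/*
 * Worked description of the search above (illustrative): within each pass,
 * x doubles (1, 2, 4, ...) and y remembers the largest x for which the erase
 * timeout of qty + x still fits in max_busy_timeout; the limit is only
 * enforced once qty + x has grown past min_qty, which is why
 * card->pref_erase acts as a floor on the result. Each pass then adds y to
 * qty and restarts, so qty converges on the largest number of erase groups
 * whose timeout the host can tolerate.
 */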
2342
2343unsigned int mmc_calc_max_discard(struct mmc_card *card)
2344{
2345        struct mmc_host *host = card->host;
2346        unsigned int max_discard, max_trim;
2347
2348        /*
2349         * Without erase_group_def set, MMC erase timeout depends on clock
2350         * frequency which can change.  In that case, the best choice is
2351         * just the preferred erase size.
2352         */
2353        if (mmc_card_mmc(card) && !(card->ext_csd.erase_group_def & 1))
2354                return card->pref_erase;
2355
2356        max_discard = mmc_do_calc_max_discard(card, MMC_ERASE_ARG);
2357        if (mmc_can_trim(card)) {
2358                max_trim = mmc_do_calc_max_discard(card, MMC_TRIM_ARG);
2359                if (max_trim < max_discard)
2360                        max_discard = max_trim;
2361        } else if (max_discard < card->erase_size) {
2362                max_discard = 0;
2363        }
2364        pr_debug("%s: calculated max. discard sectors %u for timeout %u ms\n",
2365                mmc_hostname(host), max_discard, host->max_busy_timeout ?
2366                host->max_busy_timeout : MMC_ERASE_TIMEOUT_MS);
2367        return max_discard;
2368}
2369EXPORT_SYMBOL(mmc_calc_max_discard);
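
/*
 * Illustrative usage sketch (assumed caller, not part of this file): the
 * block driver that owns the card's request queue would typically cap the
 * queue's discard limit with this value, along the lines of
 *
 *	unsigned int max_discard = mmc_calc_max_discard(card);
 *
 *	if (max_discard)
 *		blk_queue_max_discard_sectors(q, max_discard);
 *
 * where 'q' stands in for the card's request queue; the actual wiring lives
 * in the block driver, not here.
 */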
2370
2371bool mmc_card_is_blockaddr(struct mmc_card *card)
2372{
2373        return card ? mmc_card_blockaddr(card) : false;
2374}
2375EXPORT_SYMBOL(mmc_card_is_blockaddr);
2376
2377int mmc_set_blocklen(struct mmc_card *card, unsigned int blocklen)
2378{
2379        struct mmc_command cmd = {};
2380
2381        if (mmc_card_blockaddr(card) || mmc_card_ddr52(card) ||
2382            mmc_card_hs400(card) || mmc_card_hs400es(card))
2383                return 0;
2384
2385        cmd.opcode = MMC_SET_BLOCKLEN;
2386        cmd.arg = blocklen;
2387        cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
2388        return mmc_wait_for_cmd(card->host, &cmd, 5);
2389}
2390EXPORT_SYMBOL(mmc_set_blocklen);
2391
2392int mmc_set_blockcount(struct mmc_card *card, unsigned int blockcount,
2393                        bool is_rel_write)
2394{
2395        struct mmc_command cmd = {};
2396
2397        cmd.opcode = MMC_SET_BLOCK_COUNT;
2398        cmd.arg = blockcount & 0x0000FFFF;
2399        if (is_rel_write)
2400                cmd.arg |= 1 << 31;
2401        cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
2402        return mmc_wait_for_cmd(card->host, &cmd, 5);
2403}
2404EXPORT_SYMBOL(mmc_set_blockcount);
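
/*
 * Note on the CMD23 argument above (illustrative): bits [15:0] carry the
 * block count and bit 31 flags the transfer as a reliable write, so e.g. a
 * reliable write of 8 blocks is requested with cmd.arg = 0x80000008.
 */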
2405
2406static void mmc_hw_reset_for_init(struct mmc_host *host)
2407{
2408        if (!(host->caps & MMC_CAP_HW_RESET) || !host->ops->hw_reset)
2409                return;
2410        host->ops->hw_reset(host);
2411}
2412
2413int mmc_hw_reset(struct mmc_host *host)
2414{
2415        int ret;
2416
2417        if (!host->card)
2418                return -EINVAL;
2419
2420        mmc_bus_get(host);
2421        if (!host->bus_ops || host->bus_dead || !host->bus_ops->reset) {
2422                mmc_bus_put(host);
2423                return -EOPNOTSUPP;
2424        }
2425
2426        ret = host->bus_ops->reset(host);
2427        mmc_bus_put(host);
2428
2429        if (ret)
2430                pr_warn("%s: tried to reset card, got error %d\n",
2431                        mmc_hostname(host), ret);
2432
2433        return ret;
2434}
2435EXPORT_SYMBOL(mmc_hw_reset);
2436
2437static int mmc_rescan_try_freq(struct mmc_host *host, unsigned freq)
2438{
2439        host->f_init = freq;
2440
2441        pr_debug("%s: %s: trying to init card at %u Hz\n",
2442                mmc_hostname(host), __func__, host->f_init);
2443
2444        mmc_power_up(host, host->ocr_avail);
2445
2446        /*
2447         * Some eMMCs (with VCCQ always on) may not be reset after power up, so
2448         * do a hardware reset if possible.
2449         */
2450        mmc_hw_reset_for_init(host);
2451
2452        /*
2453         * sdio_reset sends CMD52 to reset card.  Since we do not know
2454         * if the card is being re-initialized, just send it.  CMD52
2455         * should be ignored by SD/eMMC cards.
2456         * Skip it if we already know that we do not support SDIO commands
2457         */
2458        if (!(host->caps2 & MMC_CAP2_NO_SDIO))
2459                sdio_reset(host);
2460
2461        mmc_go_idle(host);
2462
2463        if (!(host->caps2 & MMC_CAP2_NO_SD))
2464                mmc_send_if_cond(host, host->ocr_avail);
2465
2466        /* Order's important: probe SDIO, then SD, then MMC */
2467        if (!(host->caps2 & MMC_CAP2_NO_SDIO))
2468                if (!mmc_attach_sdio(host))
2469                        return 0;
2470
2471        if (!(host->caps2 & MMC_CAP2_NO_SD))
2472                if (!mmc_attach_sd(host))
2473                        return 0;
2474
2475        if (!(host->caps2 & MMC_CAP2_NO_MMC))
2476                if (!mmc_attach_mmc(host))
2477                        return 0;
2478
2479        mmc_power_off(host);
2480        return -EIO;
2481}
2482
2483int _mmc_detect_card_removed(struct mmc_host *host)
2484{
2485        int ret;
2486
2487        if (!host->card || mmc_card_removed(host->card))
2488                return 1;
2489
2490        ret = host->bus_ops->alive(host);
2491
2492        /*
2493         * Card detect status and alive check may be out of sync if card is
2494         * removed slowly, when card detect switch changes while card/slot
2495         * pads are still contacted in hardware (refer to "SD Card Mechanical
2496         * Addendum, Appendix C: Card Detection Switch"). So reschedule a
2497         * detect work 200ms later for this case.
2498         */
2499        if (!ret && host->ops->get_cd && !host->ops->get_cd(host)) {
2500                mmc_detect_change(host, msecs_to_jiffies(200));
2501                pr_debug("%s: card removed too slowly\n", mmc_hostname(host));
2502        }
2503
2504        if (ret) {
2505                mmc_card_set_removed(host->card);
2506                pr_debug("%s: card remove detected\n", mmc_hostname(host));
2507        }
2508
2509        return ret;
2510}
2511
2512int mmc_detect_card_removed(struct mmc_host *host)
2513{
2514        struct mmc_card *card = host->card;
2515        int ret;
2516
2517        WARN_ON(!host->claimed);
2518
2519        if (!card)
2520                return 1;
2521
2522        if (!mmc_card_is_removable(host))
2523                return 0;
2524
2525        ret = mmc_card_removed(card);
2526        /*
2527         * The card will be considered unchanged unless we have been asked to
2528         * detect a change or host requires polling to provide card detection.
2529         */
2530        if (!host->detect_change && !(host->caps & MMC_CAP_NEEDS_POLL))
2531                return ret;
2532
2533        host->detect_change = 0;
2534        if (!ret) {
2535                ret = _mmc_detect_card_removed(host);
2536                if (ret && (host->caps & MMC_CAP_NEEDS_POLL)) {
2537                        /*
2538                         * Schedule a detect work as soon as possible to let a
2539                         * rescan handle the card removal.
2540                         */
2541                        cancel_delayed_work(&host->detect);
2542                        _mmc_detect_change(host, 0, false);
2543                }
2544        }
2545
2546        return ret;
2547}
2548EXPORT_SYMBOL(mmc_detect_card_removed);
2549
2550void mmc_rescan(struct work_struct *work)
2551{
2552        struct mmc_host *host =
2553                container_of(work, struct mmc_host, detect.work);
2554        int i;
2555
2556        if (host->rescan_disable)
2557                return;
2558
2559        /* If there is a non-removable card registered, only scan once */
2560        if (!mmc_card_is_removable(host) && host->rescan_entered)
2561                return;
2562        host->rescan_entered = 1;
2563
2564        if (host->trigger_card_event && host->ops->card_event) {
2565                mmc_claim_host(host);
2566                host->ops->card_event(host);
2567                mmc_release_host(host);
2568                host->trigger_card_event = false;
2569        }
2570
2571        mmc_bus_get(host);
2572
2573        /*
2574         * if there is a _removable_ card registered, check whether it is
2575         * still present
2576         */
2577        if (host->bus_ops && !host->bus_dead && mmc_card_is_removable(host))
2578                host->bus_ops->detect(host);
2579
2580        host->detect_change = 0;
2581
2582        /*
2583         * Let mmc_bus_put() free the bus/bus_ops if we've found that
2584         * the card is no longer present.
2585         */
2586        mmc_bus_put(host);
2587        mmc_bus_get(host);
2588
2589        /* if there still is a card present, stop here */
2590        if (host->bus_ops != NULL) {
2591                mmc_bus_put(host);
2592                goto out;
2593        }
2594
2595        /*
2596         * Only we can add a new handler, so it's safe to
2597         * release the lock here.
2598         */
2599        mmc_bus_put(host);
2600
2601        mmc_claim_host(host);
2602        if (mmc_card_is_removable(host) && host->ops->get_cd &&
2603                        host->ops->get_cd(host) == 0) {
2604                mmc_power_off(host);
2605                mmc_release_host(host);
2606                goto out;
2607        }
2608
2609        for (i = 0; i < ARRAY_SIZE(freqs); i++) {
2610                if (!mmc_rescan_try_freq(host, max(freqs[i], host->f_min)))
2611                        break;
2612                if (freqs[i] <= host->f_min)
2613                        break;
2614        }
2615        mmc_release_host(host);
2616
2617 out:
2618        if (host->caps & MMC_CAP_NEEDS_POLL)
2619                mmc_schedule_delayed_work(&host->detect, HZ);
2620}
2621
2622void mmc_start_host(struct mmc_host *host)
2623{
2624        host->f_init = max(freqs[0], host->f_min);
2625        host->rescan_disable = 0;
2626        host->ios.power_mode = MMC_POWER_UNDEFINED;
2627
2628        if (!(host->caps2 & MMC_CAP2_NO_PRESCAN_POWERUP)) {
2629                mmc_claim_host(host);
2630                mmc_power_up(host, host->ocr_avail);
2631                mmc_release_host(host);
2632        }
2633
2634        mmc_gpiod_request_cd_irq(host);
2635        _mmc_detect_change(host, 0, false);
2636}
2637
2638void mmc_stop_host(struct mmc_host *host)
2639{
2640        if (host->slot.cd_irq >= 0) {
2641                if (host->slot.cd_wake_enabled)
2642                        disable_irq_wake(host->slot.cd_irq);
2643                disable_irq(host->slot.cd_irq);
2644        }
2645
2646        host->rescan_disable = 1;
2647        cancel_delayed_work_sync(&host->detect);
2648
2649        /* clear pm flags now and let card drivers set them as needed */
2650        host->pm_flags = 0;
2651
2652        mmc_bus_get(host);
2653        if (host->bus_ops && !host->bus_dead) {
2654                /* Calling bus_ops->remove() with a claimed host can deadlock */
2655                host->bus_ops->remove(host);
2656                mmc_claim_host(host);
2657                mmc_detach_bus(host);
2658                mmc_power_off(host);
2659                mmc_release_host(host);
2660                mmc_bus_put(host);
2661                return;
2662        }
2663        mmc_bus_put(host);
2664
2665        mmc_claim_host(host);
2666        mmc_power_off(host);
2667        mmc_release_host(host);
2668}
2669
2670int mmc_power_save_host(struct mmc_host *host)
2671{
2672        int ret = 0;
2673
2674        pr_debug("%s: %s: powering down\n", mmc_hostname(host), __func__);
2675
2676        mmc_bus_get(host);
2677
2678        if (!host->bus_ops || host->bus_dead) {
2679                mmc_bus_put(host);
2680                return -EINVAL;
2681        }
2682
2683        if (host->bus_ops->power_save)
2684                ret = host->bus_ops->power_save(host);
2685
2686        mmc_bus_put(host);
2687
2688        mmc_power_off(host);
2689
2690        return ret;
2691}
2692EXPORT_SYMBOL(mmc_power_save_host);
2693
2694int mmc_power_restore_host(struct mmc_host *host)
2695{
2696        int ret;
2697
2698        pr_debug("%s: %s: powering up\n", mmc_hostname(host), __func__);
2699
2700        mmc_bus_get(host);
2701
2702        if (!host->bus_ops || host->bus_dead) {
2703                mmc_bus_put(host);
2704                return -EINVAL;
2705        }
2706
2707        mmc_power_up(host, host->card->ocr);
2708        ret = host->bus_ops->power_restore(host);
2709
2710        mmc_bus_put(host);
2711
2712        return ret;
2713}
2714EXPORT_SYMBOL(mmc_power_restore_host);
2715
2716#ifdef CONFIG_PM_SLEEP
2717/* Do the card removal on suspend if the card is assumed removable.
2718 * Do that in the pm notifier while userspace isn't yet frozen, so we will
2719 * be able to sync the card.
2720 */
2721static int mmc_pm_notify(struct notifier_block *notify_block,
2722                        unsigned long mode, void *unused)
2723{
2724        struct mmc_host *host = container_of(
2725                notify_block, struct mmc_host, pm_notify);
2726        unsigned long flags;
2727        int err = 0;
2728
2729        switch (mode) {
2730        case PM_HIBERNATION_PREPARE:
2731        case PM_SUSPEND_PREPARE:
2732        case PM_RESTORE_PREPARE:
2733                spin_lock_irqsave(&host->lock, flags);
2734                host->rescan_disable = 1;
2735                spin_unlock_irqrestore(&host->lock, flags);
2736                cancel_delayed_work_sync(&host->detect);
2737
2738                if (!host->bus_ops)
2739                        break;
2740
2741                /* Validate prerequisites for suspend */
2742                if (host->bus_ops->pre_suspend)
2743                        err = host->bus_ops->pre_suspend(host);
2744                if (!err)
2745                        break;
2746
2747                /* Calling bus_ops->remove() with a claimed host can deadlock */
2748                host->bus_ops->remove(host);
2749                mmc_claim_host(host);
2750                mmc_detach_bus(host);
2751                mmc_power_off(host);
2752                mmc_release_host(host);
2753                host->pm_flags = 0;
2754                break;
2755
2756        case PM_POST_SUSPEND:
2757        case PM_POST_HIBERNATION:
2758        case PM_POST_RESTORE:
2759
2760                spin_lock_irqsave(&host->lock, flags);
2761                host->rescan_disable = 0;
2762                spin_unlock_irqrestore(&host->lock, flags);
2763                _mmc_detect_change(host, 0, false);
2764
2765        }
2766
2767        return 0;
2768}
2769
2770void mmc_register_pm_notifier(struct mmc_host *host)
2771{
2772        host->pm_notify.notifier_call = mmc_pm_notify;
2773        register_pm_notifier(&host->pm_notify);
2774}
2775
2776void mmc_unregister_pm_notifier(struct mmc_host *host)
2777{
2778        unregister_pm_notifier(&host->pm_notify);
2779}
2780#endif
2781
2782static int __init mmc_init(void)
2783{
2784        int ret;
2785
2786        ret = mmc_register_bus();
2787        if (ret)
2788                return ret;
2789
2790        ret = mmc_register_host_class();
2791        if (ret)
2792                goto unregister_bus;
2793
2794        ret = sdio_register_bus();
2795        if (ret)
2796                goto unregister_host_class;
2797
2798        return 0;
2799
2800unregister_host_class:
2801        mmc_unregister_host_class();
2802unregister_bus:
2803        mmc_unregister_bus();
2804        return ret;
2805}
2806
2807static void __exit mmc_exit(void)
2808{
2809        sdio_unregister_bus();
2810        mmc_unregister_host_class();
2811        mmc_unregister_bus();
2812}
2813
2814subsys_initcall(mmc_init);
2815module_exit(mmc_exit);
2816
2817MODULE_LICENSE("GPL");
2818